metadata (dict) | text (string, lengths 60 to 3.49M)
---|---|
{
"source": "jimtheplant/qraphql-booster",
"score": 3
} |
#### File: frazzl/types/entity.py
```python
from ariadne import ObjectType
class Entity:
pass
def create_entity(name, key_resolvers, key_name):
object_type = ObjectType(name)
setattr(object_type, "key_resolver", key_resolvers)
object_type.set_field(key_name, key_resolvers)
return object_type
```
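A minimal usage sketch of `create_entity` above; the type name, key field, and resolver below are hypothetical, and the surrounding frazzl wiring is assumed to exist:

```python
from frazzl.types.entity import create_entity

# Hypothetical example: build a federated "Product" entity keyed by "upc".
def resolve_product_by_key(key):
    # look the product up by its key fields, e.g. {"upc": "123"}
    return {"upc": key["upc"], "name": "placeholder"}

product = create_entity("Product", key_resolvers=resolve_product_by_key, key_name="upc")
# product is an ariadne ObjectType with a `key_resolver` attribute and a resolver
# registered for the "upc" field, ready to be registered with the Service class below.
```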
#### File: frazzl/types/service.py
```python
from ariadne import ObjectType, make_executable_schema, UnionType
from .directive import *
from .scalars import _FieldSet, _Any
ENTITY_QUERY = "_entities(representations: [_Any!]!): [_Entity]!"
SERVICE_TEMPLATE = """
scalar _Any
scalar _FieldSet
# a union of all types that use the @key directive
{union_entities}
type _Service {{
sdl: String
name: String!
}}
{query_str}
extend type Query {{
{entity_query}
_service: _Service!
}}
directive @external on FIELD_DEFINITION
directive @requires(fields: _FieldSet!) on FIELD_DEFINITION
directive @provides(fields: _FieldSet!) on FIELD_DEFINITION
directive @key(fields: _FieldSet!) on OBJECT | INTERFACE
# this is an optional directive discussed below
directive @extends on OBJECT | INTERFACE
"""
class Service:
def __init__(self, service_name, schema, query):
self.gql_type = ObjectType("_Service")
self.service_name = service_name
self.query = query
self.directives = {"key": KeyDirective, "requires": RequiresDirective, "provides": ProvidesDirective,
"external": ExternalDirective, "extends": ExtendsDirective}
self.sdl = schema
self.entities = {}
self.federation_types = [self.gql_type]
self.query.set_field("_service", self.resolve_service)
self.gql_type.set_field("sdl", self.resolve_sdl)
self.gql_type.set_field("name", self.resolve_name)
def resolve_name(self, info, context):
return self.service_name
def resolve_service(self, info, context):
return self.gql_type
def resolve_sdl(self, info, context):
return self.sdl
def resolve_entities(self, obj, *_):
return obj["__typename"]
def _entities(self, *_, representations=None):
rv = []
for representation in representations:
entity_type = self.entities[representation["__typename"]]
key = {k: v for (k, v) in representation.items() if k != "__typename"}
entity = entity_type.key_resolver(key)
entity["__typename"] = representation["__typename"]
rv.append(entity)
return rv
def create_schema_from_template(self):
template = SERVICE_TEMPLATE
entity_union_str = self._make_entity_str()
entity_query_str = ""
if entity_union_str != "":
entity_union = UnionType("_Entity")
entity_union.set_type_resolver(self.resolve_entities)
self.query.set_field("_entities", self._entities)
entity_query_str = ENTITY_QUERY
self.federation_types.append(entity_union)
template = template.format(union_entities=entity_union_str, entity_query=entity_query_str, query_str=self.sdl)
return make_executable_schema(template,
[self.query, _Any, _FieldSet] + [ObjectType(entity_name) for entity_name in
self.entities.keys()] + self.federation_types,
directives=self.directives)
def _make_entity_str(self):
if len(self.entities) <= 0:
return ""
entity_names = list(self.entities.keys())
union_str = "union _Entity = " + entity_names[0]
for entity_name in entity_names[1:]:
union_str += " | "
union_str += entity_name
return union_str
def add_entity(self, entity_type):
self.entities[entity_type.name] = entity_type
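

# --- Hedged usage sketch (not part of the original module) ----------------------
# How the pieces above are presumably wired together: a QueryType, a Service built
# from a name / SDL string / query object, an entity registered on it, and the
# federated schema produced from the template. The SDL string and names below are
# made up for illustration.
#
#   from ariadne import QueryType
#   from frazzl.types.entity import create_entity
#
#   query = QueryType()
#   sdl = ('type Query { topProducts: [Product] } '
#          'type Product @key(fields: "upc") { upc: String! name: String }')
#   service = Service("products", sdl, query)
#   service.add_entity(create_entity("Product", key_resolvers=lambda key: key, key_name="upc"))
#   schema = service.create_schema_from_template()
# ---------------------------------------------------------------------------------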
``` |
{
"source": "jimthethief/network",
"score": 2
} |
#### File: network/network/admin.py
```python
from django.contrib import admin
# import model here
# Register your models here.
"""
# Example of customising admin view:
class UserProfileAdmin(admin.ModelAdmin):
# Display these columns
list_display = ["user", "user_info", "city"]
# Renaming an admin column from desc to user_info:
def user_info(self, obj):
return obj.desc
# Register models:
admin.site.register(User, UserProfileAdmin)
"""
``` |
{
"source": "jimthompson5802/ludwig",
"score": 2
} |
#### File: ludwig/automl/automl.py
```python
import argparse
import copy
import os
import warnings
from typing import Dict, List, Union
import numpy as np
import pandas as pd
import yaml
from ludwig.api import LudwigModel
from ludwig.automl.auto_tune_config import memory_tune_config
from ludwig.automl.base_config import _create_default_config, DatasetInfo, get_dataset_info, infer_type
from ludwig.automl.utils import _ray_init, get_model_name
from ludwig.constants import COMBINER, HYPEROPT, NUMERICAL, TYPE
from ludwig.contrib import add_contrib_callback_args
from ludwig.globals import LUDWIG_VERSION
from ludwig.hyperopt.run import hyperopt
from ludwig.utils.misc_utils import merge_dict
from ludwig.utils.print_utils import print_ludwig
try:
import dask.dataframe as dd
import ray
from ray.tune import ExperimentAnalysis
except ImportError:
raise ImportError(" ray is not installed. " "In order to use auto_train please run " "pip install ludwig[ray]")
OUTPUT_DIR = "."
class AutoTrainResults:
def __init__(self, experiment_analysis: ExperimentAnalysis):
self._experiment_analysis = experiment_analysis
@property
def experiment_analysis(self):
return self._experiment_analysis
@property
def path_to_best_model(self) -> str:
return self._experiment_analysis.best_checkpoint
@property
def best_trial_id(self) -> str:
return self._experiment_analysis.best_trial.trial_id
@property
def best_model(self) -> LudwigModel:
return LudwigModel.load(os.path.join(self.path_to_best_model, "model"))
def auto_train(
dataset: Union[str, pd.DataFrame, dd.core.DataFrame],
target: str,
time_limit_s: Union[int, float],
output_directory: str = OUTPUT_DIR,
tune_for_memory: bool = False,
user_config: Dict = None,
**kwargs,
) -> AutoTrainResults:
"""Main auto train API that first builds configs for each model type (e.g. concat, tabnet, transformer). Then
selects model based on dataset attributes. And finally runs a hyperparameter optimization experiment.
All batch and learning rate tuning is done @ training time.
# Inputs
:param dataset: (str, pd.DataFrame, dd.core.DataFrame) data source to train over.
:param target: (str) name of target feature
    :param time_limit_s: (int, float) total time allocated to auto_train; acts
                         as the stopping parameter
    :param tune_for_memory: (bool) refine the hyperopt search space for the available
                            host / GPU memory
    :param user_config: (dict) user-specified values that override the auto-generated config
    :param output_directory: (str) directory into which to write results, defaults to
                             the current working directory.
# Returns
:return: (AutoTrainResults) results containing hyperopt experiments and best model
"""
config = create_auto_config(dataset, target, time_limit_s, tune_for_memory, user_config, **kwargs)
return train_with_config(dataset, config, output_directory=output_directory, **kwargs)
def create_auto_config(
dataset: Union[str, pd.DataFrame, dd.core.DataFrame, DatasetInfo],
target: Union[str, List[str]],
time_limit_s: Union[int, float],
tune_for_memory: bool,
user_config: Dict = None,
) -> dict:
"""Returns an auto-generated Ludwig config with the intent of training the best model on given given dataset /
target in the given time limit.
# Inputs
:param dataset: (str, pd.DataFrame, dd.core.DataFrame, DatasetInfo) data source to train over.
:param target: (str, List[str]) name of target feature
:param time_limit_s: (int, float) total time allocated to auto_train. acts
as the stopping parameter
    :param tune_for_memory: (bool) refine hyperopt search space for available
                            host / GPU memory
    :param user_config: (dict) user-specified values that override the auto-generated config
# Return
:return: (dict) selected model configuration
"""
default_configs = _create_default_config(dataset, target, time_limit_s)
model_config = _model_select(dataset, default_configs, user_config)
if tune_for_memory:
if ray.is_initialized():
model_config, _ = ray.get(ray.remote(num_cpus=1)(memory_tune_config).remote(model_config, dataset))
else:
model_config, _ = memory_tune_config(model_config, dataset)
return model_config
def train_with_config(
dataset: Union[str, pd.DataFrame, dd.core.DataFrame],
config: dict,
output_directory: str = OUTPUT_DIR,
**kwargs,
) -> AutoTrainResults:
"""Performs hyperparameter optimization with respect to the given config and selects the best model.
# Inputs
    :param dataset: (str, pd.DataFrame, dd.core.DataFrame) data source to train over.
    :param config: (dict) Ludwig configuration to use for training, typically
                   produced by `create_auto_config`.
:param output_directory: (str) directory into which to write results, defaults to
current working directory.
# Returns
:return: (AutoTrainResults) results containing hyperopt experiments and best model
"""
_ray_init()
model_name = get_model_name(config)
hyperopt_results = _train(config, dataset, output_directory=output_directory, model_name=model_name, **kwargs)
# catch edge case where metric_score is nan
# TODO (ASN): Decide how we want to proceed if at least one trial has
# completed
for trial in hyperopt_results.ordered_trials:
if np.isnan(trial.metric_score):
warnings.warn(
"There was an error running the experiment. "
"A trial failed to start. "
"Consider increasing the time budget for experiment. "
)
experiment_analysis = hyperopt_results.experiment_analysis
return AutoTrainResults(experiment_analysis)
def _model_select(
dataset: Union[str, pd.DataFrame, dd.core.DataFrame, DatasetInfo],
default_configs,
user_config,
):
"""Performs model selection based on dataset or user specified model.
Note: Current implementation returns tabnet by default.
"""
dataset_info = get_dataset_info(dataset) if not isinstance(dataset, DatasetInfo) else dataset
fields = dataset_info.fields
base_config = default_configs["base_config"]
# tabular dataset heuristics
if len(fields) > 3:
base_config = merge_dict(base_config, default_configs["combiner"]["tabnet"])
# override combiner heuristic if explicitly provided by user
if user_config is not None:
if "combiner" in user_config.keys():
model_type = user_config["combiner"]["type"]
base_config = merge_dict(base_config, default_configs["combiner"][model_type])
else:
# text heuristics
for input_feature in base_config["input_features"]:
# default text encoder is bert
# TODO (ASN): add more robust heuristics
if input_feature["type"] == "text":
input_feature["encoder"] = "bert"
base_config = merge_dict(base_config, default_configs["text"]["bert"])
# TODO (ASN): add image heuristics
# override and constrain automl config based on user specified values
if user_config is not None:
base_config = merge_dict(base_config, user_config)
# remove all parameters from hyperparameter search that user has
# provided explicit values for
hyperopt_params = copy.deepcopy(base_config["hyperopt"]["parameters"])
        for param_name in hyperopt_params.keys():
            config_section, param = param_name.split(".")[0], param_name.split(".")[1]
            if config_section in user_config.keys():
                if param in user_config[config_section]:
                    del base_config["hyperopt"]["parameters"][param_name]
return base_config
def _train(
config: Dict, dataset: Union[str, pd.DataFrame, dd.core.DataFrame], output_directory: str, model_name: str, **kwargs
):
hyperopt_results = hyperopt(
config, dataset=dataset, output_directory=output_directory, model_name=model_name, backend="local", **kwargs
)
return hyperopt_results
def init_config(
dataset: str,
target: Union[str, List[str]],
time_limit_s: Union[int, float],
tune_for_memory: bool,
hyperopt: bool = False,
output: str = None,
**kwargs,
):
config = create_auto_config(
dataset=dataset,
target=target,
time_limit_s=time_limit_s,
tune_for_memory=tune_for_memory,
)
if HYPEROPT in config and not hyperopt:
del config[HYPEROPT]
if output is None:
print(yaml.safe_dump(config, None, sort_keys=False))
else:
with open(output, "w") as f:
yaml.safe_dump(config, f, sort_keys=False)
def cli_init_config(sys_argv):
parser = argparse.ArgumentParser(
description="This script initializes a valid config from a dataset.",
prog="ludwig init_config",
usage="%(prog)s [options]",
)
parser.add_argument(
"-d",
"--dataset",
type=str,
help="input data file path",
)
parser.add_argument(
"-t",
"--target",
type=str,
help="target(s) to predict as output features of the model",
action="append",
required=False,
)
parser.add_argument(
"--time_limit_s",
type=int,
help="time limit to train the model in seconds when using hyperopt",
required=False,
)
parser.add_argument(
"--tune_for_memory",
type=bool,
help="refine hyperopt search space based on available host / GPU memory",
default=False,
required=False,
)
parser.add_argument(
"--hyperopt",
type=bool,
help="include automl hyperopt config",
default=False,
required=False,
)
parser.add_argument(
"-o",
"--output",
type=str,
help="output initialized YAML config path",
required=False,
)
add_contrib_callback_args(parser)
args = parser.parse_args(sys_argv)
args.callbacks = args.callbacks or []
for callback in args.callbacks:
callback.on_cmdline("init_config", *sys_argv)
print_ludwig("Init Config", LUDWIG_VERSION)
init_config(**vars(args))
```
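A short usage sketch of the `auto_train` API documented above; the CSV path and target column are placeholders, and the `ludwig[ray]` extra is assumed to be installed:

```python
import pandas as pd

from ludwig.automl.automl import auto_train

# hypothetical tabular dataset with a "label" column to predict
df = pd.read_csv("train.csv")

results = auto_train(
    dataset=df,
    target="label",
    time_limit_s=3600,   # stop hyperopt after one hour
    tune_for_memory=False,
)
print(results.best_trial_id)
best_model = results.best_model  # LudwigModel loaded from the best checkpoint
```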
#### File: ludwig/automl/auto_tune_config.py
```python
import copy
from collections import OrderedDict
import psutil
import ray
try:
import GPUtil
except ImportError:
raise ImportError(" ray is not installed. " "In order to use auto_train please run " "pip install ludwig[ray]")
from ludwig.api import LudwigModel
from ludwig.automl.utils import get_available_resources, get_model_name
from ludwig.constants import BATCH_SIZE, COMBINER, HYPEROPT, PREPROCESSING, SPACE, TRAINING, TYPE
from ludwig.data.preprocessing import preprocess_for_training
from ludwig.features.feature_registries import update_config_with_metadata
from ludwig.utils.defaults import merge_with_defaults
# maps variable search space that can be modified to minimum permissible value for the range
RANKED_MODIFIABLE_PARAM_LIST = {
"tabnet": OrderedDict(
{
"training.batch_size": 32,
"combiner.size": 8,
"combiner.output_size": 8,
}
),
"concat": OrderedDict(
{
"training.batch_size": 32,
"combiner.fc_size": 64,
"combiner.num_fc_layers": 1,
}
),
"tabtransformer": OrderedDict(
{
"training.batch_size": 32,
"combiner.num_heads:": 4,
"combiner.output_size": 8,
"combiner.num_layers": 4,
"combiner.num_fc_layers": 1,
}
),
}
BYTES_PER_MiB = 1048576
def get_trainingset_metadata(config, dataset):
(_, _, _, training_set_metadata) = preprocess_for_training(
config, dataset=dataset, preprocessing_params=config[PREPROCESSING]
)
return training_set_metadata
def get_machine_memory():
if ray.is_initialized(): # using ray cluster
@ray.remote(num_gpus=1)
def get_remote_gpu():
gpus = GPUtil.getGPUs()
total_mem_mb = gpus[0].memory_total
return total_mem_mb * BYTES_PER_MiB
@ray.remote(num_cpus=1)
def get_remote_cpu():
total_mem = psutil.virtual_memory().total
return total_mem
resources = get_available_resources() # check if cluster has GPUS
if resources["gpu"] > 0:
machine_mem = ray.get(get_remote_gpu.remote())
else:
machine_mem = ray.get(get_remote_cpu.remote())
else: # not using ray cluster
if GPUtil.getGPUs():
machine_mem = GPUtil.getGPUs()[0].memory_total * BYTES_PER_MiB
else:
machine_mem = psutil.virtual_memory().total
return machine_mem
def compute_memory_usage(config, training_set_metadata) -> int:
update_config_with_metadata(config, training_set_metadata)
lm = LudwigModel.create_model(config)
model_tensors = lm.collect_weights()
total_size = 0
batch_size = config[TRAINING][BATCH_SIZE]
for tnsr in model_tensors:
total_size += tnsr[1].numpy().size * batch_size
total_bytes = total_size * 32 # assumes 32-bit precision
return total_bytes
def sub_new_params(config: dict, new_param_vals: dict):
new_config = copy.deepcopy(config)
for param, val in new_param_vals.items():
config_section = param.split(".")[0]
param_name = param.split(".")[1]
new_config[config_section][param_name] = val
return new_config
def get_new_params(current_param_values, hyperparam_search_space, params_to_modify):
for param, _ in params_to_modify.items():
if hyperparam_search_space[param][SPACE] == "choice":
current_param_values[param] = hyperparam_search_space[param]["categories"][-1]
else:
current_param_values[param] = hyperparam_search_space[param]["upper"]
return current_param_values
def memory_tune_config(config, dataset):
fits_in_memory = False
raw_config = merge_with_defaults(config)
training_set_metadata = get_trainingset_metadata(raw_config, dataset)
modified_hyperparam_search_space = copy.deepcopy(raw_config[HYPEROPT]["parameters"])
params_to_modify = RANKED_MODIFIABLE_PARAM_LIST[get_model_name(raw_config)]
param_list = list(params_to_modify.keys())
current_param_values = {}
max_memory = get_machine_memory()
while param_list is not None:
# compute memory utilization
current_param_values = get_new_params(current_param_values, modified_hyperparam_search_space, params_to_modify)
temp_config = sub_new_params(raw_config, current_param_values)
if compute_memory_usage(temp_config, training_set_metadata) < max_memory:
fits_in_memory = True
break
# check if we have exhausted tuning of current param (e.g. we can no longer reduce the param value)
param, min_value = param_list[0], params_to_modify[param_list[0]]
if param in modified_hyperparam_search_space.keys():
param_space = modified_hyperparam_search_space[param]["space"]
if param_space == "choice":
if (
len(modified_hyperparam_search_space[param]["categories"]) > 2
and modified_hyperparam_search_space[param]["categories"][-2] > min_value
):
modified_hyperparam_search_space[param]["categories"] = modified_hyperparam_search_space[param][
"categories"
][:-1]
else:
param_list.pop(0) # exhausted reduction of this parameter
else:
# reduce by 10%
upper_bound, lower_bound = (
modified_hyperparam_search_space[param]["upper"],
modified_hyperparam_search_space[param]["lower"],
)
reduction_val = (upper_bound - lower_bound) * 0.1
new_upper_bound = upper_bound - reduction_val
if (new_upper_bound) > lower_bound and new_upper_bound > min_value:
modified_hyperparam_search_space[param]["upper"] = new_upper_bound
else:
param_list.pop(0) # exhausted reduction of this parameter
else:
param_list.pop(0) # param not in hyperopt search space
modified_config = copy.deepcopy(config)
modified_config[HYPEROPT]["parameters"] = modified_hyperparam_search_space
return modified_config, fits_in_memory
```
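A toy illustration (values invented) of the two search-space reduction rules applied by `memory_tune_config` above: a `choice` space drops its largest category, while a numeric space shrinks its upper bound by 10% of the range:

```python
search_space = {
    "training.batch_size": {"space": "choice", "categories": [32, 64, 128, 256]},
    "combiner.size": {"space": "randint", "lower": 8, "upper": 64},
}

# "choice" rule: drop the largest candidate (categories assumed sorted ascending)
search_space["training.batch_size"]["categories"] = search_space["training.batch_size"]["categories"][:-1]

# numeric rule: pull the upper bound down by 10% of (upper - lower)
upper = search_space["combiner.size"]["upper"]
lower = search_space["combiner.size"]["lower"]
search_space["combiner.size"]["upper"] = upper - 0.1 * (upper - lower)

print(search_space["training.batch_size"]["categories"])  # [32, 64, 128]
print(search_space["combiner.size"]["upper"])             # 58.4
```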
#### File: ludwig/backend/base.py
```python
from abc import ABC, abstractmethod
from contextlib import contextmanager
from ludwig.data.cache.manager import CacheManager
from ludwig.data.dataframe.pandas import PANDAS
from ludwig.data.dataset.base import DatasetManager
from ludwig.data.dataset.pandas import PandasDatasetManager
from ludwig.models.ecd import ECD
from ludwig.models.predictor import Predictor
from ludwig.models.trainer import Trainer
from ludwig.utils.torch_utils import initialize_pytorch
class Backend(ABC):
def __init__(self, dataset_manager: DatasetManager, cache_dir: str = None):
self._dataset_manager = dataset_manager
self._cache_manager = CacheManager(self._dataset_manager, cache_dir)
@property
def cache(self):
return self._cache_manager
@property
def dataset_manager(self):
return self._dataset_manager
@abstractmethod
def initialize(self):
raise NotImplementedError()
@abstractmethod
def initialize_pytorch(self, *args, **kwargs):
raise NotImplementedError()
@contextmanager
@abstractmethod
def create_trainer(self, **kwargs):
raise NotImplementedError()
@abstractmethod
def sync_model(self, model):
raise NotImplementedError()
@abstractmethod
def broadcast_return(self, fn):
raise NotImplementedError()
@abstractmethod
def is_coordinator(self):
raise NotImplementedError()
@property
@abstractmethod
def df_engine(self):
raise NotImplementedError()
@property
@abstractmethod
def supports_multiprocessing(self):
raise NotImplementedError()
@abstractmethod
def check_lazy_load_supported(self, feature):
raise NotImplementedError()
class LocalPreprocessingMixin:
@property
def df_engine(self):
return PANDAS
@property
def supports_multiprocessing(self):
return True
def check_lazy_load_supported(self, feature):
pass
class LocalTrainingMixin:
def initialize_pytorch(self, *args, **kwargs):
initialize_pytorch(*args, **kwargs)
def create_trainer(self, **kwargs):
return Trainer(**kwargs)
def create_predictor(self, model: ECD, **kwargs):
return Predictor(model, **kwargs)
def sync_model(self, model):
pass
def broadcast_return(self, fn):
return fn()
def is_coordinator(self):
return True
class RemoteTrainingMixin:
def sync_model(self, model):
pass
def broadcast_return(self, fn):
return fn()
def is_coordinator(self):
return True
class LocalBackend(LocalPreprocessingMixin, LocalTrainingMixin, Backend):
def __init__(self, **kwargs):
super().__init__(dataset_manager=PandasDatasetManager(self), **kwargs)
def initialize(self):
pass
```
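A minimal sketch of how the mixins above compose into a concrete backend, mirroring `LocalBackend`; the class name is hypothetical, and only `initialize` is left to implement:

```python
from ludwig.backend.base import Backend, LocalPreprocessingMixin, LocalTrainingMixin
from ludwig.data.dataset.pandas import PandasDatasetManager


class MyLocalBackend(LocalPreprocessingMixin, LocalTrainingMixin, Backend):
    """Hypothetical backend: pandas preprocessing plus local training."""

    def __init__(self, cache_dir=None):
        # the dataset manager and cache manager are set up by the Backend base class
        super().__init__(dataset_manager=PandasDatasetManager(self), cache_dir=cache_dir)

    def initialize(self):
        # nothing to start for purely local execution
        pass
```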
#### File: ludwig/modules/fully_connected_modules.py
```python
import logging
import torch
from torch.nn import BatchNorm1d, BatchNorm2d, Dropout, LayerNorm, Linear, ModuleList
from ludwig.utils.torch_utils import activations, initializer_registry, LudwigModule
logger = logging.getLogger(__name__)
class FCLayer(LudwigModule):
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.input_size])
@property
def output_shape(self) -> torch.Size:
return torch.Size([self.output_size])
def __init__(
self,
input_size,
input_rank=2,
output_size=256,
use_bias=True,
weights_initializer="xavier_uniform",
bias_initializer="zeros",
norm=None,
norm_params=None,
activation="relu",
dropout=0,
):
super().__init__()
self.layers = ModuleList()
self.input_size = input_size
self.output_size = output_size
fc = Linear(in_features=input_size, out_features=output_size, bias=use_bias)
self.layers.append(fc)
weights_initializer = initializer_registry[weights_initializer]
weights_initializer(fc.weight)
if use_bias:
bias_initializer = initializer_registry[bias_initializer]
bias_initializer(fc.bias)
if norm and norm_params is None:
norm_params = {}
if norm == "batch":
# might need if statement for 1d vs 2d? like images
if input_rank == 2:
self.layers.append(BatchNorm1d(output_size, **norm_params))
elif input_rank == 3:
self.layers.append(BatchNorm2d(output_size, **norm_params))
            else:
                raise ValueError(
                    f"input_rank parameter expected to be either 2 or 3, "
                    f"but the value found was {input_rank}."
                )
elif norm == "layer":
self.layers.append(LayerNorm(output_size, **norm_params))
# Dict for activation objects in pytorch?
self.layers.append(activations[activation]())
self.activation_index = len(self.layers) - 1
if dropout > 0:
self.layers.append(Dropout(dropout))
def forward(self, inputs, training=None, mask=None):
self.training = training
batch_size = inputs.shape[0]
hidden = inputs
for i, layer in enumerate(self.layers):
hidden = layer(hidden)
return hidden
class FCStack(LudwigModule):
def __init__(
self,
first_layer_input_size,
layers=None,
num_layers=1,
default_input_rank=2,
default_fc_size=256,
default_use_bias=True,
default_weights_initializer="xavier_uniform",
default_bias_initializer="zeros",
default_norm=None,
default_norm_params=None,
default_activation="relu",
default_dropout=0,
residual=False,
**kwargs,
):
super().__init__()
self.input_size = first_layer_input_size
if layers is None:
self.layers = []
for i in range(num_layers):
self.layers.append({})
else:
self.layers = layers
if len(self.layers) > 0:
self.layers[0]["input_size"] = first_layer_input_size
for i, layer in enumerate(self.layers):
if i != 0:
layer["input_size"] = self.layers[i - 1]["fc_size"]
if "input_rank" not in layer:
layer["input_rank"] = default_input_rank
if "fc_size" not in layer:
layer["fc_size"] = default_fc_size
if "use_bias" not in layer:
layer["use_bias"] = default_use_bias
if "weights_initializer" not in layer:
layer["weights_initializer"] = default_weights_initializer
if "bias_initializer" not in layer:
layer["bias_initializer"] = default_bias_initializer
if "norm" not in layer:
layer["norm"] = default_norm
if "norm_params" not in layer:
layer["norm_params"] = default_norm_params
if "activation" not in layer:
layer["activation"] = default_activation
if "dropout" not in layer:
layer["dropout"] = default_dropout
self.stack = ModuleList()
for i, layer in enumerate(self.layers):
self.stack.append(
FCLayer(
input_size=layer["input_size"],
input_rank=layer["input_rank"],
output_size=layer["fc_size"],
use_bias=layer["use_bias"],
weights_initializer=layer["weights_initializer"],
bias_initializer=layer["bias_initializer"],
norm=layer["norm"],
norm_params=layer["norm_params"],
activation=layer["activation"],
dropout=layer["dropout"],
)
)
self.residual = residual
def forward(self, inputs, mask=None):
hidden = inputs
prev_fc_layer_size = self.input_size
for layer in self.stack:
out = layer(hidden)
            if self.residual and layer.output_size == prev_fc_layer_size:
hidden = hidden + out
else:
hidden = out
prev_fc_layer_size = layer.layers[0].out_features
return hidden
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.input_size])
@property
def output_shape(self) -> torch.Size:
if len(self.stack) > 0:
return self.stack[-1].output_shape
return torch.Size([self.input_size])
```
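A small sketch of using the `FCStack` defined above (assuming the module's ludwig imports resolve): a two-layer stack projecting 16 input features to 64 and then 32 units, applied to a random batch:

```python
import torch

from ludwig.modules.fully_connected_modules import FCStack

stack = FCStack(
    first_layer_input_size=16,
    layers=[{"fc_size": 64}, {"fc_size": 32, "norm": "layer"}],
)
x = torch.randn(8, 16)     # batch of 8 rows with 16 features
y = stack(x)
print(y.shape)             # torch.Size([8, 32])
print(stack.output_shape)  # torch.Size([32])
```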
#### File: ludwig/ludwig/serve.py
```python
import argparse
import io
import json
import logging
import os
import sys
import tempfile
import pandas as pd
import torch
from torchvision.io import decode_image
from ludwig.api import LudwigModel
from ludwig.constants import AUDIO, COLUMN
from ludwig.contrib import add_contrib_callback_args
from ludwig.globals import LUDWIG_VERSION
from ludwig.utils.print_utils import logging_level_registry, print_ludwig
logger = logging.getLogger(__name__)
try:
import uvicorn
from fastapi import FastAPI
from starlette.datastructures import UploadFile
from starlette.middleware import Middleware
from starlette.middleware.cors import CORSMiddleware
from starlette.requests import Request
from starlette.responses import JSONResponse
except ImportError as e:
logger.error(e)
logger.error(
" fastapi and other serving dependencies cannot be loaded"
"and may have not been installed. "
"In order to install all serving dependencies run "
"pip install ludwig[serve]"
)
sys.exit(-1)
ALL_FEATURES_PRESENT_ERROR = {"error": "entry must contain all input features"}
COULD_NOT_RUN_INFERENCE_ERROR = {"error": "Unexpected Error: could not run inference on model"}
def server(model, allowed_origins=None):
middleware = [Middleware(CORSMiddleware, allow_origins=allowed_origins)] if allowed_origins else None
app = FastAPI(middleware=middleware)
input_features = {f[COLUMN] for f in model.config["input_features"]}
@app.get("/")
def check_health():
return JSONResponse({"message": "Ludwig server is up"})
@app.post("/predict")
async def predict(request: Request):
try:
form = await request.form()
entry, files = convert_input(form, model.model.input_features)
except Exception:
logger.exception("Failed to parse predict form")
return JSONResponse(COULD_NOT_RUN_INFERENCE_ERROR, status_code=500)
try:
if (entry.keys() & input_features) != input_features:
return JSONResponse(ALL_FEATURES_PRESENT_ERROR, status_code=400)
try:
resp, _ = model.predict(dataset=[entry], data_format=dict)
resp = resp.to_dict("records")[0]
return JSONResponse(resp)
except Exception as exc:
logger.exception(f"Failed to run predict: {exc}")
return JSONResponse(COULD_NOT_RUN_INFERENCE_ERROR, status_code=500)
finally:
for f in files:
os.remove(f.name)
@app.post("/batch_predict")
async def batch_predict(request: Request):
try:
form = await request.form()
data, files = convert_batch_input(form, model.model.input_features)
data_df = pd.DataFrame.from_records(data["data"], index=data.get("index"), columns=data["columns"])
except Exception:
logger.exception("Failed to parse batch_predict form")
return JSONResponse(COULD_NOT_RUN_INFERENCE_ERROR, status_code=500)
if (set(data_df.columns) & input_features) != input_features:
return JSONResponse(ALL_FEATURES_PRESENT_ERROR, status_code=400)
try:
resp, _ = model.predict(dataset=data_df)
resp = resp.to_dict("split")
return JSONResponse(resp)
except Exception:
logger.exception("Failed to run batch_predict: {}")
return JSONResponse(COULD_NOT_RUN_INFERENCE_ERROR, status_code=500)
return app
def _write_file(v, files):
# Convert UploadFile to a NamedTemporaryFile to ensure it's on the disk
suffix = os.path.splitext(v.filename)[1]
named_file = tempfile.NamedTemporaryFile(delete=False, suffix=suffix)
files.append(named_file)
named_file.write(v.file.read())
named_file.close()
return named_file.name
def _read_image_buffer(v):
# read bytes sent via REST API and convert to image tensor
# in [channels, height, width] format
byte_string = io.BytesIO(v.file.read()).read()
image = decode_image(torch.frombuffer(byte_string, dtype=torch.uint8))
return image # channels, height, width
def convert_input(form, input_features):
"""Returns a new input and a list of files to be cleaned up."""
new_input = {}
files = []
for k, v in form.multi_items():
if type(v) == UploadFile:
# check if audio or image file
if input_features[k].type == AUDIO:
new_input[k] = _write_file(v, files)
else:
new_input[k] = _read_image_buffer(v)
else:
new_input[k] = v
return new_input, files
def convert_batch_input(form, input_features):
"""Returns a new input and a list of files to be cleaned up."""
file_index = {}
files = []
for k, v in form.multi_items():
if type(v) == UploadFile:
file_index[v.filename] = v
data = json.loads(form["dataset"])
for row in data["data"]:
for i in range(len(row)):
if row[i] in file_index:
feature_name = data["columns"][i]
if input_features[feature_name].type == AUDIO:
row[i] = _write_file(file_index[row[i]], files)
else:
row[i] = _read_image_buffer(file_index[row[i]])
return data, files
def run_server(
model_path: str,
host: str,
port: int,
allowed_origins: list,
) -> None:
"""Loads a pre-trained model and serve it on an http server.
# Inputs
:param model_path: (str) filepath to pre-trained model.
:param host: (str, default: `0.0.0.0`) host ip address for the server to use.
:param port: (int, default: `8000`) port number for the server to use.
:param allowed_origins: (list) list of origins allowed to make cross-origin requests.
# Return
:return: (`None`)
"""
model = LudwigModel.load(model_path)
app = server(model, allowed_origins)
uvicorn.run(app, host=host, port=port)
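# --- Hedged usage sketch (not part of the original module) ----------------------
# Once run_server() is up, the /predict endpoint accepts the input features as
# form fields. For example, with the `requests` library and a hypothetical text
# input feature named "review_text":
#
#   import requests
#   resp = requests.post("http://0.0.0.0:8000/predict",
#                        data={"review_text": "an example document"})
#   print(resp.json())
# ---------------------------------------------------------------------------------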
def cli(sys_argv):
parser = argparse.ArgumentParser(
description="This script serves a pretrained model", prog="ludwig serve", usage="%(prog)s [options]"
)
# ----------------
# Model parameters
# ----------------
parser.add_argument("-m", "--model_path", help="model to load", required=True)
parser.add_argument(
"-l",
"--logging_level",
default="info",
help="the level of logging to use",
choices=["critical", "error", "warning", "info", "debug", "notset"],
)
# ----------------
# Server parameters
# ----------------
parser.add_argument(
"-p",
"--port",
help="port for server (default: 8000)",
default=8000,
type=int,
)
parser.add_argument("-H", "--host", help="host for server (default: 0.0.0.0)", default="0.0.0.0")
parser.add_argument(
"-ao",
"--allowed_origins",
nargs="*",
help="A list of origins that should be permitted to make cross-origin requests. "
'Use "*" to allow any origin. See https://www.starlette.io/middleware/#corsmiddleware.',
)
add_contrib_callback_args(parser)
args = parser.parse_args(sys_argv)
args.callbacks = args.callbacks or []
for callback in args.callbacks:
callback.on_cmdline("serve", *sys_argv)
args.logging_level = logging_level_registry[args.logging_level]
logging.getLogger("ludwig").setLevel(args.logging_level)
global logger
logger = logging.getLogger("ludwig.serve")
print_ludwig("Serve", LUDWIG_VERSION)
run_server(args.model_path, args.host, args.port, args.allowed_origins)
if __name__ == "__main__":
cli(sys.argv[1:])
``` |
{
"source": "jimtje/fairlay",
"score": 2
} |
#### File: fairlay/fairlay/utils.py
```python
from xml.dom import minidom
from Crypto.Util import number
from Crypto.Util.asn1 import DerSequence
from Crypto.PublicKey import RSA
from binascii import a2b_base64
import base64
def GetLong(nodelist):
rc = []
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc.append(node.data)
string = ''.join(rc)
return number.bytes_to_long(base64.b64decode(string))
def pubkey_xml_to_pem(xmlkeydata):
rsaKeyValue = minidom.parseString(xmlkeydata)
modulus = GetLong(rsaKeyValue.getElementsByTagName('Modulus')[0].childNodes)
exponent = GetLong(rsaKeyValue.getElementsByTagName('Exponent')[0].childNodes)
publicKey = RSA.construct((modulus, exponent))
return publicKey
def privkey_xml_to_pem(xmlkeydata):
rsaKeyValue = minidom.parseString(xmlkeydata)
modulus = GetLong(rsaKeyValue.getElementsByTagName('Modulus')[0].childNodes)
exponent = GetLong(rsaKeyValue.getElementsByTagName('Exponent')[0].childNodes)
d = GetLong(rsaKeyValue.getElementsByTagName('D')[0].childNodes)
p = GetLong(rsaKeyValue.getElementsByTagName('P')[0].childNodes)
q = GetLong(rsaKeyValue.getElementsByTagName('Q')[0].childNodes)
qInv = GetLong(rsaKeyValue.getElementsByTagName('InverseQ')[0].childNodes)
privateKey = RSA.construct((modulus, exponent, d, p, q, qInv))
return privateKey
def pubkey_pem_to_xml(pubkeypem):
    publicKey = RSA.importKey(pubkeypem)
    xml = '<RSAKeyValue>'
    xml += '<Modulus>'
    xml += base64.standard_b64encode(number.long_to_bytes(publicKey.n)).decode("ascii")
    xml += '</Modulus>'
    xml += '<Exponent>'
    xml += base64.standard_b64encode(number.long_to_bytes(publicKey.e)).decode("ascii")
    xml += '</Exponent>'
    xml += '</RSAKeyValue>'
    return xml
def privkey_pem_to_xml(privkeypem):
    lines = privkeypem.replace(" ", '').split()
    keyDer = DerSequence()
    keyDer.decode(a2b_base64(''.join(lines[1:-1])))
    # PKCS#1 RSAPrivateKey fields (after the version integer), in DER order,
    # mapped to the element names used by .NET's RSAKeyValue XML format.
    element_names = ["Modulus", "Exponent", "D", "P", "Q", "DP", "DQ", "InverseQ"]
    xml = '<RSAKeyValue>'
    for i, name in enumerate(element_names, start=1):
        value = base64.standard_b64encode(number.long_to_bytes(keyDer[i])).decode("ascii")
        xml += '<{0}>{1}</{0}>'.format(name, value)
    xml += '</RSAKeyValue>'
    return xml
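

# Hedged round-trip sketch (not part of the original module): generate a fresh key
# with pycryptodome, convert it to .NET-style XML and back. Runs only when this
# file is executed directly.
if __name__ == "__main__":
    demo_key = RSA.generate(2048)
    priv_xml = privkey_pem_to_xml(demo_key.exportKey().decode("ascii"))
    pub_xml = pubkey_pem_to_xml(demo_key.publickey().exportKey().decode("ascii"))
    recovered_pub = pubkey_xml_to_pem(pub_xml)
    assert recovered_pub.n == demo_key.n and recovered_pub.e == demo_key.e
    print(priv_xml[:60] + "...")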
``` |
{
"source": "jimttaylor/world_news_wallpaper",
"score": 2
} |
#### File: jimttaylor/world_news_wallpaper/construct_wallpaper.py
```python
def _coord_to_pixal(map_type, locus):
    if locus is None:
return None
def _construct_articles_by_locus():
pass
``` |
{
"source": "jimtufts/bpmfwfft",
"score": 2
} |
#### File: bpmfwfft/bpmfwfft/fft_sampling.py
```python
from __future__ import print_function
import numpy as np
import netCDF4
try:
from bpmfwfft.grids import RecGrid
from bpmfwfft.grids import LigGrid
except:
from grids import RecGrid
from grids import LigGrid
KB = 0.001987204134799235
class Sampling(object):
def __init__(self, rec_prmtop, lj_sigma_scal_fact, rec_inpcrd,
bsite_file, grid_nc_file,
lig_prmtop, lig_inpcrd,
lig_coord_ensemble,
energy_sample_size_per_ligand,
output_nc,
temperature=300.):
"""
:param rec_prmtop: str, name of receptor prmtop file
        :param lj_sigma_scal_fact: float, used to check consistency when loading receptor and ligand grids
:param rec_inpcrd: str, name of receptor inpcrd file
:param bsite_file: None or str, name of file defining the box, the same as
from AlGDock pipeline. "measured_binding_site.py"
:param grid_nc_file: str, name of receptor precomputed grid netCDF file
:param lig_prmtop: str, name of ligand prmtop file
:param lig_inpcrd: str, name of ligand inpcrd file
        :param lig_coord_ensemble: list of 2d arrays, each array is a ligand coordinate
:param energy_sample_size_per_ligand: int, number of energies and translational vectors to store for each ligand crd
:param output_nc: str, name of nc file
:param temperature: float
"""
self._energy_sample_size_per_ligand = energy_sample_size_per_ligand
self._beta = 1./ temperature / KB
rec_grid = self._create_rec_grid(rec_prmtop, lj_sigma_scal_fact, rec_inpcrd,
bsite_file, grid_nc_file)
self._rec_crd = rec_grid.get_crd()
self._lig_grid = self._create_lig_grid(lig_prmtop, lj_sigma_scal_fact, lig_inpcrd, rec_grid)
self._lig_coord_ensemble = self._load_ligand_coor_ensemble(lig_coord_ensemble)
self._nc_handle = self._initialize_nc(output_nc)
def _create_rec_grid(self, rec_prmtop, lj_sigma_scal_fact, rec_inpcrd, bsite_file, grid_nc_file):
rec_grid = RecGrid(rec_prmtop, lj_sigma_scal_fact, rec_inpcrd, bsite_file,
grid_nc_file, new_calculation=False)
return rec_grid
def _create_lig_grid(self, lig_prmtop, lj_sigma_scal_fact, lig_inpcrd, rec_grid):
lig_grid = LigGrid(lig_prmtop, lj_sigma_scal_fact, lig_inpcrd, rec_grid)
return lig_grid
def _load_ligand_coor_ensemble(self, lig_coord_ensemble):
assert len(lig_coord_ensemble.shape) == 3, "lig_coord_ensemble must be 3-D array."
ensemble = lig_coord_ensemble
natoms = self._lig_grid.get_natoms()
for i in range(len(ensemble)):
if (ensemble[i].shape[0] != natoms) or (ensemble[i].shape[1] != 3):
raise RuntimeError("Ligand crd %d does not have correct shape"%i)
return ensemble
def _initialize_nc(self, output_nc):
nc_handle = netCDF4.Dataset(output_nc, mode="w", format="NETCDF4")
nc_handle.createDimension("three", 3)
rec_natoms = self._rec_crd.shape[0]
nc_handle.createDimension("rec_natoms", rec_natoms)
lig_natoms = self._lig_grid.get_natoms()
nc_handle.createDimension("lig_natoms", lig_natoms)
nc_handle.createDimension("lig_sample_size", self._lig_coord_ensemble.shape[0])
nc_handle.createDimension("energy_sample_size_per_ligand", self._energy_sample_size_per_ligand)
nc_handle.createVariable("rec_positions", "f8", ("rec_natoms", "three"))
nc_handle.variables["rec_positions"][:,:] = self._rec_crd
nc_handle.createVariable("lig_positions", "f8", ("lig_sample_size", "lig_natoms", "three"))
nc_handle.createVariable("lig_com", "f8", ("lig_sample_size", "three"))
nc_handle.createVariable("volume", "f8", ("lig_sample_size"))
nc_handle.createVariable("nr_grid_points", "i8", ("lig_sample_size"))
nc_handle.createVariable("exponential_sums", "f8", ("lig_sample_size"))
nc_handle.createVariable("log_of_divisors", "f8", ("lig_sample_size"))
nc_handle.createVariable("mean_energy", "f8", ("lig_sample_size"))
nc_handle.createVariable("min_energy", "f8", ("lig_sample_size"))
nc_handle.createVariable("energy_std", "f8", ("lig_sample_size"))
nc_handle.createVariable("resampled_energies", "f8", ("lig_sample_size", "energy_sample_size_per_ligand"))
nc_handle.createVariable("resampled_trans_vectors", "i8", ("lig_sample_size", "energy_sample_size_per_ligand", "three"))
nc_handle = self._write_grid_info(nc_handle)
return nc_handle
def _write_grid_info(self, nc_handle):
"""
write grid info, "x", "y", "z" ...
"""
data = self._lig_grid.get_grids()
grid_func_names = self._lig_grid.get_grid_func_names()
keys = [key for key in data.keys() if key not in grid_func_names]
for key in keys:
for dim in data[key].shape:
dim_name = "%d"%dim
if dim_name not in nc_handle.dimensions.keys():
nc_handle.createDimension(dim_name, dim)
for key in keys:
if data[key].dtype == int:
store_format = "i8"
elif data[key].dtype == float:
store_format = "f8"
else:
raise RuntimeError( "Unsupported dtype %s"%data[key].dtype )
dimensions = tuple([ "%d"%dim for dim in data[key].shape ])
nc_handle.createVariable(key, store_format, dimensions)
for key in keys:
nc_handle.variables[key][:] = data[key]
return nc_handle
def _save_data_to_nc(self, step):
self._nc_handle.variables["lig_positions"][step, :, :] = self._lig_grid.get_crd()
self._nc_handle.variables["lig_com"][step, :] = self._lig_grid.get_initial_com()
self._nc_handle.variables["volume"][step] = self._lig_grid.get_box_volume()
self._nc_handle.variables["nr_grid_points"][step] = self._lig_grid.get_number_translations()
self._nc_handle.variables["exponential_sums"][step] = self._exponential_sum
self._nc_handle.variables["log_of_divisors"][step] = self._log_of_divisor
self._nc_handle.variables["mean_energy"][step] = self._mean_energy
self._nc_handle.variables["min_energy"][step] = self._min_energy
self._nc_handle.variables["energy_std"][step] = self._energy_std
self._nc_handle.variables["resampled_energies"][step,:] = self._resampled_energies
self._nc_handle.variables["resampled_trans_vectors"][step,:,:] = self._resampled_trans_vectors
return None
def _do_fft(self, step):
print("Doing FFT for step %d"%step, "test")
lig_conf = self._lig_coord_ensemble[step]
self._lig_grid.cal_grids(molecular_coord = lig_conf)
energies = self._lig_grid.get_meaningful_energies()
print("Energies shape:", energies.shape)
self._mean_energy = energies.mean()
self._min_energy = energies.min()
self._energy_std = energies.std()
print("Number of finite energy samples", energies.shape[0])
exp_energies = -self._beta * energies
print(f"Max exp energy {exp_energies.max()}, Min exp energy {exp_energies.min()}")
self._log_of_divisor = exp_energies.max()
exp_energies[exp_energies < 0] = 0
exp_energies = np.exp(exp_energies - self._log_of_divisor)
self._exponential_sum = exp_energies.sum()
exp_energies /= self._exponential_sum
print("Number of exponential energy samples", exp_energies.sum())
# sel_ind = np.random.choice(exp_energies.shape[0], size=self._energy_sample_size_per_ligand, p=exp_energies, replace=True)
try:
sel_ind = np.random.choice(exp_energies.shape[0], size=self._energy_sample_size_per_ligand, p=exp_energies, replace=False)
except:
print(f"Only {np.count_nonzero(exp_energies)} non-zero entries in p, falling back to replacement")
sel_ind = np.random.choice(exp_energies.shape[0], size=self._energy_sample_size_per_ligand, p=exp_energies, replace=True)
del exp_energies
self._resampled_energies = [energies[ind] for ind in sel_ind]
del energies
self._lig_grid.set_meaningful_energies_to_none()
trans_vectors = self._lig_grid.get_meaningful_corners()
self._resampled_trans_vectors = [trans_vectors[ind] for ind in sel_ind]
del trans_vectors
self._resampled_energies = np.array(self._resampled_energies, dtype=float)
self._resampled_trans_vectors = np.array(self._resampled_trans_vectors, dtype=int)
self._save_data_to_nc(step)
return None
def run_sampling(self):
"""
"""
for step in range(self._lig_coord_ensemble.shape[0]):
self._do_fft(step)
print("Min energy", self._min_energy)
print("Mean energy", self._mean_energy)
print("STD energy", self._energy_std)
print("Initial center of mass", self._lig_grid.get_initial_com())
print("Grid volume", self._lig_grid.get_box_volume())
print("Number of translations", self._lig_grid.get_number_translations())
print("-------------------------------\n\n")
self._nc_handle.close()
return None
#
#TODO the class above assumes that the resample size is smaller than number of meaningful energies
# in general, the number of meaningful energies can be very smaller or even zero (no energy)
# when the number of meaningful energies is zero, that stratum contributes n_points zeros to the exponential mean
#
# so when needs to consider separately 3 cases:
# len(meaningful energies) == 0
# 0< len(meaningful energies) <= resample size
# len(meaningful energies) > resample size
#
class Sampling_PL(Sampling):
def _write_data_key_2_nc(self, data, key):
if data.shape[0] == 0:
return None
for dim in data.shape:
dim_name = "%d"%dim
if dim_name not in self._nc_handle.dimensions.keys():
self._nc_handle.createDimension(dim_name, dim)
if data.dtype == int:
store_format = "i8"
elif data.dtype == float:
store_format = "f8"
else:
raise RuntimeError("unsupported dtype %s"%data.dtype)
dimensions = tuple(["%d"%dim for dim in data.shape])
self._nc_handle.createVariable(key, store_format, dimensions)
self._nc_handle.variables[key][:] = data
return None
def _initialize_nc(self, output_nc):
"""
"""
nc_handle = netCDF4.Dataset(output_nc, mode="w", format="NETCDF4")
nc_handle.createDimension("three", 3)
rec_natoms = self._rec_crd.shape[0]
nc_handle.createDimension("rec_natoms", rec_natoms)
lig_natoms = self._lig_grid.get_natoms()
nc_handle.createDimension("lig_natoms", lig_natoms)
nc_handle.createDimension("lig_sample_size", self._lig_coord_ensemble.shape[0])
#nc_handle.createDimension("energy_sample_size_per_ligand", self._energy_sample_size_per_ligand)
nc_handle.createVariable("rec_positions", "f8", ("rec_natoms", "three"))
nc_handle.variables["rec_positions"][:,:] = self._rec_crd
nc_handle.createVariable("lig_positions", "f8", ("lig_sample_size", "lig_natoms", "three"))
nc_handle.createVariable("lig_com", "f8", ("lig_sample_size", "three"))
nc_handle.createVariable("volume", "f8", ("lig_sample_size"))
nc_handle.createVariable("nr_grid_points", "i8", ("lig_sample_size"))
nc_handle.createVariable("nr_finite_energy", "i8", ("lig_sample_size"))
nc_handle.createVariable("exponential_sums", "f8", ("lig_sample_size"))
nc_handle.createVariable("log_of_divisors", "f8", ("lig_sample_size"))
nc_handle.createVariable("mean_energy", "f8", ("lig_sample_size"))
nc_handle.createVariable("min_energy", "f8", ("lig_sample_size"))
nc_handle.createVariable("energy_std", "f8", ("lig_sample_size"))
#nc_handle.createVariable("resampled_energies", "f8", ("lig_sample_size", "energy_sample_size_per_ligand"))
#nc_handle.createVariable("resampled_trans_vectors", "i8", ("lig_sample_size", "energy_sample_size_per_ligand", "three"))
nc_handle = self._write_grid_info(nc_handle)
return nc_handle
def _save_data_to_nc(self, step):
self._nc_handle.variables["lig_positions"][step, :, :] = self._lig_grid.get_crd()
self._nc_handle.variables["lig_com"][step, :] = self._lig_grid.get_initial_com()
self._nc_handle.variables["volume"][step] = self._lig_grid.get_box_volume()
self._nc_handle.variables["nr_grid_points"][step] = self._lig_grid.get_number_translations()
self._nc_handle.variables["nr_finite_energy"][step] = self._nr_finite_energy
self._nc_handle.variables["exponential_sums"][step] = self._exponential_sum
self._nc_handle.variables["log_of_divisors"][step] = self._log_of_divisor
self._nc_handle.variables["mean_energy"][step] = self._mean_energy
self._nc_handle.variables["min_energy"][step] = self._min_energy
self._nc_handle.variables["energy_std"][step] = self._energy_std
self._write_data_key_2_nc(self._resampled_energies, "resampled_energies_%d"%step)
self._write_data_key_2_nc(self._resampled_trans_vectors, "resampled_trans_vectors_%d"%step)
return None
def _do_fft(self, step):
print("Doing FFT for step %d"%step)
lig_conf = self._lig_coord_ensemble[step]
print(self._lig_grid["SASAr"])
self._lig_grid.cal_grids(molecular_coord = lig_conf)
energies = self._lig_grid.get_meaningful_energies()
self._nr_finite_energy = energies.shape[0]
print("Number of finite energy samples", self._nr_finite_energy)
if energies.shape[0] > 0:
self._mean_energy = energies.mean()
self._min_energy = energies.min()
self._energy_std = energies.std()
exp_energies = -self._beta * energies
self._log_of_divisor = exp_energies.max()
exp_energies = np.exp(exp_energies - self._log_of_divisor)
self._exponential_sum = exp_energies.sum()
exp_energies /= self._exponential_sum
sample_size = min(exp_energies.shape[0], self._energy_sample_size_per_ligand)
sel_ind = np.random.choice(exp_energies.shape[0], size=sample_size, p=exp_energies, replace=True)
del exp_energies
self._resampled_energies = [energies[ind] for ind in sel_ind]
del energies
self._lig_grid.set_meaningful_energies_to_none()
trans_vectors = self._lig_grid.get_meaningful_corners()
self._resampled_trans_vectors = [trans_vectors[ind] for ind in sel_ind]
del trans_vectors
self._resampled_energies = np.array(self._resampled_energies, dtype=float)
self._resampled_trans_vectors = np.array(self._resampled_trans_vectors, dtype=int)
else:
self._mean_energy = np.inf
self._min_energy = np.inf
self._energy_std = np.inf
self._log_of_divisor = 1.
self._exponential_sum = 0.
self._resampled_energies = np.array([], dtype=float)
del energies
self._lig_grid.set_meaningful_energies_to_none()
self._resampled_trans_vectors = np.array([], dtype=float)
self._save_data_to_nc(step)
return None
if __name__ == "__main__":
# test
rec_prmtop = "../examples/amber/ubiquitin_ligase/receptor.prmtop"
lj_sigma_scal_fact = 0.8
rec_inpcrd = "../examples/amber/ubiquitin_ligase/receptor.inpcrd"
# bsite_file = "../examples/amber/t4_lysozyme/measured_binding_site.py"
bsite_file = None
grid_nc_file = "../examples/grid/ubiquitin_ligase/grid.nc"
lig_prmtop = "../examples/amber/ubiquitin/ligand.prmtop"
lig_inpcrd = "../examples/amber/ubiquitin/ligand.inpcrd"
energy_sample_size_per_ligand = 200
output_nc = "../examples/fft_sampling/ubql_ubiquitin/fft_sampling.nc"
ligand_md_trj_file = "../examples/ligand_md/ubiquitin/rotation.nc"
lig_coord_ensemble = netCDF4.Dataset(ligand_md_trj_file, "r").variables["positions"][:]
sampler = Sampling(rec_prmtop, lj_sigma_scal_fact, rec_inpcrd,
bsite_file, grid_nc_file,
lig_prmtop, lig_inpcrd,
lig_coord_ensemble,
energy_sample_size_per_ligand,
output_nc,
temperature=300.)
sampler.run_sampling()
```
#### File: bpmfwfft/protein_ligand_scripts/ligand_tremd.py
```python
from __future__ import print_function
import os
import sys
import argparse
import numpy as np
import netCDF4
# change this
sys.path.append("../bpmfwfft/")
from md_openmm import OpenMM_TREMD
parser = argparse.ArgumentParser()
parser.add_argument( "--ligand_prmtop", type=str, default="ligand.prmtop")
parser.add_argument( "--ligand_inpcrd", type=str, default="ligand.inpcrd")
parser.add_argument( "--phase", type=str, default = "OpenMM_Gas")
parser.add_argument( "--steps_per_iteration", type=int, default = 500)
parser.add_argument( "--niterations", type=int, default = 1000)
parser.add_argument( "--rotations_per_iteration", type=int, default = 500)
parser.add_argument( "--low_temperature", type=float, default = 300.)
parser.add_argument( "--high_temperature", type=float, default = 600.)
parser.add_argument( "--ntemperatures", type=int, default = 8)
parser.add_argument( "--nc_traj_file", type=str, default = "traj.nc")
args = parser.parse_args()
def is_md_done(nc_file, niterations):
if not os.path.isfile(nc_file):
return False
if os.path.getsize(nc_file) == 0:
return False
nc_handle = netCDF4.Dataset(nc_file)
if nc_handle.variables["positions"].shape[0] < niterations:
return False
if type(nc_handle.variables["positions"][-1]) == np.ma.core.MaskedArray:
return False
return True
def geometric_progression(low, high, n):
assert low > 0 and high > 0, "low and high must be positive"
assert high > low, "high must be higher than low"
log_scale = np.linspace(np.log(low), np.log(high), n)
return np.exp(log_scale)
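# Example (illustrative): geometric_progression(300., 600., 4) gives temperatures of
# roughly 300.0, 378.0, 476.2 and 600.0 K, i.e. a constant ratio between neighbouring
# replicas rather than a constant difference.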
if not is_md_done(args.nc_traj_file, args.niterations):
temperatures = geometric_progression(args.low_temperature, args.high_temperature, args.ntemperatures)
tremd = OpenMM_TREMD(args.ligand_prmtop, args.ligand_inpcrd, args.phase, temperatures)
tremd.run(args.nc_traj_file, args.steps_per_iteration, args.niterations, args.rotations_per_iteration)
else:
print(args.nc_traj_file + " is good, so nothing to be done!")
```
#### File: bpmfwfft/protein_protein_scripts/_fft_sampling.py
```python
from __future__ import print_function
import sys
import os
import netCDF4
import numpy as np
sys.path.append("../bpmfwfft")
from bpmfwfft.fft_sampling import Sampling
BSITE_FILE = None
def sampling(rec_prmtop, lj_sigma_scal_fact,
rec_inpcrd, grid_nc_file,
lig_prmtop, lig_inpcrd,
lig_coor_nc, nr_lig_conf, start_index,
energy_sample_size_per_ligand,
output_nc):
lig_nc_handle = netCDF4.Dataset(lig_coor_nc, "r")
lig_coord_ensemble = lig_nc_handle.variables["positions"][start_index : start_index + nr_lig_conf]
lig_nc_handle.close()
sampler = Sampling(rec_prmtop, lj_sigma_scal_fact, rec_inpcrd,
BSITE_FILE, grid_nc_file, lig_prmtop, lig_inpcrd,
lig_coord_ensemble,
energy_sample_size_per_ligand,
output_nc,
temperature=300.)
sampler.run_sampling()
print("Sampling Done")
return None
def is_sampling_nc_good(nc_file, nr_extracted_lig_conf):
if not os.path.exists(nc_file):
return False
try:
nc_handle = netCDF4.Dataset(nc_file, "r")
except RuntimeError as e:
print(nc_file)
print(e)
return True
else:
pass
cond1 = nc_handle.variables["lig_positions"][:].shape[0] == nr_extracted_lig_conf
if not cond1:
return False
cond2 = type(nc_handle.variables["lig_positions"][:]) == np.ndarray
if not cond2:
return False
return True
def parse_nr_ligand_confs(submit_file):
if os.path.exists(submit_file):
with open(submit_file, "r") as F:
for line in F:
if "--nr_lig_conf" in line:
nr_confs = line.split("--nr_lig_conf")[-1]
nr_confs = nr_confs.split()[0]
nr_confs = int(nr_confs)
return nr_confs
return None
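# Illustrative submit-file line handled by parse_nr_ligand_confs (the script name is
# made up): "python run_sampling.py --nr_lig_conf 100 --start_index 0" -> returns 100.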
```
#### File: bpmfwfft/protein_protein_scripts/_modeller_model.py
```python
import glob
from modeller import environ
from modeller import selection
from modeller.automodel import automodel
from modeller.automodel import loopmodel
from modeller.automodel import refine
class ModellingError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def run_modeller(id, chain, missing_res_ranges, sequence_len, align_file):
"""
:param id:
:param chain:
:param missing_res_ranges:
:param sequence_len:
:param align_file:
:return:
"""
env = environ()
env.io.atom_files_directory = ['.']
modifying_string = _redefine_loopmodel_string(missing_res_ranges, sequence_len)
exec(modifying_string)
model = MyModel(env, alnfile=align_file, knowns=(id), sequence = id+chain+"_full")
model.make()
_copy_result(id, chain, missing_res_ranges)
return None
def _redefine_loopmodel_string(missing_res_ranges, sequence_len):
"""
    Return a str to be run by exec to define the MyModel class.
:param missing_res_ranges:
:param sequence_len:
:return:
"""
out_string = """class MyModel(automodel):\n\tdef select_atoms(self):\n\t\treturn selection( """
if len(missing_res_ranges) == 0:
out_string += "self.residue_range(0, 0) )\n"
else:
for i in range(len(missing_res_ranges)-1):
out_string += "self.residue_range(%d, %d), "%missing_res_ranges[i]
out_string += "self.residue_range(%d, %d) )\n"%missing_res_ranges[-1]
return out_string
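# Example of the (tab-indented) string generated above for
# missing_res_ranges = [(0, 4), (20, 25)]:
#
#   class MyModel(automodel):
#       def select_atoms(self):
#           return selection( self.residue_range(0, 4), self.residue_range(20, 25) )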
def _copy_result(id, chain, missing_res_ranges):
"""
:param id:
:param chain:
:param missing_res_ranges:
:return:
"""
missing_res_ranges = [(star+1, end+1) for star, end in missing_res_ranges]
output_pdbs = glob.glob(id+chain+"_full.B*.pdb")
if len(output_pdbs) > 0:
pdb_text = open(output_pdbs[0], "r").read()
remark_str = "REMARK MODELLED RESIDUES: " + str(missing_res_ranges) + "\n"
pdb_text = remark_str + pdb_text
open(id+chain+"_modelled.pdb", "w").write(pdb_text)
return None
``` |
{
"source": "jimtuttle/flac-to-mp3",
"score": 4
} |
#### File: jimtuttle/flac-to-mp3/flac-to-mp3.py
```python
import argparse
from os import walk, makedirs
from os.path import join, splitext, isdir, isfile, exists
from shutil import copy2
from pydub import AudioSegment
import errno
def create_directory(d):
"""Create directories in destination if they don't already exist"""
try:
makedirs(d, exist_ok=True)
except FileExistsError as exc:
if exc.errno == errno.EEXIST and isdir(d):
print("Already exists: {}".format(d))
pass
else:
raise
def convert(r, f, p, fn):
"""Convert flac to mp3 using ffmpeg if the derivative doesn't already exist"""
sourcefile = join(r, f)
destfile = p.replace(".flac", ".mp3")
if not isfile(destfile):
flac_audio = AudioSegment.from_file(sourcefile, "flac")
flac_audio.export(destfile, format="mp3")
print("Converting {}".format(destfile))
else:
print("Already exists {}".format(destfile))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Create MP3s from FLAC files")
parser.add_argument("-s", '--source', help="Path to flac directory", required=True)
parser.add_argument("-d", '--destination', help="Path to MP3 directory", required=True)
args = vars(parser.parse_args())
source = args["source"]
destination = args["destination"]
for root, dirs, files in walk(source, topdown=True):
for directory in dirs:
path = join(root, directory).replace(source, destination)
create_directory(path)
for file in files:
newname = file.strip()
path = join(root, newname).replace(source, destination)
fname, extension = splitext(file)
if extension == '.jpg' or extension == '.mp3':
dest = join(root, file).replace(source, destination)
if not exists(dest):
copy2(join(root, file), dest)
elif extension == '.flac':
convert(root, file, path, fname)
``` |
{
"source": "jimtuttle/S3FileRenamer",
"score": 4
} |
#### File: jimtuttle/S3FileRenamer/S3FileRenamer.py
```python
from os import walk, remove
from os.path import exists, join, splitext, isfile
from shutil import rmtree, move
from argparse import ArgumentParser
def needs_rename(string):
"""Search input string for illegal characters. If found, return True, list of characters found, cleaned name.
If not, return False and two empty strings"""
bad_characters = ["&", "$", "@", "=", ";", "+", ",", "?", "\\", "{", "^", "}", "%", "`", "]", "\"", ">", "[", "~",
"<", "#", "|"]
replacement_character = "_"
found_bad_characters = []
cleaned_name = ""
for char in string:
if char in bad_characters:
found_bad_characters.append(char)
cleaned_name += replacement_character
else:
cleaned_name += char
if len(found_bad_characters) > 0:
return True, found_bad_characters, cleaned_name
else:
return False, "", ""
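# Example (illustrative): needs_rename("report&draft?.docx")
# -> (True, ["&", "?"], "report_draft_.docx")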
def is_temp_file(fname):
"""Determine if file matches pattern of temp files"""
if fname.startswith("~$") or fname.startswith(".") or fname == "thumbs.db" or \
splitext(fname)[1].lower() == ".tmp":
return True
else:
return False
def is_temp_dir(dname):
"""Determine if directory matches pattern for temp directory"""
if dname.startswith("."):
return True
else:
return False
def generate_log(log_dir, path):
"""Generate double-pipe delimited log file of all actions to be taken."""
log_file = open(join(log_dir, "rename.log"), "w")
log_file.write("ACTION||CHARACTERS||ORIGINAL NAME||NEW NAME||TYPE\n")
for root, dirs, files in walk(path):
for file in files:
log_line = False
if is_temp_file(file):
log_line = "DELETE||''||{}||''||FILE\n".format(join(root, file))
else:
found, characters_found, new_name = needs_rename(file)
if found:
log_line = "RENAME||{}||{}||{}||FILE\n".format(" ".join(characters_found), join(root, file),
join(root, new_name))
if log_line:
log_file.write(log_line)
for dir in dirs:
log_line = False
if is_temp_dir(dir):
log_line = "DELETE||''||{}||''||DIR\n".format(join(root, dir))
else:
found, characters_found, new_name = needs_rename(dir)
if found:
log_line = "RENAME||{}||{}||{}||FILE\n".format(" ".join(characters_found), join(root, dir),
join(root, new_name))
if log_line:
log_file.write(log_line)
log_file.close()
return join(log_dir, "rename.log")
if __name__ == "__main__":
parser = ArgumentParser(description="Find and replace S3-illegal characters")
parser.add_argument("-r", "--rename", help="Perform rename action.", action="store_true", default=False)
parser.add_argument("-p", '--path', help="Path to data directory", required=True)
parser.add_argument("-l", '--log', help="Path to log file directory", required=True)
parser.add_argument("-d", "--delete", help="Delete temp files and directories.", action="store_true", default=False)
args = vars(parser.parse_args())
path = args["path"]
do_rename = args["rename"]
do_delete = args["delete"]
log_directory = args["log"]
logfile = generate_log(log_directory, path)
if do_delete or do_rename:
log = open(logfile, "r")
for line in log:
line = line.strip()
action, chars, original, new, objtype = line.split("||")
if do_delete and action == "DELETE":
print("DELETING {} {}".format(objtype, original))
if objtype == "DIR":
rmtree(original)
if objtype == "FILE":
try:
remove(original)
except FileNotFoundError:
pass
if do_rename and action == "RENAME":
print("Renaming {} {}".format(objtype, original))
try:
move(original, new)
except FileNotFoundError:
pass
``` |
{
"source": "jimtyhurst/DSGoPipeline",
"score": 3
} |
#### File: components/pages/base.py
```python
import dash_html_components as html
import dash_core_components as dcc
from components.elements import get_card, get_rolling_plot, get_predictions
def get_location_base(app):
card1 = get_card("col-sm-12", component=dcc.Markdown("""
# Power Prediction Dashboard
        Some super simple Dash app here. I'm just writing this section out in
        *markdown* because I hate the way you're supposed to define HTML
        components in Python. If you also dislike it, it's easy to rip out Dash
        and have this as a base Flask app, which will allow you to create some
        GET endpoints which will be more efficiently cached.
"""))
card2 = get_rolling_plot(app, "card_rolling", class_name="col-xl-6")
card3 = get_predictions(app, "card_predictions", class_name="col-xl-6")
return [html.Div(className="row", children=[card1, card2, card3])]
```
#### File: DSGoPipeline/4_airflow/mlflow_dag.py
```python
from datetime import datetime
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
import pandas as pd
import mlflow
import mlflow.sklearn
from mlflow.tracking.client import MlflowClient
import inspect
import os
from airflow import DAG
from airflow.operators.python_operator import PythonOperator
def eval_metrics(actual, pred):
""" Youve seen this before """
rmse = np.sqrt(mean_squared_error(actual, pred))
mae = mean_absolute_error(actual, pred)
r2 = r2_score(actual, pred)
return rmse, mae, r2
def process_data(**kwargs):
""" Task 1 - turn the raw data into a data product. """
    # Because I want to keep this simple and not connect a bucket or require you to download a large dataset,
    # this step will simply load the *already* processed data, when ideally it should actually do the processing.
# To keep it in the MLflow framework, I am going to log the output data product
with mlflow.start_run(run_name="process") as run:
this_dir = os.path.abspath(os.path.dirname(inspect.stack()[0][1]))
mlflow.log_artifact(os.path.join(this_dir, "germany.csv"), "processed_data") # Log artifact in specific dir
# Xcom is how tasks can send messages to each other
kwargs["ti"].xcom_push(key="run_id", value=run.info.run_id)
def make_lr(**kwargs):
""" Create a linear regression model and log it to mlflow """
data_run_id = kwargs["ti"].xcom_pull(task_ids="process_data", key="run_id")
client = MlflowClient()
path = client.download_artifacts(data_run_id, "processed_data") # Overkill in our case, but imagine they are on different servers, infrastructures
df = pd.read_csv(path + "/germany.csv", parse_dates=[0], index_col=0)
X = df[["windspeed", "temperature", "rad_horizontal", "rad_diffuse"]]
y = df[["solar_GW", "wind_GW"]]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
with mlflow.start_run(run_name="lr") as run:
model = LinearRegression()
model.fit(X_train, y_train)
y_predict = model.predict(X_test)
rmse, mae, r2 = eval_metrics(y_test, y_predict)
mlflow.log_metric("rmse", rmse) # New
mlflow.log_metric("mae", mae) # New
mlflow.log_metric("r2", r2) # New
mlflow.sklearn.log_model(model, "model") # New
kwargs["ti"].xcom_push(key="run_id", value=[run.info.run_id])
def make_rf(**kwargs):
""" Create a random forest model and log it to mlflow """
data_run_id = kwargs["ti"].xcom_pull(task_ids="process_data", key="run_id")
client = MlflowClient()
path = client.download_artifacts(data_run_id, "processed_data") # Overkill in our case, but imagine they are on different servers, infrastructures
df = pd.read_csv(path + "/germany.csv", parse_dates=[0], index_col=0)
X = df[["windspeed", "temperature", "rad_horizontal", "rad_diffuse"]]
y = df[["solar_GW", "wind_GW"]]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
runs = []
for n_estimators in [4, 25]:
for max_depth in [4, 10]:
with mlflow.start_run(run_name="rf") as run:
model = RandomForestRegressor(n_estimators=n_estimators, max_depth=max_depth)
model.fit(X_train, y_train)
y_predict = model.predict(X_test)
rmse, mae, r2 = eval_metrics(y_test, y_predict)
mlflow.log_param("n_estimators", n_estimators) # New
mlflow.log_param("max_depth", max_depth) # New
mlflow.log_metric("rmse", rmse) # New
mlflow.log_metric("mae", mae) # New
mlflow.log_metric("r2", r2) # New
mlflow.sklearn.log_model(model, "model") # New
runs.append(run.info.run_id)
kwargs["ti"].xcom_push(key="run_id", value=runs)
def get_best_model(**kwargs):
""" For all the models we logged, determine the best performing run """
ids = [r for ids in kwargs["ti"].xcom_pull(task_ids=["model_lr", "model_rf"], key="run_id") for r in ids]
client = MlflowClient()
runs = [client.get_run(run_id) for run_id in ids]
run_r2 = [run.data.metrics["r2"] for run in runs]
best_run = runs[np.argmax(run_r2)]
kwargs["ti"].xcom_push(key="best_model_run_id", value=best_run.info.run_id)
def register_best_model(**kwargs):
""" Take the best performing model, register it under the BestModel name, and ship it to prod """
run_id = kwargs["ti"].xcom_pull(task_ids="get_best_model", key="best_model_run_id")
model_uri = f"runs:/{run_id}/model"
    model_details = mlflow.register_model(model_uri, "BestModel")  # note this doesn't put it in prod, but updates the registered model in the model repo
    # This is what would make it prod, but we probably shouldn't automate this without some testing and eyes on
client = MlflowClient()
client.transition_model_version_stage(name=model_details.name, version=model_details.version, stage='Production')
mlflow.tracking.set_tracking_uri("http://127.0.0.1:5000")
mlflow.set_experiment("airflow") # Notice, diff experiment name to keep things clear
# The airflow code is everything below here. We define the DAG in general, then its tasks, and how the tasks depend on each other
dag = DAG("DSGo",
description="Lets turn our little project into a DAG that is set to run every single day at 6am",
schedule_interval="0 6 * * *",
start_date=datetime(2020, 6, 13),
catchup=False)
# provide_context=True allows pushing and pulling variables
task_process = PythonOperator(task_id="process_data", python_callable=process_data, dag=dag, provide_context=True)
task_lr = PythonOperator(task_id="model_lr", python_callable=make_lr, dag=dag, provide_context=True)
task_rf = PythonOperator(task_id="model_rf", python_callable=make_rf, dag=dag, provide_context=True)
task_get_best_model = PythonOperator(task_id="get_best_model", python_callable=get_best_model, dag=dag, provide_context=True)
task_register_best_model = PythonOperator(task_id="register_best_model", python_callable=register_best_model, dag=dag, provide_context=True)
task_process >> task_lr # So the linear regression needs to wait on the data processing
task_process >> task_rf
[task_lr, task_rf] >> task_get_best_model # And the "best model" task waits on everything which makes models
task_get_best_model >> task_register_best_model # You get it
``` |
{
"source": "JiMu-Bao/mesos",
"score": 2
} |
#### File: plugins/task/main.py
```python
import json
from cli.exceptions import CLIException
from cli.mesos import get_tasks
from cli.plugins import PluginBase
from cli.util import Table
from cli.mesos import TaskIO
PLUGIN_NAME = "task"
PLUGIN_CLASS = "Task"
VERSION = "Mesos CLI Task Plugin"
SHORT_HELP = "Interacts with the tasks running in a Mesos cluster"
class Task(PluginBase):
"""
The task plugin.
"""
COMMANDS = {
"attach": {
"arguments": ['<task-id>'],
"flags": {
"--no-stdin": "do not attach a stdin [default: False]"
},
"short_help": "Attach the CLI to the stdio of a running task",
"long_help": """
Attach the CLI to the stdio of a running task
To detach type the sequence CTRL-p CTRL-q."""
},
"exec": {
"arguments": ['<task-id>', '<command>', '[<args>...]'],
"flags": {
"-i --interactive" : "interactive [default: False]",
"-t --tty": "tty [default: False]"
},
"short_help": "Execute commands in a task's container",
"long_help": "Execute commands in a task's container"
},
"list": {
"arguments": [],
"flags": {
"-a --all": "list all tasks, not only running [default: False]"
},
"short_help": "List all running tasks in a Mesos cluster",
"long_help": "List all running tasks in a Mesos cluster"
},
"inspect": {
"arguments": ['<task_id>'],
"flags": {},
"short_help": "Return low-level information on the task",
"long_help": "Return low-level information on the task"
}
}
def attach(self, argv):
"""
Attach the stdin/stdout/stderr of the CLI to the
STDIN/STDOUT/STDERR of a running task.
"""
try:
master = self.config.master()
except Exception as exception:
raise CLIException("Unable to get leading master address: {error}"
.format(error=exception))
task_io = TaskIO(master, self.config, argv["<task-id>"])
return task_io.attach(argv["--no-stdin"])
def exec(self, argv):
"""
Launch a process inside a task's container.
"""
try:
master = self.config.master()
except Exception as exception:
raise CLIException("Unable to get leading master address: {error}"
.format(error=exception))
task_io = TaskIO(master, self.config, argv["<task-id>"])
return task_io.exec(argv["<command>"],
argv["<args>"],
argv["--interactive"],
argv["--tty"])
def list(self, argv):
"""
List the tasks running in a cluster by checking the /tasks endpoint.
"""
# pylint: disable=unused-argument
try:
master = self.config.master()
except Exception as exception:
raise CLIException("Unable to get leading master address: {error}"
.format(error=exception))
try:
tasks = get_tasks(master, self.config)
except Exception as exception:
raise CLIException("Unable to get tasks from leading"
" master '{master}': {error}"
.format(master=master, error=exception))
if not tasks:
print("There are no tasks running in the cluster.")
return
try:
table = Table(["ID", "State", "Framework ID", "Executor ID"])
for task in tasks:
task_state = "UNKNOWN"
if task["statuses"]:
task_state = task["statuses"][-1]["state"]
if not argv["--all"] and task_state != "TASK_RUNNING":
continue
table.add_row([task["id"],
task_state,
task["framework_id"],
task["executor_id"]])
except Exception as exception:
raise CLIException("Unable to build table of tasks: {error}"
.format(error=exception))
print(str(table))
def inspect(self, argv):
"""
Show the low-level information on the task.
"""
try:
master = self.config.master()
except Exception as exception:
raise CLIException("Unable to get leading master address: {error}"
.format(error=exception))
data = get_tasks(master, self.config)
for task in data:
if task["id"] != argv["<task_id>"]:
continue
print(json.dumps(task, indent=4))
``` |
{
"source": "jimustafa/codiag",
"score": 2
} |
#### File: codiag/codiag/qpqc.py
```python
from __future__ import absolute_import, division, print_function
import numpy as np
from . import qep
def _solve_qp_s2_Q(Q):
# get the eigenvector corresponding to the largest eigenvalue
eigvals, eigvecs = np.linalg.eigh(Q)
eval_imax = np.argmax(eigvals)
# if eigvals[eval_imax] < 0:
# raise Exception
x = eigvecs[:, eval_imax]
x /= np.linalg.norm(x)
return x
def _solve_qp_s2_Qp(Q, p):
A2 = np.eye(3)
A1 = -2*Q
A0 = np.dot(Q, Q) - 1.0/4 * np.outer(p, p)
eigvals_Q, eigvecs_Q = np.linalg.eigh(Q)
eigvecs_Q = eigvecs_Q.T
eigvals = qep.quadeigvals(A0, A1, A2)
eigvals_real = np.real(eigvals[np.abs(eigvals.imag) < 1e-6])
eigval_max = np.max(eigvals_real)
# for (i, eigval) in enumerate(np.sort(eigvals_real)[::-1]):
if np.any(np.abs(eigvals_Q-eigval_max) < 1e-6):
        print('WARNING: QEP eigenvalue coincides with an eigenvalue of Q; handling the degenerate case')
iq = np.nonzero(np.abs(eigvals_Q-eigval_max) < 1e-6)[0][0]
# u = np.dot(np.linalg.pinv(Q-eigval*np.eye(3)), -p/2)
# if np.sum((np.dot(Q-eigval*np.eye(3), u) - (-p/2))**2) > 1e-6 or np.sum(u**2) > 1:
u = np.dot(np.linalg.pinv(Q-eigval_max*np.eye(3)), -p/2)
if np.sum((np.dot(Q-eigval_max*np.eye(3), u) - (-p/2))**2) > 1e-6 or np.sum(u**2) > 1:
return None
else:
if abs(np.sum(u**2) - 1) < 1e-6:
return u
else:
v = eigvecs_Q[iq]
c = np.sqrt(np.dot(v.conj(), v)/(1-np.dot(u, u)))
return u + v/c
else:
return np.dot(np.linalg.inv(Q - eigval_max*np.eye(3)), -p/2)
def solve_qp_s2(Q, p=None):
if p is None:
return _solve_qp_s2_Q(Q)
else:
return _solve_qp_s2_Qp(Q, p)
def solve_qp_s2_brute(Q, p=None, ntheta=1025, nphi=1025):
r"""
    Solve the quadratic program :math:`x^{T} Q x + p^{T} x` on :math:`S^{2}` by brute-force sampling over spherical angles.
"""
theta = np.linspace(0, np.pi, ntheta)
phi = np.linspace(0, 2*np.pi, nphi)
theta, phi = np.meshgrid(phi, theta, indexing='ij')
theta = theta.ravel()
phi = phi.ravel()
x1 = np.cos(theta)
x2 = np.sin(theta)*np.cos(phi)
x3 = np.sin(theta)*np.sin(phi)
x = np.column_stack((x1, x2, x3))
f = np.einsum('ij,jk,ik->i', x, Q, x)
if p is not None:
f += np.einsum('ij,j->i', x, p)
imax = np.argmax(f)
return x[imax]
``` |
{
"source": "jimustafa/gdstk",
"score": 2
} |
#### File: gdstk/docs/curve_images.py
```python
import pathlib
import numpy
import gdstk
from tutorial_images import draw
def init_image():
curve = gdstk.Curve((3, 4), tolerance=1e-3)
curve.segment((1, 1), True)
curve.turn(1, -numpy.pi / 2)
curve.segment((1, -1), True)
polygon = gdstk.Polygon(curve.points())
return gdstk.Cell("init").add(polygon)
def segment_image():
curve = gdstk.Curve((1, 0))
curve.segment((0, 1))
curve.segment([0j, -1 + 0j])
curve.segment([(0, -1), (2, -1)], True)
polygon = gdstk.Polygon(curve.points())
return gdstk.Cell("segment").add(polygon)
def cubic_image():
curve = gdstk.Curve((0, 0), tolerance=1e-3)
curve.cubic([(1, -2), (2, -2), (3, 0)])
curve.cubic([(2.7, 1), (1.8, 1), (1.5, 0), (1.3, -0.2), (0.3, -0.2), (0, 0)])
polygon = gdstk.Polygon(curve.points())
return gdstk.Cell("cubic").add(polygon)
def cubic_smooth_image():
curve = gdstk.Curve((0, 0), tolerance=1e-3)
curve.cubic([1 + 0j, 1.5 + 0.5j, 1 + 1j])
curve.cubic_smooth([1j, 0j])
polygon = gdstk.Polygon(curve.points())
return gdstk.Cell("cubic_smooth").add(polygon)
def bezier_image():
points = [(4, 1), (4, 3), (0, 5), (-4, 3), (-4, -2), (0, -4), (0, 0)]
curve = gdstk.Curve((0, 0))
curve.segment(points)
control_poly = gdstk.Polygon(curve.points(), datatype=1)
curve = gdstk.Curve((0, 0), tolerance=1e-3)
curve.bezier(points)
polygon = gdstk.Polygon(curve.points())
return gdstk.Cell("bezier").add(polygon, control_poly)
def interpolation_image():
points = [(4, 1), (4, 3), (0, 5), (-4, 3), (-4, -2), (0, -4)]
curve = gdstk.Curve((0, 0))
curve.segment(points)
control_poly_1 = gdstk.Polygon(curve.points(), datatype=1)
curve = gdstk.Curve((0, 0), tolerance=1e-3)
curve.interpolation(points, cycle=True)
polygon_1 = gdstk.Polygon(curve.points())
half_pi = numpy.pi / 2
angles = [half_pi, None, None, None, -half_pi, -half_pi, None]
curve = gdstk.Curve((4, -9))
curve.segment(points, relative=True)
control_poly_2 = gdstk.Polygon(curve.points(), datatype=1)
curve = gdstk.Curve((4, -9), tolerance=1e-3)
curve.interpolation(points, angles, cycle=True, relative=True)
polygon_2 = gdstk.Polygon(curve.points())
return gdstk.Cell("interpolation").add(
polygon_1, control_poly_1, polygon_2, control_poly_2
)
def arc_image():
curve = gdstk.Curve((-0.6, 0), tolerance=1e-3)
curve.segment((1, 0), True)
curve.arc(1, 0, numpy.pi / 2)
polygon_1 = gdstk.Polygon(curve.points())
curve = gdstk.Curve((0.6, 0), tolerance=1e-3)
curve.segment((1, 0), True)
curve.arc((2 ** -0.5, 0.4), -numpy.pi / 4, 3 * numpy.pi / 4, -numpy.pi / 4)
polygon_2 = gdstk.Polygon(curve.points())
return gdstk.Cell("arc").add(polygon_1, polygon_2)
def parametric_image():
def top(u):
x = 4 * u
y = 1 - numpy.cos(4 * numpy.pi * u)
return (x, y)
curve = gdstk.Curve((-2, 0), tolerance=1e-3)
curve.parametric(top)
curve.parametric(lambda u: (4 - 2 * u ** 0.5) * numpy.exp(-1.5j * numpy.pi * u) - 4)
polygon = gdstk.Polygon(curve.points())
return gdstk.Cell("parametric").add(polygon)
def commands_image():
curve = gdstk.Curve((0, 0), tolerance=1e-3)
curve.commands("l", 1, 1, "a", 1, -numpy.pi / 2, "l", 1, -1, "S", 1, -2, 0, -2)
polygon = gdstk.Polygon(curve.points())
return gdstk.Cell("commands").add(polygon)
def tolerance_image():
curve = gdstk.Curve((-2.5, 0), tolerance=1e-1)
curve.arc((2, 3), 0, numpy.pi)
polygon_1 = gdstk.Polygon(curve.points())
assert polygon_1.size == 7
curve = gdstk.Curve((2.5, 0), tolerance=1e-3)
curve.arc((2, 3), 0, numpy.pi)
polygon_2 = gdstk.Polygon(curve.points())
assert polygon_2.size == 62
return gdstk.Cell("tolerance").add(polygon_1, polygon_2)
if __name__ == "__main__":
path = pathlib.Path(__file__).parent.absolute() / "curve"
path.mkdir(parents=True, exist_ok=True)
draw(init_image(), path)
draw(segment_image(), path)
draw(cubic_image(), path)
draw(cubic_smooth_image(), path)
draw(bezier_image(), path)
draw(interpolation_image(), path)
draw(arc_image(), path)
draw(parametric_image(), path)
draw(commands_image(), path)
draw(tolerance_image(), path)
```
#### File: gdstk/docs/fonts.py
```python
import pathlib
from tutorial_images import draw
import gdstk
from matplotlib.font_manager import FontProperties
from matplotlib.textpath import TextPath
def render_text(text, size=None, position=(0, 0), font_prop=None, tolerance=0.1):
tol = 0.1 * tolerance
path = TextPath(position, text, size=size, prop=font_prop)
polys = []
xmax = position[0]
for points, code in path.iter_segments():
if code == path.MOVETO:
c = gdstk.Curve(points, tolerance=tolerance)
elif code == path.LINETO:
c.segment(points.reshape(points.size // 2, 2))
elif code == path.CURVE3:
c.quadratic(points.reshape(points.size // 2, 2))
elif code == path.CURVE4:
c.cubic(points.reshape(points.size // 2, 2))
elif code == path.CLOSEPOLY:
poly = c.points()
if poly.size > 0:
if poly[:, 0].min() < xmax:
i = len(polys) - 1
while i >= 0:
if gdstk.inside(poly[:1], [polys[i]], precision=tol)[0]:
p = polys.pop(i)
b = gdstk.boolean([p], [poly], "xor", tol)
poly = b[0].points
break
elif gdstk.inside(polys[i][:1], [poly], precision=tol)[0]:
p = polys.pop(i)
b = gdstk.boolean([p], [poly], "xor", tol)
poly = b[0].points
i -= 1
xmax = max(xmax, poly[:, 0].max())
polys.append(poly)
return polys
if __name__ == "__main__":
cell = gdstk.Cell("fonts")
fp = FontProperties(family="serif", style="italic")
point_list = render_text("Text rendering", 10, font_prop=fp)
cell.add(gdstk.Polygon(pts) for pts in point_list)
path = pathlib.Path(__file__).parent.absolute() / "how-tos"
draw(cell, path)
```
#### File: gdstk/docs/tutorial_images.py
```python
import pathlib
import numpy
import gdstk
def draw(cell, path):
bb = cell.bounding_box()
scaling = 300 / (1.1 * (bb[1][0] - bb[0][0]))
name = path / (cell.name + ".svg")
cell.write_svg(
name,
scaling=scaling,
background="none",
style={(0, 1): {"fill": "none", "stroke": "black", "stroke-dasharray": "8,8"}},
fontstyle={(3, 2): {"stroke": "red", "fill": "none", "font-size": "32px"}},
pad="5%",
)
print(f"Saving {name} (scaling {scaling})")
if __name__ == "__main__":
path = pathlib.Path(__file__).parent.absolute() / "tutorial"
path.mkdir(parents=True, exist_ok=True)
# Polygons
# Create a polygon from a list of vertices
points = [(0, 0), (2, 2), (2, 6), (-6, 6), (-6, -6), (-4, -4), (-4, 4), (0, 4)]
poly = gdstk.Polygon(points)
draw(gdstk.Cell("polygons").add(poly), path)
# Holes
# Manually connect the hole to the outer boundary
cutout = gdstk.Polygon(
[(0, 0), (5, 0), (5, 5), (0, 5), (0, 0), (2, 2), (2, 3), (3, 3), (3, 2), (2, 2)]
)
draw(gdstk.Cell("holes").add(cutout), path)
# Circles
# Circle centered at (0, 0), with radius 2 and tolerance 0.1
circle = gdstk.ellipse((0, 0), 2, tolerance=0.01)
# To create an ellipse, simply pass a list with 2 radii.
# Because the tolerance is small (resulting a large number of
# vertices), the ellipse is fractured in 2 polygons.
ellipse = gdstk.ellipse((4, 0), [1, 2], tolerance=1e-4)
# Circular arc example
arc = gdstk.ellipse(
(2, 4),
2,
inner_radius=1,
initial_angle=-0.2 * numpy.pi,
final_angle=1.2 * numpy.pi,
tolerance=0.01,
)
draw(gdstk.Cell("circles").add(circle, ellipse, arc), path)
# Curves
# Construct a curve made of a sequence of line segments
c1 = gdstk.Curve((0, 0)).segment([(1, 0), (2, 1), (2, 2), (0, 2)])
p1 = gdstk.Polygon(c1.points())
# Construct another curve using relative coordinates
c2 = gdstk.Curve((3, 1)).segment([(1, 0), (2, 1), (2, 2), (0, 2)], relative=True)
p2 = gdstk.Polygon(c2.points())
draw(gdstk.Cell("curves").add(p1, p2), path)
# Curves 1
# Use complex numbers to facilitate writing polar coordinates
c3 = gdstk.Curve(2j).segment(4 * numpy.exp(1j * numpy.pi / 6), relative=True)
# Elliptical arcs have syntax similar to gdstk.ellipse
c3.arc((4, 2), 0.5 * numpy.pi, -0.5 * numpy.pi)
p3 = gdstk.Polygon(c3.points())
draw(gdstk.Cell("curves_1").add(p3), path)
# Curves 2
# Cubic Bezier curves can be easily created
c4 = gdstk.Curve((0, 0), tolerance=1e-3)
c4.cubic([(0, 1), (1, 1), (1, 0)])
# Smooth continuation:
c4.cubic_smooth([(1, -1), (1, 0)], relative=True)
# Similarly for quadratic Bezier curves
c4.quadratic([(0.5, 1), (1, 0)], relative=True)
c4.quadratic_smooth((1, 0), relative=True)
# Smooth interpolating curve
c4.interpolation([(4, -1), (3, -2), (2, -1.5), (1, -2), (0, -1), (0, 0)])
p4 = gdstk.Polygon(c4.points())
# draw
ref_poly = gdstk.Polygon(
[
0j,
1j,
1 + 1j,
1 + 0j,
1 - 1j,
2 - 1j,
2 + 0j,
2.5 + 1j,
3 + 0j,
3.5 - 1j,
4 + 0j,
4 - 1j,
3 - 2j,
2 - 1.5j,
1 - 2j,
-1j,
],
datatype=1,
)
draw(gdstk.Cell("curves_2").add(p4, ref_poly), path)
# Transformations
poly = gdstk.rectangle((-2, -2), (2, 2))
poly.rotate(numpy.pi / 4)
poly.scale(1, 0.5)
draw(gdstk.Cell("transformations").add(poly), path)
# Layer and Datatype
# Layer/datatype definitions for each step in the fabrication
ld = {
"full etch": {"layer": 1, "datatype": 3},
"partial etch": {"layer": 2, "datatype": 3},
"lift-off": {"layer": 0, "datatype": 7},
}
p1 = gdstk.rectangle((-3, -3), (3, 3), **ld["full etch"])
p2 = gdstk.rectangle((-5, -3), (-3, 3), **ld["partial etch"])
p3 = gdstk.rectangle((5, -3), (3, 3), **ld["partial etch"])
p4 = gdstk.regular_polygon((0, 0), 2, 6, **ld["lift-off"])
draw(gdstk.Cell("layer_and_datatype").add(p1, p2, p3, p4), path)
# References
# Create a cell with a component that is used repeatedly
contact = gdstk.Cell("CONTACT")
contact.add(p1, p2, p3, p4)
# Create a cell with the complete device
device = gdstk.Cell("DEVICE")
device.add(cutout)
# Add 2 references to the component changing size and orientation
ref1 = gdstk.Reference(contact, (3.5, 1), magnification=0.25)
ref2 = gdstk.Reference(contact, (1, 3.5), magnification=0.25, rotation=numpy.pi / 2)
device.add(ref1, ref2)
# The final layout has several repetitions of the complete device
main = gdstk.Cell("MAIN")
main.add(gdstk.Reference(device, (0, 0), columns=3, rows=2, spacing=(6, 7)))
# draw
main.name = "references"
draw(main, path)
main.name = "MAIN"
# Flexible Paths
# Path defined by a sequence of points and stored as a GDSII path
fp1 = gdstk.FlexPath(
[(0, 0), (3, 0), (3, 2), (5, 3), (3, 4), (0, 4)], 1, simple_path=True
)
# Other construction methods can still be used
fp1.interpolation([(0, 2), (2, 2), (4, 3), (5, 1)], relative=True)
# Multiple parallel paths separated by 0.5 with different widths,
# end caps, and joins. Because of the join specification, they
    # cannot be stored as GDSII paths, only as polygons.
fp2 = gdstk.FlexPath(
[(12, 0), (8, 0), (8, 3), (10, 2)],
[0.3, 0.2, 0.4],
0.5,
ends=["extended", "flush", "round"],
joins=["bevel", "miter", "round"],
)
fp2.arc(2, -0.5 * numpy.pi, 0.5 * numpy.pi)
fp2.arc(1, 0.5 * numpy.pi, 1.5 * numpy.pi)
draw(gdstk.Cell("flexible_paths").add(fp1, fp2), path)
# Flexible Paths 1
# Path created with automatic bends of radius 5
points = [(0, 0), (0, 10), (20, 0), (18, 15), (8, 15)]
fp3 = gdstk.FlexPath(points, 0.5, bend_radius=5, simple_path=True)
# Same path, generated with natural joins, for comparison
fp4 = gdstk.FlexPath(points, 0.5, layer=1, simple_path=True)
draw(gdstk.Cell("flexible_paths_2").add(fp3, fp4), path)
# Flexible Paths 2
# Straight segment showing the possibility of width and offset changes
fp5 = gdstk.FlexPath((0, 0), [0.5, 0.5], 1)
fp5.horizontal(2)
fp5.horizontal(4, width=0.8, offset=1.8)
fp5.horizontal(6)
draw(gdstk.Cell("flexible_paths_3").add(fp5), path)
# Robust Paths
# Create 4 parallel paths in different layers
rp = gdstk.RobustPath(
(0, 50),
[2, 0.5, 1, 1],
[0, 0, -1, 1],
ends=["extended", "round", "flush", "flush"],
layer=[1, 0, 2, 2],
)
rp.segment((0, 45))
rp.segment(
(0, 5),
width=[lambda u: 2 + 16 * u * (1 - u), 0.5, 1, 1],
offset=[
0,
lambda u: 8 * u * (1 - u) * numpy.cos(12 * numpy.pi * u),
lambda u: -1 - 8 * u * (1 - u),
lambda u: 1 + 8 * u * (1 - u),
],
)
rp.segment((0, 0))
rp.interpolation(
[(15, 5)],
angles=[0, 0.5 * numpy.pi],
width=0.5,
offset=[-0.25, 0.25, -0.75, 0.75],
)
rp.parametric(
lambda u: numpy.array((4 * numpy.sin(6 * numpy.pi * u), 45 * u)),
offset=[
lambda u: -0.25 * numpy.cos(24 * numpy.pi * u),
lambda u: 0.25 * numpy.cos(24 * numpy.pi * u),
-0.75,
0.75,
],
)
draw(gdstk.Cell("robust_paths").add(rp), path)
# Text
# Label centered at (1, 3)
label = gdstk.Label("Sample label", (5, 3), texttype=2)
# Horizontal text with height 2.25
htext = gdstk.text("12345", 2.25, (0.25, 6))
# Vertical text with height 1.5
vtext = gdstk.text("ABC", 1.5, (10.5, 4), vertical=True)
rect = gdstk.rectangle((0, 0), (10, 6), layer=10)
draw(gdstk.Cell("text").add(*htext, *vtext, label, rect), path)
# Boolean Operations
# Create some text
text = gdstk.text("GDSTK", 4, (0, 0))
# Create a rectangle extending the text's bounding box by 1
rect = gdstk.rectangle((-1, -1), (5 * 4 * 9 / 16 + 1, 4 + 1))
# Subtract the text from the rectangle
inv = gdstk.boolean(rect, text, "not")
draw(gdstk.Cell("boolean_operations").add(*inv), path)
# Slice Operation
ring1 = gdstk.ellipse((-6, 0), 6, inner_radius=4)
ring2 = gdstk.ellipse((0, 0), 6, inner_radius=4)
ring3 = gdstk.ellipse((6, 0), 6, inner_radius=4)
# Slice the first ring across x=-3, the second ring across x=-3
# and x=3, and the third ring across x=3
slices1 = gdstk.slice(ring1, -3, "x")
slices2 = gdstk.slice(ring2, [-3, 3], "x")
slices3 = gdstk.slice(ring3, 3, "x")
slices = gdstk.Cell("SLICES")
# Keep only the left side of slices1, the center part of slices2
# and the right side of slices3
slices.add(*slices1[0])
slices.add(*slices2[1])
slices.add(*slices3[1])
# draw
slices.name = "slice_operation"
draw(slices, path)
slices.name = "SLICES"
# Offset Operation
rect1 = gdstk.rectangle((-4, -4), (1, 1))
rect2 = gdstk.rectangle((-1, -1), (4, 4))
# Erosion: because we set `use_union=True`, the inner boundaries have no effect
outer = gdstk.offset([rect1, rect2], -0.5, use_union=True, layer=1)
draw(gdstk.Cell("offset_operation").add(rect1, rect2, *outer), path)
# Fillet Operation
flexpath = gdstk.FlexPath([(-8, -4), (0, -4), (0, 4), (8, 4)], 4)
filleted_path = flexpath.to_polygons()[0]
filleted_path.fillet(1.5)
draw(gdstk.Cell("fillet_operation").add(filleted_path), path)
``` |
{
"source": "Jimut123/code_skulptor_pygames",
"score": 4
} |
#### File: code_skulptor_pygames/memory_game/memory_game.py
```python
import simplegui
import random
# for repetition check
# helper function to initialize globals
def new_game():
    # two copies of the numbers 0-7; list() so the decks can also be shuffled under Python 3
    cards1 = list(range(0, 8))
    cards2 = list(range(0, 8))
random.shuffle(cards1)
random.shuffle(cards2)
global cardDeck
cardDeck = cards1 + cards2
random.shuffle(cardDeck)
global exposed
exposed = [False] * 16
global turns, count
turns = [-1] * 2
count = 0
label.set_text("Turns = " + str(count))
# define event handlers
def mouseclick(pos):
# add game state logic here
global turns, count
    # if it's the 1st turn, just flip the card (state 0)
    # (integer division keeps the card index an int under Python 3 as well)
    if turns[0] == -1 and exposed[pos[0] // 50] == False:
        turns[0] = pos[0] // 50
exposed[turns[0]] = True
    # if it's the 2nd turn (state 1)
    elif turns[1] == -1 and exposed[pos[0] // 50] == False:
        turns[1] = pos[0] // 50
exposed[turns[1]] = True
#increase overall count of turns after end of both turns
count += 1
label.set_text("Turns = " + str(count))
if False not in exposed:
label.set_text("Won the Game in " + str(count) + " Turns, Press Reset for New Game!" )
    # if it's the 1st turn of a new pair (state 2)
    elif turns[1] != -1 and exposed[pos[0] // 50] == False:
        # if the cards don't pair, flip both back
        if cardDeck[turns[0]] != cardDeck[turns[1]]:
            exposed[turns[1]] = False
            exposed[turns[0]] = False
            turns[1] = -1
            turns[0] = pos[0] // 50
            exposed[turns[0]] = True
        else:
            turns[1] = -1
            turns[0] = pos[0] // 50
            exposed[turns[0]] = True
# cards are logically 50x100 pixels in size
def draw(canvas):
for index, card in enumerate(cardDeck):
if exposed[index] == True:
canvas.draw_polygon([(index*50, 0), ((index*50) + 50, 0), ((index*50) + 50, 100), (index*50 , 100)], 1, 'Black', 'White')
canvas.draw_text(str(card), ((index*50) + 10, 70), 65, 'Red')
else:
canvas.draw_polygon([(index*50, 0), ((index*50) + 50, 0), ((index*50) + 50, 100), (index*50 , 100)], 1, 'Black', 'Green')
# create frame and add a button and labels
frame = simplegui.create_frame("Memory", 800, 100)
frame.add_button("Reset", new_game)
label = frame.add_label("Turns = 0")
# register event handlers
frame.set_mouseclick_handler(mouseclick)
frame.set_draw_handler(draw)
# get things rolling
new_game()
frame.start()
``` |
{
"source": "Jimuyangz/flowtrack",
"score": 2
} |
#### File: Jimuyangz/flowtrack/datasets.py
```python
import torch
import torch.utils.data as data
import os, math, random
from os.path import *
import numpy as np
from glob import glob
import utils.frame_utils as frame_utils
from scipy.misc import imread, imresize
from utils.flow_utils import readFlow
import time
import cv2
import math
class StaticRandomCrop(object):
def __init__(self, image_size, crop_size):
self.th, self.tw = crop_size
h, w = image_size
self.h1 = random.randint(0, h - self.th)
self.w1 = random.randint(0, w - self.tw)
def __call__(self, img):
return img[self.h1:(self.h1+self.th), self.w1:(self.w1+self.tw),:]
class StaticCenterCrop(object):
def __init__(self, image_size, crop_size):
self.th, self.tw = crop_size
self.h, self.w = image_size
def __call__(self, img):
return img[(self.h-self.th)//2:(self.h+self.th)//2, (self.w-self.tw)//2:(self.w+self.tw)//2,:]
def IoU(box,boxes):
'''
    Compute the IoU between a detection box and a set of ground-truth boxes.

    Parameters:
        box: numpy array, shape (5,): x1, y1, x2, y2, score
            input box
        boxes: numpy array, shape (n, 4): x1, y1, x2, y2
            input ground-truth boxes
    Returns:
        ovr: numpy array, shape (n,)
            IoU of the input box with each ground-truth box
'''
box_area=(box[2]-box[0]+1)*(box[3]-box[1]+1)
area = (boxes[:, 2] - boxes[:, 0] + 1) * (boxes[:, 3] - boxes[:, 1] + 1)
xx1=np.maximum(box[0],boxes[:,0])
yy1=np.maximum(box[1],boxes[:,1])
xx2=np.minimum(box[2],boxes[:,2])
yy2=np.minimum(box[3],boxes[:,3])
#print(area.dtype, yy2.dtype)
#print((xx2-xx1+1).dtype)
#print(torch.tensor(0.).type(torch.DoubleTensor).dtype)
    # width and height of the intersection region
w=np.maximum(0,xx2-xx1+1)
h=np.maximum(0,yy2-yy1+1)
inter=w*h
ovr= inter/(box_area+area-inter)
return ovr
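
def _iou_example():
    # Hedged usage sketch (added for illustration; not part of the original file).
    # One query box against two candidates; note that IoU() above uses the
    # inclusive-pixel convention, so an 11x11 box has area (10 - 0 + 1) ** 2.
    box = np.array([0., 0., 10., 10.])
    boxes = np.array([[5., 5., 20., 20.],
                      [0., 0., 10., 10.]])
    return IoU(box, boxes)  # approximately [0.106, 1.0]
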
class MOT(data.Dataset):
def __init__(self, args, is_cropped = False, root = ''):
self.args = args
self.is_cropped = is_cropped
self.crop_size = args.crop_size
self.render_size = args.inference_size
self.max_objs = 128
self.width = 1920
self.height = 1024
#flow_root = join(root, 'flow')
image_root = join(root, 'dataset.txt')
image_files = open(image_root).readlines()
self.flow_list = []
self.image_list = []
self.flow_image = []
for item in image_files:
img1 = item.strip('\n')
img2 = img1.replace('f.jpg', 'l.jpg')
file = img1.replace('_f.jpg','.npy')
sp = img1.split('/')[:-1]
ix = int(img1.split('/')[-1].split('_')[0])
flow_file = os.path.join('/',*sp,'{:06d}'.format(ix)+'.flo')
if not isfile(img1) or not isfile(img2) or not isfile(file):
                print('Warning: the images or the annotation file do not exist!!!')
continue
self.image_list += [[img1, img2]]
self.flow_list += [file]
self.flow_image += [flow_file]
self.size = len(self.image_list)
self.frame_size = frame_utils.read_gen(self.image_list[0][0]).shape
if (self.render_size[0] < 0) or (self.render_size[1] < 0) or (self.frame_size[0]%64) or (self.frame_size[1]%64):
self.render_size[0] = ( (self.frame_size[0])//64 ) * 64
self.render_size[1] = ( (self.frame_size[1])//64 ) * 64
args.inference_size = self.render_size
assert (len(self.image_list) == len(self.flow_list))
self.variances = [0.1, 0.2]
def __getitem__(self, index):
#start = time.time()
index = index % self.size
#start_1 = time.time()
img1 = frame_utils.read_gen(self.image_list[index][0])
img2 = frame_utils.read_gen(self.image_list[index][1])
#print(img1.shape)
images = [img1, img2]
image_size = img1.shape[:2]
images = np.array(images).transpose(3,0,1,2)
images = torch.from_numpy(images.astype(np.float32))
annos = np.load(self.flow_list[index], allow_pickle=True)
num_objs = len(annos)
assert num_objs <= self.max_objs
# read flow
flow = torch.from_numpy(readFlow(self.flow_image[index]))
flow = flow.permute(2,0,1)
rois = np.zeros((self.max_objs, 4))
rois.fill(-1)
gts = np.zeros((self.max_objs, 4))
gts.fill(-2000)
for k, anno in enumerate(annos):
bbox = anno['bbox'] # [x1,y1,w,h]
bbox_next = anno['bbox_next'] # [x1,y1,w,h]
x1 = bbox[0]
y1 = bbox[1]
x2 = bbox[0] + bbox[2]
y2 = bbox[1] + bbox[3]
if x1 < 0:
x1 = 0
if y1 < 0:
y1 = 0
if x2 >= 1920:
x2 = 1919
if y2 >= 1024:
y2 = 1023
if x2<=x1 or y2<=y1:
gts[k, :] = [-2000, -2000, -2000, -2000]
                rois[k, :] = [-1, -1, -1, -1]  # leave this entry as padding (filtered out later)
# np.save(self.image_list[index][0].replace('.jpg','-')+'wronglabel.npy',annos)
continue
assert x2>x1 and y2>y1
box_original = np.array([x1, y1, x2, y2])
if np.random.randint(2):
iou = 0
while iou<=0.8:
ratio_w = np.random.uniform(0.85,1.15)
ratio_h = np.random.uniform(0.85,1.15)
width = x2 - x1
height= y2 - y1
new_width = width * ratio_w
new_height = height * ratio_h
ratio_shift_w = np.random.uniform(-0.15,0.15)
ratio_shift_h = np.random.uniform(-0.15,0.15)
shift_w = ratio_shift_w * width
shift_h = ratio_shift_h * height
xc = (x1 + x2) / 2.0
yc = (y1 + y2) / 2.0
xc_ = xc + shift_w
yc_ = yc + shift_h
x1_ = xc_ - new_width/2.0
x2_ = xc_ + new_width/2.0
y1_ = yc_ - new_height/2.0
y2_ = yc_ + new_height/2.0
box_shift = np.array([[x1_, y1_, x2_, y2_]])
iou = IoU(box_original, box_shift)
x1 = x1_
x2 = x2_
y1 = y1_
y2 = y2_
if x1 < 0:
x1 = 0
if y1 < 0:
y1 = 0
if x2 >= 1920:
x2 = 1919
if y2 >= 1024:
y2 = 1023
assert x2>0 and y2>0
assert x1<1919 and y1<1023
assert x2>x1 and y2>y1
dx = (bbox_next[0]+bbox_next[2]/2.0) - (x1+x2)/2.0
dy = (bbox_next[1]+bbox_next[3]/2.0) - (y1+y2)/2.0
w = x2 - x1
h = y2 - y1
            # encode the target box relative to the (jittered) roi:
            # center offsets scaled by variances[0], plus log width/height ratios
l_cx = dx / (self.variances[0] * w)
l_cy = dy / (self.variances[0] * h)
l_w = math.log(bbox_next[2]/w)
l_h = math.log(bbox_next[3]/h)
gts[k, :] = [l_cx, l_cy, l_w, l_h]
rois[k, :] = [x1, y1, x2, y2]
rois = torch.from_numpy(rois.astype(np.float32))
gts = torch.from_numpy(gts.astype(np.float32))
return [images], [rois], [gts], [flow]
def __len__(self):
return self.size
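
def _decode_boxes_example(loc, roi, variance=0.1):
    # Hedged sketch (added for illustration; not part of the original dataset code):
    # inverts the encoding in MOT.__getitem__ above, mapping a regression target
    # [l_cx, l_cy, l_w, l_h] and its roi [x1, y1, x2, y2] back to [xc, yc, w, h].
    w = roi[2] - roi[0]
    h = roi[3] - roi[1]
    xc = (roi[0] + roi[2]) / 2.0 + loc[0] * variance * w
    yc = (roi[1] + roi[3]) / 2.0 + loc[1] * variance * h
    return [xc, yc, w * math.exp(loc[2]), h * math.exp(loc[3])]
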
class MpiSintel(data.Dataset):
def __init__(self, args, is_cropped = False, root = '', dstype = 'clean', replicates = 1):
self.args = args
self.is_cropped = is_cropped
self.crop_size = args.crop_size
self.render_size = args.inference_size
self.replicates = replicates
flow_root = join(root, 'flow')
image_root = join(root, dstype)
file_list = sorted(glob(join(flow_root, '*/*.flo')))
self.flow_list = []
self.image_list = []
for file in file_list:
if 'test' in file:
# print file
continue
fbase = file[len(flow_root)+1:]
fprefix = fbase[:-8]
fnum = int(fbase[-8:-4])
img1 = join(image_root, fprefix + "%04d"%(fnum+0) + '.png')
img2 = join(image_root, fprefix + "%04d"%(fnum+1) + '.png')
if not isfile(img1) or not isfile(img2) or not isfile(file):
continue
self.image_list += [[img1, img2]]
self.flow_list += [file]
self.size = len(self.image_list)
self.frame_size = frame_utils.read_gen(self.image_list[0][0]).shape
if (self.render_size[0] < 0) or (self.render_size[1] < 0) or (self.frame_size[0]%64) or (self.frame_size[1]%64):
self.render_size[0] = ( (self.frame_size[0])//64 ) * 64
self.render_size[1] = ( (self.frame_size[1])//64 ) * 64
args.inference_size = self.render_size
assert (len(self.image_list) == len(self.flow_list))
def __getitem__(self, index):
index = index % self.size
img1 = frame_utils.read_gen(self.image_list[index][0])
img2 = frame_utils.read_gen(self.image_list[index][1])
flow = frame_utils.read_gen(self.flow_list[index])
images = [img1, img2]
image_size = img1.shape[:2]
if self.is_cropped:
cropper = StaticRandomCrop(image_size, self.crop_size)
else:
cropper = StaticCenterCrop(image_size, self.render_size)
images = list(map(cropper, images))
flow = cropper(flow)
images = np.array(images).transpose(3,0,1,2)
flow = flow.transpose(2,0,1)
images = torch.from_numpy(images.astype(np.float32))
flow = torch.from_numpy(flow.astype(np.float32))
return [images], [flow]
def __len__(self):
return self.size * self.replicates
class MpiSintelClean(MpiSintel):
def __init__(self, args, is_cropped = False, root = '', replicates = 1):
super(MpiSintelClean, self).__init__(args, is_cropped = is_cropped, root = root, dstype = 'clean', replicates = replicates)
class MpiSintelFinal(MpiSintel):
def __init__(self, args, is_cropped = False, root = '', replicates = 1):
super(MpiSintelFinal, self).__init__(args, is_cropped = is_cropped, root = root, dstype = 'final', replicates = replicates)
class FlyingChairs(data.Dataset):
def __init__(self, args, is_cropped, root = '/path/to/FlyingChairs_release/data', replicates = 1):
self.args = args
self.is_cropped = is_cropped
self.crop_size = args.crop_size
self.render_size = args.inference_size
self.replicates = replicates
images = sorted( glob( join(root, '*.ppm') ) )
self.flow_list = sorted( glob( join(root, '*.flo') ) )
assert (len(images)//2 == len(self.flow_list))
self.image_list = []
for i in range(len(self.flow_list)):
im1 = images[2*i]
im2 = images[2*i + 1]
self.image_list += [ [ im1, im2 ] ]
assert len(self.image_list) == len(self.flow_list)
self.size = len(self.image_list)
self.frame_size = frame_utils.read_gen(self.image_list[0][0]).shape
if (self.render_size[0] < 0) or (self.render_size[1] < 0) or (self.frame_size[0]%64) or (self.frame_size[1]%64):
self.render_size[0] = ( (self.frame_size[0])//64 ) * 64
self.render_size[1] = ( (self.frame_size[1])//64 ) * 64
args.inference_size = self.render_size
def __getitem__(self, index):
index = index % self.size
img1 = frame_utils.read_gen(self.image_list[index][0])
img2 = frame_utils.read_gen(self.image_list[index][1])
flow = frame_utils.read_gen(self.flow_list[index])
images = [img1, img2]
image_size = img1.shape[:2]
if self.is_cropped:
cropper = StaticRandomCrop(image_size, self.crop_size)
else:
cropper = StaticCenterCrop(image_size, self.render_size)
images = list(map(cropper, images))
flow = cropper(flow)
images = np.array(images).transpose(3,0,1,2)
flow = flow.transpose(2,0,1)
images = torch.from_numpy(images.astype(np.float32))
flow = torch.from_numpy(flow.astype(np.float32))
return [images], [flow]
def __len__(self):
return self.size * self.replicates
class FlyingThings(data.Dataset):
def __init__(self, args, is_cropped, root = '/path/to/flyingthings3d', dstype = 'frames_cleanpass', replicates = 1):
self.args = args
self.is_cropped = is_cropped
self.crop_size = args.crop_size
self.render_size = args.inference_size
self.replicates = replicates
image_dirs = sorted(glob(join(root, dstype, 'TRAIN/*/*')))
image_dirs = sorted([join(f, 'left') for f in image_dirs] + [join(f, 'right') for f in image_dirs])
flow_dirs = sorted(glob(join(root, 'optical_flow_flo_format/TRAIN/*/*')))
flow_dirs = sorted([join(f, 'into_future/left') for f in flow_dirs] + [join(f, 'into_future/right') for f in flow_dirs])
assert (len(image_dirs) == len(flow_dirs))
self.image_list = []
self.flow_list = []
for idir, fdir in zip(image_dirs, flow_dirs):
images = sorted( glob(join(idir, '*.png')) )
flows = sorted( glob(join(fdir, '*.flo')) )
for i in range(len(flows)):
self.image_list += [ [ images[i], images[i+1] ] ]
self.flow_list += [flows[i]]
assert len(self.image_list) == len(self.flow_list)
self.size = len(self.image_list)
self.frame_size = frame_utils.read_gen(self.image_list[0][0]).shape
if (self.render_size[0] < 0) or (self.render_size[1] < 0) or (self.frame_size[0]%64) or (self.frame_size[1]%64):
self.render_size[0] = ( (self.frame_size[0])//64 ) * 64
self.render_size[1] = ( (self.frame_size[1])//64 ) * 64
args.inference_size = self.render_size
def __getitem__(self, index):
index = index % self.size
img1 = frame_utils.read_gen(self.image_list[index][0])
img2 = frame_utils.read_gen(self.image_list[index][1])
flow = frame_utils.read_gen(self.flow_list[index])
images = [img1, img2]
image_size = img1.shape[:2]
if self.is_cropped:
cropper = StaticRandomCrop(image_size, self.crop_size)
else:
cropper = StaticCenterCrop(image_size, self.render_size)
images = list(map(cropper, images))
flow = cropper(flow)
images = np.array(images).transpose(3,0,1,2)
flow = flow.transpose(2,0,1)
images = torch.from_numpy(images.astype(np.float32))
flow = torch.from_numpy(flow.astype(np.float32))
return [images], [flow]
def __len__(self):
return self.size * self.replicates
class FlyingThingsClean(FlyingThings):
def __init__(self, args, is_cropped = False, root = '', replicates = 1):
super(FlyingThingsClean, self).__init__(args, is_cropped = is_cropped, root = root, dstype = 'frames_cleanpass', replicates = replicates)
class FlyingThingsFinal(FlyingThings):
def __init__(self, args, is_cropped = False, root = '', replicates = 1):
super(FlyingThingsFinal, self).__init__(args, is_cropped = is_cropped, root = root, dstype = 'frames_finalpass', replicates = replicates)
class ChairsSDHom(data.Dataset):
def __init__(self, args, is_cropped, root = '/path/to/chairssdhom/data', dstype = 'train', replicates = 1):
self.args = args
self.is_cropped = is_cropped
self.crop_size = args.crop_size
self.render_size = args.inference_size
self.replicates = replicates
image1 = sorted( glob( join(root, dstype, 't0/*.png') ) )
image2 = sorted( glob( join(root, dstype, 't1/*.png') ) )
self.flow_list = sorted( glob( join(root, dstype, 'flow/*.flo') ) )
assert (len(image1) == len(self.flow_list))
self.image_list = []
for i in range(len(self.flow_list)):
im1 = image1[i]
im2 = image2[i]
self.image_list += [ [ im1, im2 ] ]
assert len(self.image_list) == len(self.flow_list)
self.size = len(self.image_list)
self.frame_size = frame_utils.read_gen(self.image_list[0][0]).shape
if (self.render_size[0] < 0) or (self.render_size[1] < 0) or (self.frame_size[0]%64) or (self.frame_size[1]%64):
self.render_size[0] = ( (self.frame_size[0])//64 ) * 64
self.render_size[1] = ( (self.frame_size[1])//64 ) * 64
args.inference_size = self.render_size
def __getitem__(self, index):
index = index % self.size
img1 = frame_utils.read_gen(self.image_list[index][0])
img2 = frame_utils.read_gen(self.image_list[index][1])
flow = frame_utils.read_gen(self.flow_list[index])
flow = flow[::-1,:,:]
images = [img1, img2]
image_size = img1.shape[:2]
if self.is_cropped:
cropper = StaticRandomCrop(image_size, self.crop_size)
else:
cropper = StaticCenterCrop(image_size, self.render_size)
images = list(map(cropper, images))
flow = cropper(flow)
images = np.array(images).transpose(3,0,1,2)
flow = flow.transpose(2,0,1)
images = torch.from_numpy(images.astype(np.float32))
flow = torch.from_numpy(flow.astype(np.float32))
return [images], [flow]
def __len__(self):
return self.size * self.replicates
class ChairsSDHomTrain(ChairsSDHom):
def __init__(self, args, is_cropped = False, root = '', replicates = 1):
super(ChairsSDHomTrain, self).__init__(args, is_cropped = is_cropped, root = root, dstype = 'train', replicates = replicates)
class ChairsSDHomTest(ChairsSDHom):
def __init__(self, args, is_cropped = False, root = '', replicates = 1):
super(ChairsSDHomTest, self).__init__(args, is_cropped = is_cropped, root = root, dstype = 'test', replicates = replicates)
class ImagesFromFolder(data.Dataset):
def __init__(self, args, is_cropped, root = '/path/to/frames/only/folder', iext = 'png', replicates = 1):
self.args = args
self.is_cropped = is_cropped
self.crop_size = args.crop_size
self.render_size = args.inference_size
self.replicates = replicates
images = sorted( glob( join(root, '*.' + iext) ) )
self.image_list = []
for i in range(len(images)-1):
im1 = images[i]
im2 = images[i+1]
self.image_list += [ [ im1, im2 ] ]
self.size = len(self.image_list)
self.frame_size = frame_utils.read_gen(self.image_list[0][0]).shape
if (self.render_size[0] < 0) or (self.render_size[1] < 0) or (self.frame_size[0]%64) or (self.frame_size[1]%64):
self.render_size[0] = ( (self.frame_size[0])//64 ) * 64
self.render_size[1] = ( (self.frame_size[1])//64 ) * 64
args.inference_size = self.render_size
def __getitem__(self, index):
index = index % self.size
img1 = frame_utils.read_gen(self.image_list[index][0])
img2 = frame_utils.read_gen(self.image_list[index][1])
images = [img1, img2]
image_size = img1.shape[:2]
if self.is_cropped:
cropper = StaticRandomCrop(image_size, self.crop_size)
else:
cropper = StaticCenterCrop(image_size, self.render_size)
images = list(map(cropper, images))
images = np.array(images).transpose(3,0,1,2)
images = torch.from_numpy(images.astype(np.float32))
return [images], [torch.zeros(images.size()[0:1] + (2,) + images.size()[-2:])]
def __len__(self):
return self.size * self.replicates
'''
import argparse
import sys, os
import importlib
from scipy.misc import imsave
import numpy as np
import datasets
reload(datasets)
parser = argparse.ArgumentParser()
args = parser.parse_args()
args.inference_size = [1080, 1920]
args.crop_size = [384, 512]
args.effective_batch_size = 1
index = 500
v_dataset = datasets.MpiSintelClean(args, True, root='../MPI-Sintel/flow/training')
a, b = v_dataset[index]
im1 = a[0].numpy()[:,0,:,:].transpose(1,2,0)
im2 = a[0].numpy()[:,1,:,:].transpose(1,2,0)
imsave('./img1.png', im1)
imsave('./img2.png', im2)
flow_utils.writeFlow('./flow.flo', b[0].numpy().transpose(1,2,0))
'''
```
#### File: layers/modules/roi_pooling.py
```python
import torch.nn as nn
from torch.nn.modules.utils import _pair
from ..functions.roi_pooling import roi_pooling_2d
from ..functions.roi_pooling import roi_pooling_2d_pytorch
import numpy as np
import torch
class ROIPooling2d(nn.Module):
"""Spatial Region of Interest (ROI) pooling.
This function acts similarly to :class:`~pytorch.nn.MaxPool2d`, but
    it computes the maximum of the input spatial patch for each channel
    within the region of interest. This module only works with CUDA tensors.
Take a look at the :class:`~ROIPooling2dPytorch` for an architecture
agnostic implementation.
See the original paper proposing ROIPooling:
`Fast R-CNN <https://arxiv.org/abs/1504.08083>`_.
Args:
x (~pytorch.autograd.Variable): Input variable. The shape is expected
            to be 4 dimensional: (n: batch, c: channel, h: height, w: width).
rois (~pytorch.autograd.Variable): Input roi variable. The shape is
expected to be (m: num-rois, 5), and each roi is set as below:
(batch_index, x_min, y_min, x_max, y_max).
output_size (int or tuple): the target output size of the image of the
form H x W. Can be a tuple (H, W) or a single number H for a square
image H x H.
spatial_scale (float): scale of the rois if resized.
Returns:
`~pytorch.autograd.Variable`: Output variable.
"""
def __init__(self, output_size, spatial_scale=1.0):
super(ROIPooling2d, self).__init__()
self.output_size = _pair(output_size)
self.spatial_scale = spatial_scale
def forward(self, input, rois):
return roi_pooling_2d(input, rois, self.output_size,
self.spatial_scale)
def __repr__(self):
return ('{}(output_size={}, spatial_scale={:.6f})'.format(
self.__class__.__name__, str(self.output_size),
str(self.spatial_scale)))
class ROIPooling2dPytorch(nn.Module):
"""Spatial Region of Interest (ROI) pooling.
This function acts similarly to :class:`~ROIPooling2d`, but performs a
    Python loop over ROIs. Note that this is not a direct replacement of that
    operation and vice versa.
See the original paper proposing ROIPooling:
`Fast R-CNN <https://arxiv.org/abs/1504.08083>`_.
Args:
x (~pytorch.autograd.Variable): Input variable. The shape is expected
            to be 4 dimensional: (n: batch, c: channel, h: height, w: width).
rois (~pytorch.autograd.Variable): Input roi variable. The shape is
expected to be (m: num-rois, 5), and each roi is set as below:
(batch_index, x_min, y_min, x_max, y_max).
output_size (int or tuple): the target output size of the image of the
form H x W. Can be a tuple (H, W) or a single number H for a square
image H x H.
spatial_scale (float): scale of the rois if resized.
Returns:
`~pytorch.autograd.Variable`: Output variable.
"""
def __init__(self, output_size, spatial_scale=1.0):
super(ROIPooling2dPytorch, self).__init__()
self.output_size = _pair(output_size)
self.spatial_scale = spatial_scale
def forward(self, input, rois):#, img):
# everything below is added by me
#rois.resize_((rois.shape[0] * 80, 5))
# input b,2,1024,1920
# rois b, 128, 4
rois_ = []
rois = rois.cpu()
for k,i in enumerate(rois):
rois_.append(np.insert(i, 0, k, axis=1))
rois_ = torch.cat(rois_)
assert rois_.shape[0] == rois.shape[1]#[0] * 128
ind = list(set(np.where(rois_ == -1)[0]))
rois = np.delete(rois_, ind, axis=0)
return roi_pooling_2d_pytorch(input, rois, self.output_size,
self.spatial_scale)#, img)
def __repr__(self):
return ('{}(output_size={}, spatial_scale={:.6f})'.format(
self.__class__.__name__, str(self.output_size),
str(self.spatial_scale)))
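
# --- Hedged sketch (added for illustration; not part of the original module) ---
# The reworked ROIPooling2dPytorch.forward above expects rois shaped
# (batch, num_rois, 4), prepends the batch index to every roi, and drops the
# padding rows filled with -1. The same preprocessing in isolation:
def _prepare_rois_example():
    rois = np.array([[[0., 0., 31., 31.],
                      [-1., -1., -1., -1.]]])  # one real roi plus one padding row
    rois_ = np.concatenate([np.insert(r, 0, k, axis=1) for k, r in enumerate(rois)])
    keep = ~np.any(rois_ == -1, axis=1)
    return rois_[keep]  # -> [[0., 0., 0., 31., 31.]]
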
``` |
{
"source": "jimv3/csp",
"score": 4
} |
#### File: csp/lesson1/main.py
```python
def q1(answer):
if answer == "O(n)":
return "Great job!"
elif answer == "O(n^2)":
return "Think about how many times the for loop will be performed and try again."
elif answer == "O(1)":
return "O(1) is equivalent to constant time. This effectively means there is no difference regardless of the size of the input. Think about what is happening in the for loop and try again."
return "Please choose from (a), (b), or (c)."
def q2(answer):
if answer == "O(n^2)":
return "Great job!"
elif answer == "O(n)":
return "Think about how many times the for loop will be performed and remember the rules for combining Big O."
elif answer == "O(1)":
return "O(1) is equivalent to constant time. This effectively means there is no difference regardless of the size of the input. Think about what is happening in the for loop and try again."
return "Please choose from (a), (b), or (c)."
def q3(answer):
if answer == "O(m + n)":
return "Great job!"
elif answer == "O(n^2)":
return "Think about the how many times the for loops will be performed and remember the rules for combining Big O."
elif answer == "O(1)":
return "O(1) is equivalent to constant time. This effectively means there is no difference regardless of the size of the input. Think about what is happening in the for loop and try again."
return "Please choose from (a), (b), or (c)."
```
#### File: csp/lesson2/merge_sort_example.py
```python
def merge_sort_example(items):
current_size = 1
while current_size < len(items) - 1:
left = 0
while left < len(items)-1:
mid = left + current_size - 1
right = ((2 * current_size + left - 1,
len(items) - 1)[2 * current_size
+ left - 1 > len(items)-1])
merge(items, left, mid, right)
left = left + current_size*2
current_size = 2 * current_size
return items
def merge(a, l, m, r):
n1 = m - l + 1
n2 = r - m
L = [0] * n1
R = [0] * n2
for i in range(0, n1):
L[i] = a[l + i]
for i in range(0, n2):
R[i] = a[m + i + 1]
i, j, k = 0, 0, l
while i < n1 and j < n2:
if L[i] > R[j]:
a[k] = R[j]
j += 1
else:
a[k] = L[i]
i += 1
k += 1
while i < n1:
a[k] = L[i]
i += 1
k += 1
while j < n2:
a[k] = R[j]
j += 1
k += 1
unsorted_list = [1, 9, 3, 0, 6]
merge_sort_example(unsorted_list)
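# Added for illustration (not part of the original lesson): the sort is in place,
# so after the call above the list reads [0, 1, 3, 6, 9].
print(unsorted_list)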
```
#### File: csp/lesson5/forward_backward_node.py
```python
class Node(object):
def __init__(self, data, next, previous):
self._data = data
self._next = next
self._previous = previous
@property
def data(self):
return self._data
@property
def next(self):
return self._next
@next.setter
def next(self, next):
self._next = next
@property
def previous(self):
return self._previous
@previous.setter
def previous(self, previous):
self._previous = previous
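
# --- Hedged usage sketch (added for illustration; not part of the original lesson) ---
# Link two nodes into a minimal doubly linked pair and traverse in both directions.
if __name__ == "__main__":
    head = Node("a", None, None)
    tail = Node("b", None, head)
    head.next = tail
    print(head.next.data)      # -> b
    print(tail.previous.data)  # -> a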
``` |
{
"source": "JimVaranelli/ActiveSet",
"score": 3
} |
#### File: JimVaranelli/ActiveSet/constrainedls.py
```python
import sys
import numpy as np
from activeset import ConstrainedLS
from numpy.testing import assert_allclose, assert_equal, \
assert_almost_equal, assert_raises
# input validation and unit testing for ActiveSet base
# class. unit tests verified against Matlab lsqlin().
def main():
print("Constrained least squares...")
# initialize CLS object
cls = ConstrainedLS(atol=1e-7)
# input validation testing starts here
# iv test #1 - empty objective matrix
assert_raises(ValueError, cls, [], [])
# iv test #2 - objective matrix/vector mismatch
A = [[0.0372, 0.2869],
[0.6861, 0.7071],
[0.6233, 0.6245],
[0.6344, 0.6170]]
b = [0.8587, 0.1781, 0.0747]
assert_raises(ValueError, cls, A, b)
# iv test #3 - no constraints
b = [0.8587, 0.1781, 0.0747, 0.8405]
assert_raises(ValueError, cls, A, b)
# iv test #4 - bound vector mismatch
cl = [0, 0, 0]
assert_raises(ValueError, cls, A, b, cl=cl)
# iv test #5 - objective/constraint mismatch
Ce = [[-.01150290, -.09501570, 0],
[-.21111988, -.29119600, 0]]
de = [.30503011, .10502311, -.09119810]
assert_raises(ValueError, cls, A, b, Ce, de)
# iv test #6 - constraint matrix/vector mismatch
Ce = [[-.01150290, -.09501570],
[-.21111988, -.29119600]]
assert_raises(ValueError, cls, A, b, Ce, de)
# iv test #7 - constraint matrix/vector mismatch
Ci = [[-.01150290, -.09501570],
[-.21111988, -.29119600]]
di = [.30503011, .10502311, -.09119810]
assert_raises(ValueError, cls, A, b, Ci=Ci, di=di)
# iv test #8 - input 1d/2d array
assert_raises(ValueError, cls, A, b, Ci=Ci, di=Ce)
# iv test #9 - input 2d array
assert_raises(ValueError, cls, A, b, Ci=di, di=di)
# iv test #10 - infeasible x0
assert_raises(ValueError, cls, A, b, cl=[0, 0], x0=[-1, -1])
# iv test #11 - infeasible program
assert_raises(ValueError, cls, A, b, cl=[0, 0], cu=[-1, -1])
# unit testing starts here
    # unit test #1 - non-negative least squares
# objective matrix
A = [[0.0372, 0.2869],
[0.6861, 0.7071],
[0.6233, 0.6245],
[0.6344, 0.6170]]
# target vector
b = [0.8587, 0.1781, 0.0747, 0.8405]
# lower bound vector
cl = [0, 0]
# solve
x, scr, nit = cls(A, b, cl=cl)
# check
print("x(final) = \n", x)
sln = np.asarray([0, 0.69293]).reshape(2,1)
assert_allclose(x, sln, rtol=0, atol=1e-5)
print("score(final) = ", scr)
assert_almost_equal(scr, 0.83146, decimal=5)
print("iter = ", nit)
assert_equal(nit, 2)
# unit test #2: inequality constraints
# objective matrix
A = [[ 1, 2, 0],
[-8, 3, 2],
[ 0, 1, 1]]
# target vector
b = [3, 2, 3]
# inequality constraint matrix
Ci = [[ 1, 2, 1],
[ 2, 0, 1],
[-1, 2, -1]]
# inequality constraint vector
di = [3, 2, -2]
# solve
x, scr, nit = cls(A, b, Ci=Ci, di=di)
# check
print("x(final) = \n", x)
sln = np.asarray([0.12997, -0.06499, 1.74005]).reshape(3,1)
assert_allclose(x, sln, rtol=0, atol=1e-5)
print("score(final) = ", scr)
assert_almost_equal(scr, 10.81565, decimal=5)
print("iter = ", nit)
assert_equal(nit, 1)
# unit test #3: equality + inequality + bound constraints
# objective matrix
A = [[0.9501, 0.7620, 0.6153, 0.4057],
[0.2311, 0.4564, 0.7919, 0.9354],
[0.6068, 0.0185, 0.9218, 0.9169],
[0.4859, 0.8214, 0.7382, 0.4102],
[0.8912, 0.4447, 0.1762, 0.8936]]
# target vector
b = [0.0578, 0.3528, 0.8131, 0.0098, 0.1388]
# inequality constraint matrix
Ci = [[0.2027, 0.2721, 0.7467, 0.4659],
[0.1987, 0.1988, 0.4450, 0.4186],
[0.6037, 0.0152, 0.9318, 0.8462]]
# inequality constraint vector
di = [0.5251, 0.2026, 0.6721]
# equality constraint matrix
Ce = [[3, 5, 7, 9]]
# equality constraint vector
de = [4]
# upper bound vector
cu = [2, 2, 2, 2]
# lower bound vector
cl = [-0.1, -0.1, -0.1, -0.1]
# solve
x, scr, nit = cls(A, b, Ce, de, Ci, di, cu, cl)
# check
print("x(final) = \n", x)
sln = np.asarray([-0.10000, -0.10000, 0.15991, 0.40896]).reshape(4, 1)
assert_allclose(x, sln, rtol=0, atol=1e-5)
print("score(final) = ", scr)
assert_almost_equal(scr, 0.16951, decimal=5)
print("iter = ", nit)
assert_equal(nit, 3)
# unit test #4: equality + bound constraints
# with supplied initial feasible solution
# objective matrix
A = [[-.01150290, -.09501570, .35119807],
[-.21111988, -.29119600, .15501210],
[-.11111200, -.11950019, -.01111994],
[ .35119863, .30119971, -.21150112],
[ .15119558, .10501690, -.11111198]]
# objective vector
b = [.30503011, .10502311, -.09119810, -.29501510, -.11950052]
# equality constraint matrix
Ce = [[1, 1, 1]]
# equality constraint vector
de = [1]
# upper bound vector
cu = [1, 1, 1]
# lower bound vector
cl = [-1, -1, -1]
# initial feasible solution
x0 = [0.333333333, 0.333333333, 0.333333333]
# solve
x, scr, nit = cls(A, b, Ce, de, cu=cu, cl=cl, x0=x0)
# check
print("x(final) = \n", x)
sln = np.asarray([-0.72381, 0.72381, 1.00000]).reshape(3,1)
assert_allclose(x, sln, rtol=0, atol=1e-5)
print("score(final) = ", scr)
assert_almost_equal(scr, 0.00861, decimal=5)
print("iter = ", nit)
assert_equal(nit, 2)
# unit test #5: equality constraints
# objective matrix
A = [[0.9501, 0.7620, 0.6153, 0.4057],
[0.2311, 0.4564, 0.7919, 0.9354],
[0.6068, 0.0185, 0.9218, 0.9169],
[0.4859, 0.8214, 0.7382, 0.4102],
[0.8912, 0.4447, 0.1762, 0.8936]]
# target vector
b = [0.0578, 0.3528, 0.8131, 0.0098, 0.1388]
# equality constraint matrix
Ce = [[3, 5, 7, 9]]
# equality constraint vector
de = [4]
# solve
x, scr, nit = cls(A, b, Ce, de)
# check
print("x(final) = \n", x)
sln = np.asarray([0.01756, -0.59434, 0.51380, 0.36916]).reshape(4, 1)
assert_allclose(x, sln, rtol=0, atol=1e-5)
print("score(final) = ", scr)
assert_almost_equal(scr, 0.02105, decimal=5)
print("iter = ", nit)
assert_equal(nit, 1)
if __name__ == "__main__":
sys.exit(int(main() or 0))
``` |
{
"source": "JimVaranelli/Leybourne-McCabe",
"score": 2
} |
#### File: JimVaranelli/Leybourne-McCabe/Leybourne.py
```python
import sys
import os
import time
import numpy as np
import pandas as pd
from builtins import int
# statsmodels 0.13 deprecates arima_model.ARIMA
# in favor of arima.model.ARIMA
from statsmodels.tsa.arima.model import ARIMA
from statsmodels.regression.linear_model import OLS
from statsmodels.tools.tools import add_constant
from statsmodels.tools.sm_exceptions import InterpolationWarning
from statsmodels.tsa.stattools import pacf
from statsmodels.tsa.tsatools import lagmat
from numpy.testing import assert_equal, assert_almost_equal
class Leybourne(object):
"""
Class wrapper for Leybourne-Mccabe stationarity test
"""
def __init__(self):
"""
Asymptotic critical values for the two different models specified
for the Leybourne-McCabe stationarity test. Asymptotic CVs are the
same as the asymptotic CVs for the KPSS stationarity test.
Notes
-----
The p-values are generated through Monte Carlo simulation using
1,000,000 replications and 2000 data points.
"""
self.__leybourne_critical_values = {}
# constant-only model
self.__c = ((99.900, 0.0169233), (99.000, 0.0247863), (98.000, 0.0287636),
(97.000, 0.0317512), (96.000, 0.0342505), (95.000, 0.0364872),
(92.500, 0.0415061), (90.000, 0.0459481), (85.000, 0.0542763),
(80.000, 0.0621976), (75.000, 0.0702117), (70.000, 0.0785789),
(65.000, 0.0968259), (60.000, 0.0968259), (57.500, 0.101951),
(55.000, 0.107248), (52.500, 0.112855), (50.000, 0.118809),
(47.500, 0.125104), (45.000, 0.131743), (42.500, 0.138939),
(40.000, 0.146608), (37.500, 0.154828), (35.000, 0.163827),
(32.500, 0.173569), (30.000, 0.184215), (27.500, 0.196048),
(25.000, 0.209452), (22.500, 0.224259), (20.000, 0.24128),
(17.500, 0.260842), (15.000, 0.283831), (12.500, 0.311703),
(10.000, 0.347373), (7.500, 0.393998), (5.000, 0.46169),
(2.500, 0.580372), (1.000, 0.743491), (0.900, 0.763297),
(0.800, 0.785173), (0.700, 0.809092), (0.600, 0.83664),
(0.500, 0.869455), (0.400, 0.909901), (0.300, 0.962597),
(0.200, 1.03998), (0.100, 1.16701), (0.001, 2.84682))
self.__leybourne_critical_values['c'] = np.asarray(self.__c)
# constant+trend model
self.__ct = ((99.900, 0.0126788), (99.000, 0.0172984), (98.000, 0.0194624),
(97.000, 0.0210446), (96.000, 0.0223274), (95.000, 0.0234485),
(92.500, 0.0258551), (90.000, 0.0279374), (85.000, 0.0315677),
(80.000, 0.0349355), (75.000, 0.0381676), (70.000, 0.0413931),
(65.000, 0.0446997), (60.000, 0.0481063), (57.500, 0.0498755),
(55.000, 0.0517089), (52.500, 0.0536157), (50.000, 0.0555732),
(47.500, 0.0576502), (45.000, 0.059805), (42.500, 0.062043),
(40.000, 0.064408), (37.500, 0.0669198), (35.000, 0.0696337),
(32.500, 0.0725157), (30.000, 0.0756156), (27.500, 0.079006),
(25.000, 0.0827421), (22.500, 0.086865), (20.000, 0.09149),
(17.500, 0.0967682), (15.000, 0.102787), (12.500, 0.110122),
(10.000, 0.119149), (7.500, 0.130935), (5.000, 0.147723),
(2.500, 0.177229), (1.000, 0.216605), (0.900, 0.221306),
(0.800, 0.226324), (0.700, 0.23257), (0.600, 0.239896),
(0.500, 0.248212), (0.400, 0.258809), (0.300, 0.271849),
(0.200, 0.29052), (0.100, 0.324278), (0.001, 0.607007))
self.__leybourne_critical_values['ct'] = np.asarray(self.__ct)
def __leybourne_crit(self, stat, model='c'):
"""
Linear interpolation for Leybourne p-values and critical values
Parameters
----------
stat : float
The Leybourne-McCabe test statistic
model : {'c','ct'}
The model used when computing the test statistic. 'c' is default.
Returns
-------
pvalue : float
The interpolated p-value
cvdict : dict
Critical values for the test statistic at the 1%, 5%, and 10%
levels
Notes
-----
The p-values are linear interpolated from the quantiles of the
simulated Leybourne-McCabe (KPSS) test statistic distribution
"""
table = self.__leybourne_critical_values[model]
# quantile column (col 0, descending) and statistic column (col 1, ascending)
y = table[:, 0]
x = table[:, 1]
# LM cv table contains quantiles multiplied by 100
pvalue = np.interp(stat, x, y) / 100.0
cv = [1.0, 5.0, 10.0]
crit_value = np.interp(cv, np.flip(y), np.flip(x))
cvdict = {"1%" : crit_value[0], "5%" : crit_value[1],
"10%" : crit_value[2]}
return pvalue, cvdict
def _tsls_arima(self, x, arlags, model):
"""
Two-stage least squares approach for estimating ARIMA(p, 1, 1)
parameters as an alternative to MLE estimation in the case of
solver non-convergence
Parameters
----------
x : array_like
data series
arlags : int
AR(p) order
model : {'c','ct'}
Constant and trend order to include in regression
* 'c' : constant only
* 'ct' : constant and trend
Returns
-------
arparams : ndarray
AR(p) coefficient estimates
theta : float
MA(1) coefficient estimate
olsfit.resid : ndarray
residuals from second-stage regression
"""
endog = np.diff(x, axis=0)
exog = lagmat(endog, arlags, trim='both')
# add constant if requested
if model == 'ct':
exog = add_constant(exog)
# remove extra terms from front of endog
endog = endog[arlags:]
if arlags > 0:
resids = lagmat(OLS(endog, exog).fit().resid, 1, trim='forward')
else:
resids = lagmat(-endog, 1, trim='forward')
# add negated residuals column to exog as MA(1) term
exog = np.append(exog, -resids, axis=1)
olsfit = OLS(endog, exog).fit()
if model == 'ct':
arparams = olsfit.params[1:(len(olsfit.params)-1)]
else:
arparams = olsfit.params[0:(len(olsfit.params)-1)]
theta = olsfit.params[len(olsfit.params)-1]
return arparams, theta, olsfit.resid
def _autolag(self, x):
"""
Empirical method for Leybourne-McCabe auto AR lag detection.
Set number of AR lags equal to the first PACF falling within the
95% confidence interval. The maximum number of AR lags is limited to
the smaller of 10 or 1/2 series length.
Parameters
----------
x : array_like
data series
Returns
-------
arlags : int
AR(p) order
"""
p = pacf(x, nlags=min(int(len(x)/2), 10), method='ols')
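# 1.96 / sqrt(T) approximates the 95% confidence bound for a sample PACF
# value under the white-noise null.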
ci = 1.960 / np.sqrt(len(x))
arlags = max(1, ([ n for n, i in enumerate(p) if abs(i) < ci ] + [-1])[0])
return arlags
def run(self, x, arlags=None, regression='c', method='mle', varest='var94'):
"""
Leybourne-McCabe stationarity test
The Leybourne-McCabe test can be used to test for stationarity in a
univariate process.
Parameters
----------
x : array_like
data series
arlags : int
number of autoregressive terms to include, default=None
regression : {'c','ct'}
Constant and trend order to include in regression
* 'c' : constant only (default)
* 'ct' : constant and trend
method : {'mle','ols'}
Method used to estimate ARIMA(p, 1, 1) filter model
* 'mle' : conditional sum of squares maximum likelihood (default)
* 'ols' : two-stage least squares
varest : {'var94','var99'}
Method used for residual variance estimation
* 'var94' : method used in original Leybourne-McCabe paper (1994)
(default)
* 'var99' : method used in follow-up paper (1999)
Returns
-------
lmstat : float
test statistic
pvalue : float
based on MC-derived critical values
arlags : int
AR(p) order used to create the filtered series
cvdict : dict
critical values for the test statistic at the 1%, 5%, and 10%
levels
Notes
-----
H0 = series is stationary
Basic process is to create a filtered series which removes the AR(p)
effects from the series under test followed by an auxiliary regression
similar to that of Kwiatkowski et al (1992). The AR(p) coefficients
are obtained by estimating an ARIMA(p, 1, 1) model. Two methods are
provided for ARIMA estimation: MLE and two-stage least squares.
Two methods are provided for residual variance estimation used in the
calculation of the test statistic. The first method ('var94') is the
mean of the squared residuals from the filtered regression. The second
method ('var99') is the MA(1) coefficient times the mean of the squared
residuals from the ARIMA(p, 1, 1) filtering model.
An empirical autolag procedure is provided. In this context, the number
of lags is equal to the number of AR(p) terms used in the filtering
step. The number of AR(p) terms is set equal to the first PACF
falling within the 95% confidence interval. The maximum number of AR lags is
limited to 1/2 series length.
References
----------
<NAME>., <NAME>., <NAME>. & <NAME>. (1992).
Testing the null hypothesis of stationarity against the alternative of
a unit root. Journal of Econometrics, 54: 159–178.
<NAME>., & <NAME>. (1994). A consistent test for a
unit root. Journal of Business and Economic Statistics, 12: 157–166.
<NAME>., & <NAME>. (1999). Modified stationarity tests
with data-dependent model-selection rules. Journal of Business and
Economic Statistics, 17: 264-270.
<NAME>. (1987). Effects of model specification on tests for unit
roots in macroeconomic data. Journal of Monetary Economics, 20: 73–103.
"""
if regression not in ['c', 'ct']:
raise ValueError(
'LM: regression option \'%s\' not understood' % regression)
if method not in ['mle', 'ols']:
raise ValueError(
'LM: method option \'%s\' not understood' % method)
if varest not in ['var94', 'var99']:
raise ValueError(
'LM: varest option \'%s\' not understood' % varest)
x = np.asarray(x)
if x.ndim > 2 or (x.ndim == 2 and x.shape[1] != 1):
raise ValueError(
'LM: x must be a 1d array or a 2d array with a single column')
x = np.reshape(x, (-1, 1))
# determine AR order if not specified
if arlags is None:
arlags = self._autolag(x)
elif not isinstance(arlags, int) or arlags < 1 or arlags > int(len(x) / 2):
raise ValueError(
'LM: arlags must be an integer in range [1..%s]' % str(int(len(x) / 2)))
# estimate the reduced ARIMA(p, 1, 1) model
if method == 'mle':
arfit = ARIMA(x, order=(arlags, 1, 1), trend=regression).fit()
resids = arfit.resid
arcoeffs = arfit.arparams
theta = arfit.maparams[0]
else:
arcoeffs, theta, resids = self._tsls_arima(x, arlags, model=regression)
# variance estimator from (1999) LM paper
var99 = abs(theta * np.sum(resids**2) / len(resids))
# create the filtered series:
# z(t) = x(t) - arcoeffs[0]*x(t-1) - ... - arcoeffs[p-1]*x(t-p)
z = np.full(len(x) - arlags, np.inf)
for i in range(len(z)):
z[i] = x[i + arlags]
for j in range(len(arcoeffs)):
z[i] -= arcoeffs[j] * x[i + arlags - j - 1]
# regress the filtered series against a constant and
# trend term (if requested)
if regression == 'c':
resids = z - z.mean()
else:
resids = OLS(z, add_constant(np.arange(1, len(z) + 1))).fit().resid
# variance estimator from (1994) LM paper
var94 = np.sum(resids**2) / len(resids)
# compute test statistic with specified variance estimator
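# eta = (1/T^2) * sum_t S_t^2, where S_t is the partial sum of the
# auxiliary-regression residuals; the statistic is eta / sigma_hat^2 with
# sigma_hat^2 given by the chosen variance estimator.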
eta = np.sum(resids.cumsum()**2) / (len(resids)**2)
if varest == 'var99':
lmstat = eta / var99
else:
lmstat = eta / var94
# calculate pval
crit = self.__leybourne_crit(lmstat, regression)
lmpval = crit[0]
cvdict = crit[1]
return lmstat, lmpval, arlags, cvdict
def __call__(self, x, arlags=None, regression='c', method='mle',
varest='var94'):
return self.run(x, arlags=arlags, regression=regression, method=method,
varest=varest)
# output results
def _print_res(res, st):
print(" lmstat =", "{0:0.5f}".format(res[0]), " pval =",
"{0:0.5f}".format(res[1]), " arlags =", res[2])
print(" cvdict =", res[3])
print(" time =", "{0:0.5f}".format(time.time() - st))
# unit tests taken from Schwert (1987) and verified against Matlab
def main():
print("Leybourne-McCabe stationarity test...")
cur_dir = os.path.abspath(os.path.dirname(__file__))
run_dir = os.path.join(cur_dir, "results")
files = ['BAA.csv', 'DBAA.csv', 'SP500.csv', 'DSP500.csv', 'UN.csv', 'DUN.csv']
lm = Leybourne()
for file in files:
print(" test file =", file)
mdl_file = os.path.join(run_dir, file)
mdl = np.asarray(pd.read_csv(mdl_file))
st = time.time()
if file == 'DBAA.csv':
res = lm(mdl)
_print_res(res=res, st=st)
assert_equal(res[2], 3)
assert_almost_equal(res[0], 0.1252, decimal=3)
assert_almost_equal(res[1], 0.4747, decimal=3)
st = time.time()
res = lm(mdl, regression='ct')
_print_res(res=res, st=st)
assert_almost_equal(res[0], 0.1248, decimal=3)
assert_almost_equal(res[1], 0.0881, decimal=3)
assert_equal(res[2], 3)
elif file == 'DSP500.csv':
res = lm(mdl)
_print_res(res=res, st=st)
assert_equal(res[2], 1)
assert_almost_equal(res[0], 0.2855, decimal=3)
assert_almost_equal(res[1], 0.1485, decimal=3)
st = time.time()
res = lm(mdl, varest='var99')
_print_res(res=res, st=st)
assert_equal(res[2], 1)
assert_almost_equal(res[0], 0.2874, decimal=3)
assert_almost_equal(res[1], 0.1468, decimal=3)
elif file == 'DUN.csv':
res = lm(mdl, regression='ct')
_print_res(res=res, st=st)
assert_almost_equal(res[0], 0.1657, decimal=3)
assert_almost_equal(res[1], 0.0348, decimal=3)
st = time.time()
res = lm(mdl, regression='ct', method='ols')
_print_res(res=res, st=st)
assert_almost_equal(res[0], 0.1650, decimal=3)
assert_almost_equal(res[1], 0.0353, decimal=3)
elif file == 'BAA.csv':
res = lm(mdl, regression='ct')
_print_res(res=res, st=st)
assert_equal(res[2], 4)
assert_almost_equal(res[0], 2.4868, decimal=3)
assert_almost_equal(res[1], 0.0000, decimal=3)
st = time.time()
res = lm(mdl, regression='ct', method='ols')
_print_res(res=res, st=st)
assert_equal(res[2], 4)
assert_almost_equal(res[0], 2.9926, decimal=3)
assert_almost_equal(res[1], 0.0000, decimal=3)
elif file == 'SP500.csv':
res = lm(mdl, arlags=4, regression='ct')
_print_res(res=res, st=st)
assert_almost_equal(res[0], 1.8761, decimal=3)
assert_almost_equal(res[1], 0.0000, decimal=3)
st = time.time()
res = lm(mdl, arlags=4, regression='ct', method='ols')
_print_res(res=res, st=st)
assert_almost_equal(res[0], 1.9053, decimal=3)
assert_almost_equal(res[1], 0.0000, decimal=3)
elif file == 'UN.csv':
res = lm(mdl, varest='var99')
_print_res(res=res, st=st)
assert_equal(res[2], 5)
assert_almost_equal(res[0], 1221.0154, decimal=3)
assert_almost_equal(res[1], 0.0000, decimal=3)
st = time.time()
res = lm(mdl, method='ols', varest='var99')
_print_res(res=res, st=st)
assert_equal(res[2], 5)
assert_almost_equal(res[0], 1022.3827, decimal=3)
assert_almost_equal(res[1], 0.0000, decimal=3)
if __name__ == "__main__":
sys.exit(int(main() or 0))
``` |
{
"source": "JimVaranelli/Phillips-Perron",
"score": 3
} |
#### File: JimVaranelli/Phillips-Perron/arfimasim.py
```python
import numpy as np
# binomial expansion for ARFIMA models
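# (reading aid: these are the coefficients pi_k of the power-series expansion
#  (1 - B)**(-n) = sum_k pi_k * B**k, used below to fractionally integrate
#  the ARMA series)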
def _calc_arfima_binomial(n, nterms):
# switch equation side
n = -n
bc = np.zeros([nterms, 1])
bc[0] = 1
# generate coefficients
for i in range(1, nterms):
bc[i] = abs(bc[i - 1] * (n - (i - 1)) / i)
return bc
def ARFIMA_sim(p_coeffs, q_coeffs, d, slen, alpha=0, sigma=1, numseas=100):
"""
Generate a random ARFIMA(p,d,q) series. Generalizes to ARMA(p,q)
when d = 0, and ARIMA(p,d,q) when d = 1.
User provides an array of coefficients for the AR(p) and MA(q)
portions of the series as well as the fractional differencing
parameter and the required length. A constant may optionally be
specified, as well as the standard deviation of the Gaussian
innovations, and the number of seasoning samples to be
generated before recording the series.
Parameters
----------
p_coeffs : array_like
AR(p) coefficients
len(p_coeffs) <= 10
q_coeffs : array_like
MA(q) coefficients
len(q_coeffs) <= 10
d : float
fractional differencing parameter
-1 < d <= 1
slen : int
number of samples in output ARFIMA series
10 <= len(series) <= 100000
alpha : float
series constant (default=0)
sigma : float
standard deviation of the Gaussian innovations (default=1)
numseas : int
number of seasoning samples (default=100)
0 <= num(seasoning) <= 10000
Returns
-------
series : 1d array
random ARFIMA(p,d,q) series of specified length
Notes
-----
MA(q) parameters follow the Box-Jenkins convention which uses a
difference representation for the MA(q) process which is the opposite
of the standard ARIMA MA(q) summation representation. This matches the
operation of SAS/farmasim and R/arfimasim. As such, the SAS/farmafit
and R/arfima MA(q) estimates match the sign of the specified MA(q)
parameters while the statsmodels ARIMA().fit() estimates have opposite
the specified MA(q) parameter signs.
References
----------
SAS Institute Inc (2013). SAS/IML User's Guide. Cary, NC: SAS Institute
Inc.
<NAME>. (2012). Persistence and Anti-persistence: Theory and
Software (Doctoral Dissertation). Western University, Ontario, Canada.
"""
p = np.asarray(p_coeffs)
if p.ndim > 2 or (p.ndim == 2 and p.shape[1] != 1):
raise ValueError(
'ARFIMA_sim: p must be 1d array or 2d array with single column')
p = np.reshape(p, (-1, 1))
if p.shape[0] > 10:
raise ValueError(
'ARFIMA_sim: AR order must be <= 10')
q = np.asarray(q_coeffs)
if q.ndim > 2 or (q.ndim == 2 and q.shape[1] != 1):
raise ValueError(
'ARFIMA_sim: q must be 1d array or 2d array with single column')
q = np.reshape(q, (-1, 1))
if q.shape[0] > 10:
raise ValueError(
'ARFIMA_sim: MA order must be <= 10')
if d <= -1 or d > 1:
raise ValueError(
'ARFIMA_sim: valid differencing parameter in range (-1, 1]')
if slen < 10 or slen > 100000:
raise ValueError(
'ARFIMA_sim: valid series length in range [10, 100000]')
if numseas < 0 or numseas > 10000:
raise ValueError(
'ARFIMA_sim: valid seasoning length in range [0, 10000]')
# check for negative fractional d. if negative,
# add a unity order of integration, then single
# difference the final series.
neg = 0
if d < 0:
d += 1
neg = 1
# generate the MA(q) series
lqc = q.shape[0]
if lqc == 0:
ma = np.random.normal(scale=sigma, size=slen+numseas)
else:
e = np.random.normal(scale=sigma, size=slen+numseas)
ma = np.zeros([slen+numseas, 1])
ma[0] = e[0]
for t in range(1, slen + numseas):
err = e[max(0, t-lqc):t]
qcr = np.flip(q[0:min(lqc, t)])
ma[t] = e[t] - np.dot(err, qcr)
# generate the ARMA(p,q) series
lpc = p.shape[0]
if lpc == 0:
arma = ma
else:
arma = np.zeros([slen+numseas, 1])
arma[0] = ma[0]
for t in range(1, slen + numseas):
arr = arma[max(0, t-lpc):t]
pcr = np.flip(p[0:min(lpc, t)])
arma[t] = ma[t] + np.dot(arr.T, pcr)
# generate the ARFIMA(p,d,q) series
if np.isclose(d, 0):
series = alpha + arma
else:
# get binomial coefficients
bc = np.flip(_calc_arfima_binomial(d, slen + numseas))
end = slen + numseas + 1
series = np.zeros([slen+numseas, 1])
for t in range(slen + numseas):
bcr = bc[end-t-2:end]
ars = arma[0:t+1]
series[t] = alpha + np.dot(bcr.T, ars)
# if negative d then single difference
if neg:
series1 = np.zeros([slen+numseas, 1])
series1[0] = series[0]
for t in range(1, slen + numseas):
series1[t] = series[t] - series[t - 1]
series = series1
# trim seasoning samples and return 1d
return series[numseas:].flatten()
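# --- Hedged usage sketch (not part of the original module) ---
# Minimal illustration only: an ARFIMA(1, 0.3, 1) draw of 500 samples with
# AR(1)=0.5 and MA(1)=0.2; the coefficient values are arbitrary examples.
if __name__ == "__main__":
    np.random.seed(0)
    y = ARFIMA_sim([0.5], [0.2], 0.3, 500)
    print(y.shape, y.mean(), y.std())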
``` |
{
"source": "jimver04/rasa",
"score": 2
} |
#### File: nlu/featurizers/test_convert_featurizer.py
```python
import numpy as np
import pytest
from typing import Text, Optional, List, Tuple, Dict, Any, Callable
from pathlib import Path
import os
from _pytest.monkeypatch import MonkeyPatch
from rasa.nlu.tokenizers.whitespace_tokenizer import WhitespaceTokenizer
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.shared.nlu.training_data.message import Message
from rasa.nlu.constants import (
FEATURIZER_CLASS_ALIAS,
TOKENS_NAMES,
NUMBER_OF_SUB_TOKENS,
)
from rasa.shared.nlu.constants import TEXT, INTENT, RESPONSE
from rasa.nlu.featurizers.dense_featurizer.convert_featurizer import (
ConveRTFeaturizerGraphComponent,
RESTRICTED_ACCESS_URL,
ORIGINAL_TF_HUB_MODULE_URL,
)
from rasa.exceptions import RasaException
from rasa.engine.graph import ExecutionContext
from rasa.engine.storage.storage import ModelStorage
from rasa.engine.storage.resource import Resource
@pytest.fixture
def create_or_load_convert_featurizer(
default_model_storage: ModelStorage, default_execution_context: ExecutionContext,
) -> Callable[[Dict[Text, Any], bool], ConveRTFeaturizerGraphComponent]:
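# Factory-style fixture: tests call the returned ``inner`` with a config dict
# and, optionally, ``load=True`` to exercise the ``load`` constructor instead
# of ``create``.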
def inner(
config: Dict[Text, Any], load: bool = False
) -> ConveRTFeaturizerGraphComponent:
if load:
constructor = ConveRTFeaturizerGraphComponent.load
else:
constructor = ConveRTFeaturizerGraphComponent.create
return constructor(
config,
model_storage=default_model_storage,
execution_context=default_execution_context,
resource=Resource("unused"),
)
return inner
@pytest.mark.skip_on_windows
def test_convert_featurizer_process(
create_or_load_convert_featurizer: Callable[
[Dict[Text, Any]], ConveRTFeaturizerGraphComponent
],
monkeypatch: MonkeyPatch,
):
tokenizer = WhitespaceTokenizer()
monkeypatch.setattr(
ConveRTFeaturizerGraphComponent,
"_validate_model_url",
lambda _: RESTRICTED_ACCESS_URL,
)
component_config = {
FEATURIZER_CLASS_ALIAS: "alias",
"model_url": RESTRICTED_ACCESS_URL,
}
featurizer = create_or_load_convert_featurizer(component_config)
sentence = "Hey how are you today ?"
message = Message.build(text=sentence)
td = TrainingData([message])
tokenizer.train(td)
tokens = featurizer.tokenize(message, attribute=TEXT)
featurizer.process([message])
expected = np.array([2.2636216, -0.26475656, -1.1358104, -0.49751878, -1.3946456])
expected_cls = np.array(
[1.0251294, -0.04053932, -0.7018805, -0.82054937, -0.75054353]
)
seq_vecs, sent_vecs = message.get_dense_features(TEXT, [])
seq_vecs = seq_vecs.features
sent_vecs = sent_vecs.features
assert len(tokens) == len(seq_vecs)
assert np.allclose(seq_vecs[0][:5], expected, atol=1e-5)
assert np.allclose(sent_vecs[-1][:5], expected_cls, atol=1e-5)
@pytest.mark.skip_on_windows
@pytest.mark.parametrize("load", [True, False])
def test_convert_featurizer_train(
create_or_load_convert_featurizer: Callable[
[Dict[Text, Any]], ConveRTFeaturizerGraphComponent
],
monkeypatch: MonkeyPatch,
load: bool,
):
tokenizer = WhitespaceTokenizer()
monkeypatch.setattr(
ConveRTFeaturizerGraphComponent, "_validate_model_url", lambda _: None,
)
component_config = {
FEATURIZER_CLASS_ALIAS: "alias",
"model_url": RESTRICTED_ACCESS_URL,
}
featurizer = create_or_load_convert_featurizer(component_config, load=load)
sentence = "Hey how are you today ?"
message = Message(data={TEXT: sentence})
message.set(RESPONSE, sentence)
td = TrainingData([message])
tokenizer.train(td)
tokens = featurizer.tokenize(message, attribute=TEXT)
message.set(TOKENS_NAMES[TEXT], tokens)
message.set(TOKENS_NAMES[RESPONSE], tokens)
featurizer.process_training_data(TrainingData([message]))
expected = np.array([2.2636216, -0.26475656, -1.1358104, -0.49751878, -1.3946456])
expected_cls = np.array(
[1.0251294, -0.04053932, -0.7018805, -0.82054937, -0.75054353]
)
seq_vecs, sent_vecs = message.get_dense_features(TEXT, [])
seq_vecs = seq_vecs.features
sent_vecs = sent_vecs.features
assert len(tokens) == len(seq_vecs)
assert np.allclose(seq_vecs[0][:5], expected, atol=1e-5)
assert np.allclose(sent_vecs[-1][:5], expected_cls, atol=1e-5)
seq_vecs, sent_vecs = message.get_dense_features(RESPONSE, [])
seq_vecs = seq_vecs.features
sent_vecs = sent_vecs.features
assert len(tokens) == len(seq_vecs)
assert np.allclose(seq_vecs[0][:5], expected, atol=1e-5)
assert np.allclose(sent_vecs[-1][:5], expected_cls, atol=1e-5)
seq_vecs, sent_vecs = message.get_dense_features(INTENT, [])
assert seq_vecs is None
assert sent_vecs is None
@pytest.mark.skip_on_windows
@pytest.mark.parametrize(
"sentence, expected_text",
[
("hello", "hello"),
("you're", "you re"),
("r. n. b.", "r n b"),
("rock & roll", "rock & roll"),
("ńöñàśçií", "ńöñàśçií"),
],
)
def test_convert_featurizer_tokens_to_text(
create_or_load_convert_featurizer: Callable[
[Dict[Text, Any]], ConveRTFeaturizerGraphComponent
],
sentence: Text,
expected_text: Text,
monkeypatch: MonkeyPatch,
):
tokenizer = WhitespaceTokenizer()
monkeypatch.setattr(
ConveRTFeaturizerGraphComponent, "_validate_model_url", lambda _: None,
)
component_config = {
FEATURIZER_CLASS_ALIAS: "alias",
"model_url": RESTRICTED_ACCESS_URL,
}
featurizer = create_or_load_convert_featurizer(component_config)
message = Message.build(text=sentence)
td = TrainingData([message])
tokenizer.train(td)
tokens = featurizer.tokenize(message, attribute=TEXT)
actual_text = ConveRTFeaturizerGraphComponent._tokens_to_text([tokens])[0]
assert expected_text == actual_text
@pytest.mark.skip_on_windows
@pytest.mark.parametrize(
"text, expected_tokens, expected_indices",
[
(
"forecast for lunch",
["forecast", "for", "lunch"],
[(0, 8), (9, 12), (13, 18)],
),
("hello", ["hello"], [(0, 5)]),
("you're", ["you", "re"], [(0, 3), (4, 6)]),
("r. n. b.", ["r", "n", "b"], [(0, 1), (3, 4), (6, 7)]),
("rock & roll", ["rock", "&", "roll"], [(0, 4), (5, 6), (7, 11)]),
("ńöñàśçií", ["ńöñàśçií"], [(0, 8)]),
],
)
def test_convert_featurizer_token_edge_cases(
create_or_load_convert_featurizer: Callable[
[Dict[Text, Any]], ConveRTFeaturizerGraphComponent
],
text: Text,
expected_tokens: List[Text],
expected_indices: List[Tuple[int]],
monkeypatch: MonkeyPatch,
):
tokenizer = WhitespaceTokenizer()
monkeypatch.setattr(
ConveRTFeaturizerGraphComponent, "_validate_model_url", lambda _: None,
)
component_config = {
FEATURIZER_CLASS_ALIAS: "alias",
"model_url": RESTRICTED_ACCESS_URL,
}
featurizer = create_or_load_convert_featurizer(component_config)
message = Message.build(text=text)
td = TrainingData([message])
tokenizer.train(td)
tokens = featurizer.tokenize(message, attribute=TEXT)
assert [t.text for t in tokens] == expected_tokens
assert [t.start for t in tokens] == [i[0] for i in expected_indices]
assert [t.end for t in tokens] == [i[1] for i in expected_indices]
@pytest.mark.skip_on_windows
@pytest.mark.parametrize(
"text, expected_number_of_sub_tokens",
[("Aarhus is a city", [2, 1, 1, 1]), ("sentence embeddings", [1, 3])],
)
def test_convert_featurizer_number_of_sub_tokens(
create_or_load_convert_featurizer: Callable[
[Dict[Text, Any]], ConveRTFeaturizerGraphComponent
],
text: Text,
expected_number_of_sub_tokens: List[int],
monkeypatch: MonkeyPatch,
):
tokenizer = WhitespaceTokenizer()
monkeypatch.setattr(
ConveRTFeaturizerGraphComponent, "_validate_model_url", lambda _: None,
)
component_config = {
FEATURIZER_CLASS_ALIAS: "alias",
"model_url": RESTRICTED_ACCESS_URL,
}
featurizer = create_or_load_convert_featurizer(component_config)
message = Message.build(text=text)
td = TrainingData([message])
tokenizer.train(td)
tokens = featurizer.tokenize(message, attribute=TEXT)
assert [
t.get(NUMBER_OF_SUB_TOKENS) for t in tokens
] == expected_number_of_sub_tokens
@pytest.mark.skip_on_windows
@pytest.mark.parametrize(
"model_url, exception_phrase",
[
(ORIGINAL_TF_HUB_MODULE_URL, "which does not contain the model any longer"),
(
RESTRICTED_ACCESS_URL,
"which is strictly reserved for pytests of Rasa Open Source only",
),
(None, "'model_url' was not specified in the configuration"),
("", "'model_url' was not specified in the configuration"),
],
)
def test_raise_invalid_urls(
create_or_load_convert_featurizer: Callable[
[Dict[Text, Any]], ConveRTFeaturizerGraphComponent
],
model_url: Optional[Text],
exception_phrase: Text,
):
component_config = {FEATURIZER_CLASS_ALIAS: "alias", "model_url": model_url}
with pytest.raises(RasaException) as excinfo:
_ = create_or_load_convert_featurizer(component_config)
assert exception_phrase in str(excinfo.value)
@pytest.mark.skip_on_windows
def test_raise_wrong_model_directory(
create_or_load_convert_featurizer: Callable[
[Dict[Text, Any]], ConveRTFeaturizerGraphComponent
],
tmp_path: Path,
):
component_config = {FEATURIZER_CLASS_ALIAS: "alias", "model_url": str(tmp_path)}
with pytest.raises(RasaException) as excinfo:
_ = create_or_load_convert_featurizer(component_config)
assert "Re-check the files inside the directory" in str(excinfo.value)
@pytest.mark.skip_on_windows
def test_raise_wrong_model_file(
create_or_load_convert_featurizer: Callable[
[Dict[Text, Any]], ConveRTFeaturizerGraphComponent
],
tmp_path: Path,
):
# create a dummy file
temp_file = os.path.join(tmp_path, "saved_model.pb")
f = open(temp_file, "wb")
f.close()
component_config = {FEATURIZER_CLASS_ALIAS: "alias", "model_url": temp_file}
with pytest.raises(RasaException) as excinfo:
_ = create_or_load_convert_featurizer(component_config)
assert "set to the path of a file which is invalid" in str(excinfo.value)
@pytest.mark.skip_on_windows
def test_raise_invalid_path(
create_or_load_convert_featurizer: Callable[
[Dict[Text, Any]], ConveRTFeaturizerGraphComponent
],
):
component_config = {FEATURIZER_CLASS_ALIAS: "alias", "model_url": "saved_model.pb"}
with pytest.raises(RasaException) as excinfo:
_ = create_or_load_convert_featurizer(component_config)
assert "neither a valid remote URL nor a local directory" in str(excinfo.value)
``` |
{
"source": "Jimver/numpy",
"score": 2
} |
#### File: benchmarks/benchmarks/bench_indexing.py
```python
from .common import Benchmark, get_squares_, get_indexes_, get_indexes_rand_
from os.path import join as pjoin
import shutil
from numpy import memmap, float32, array
import numpy as np
from tempfile import mkdtemp
class Indexing(Benchmark):
params = [["indexes_", "indexes_rand_"],
['I', ':,I', 'np.ix_(I, I)'],
['', '=1']]
param_names = ['indexes', 'sel', 'op']
def setup(self, indexes, sel, op):
sel = sel.replace('I', indexes)
ns = {'squares_': get_squares_(),
'np': np,
'indexes_': get_indexes_(),
'indexes_rand_': get_indexes_rand_()}
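# Build the benchmark body as source text so the indexing expression
# (e.g. a[indexes_], a[:, indexes_] = 1, a[np.ix_(indexes_, indexes_)]) is
# compiled once in setup and the timed call does no string formatting.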
code = "def run():\n for a in squares_.values(): a[%s]%s"
code = code % (sel, op)
exec(code, ns)
self.func = ns['run']
def time_op(self, indexes, sel, op):
self.func()
class IndexingSeparate(Benchmark):
def setup(self):
self.tmp_dir = mkdtemp()
self.fp = memmap(pjoin(self.tmp_dir, 'tmp.dat'),
dtype=float32, mode='w+', shape=(50, 60))
self.indexes = array([3, 4, 6, 10, 20])
def teardown(self):
del self.fp
shutil.rmtree(self.tmp_dir)
def time_mmap_slicing(self):
for i in range(1000):
self.fp[5:10]
def time_mmap_fancy_indexing(self):
for i in range(1000):
self.fp[self.indexes]
class IndexingStructured0D(Benchmark):
def setup(self):
self.dt = np.dtype([('a', 'f4', 256)])
self.A = np.zeros((), self.dt)
self.B = self.A.copy()
self.a = np.zeros(1, self.dt)[0]
self.b = self.a.copy()
def time_array_slice(self):
self.B['a'][:] = self.A['a']
def time_array_all(self):
self.B['a'] = self.A['a']
def time_scalar_slice(self):
self.b['a'][:] = self.a['a']
def time_scalar_all(self):
self.b['a'] = self.a['a']
```
#### File: numpy/core/_methods.py
```python
import warnings
from numpy.core import multiarray as mu
from numpy.core import umath as um
from numpy.core._asarray import asanyarray
from numpy.core import numerictypes as nt
from numpy.core import _exceptions
from numpy._globals import _NoValue
from numpy.compat import pickle, os_fspath, contextlib_nullcontext
# save those O(100) nanoseconds!
umr_maximum = um.maximum.reduce
umr_minimum = um.minimum.reduce
umr_sum = um.add.reduce
umr_prod = um.multiply.reduce
umr_any = um.logical_or.reduce
umr_all = um.logical_and.reduce
# Complex types to -> (2,)float view for fast-path computation in _var()
_complex_to_float = {
nt.dtype(nt.csingle) : nt.dtype(nt.single),
nt.dtype(nt.cdouble) : nt.dtype(nt.double),
}
# Special case for windows: ensure double takes precedence
if nt.dtype(nt.longdouble) != nt.dtype(nt.double):
_complex_to_float.update({
nt.dtype(nt.clongdouble) : nt.dtype(nt.longdouble),
})
# Add reverse-endian types
_complex_to_float.update({
k.newbyteorder() : v.newbyteorder() for k, v in _complex_to_float.items()
})
# avoid keyword arguments to speed up parsing, saves about 15%-20% for very
# small reductions
def _amax(a, axis=None, out=None, keepdims=False,
initial=_NoValue, where=True):
return umr_maximum(a, axis, None, out, keepdims, initial, where)
def _amin(a, axis=None, out=None, keepdims=False,
initial=_NoValue, where=True):
return umr_minimum(a, axis, None, out, keepdims, initial, where)
def _sum(a, axis=None, dtype=None, out=None, keepdims=False,
initial=_NoValue, where=True):
return umr_sum(a, axis, dtype, out, keepdims, initial, where)
def _prod(a, axis=None, dtype=None, out=None, keepdims=False,
initial=_NoValue, where=True):
return umr_prod(a, axis, dtype, out, keepdims, initial, where)
def _any(a, axis=None, dtype=None, out=None, keepdims=False):
return umr_any(a, axis, dtype, out, keepdims)
def _all(a, axis=None, dtype=None, out=None, keepdims=False):
return umr_all(a, axis, dtype, out, keepdims)
def _count_reduce_items(arr, axis):
if axis is None:
axis = tuple(range(arr.ndim))
if not isinstance(axis, tuple):
axis = (axis,)
items = 1
for ax in axis:
items *= arr.shape[ax]
return items
# Numpy 1.17.0, 2019-02-24
# Various clip behavior deprecations, marked with _clip_dep as a prefix.
def _clip_dep_is_scalar_nan(a):
# guarded to protect circular imports
from numpy.core.fromnumeric import ndim
if ndim(a) != 0:
return False
try:
return um.isnan(a)
except TypeError:
return False
def _clip_dep_is_byte_swapped(a):
if isinstance(a, mu.ndarray):
return not a.dtype.isnative
return False
def _clip_dep_invoke_with_casting(ufunc, *args, out=None, casting=None, **kwargs):
# normal path
if casting is not None:
return ufunc(*args, out=out, casting=casting, **kwargs)
# try to deal with broken casting rules
try:
return ufunc(*args, out=out, **kwargs)
except _exceptions._UFuncOutputCastingError as e:
# Numpy 1.17.0, 2019-02-24
warnings.warn(
"Converting the output of clip from {!r} to {!r} is deprecated. "
"Pass `casting=\"unsafe\"` explicitly to silence this warning, or "
"correct the type of the variables.".format(e.from_, e.to),
DeprecationWarning,
stacklevel=2
)
return ufunc(*args, out=out, casting="unsafe", **kwargs)
def _clip(a, min=None, max=None, out=None, *, casting=None, **kwargs):
if min is None and max is None:
raise ValueError("One of max or min must be given")
# Numpy 1.17.0, 2019-02-24
# This deprecation probably incurs a substantial slowdown for small arrays,
# it will be good to get rid of it.
if not _clip_dep_is_byte_swapped(a) and not _clip_dep_is_byte_swapped(out):
using_deprecated_nan = False
if _clip_dep_is_scalar_nan(min):
min = -float('inf')
using_deprecated_nan = True
if _clip_dep_is_scalar_nan(max):
max = float('inf')
using_deprecated_nan = True
if using_deprecated_nan:
warnings.warn(
"Passing `np.nan` to mean no clipping in np.clip has always "
"been unreliable, and is now deprecated. "
"In future, this will always return nan, like it already does "
"when min or max are arrays that contain nan. "
"To skip a bound, pass either None or an np.inf of an "
"appropriate sign.",
DeprecationWarning,
stacklevel=2
)
if min is None:
return _clip_dep_invoke_with_casting(
um.minimum, a, max, out=out, casting=casting, **kwargs)
elif max is None:
return _clip_dep_invoke_with_casting(
um.maximum, a, min, out=out, casting=casting, **kwargs)
else:
return _clip_dep_invoke_with_casting(
um.clip, a, min, max, out=out, casting=casting, **kwargs)
def _mean(a, axis=None, dtype=None, out=None, keepdims=False):
arr = asanyarray(a)
is_float16_result = False
rcount = _count_reduce_items(arr, axis)
# Make this warning show up first
if rcount == 0:
warnings.warn("Mean of empty slice.", RuntimeWarning, stacklevel=2)
# Cast bool, unsigned int, and int to float64 by default
if dtype is None:
if issubclass(arr.dtype.type, (nt.integer, nt.bool_)):
dtype = mu.dtype('f8')
elif issubclass(arr.dtype.type, nt.float16):
dtype = mu.dtype('f4')
is_float16_result = True
ret = umr_sum(arr, axis, dtype, out, keepdims)
if isinstance(ret, mu.ndarray):
ret = um.true_divide(
ret, rcount, out=ret, casting='unsafe', subok=False)
if is_float16_result and out is None:
ret = arr.dtype.type(ret)
elif hasattr(ret, 'dtype'):
if is_float16_result:
ret = arr.dtype.type(ret / rcount)
else:
ret = ret.dtype.type(ret / rcount)
else:
ret = ret / rcount
return ret
def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
arr = asanyarray(a)
rcount = _count_reduce_items(arr, axis)
# Make this warning show up on top.
if ddof >= rcount:
warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning,
stacklevel=2)
# Cast bool, unsigned int, and int to float64 by default
if dtype is None and issubclass(arr.dtype.type, (nt.integer, nt.bool_)):
dtype = mu.dtype('f8')
# Compute the mean.
# Note that if dtype is not of inexact type then arraymean will
# not be either.
arrmean = umr_sum(arr, axis, dtype, keepdims=True)
if isinstance(arrmean, mu.ndarray):
arrmean = um.true_divide(
arrmean, rcount, out=arrmean, casting='unsafe', subok=False)
else:
arrmean = arrmean.dtype.type(arrmean / rcount)
# Compute sum of squared deviations from mean
# Note that x may not be inexact and that we need it to be an array,
# not a scalar.
x = asanyarray(arr - arrmean)
if issubclass(arr.dtype.type, (nt.floating, nt.integer)):
x = um.multiply(x, x, out=x)
# Fast-paths for built-in complex types
elif x.dtype in _complex_to_float:
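# |z|**2 == z.real**2 + z.imag**2, so view each complex element as a
# pair of floats, square in place, and sum the two columns.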
xv = x.view(dtype=(_complex_to_float[x.dtype], (2,)))
um.multiply(xv, xv, out=xv)
x = um.add(xv[..., 0], xv[..., 1], out=x.real).real
# Most general case; includes handling object arrays containing imaginary
# numbers and complex types with non-native byteorder
else:
x = um.multiply(x, um.conjugate(x), out=x).real
ret = umr_sum(x, axis, dtype, out, keepdims)
# Compute degrees of freedom and make sure it is not negative.
rcount = max([rcount - ddof, 0])
# divide by degrees of freedom
if isinstance(ret, mu.ndarray):
ret = um.true_divide(
ret, rcount, out=ret, casting='unsafe', subok=False)
elif hasattr(ret, 'dtype'):
ret = ret.dtype.type(ret / rcount)
else:
ret = ret / rcount
return ret
def _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
ret = _var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
keepdims=keepdims)
if isinstance(ret, mu.ndarray):
ret = um.sqrt(ret, out=ret)
elif hasattr(ret, 'dtype'):
ret = ret.dtype.type(um.sqrt(ret))
else:
ret = um.sqrt(ret)
return ret
def _ptp(a, axis=None, out=None, keepdims=False):
return um.subtract(
umr_maximum(a, axis, None, out, keepdims),
umr_minimum(a, axis, None, None, keepdims),
out
)
def _dump(self, file, protocol=2):
if hasattr(file, 'write'):
ctx = contextlib_nullcontext(file)
else:
ctx = open(os_fspath(file), "wb")
with ctx as f:
pickle.dump(self, f, protocol=protocol)
def _dumps(self, protocol=2):
return pickle.dumps(self, protocol=protocol)
``` |
{
"source": "Jimver/prettypercentiles",
"score": 3
} |
#### File: Jimver/prettypercentiles/customfunctions.py
```python
def convert_nanos_to_millis(column):
return [i/1000000 for i in column]
def avg(sequence):
total = sum(sequence)
return total/len(sequence)
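# --- Hedged usage sketch (not part of the original file) ---
# Example only: convert a handful of latency samples from nanoseconds to
# milliseconds and average them (the values are made up).
if __name__ == "__main__":
    latencies_ns = [1_500_000, 2_000_000, 3_500_000]
    print(avg(convert_nanos_to_millis(latencies_ns)))  # ~2.33 ms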
``` |
{
"source": "Jimver/TUD-DistributedSystems",
"score": 3
} |
#### File: Jade/experiments/common.py
```python
import requests
DEFAULT_PORT = 5000
def ping_all_nodes(ip_addresses, route='/'):
node_availabilities = []
for ip in ip_addresses:
is_up = ping_url('http://' + ip + ':' + str(DEFAULT_PORT) + route)
node_availabilities.append(is_up)
return node_availabilities
def ping_url(url):
try:
request = requests.get(url, verify=False, timeout=1)
if request.status_code == 200:
return request.elapsed.microseconds / 1000
else:
print(request)
return -1
except Exception as e:
print(e)
return -1
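# --- Hedged usage sketch (not part of the original module) ---
# Example only: the IP addresses below are placeholders, not real nodes.
if __name__ == "__main__":
    availabilities = ping_all_nodes(["10.0.0.1", "10.0.0.2"])
    print(availabilities)  # latency in ms per node, or -1 if unreachable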
``` |
{
"source": "Jimvin/nipyapi",
"score": 2
} |
#### File: nifi/models/connection_diagnostics_snapshot_dto.py
```python
from pprint import pformat
from six import iteritems
import re
class ConnectionDiagnosticsSnapshotDTO(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'total_flow_file_count': 'int',
'total_byte_count': 'int',
'node_identifier': 'str',
'local_queue_partition': 'LocalQueuePartitionDTO',
'remote_queue_partitions': 'list[RemoteQueuePartitionDTO]'
}
attribute_map = {
'total_flow_file_count': 'totalFlowFileCount',
'total_byte_count': 'totalByteCount',
'node_identifier': 'nodeIdentifier',
'local_queue_partition': 'localQueuePartition',
'remote_queue_partitions': 'remoteQueuePartitions'
}
def __init__(self, total_flow_file_count=None, total_byte_count=None, node_identifier=None, local_queue_partition=None, remote_queue_partitions=None):
"""
ConnectionDiagnosticsSnapshotDTO - a model defined in Swagger
"""
self._total_flow_file_count = None
self._total_byte_count = None
self._node_identifier = None
self._local_queue_partition = None
self._remote_queue_partitions = None
if total_flow_file_count is not None:
self.total_flow_file_count = total_flow_file_count
if total_byte_count is not None:
self.total_byte_count = total_byte_count
if node_identifier is not None:
self.node_identifier = node_identifier
if local_queue_partition is not None:
self.local_queue_partition = local_queue_partition
if remote_queue_partitions is not None:
self.remote_queue_partitions = remote_queue_partitions
@property
def total_flow_file_count(self):
"""
Gets the total_flow_file_count of this ConnectionDiagnosticsSnapshotDTO.
Total number of FlowFiles owned by the Connection
:return: The total_flow_file_count of this ConnectionDiagnosticsSnapshotDTO.
:rtype: int
"""
return self._total_flow_file_count
@total_flow_file_count.setter
def total_flow_file_count(self, total_flow_file_count):
"""
Sets the total_flow_file_count of this ConnectionDiagnosticsSnapshotDTO.
Total number of FlowFiles owned by the Connection
:param total_flow_file_count: The total_flow_file_count of this ConnectionDiagnosticsSnapshotDTO.
:type: int
"""
self._total_flow_file_count = total_flow_file_count
@property
def total_byte_count(self):
"""
Gets the total_byte_count of this ConnectionDiagnosticsSnapshotDTO.
Total number of bytes that make up the content for the FlowFiles owned by this Connection
:return: The total_byte_count of this ConnectionDiagnosticsSnapshotDTO.
:rtype: int
"""
return self._total_byte_count
@total_byte_count.setter
def total_byte_count(self, total_byte_count):
"""
Sets the total_byte_count of this ConnectionDiagnosticsSnapshotDTO.
Total number of bytes that make up the content for the FlowFiles owned by this Connection
:param total_byte_count: The total_byte_count of this ConnectionDiagnosticsSnapshotDTO.
:type: int
"""
self._total_byte_count = total_byte_count
@property
def node_identifier(self):
"""
Gets the node_identifier of this ConnectionDiagnosticsSnapshotDTO.
The Node Identifier that this information pertains to
:return: The node_identifier of this ConnectionDiagnosticsSnapshotDTO.
:rtype: str
"""
return self._node_identifier
@node_identifier.setter
def node_identifier(self, node_identifier):
"""
Sets the node_identifier of this ConnectionDiagnosticsSnapshotDTO.
The Node Identifier that this information pertains to
:param node_identifier: The node_identifier of this ConnectionDiagnosticsSnapshotDTO.
:type: str
"""
self._node_identifier = node_identifier
@property
def local_queue_partition(self):
"""
Gets the local_queue_partition of this ConnectionDiagnosticsSnapshotDTO.
The local queue partition, from which components can pull FlowFiles on this node.
:return: The local_queue_partition of this ConnectionDiagnosticsSnapshotDTO.
:rtype: LocalQueuePartitionDTO
"""
return self._local_queue_partition
@local_queue_partition.setter
def local_queue_partition(self, local_queue_partition):
"""
Sets the local_queue_partition of this ConnectionDiagnosticsSnapshotDTO.
The local queue partition, from which components can pull FlowFiles on this node.
:param local_queue_partition: The local_queue_partition of this ConnectionDiagnosticsSnapshotDTO.
:type: LocalQueuePartitionDTO
"""
self._local_queue_partition = local_queue_partition
@property
def remote_queue_partitions(self):
"""
Gets the remote_queue_partitions of this ConnectionDiagnosticsSnapshotDTO.
:return: The remote_queue_partitions of this ConnectionDiagnosticsSnapshotDTO.
:rtype: list[RemoteQueuePartitionDTO]
"""
return self._remote_queue_partitions
@remote_queue_partitions.setter
def remote_queue_partitions(self, remote_queue_partitions):
"""
Sets the remote_queue_partitions of this ConnectionDiagnosticsSnapshotDTO.
:param remote_queue_partitions: The remote_queue_partitions of this ConnectionDiagnosticsSnapshotDTO.
:type: list[RemoteQueuePartitionDTO]
"""
self._remote_queue_partitions = remote_queue_partitions
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, ConnectionDiagnosticsSnapshotDTO):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
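# --- Hedged usage sketch (not part of the generated file) ---
# Example only: construct the DTO with a few illustrative values and
# serialise it back to a plain dict via to_dict().
if __name__ == "__main__":
    dto = ConnectionDiagnosticsSnapshotDTO(total_flow_file_count=10,
                                           total_byte_count=2048,
                                           node_identifier="node-1")
    print(dto.to_dict())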
```
#### File: nifi/models/drop_request_dto.py
```python
from pprint import pformat
from six import iteritems
import re
class DropRequestDTO(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'uri': 'str',
'submission_time': 'str',
'last_updated': 'str',
'percent_completed': 'int',
'finished': 'bool',
'failure_reason': 'str',
'current_count': 'int',
'current_size': 'int',
'current': 'str',
'original_count': 'int',
'original_size': 'int',
'original': 'str',
'dropped_count': 'int',
'dropped_size': 'int',
'dropped': 'str',
'state': 'str'
}
attribute_map = {
'id': 'id',
'uri': 'uri',
'submission_time': 'submissionTime',
'last_updated': 'lastUpdated',
'percent_completed': 'percentCompleted',
'finished': 'finished',
'failure_reason': 'failureReason',
'current_count': 'currentCount',
'current_size': 'currentSize',
'current': 'current',
'original_count': 'originalCount',
'original_size': 'originalSize',
'original': 'original',
'dropped_count': 'droppedCount',
'dropped_size': 'droppedSize',
'dropped': 'dropped',
'state': 'state'
}
def __init__(self, id=None, uri=None, submission_time=None, last_updated=None, percent_completed=None, finished=None, failure_reason=None, current_count=None, current_size=None, current=None, original_count=None, original_size=None, original=None, dropped_count=None, dropped_size=None, dropped=None, state=None):
"""
DropRequestDTO - a model defined in Swagger
"""
self._id = None
self._uri = None
self._submission_time = None
self._last_updated = None
self._percent_completed = None
self._finished = None
self._failure_reason = None
self._current_count = None
self._current_size = None
self._current = None
self._original_count = None
self._original_size = None
self._original = None
self._dropped_count = None
self._dropped_size = None
self._dropped = None
self._state = None
if id is not None:
self.id = id
if uri is not None:
self.uri = uri
if submission_time is not None:
self.submission_time = submission_time
if last_updated is not None:
self.last_updated = last_updated
if percent_completed is not None:
self.percent_completed = percent_completed
if finished is not None:
self.finished = finished
if failure_reason is not None:
self.failure_reason = failure_reason
if current_count is not None:
self.current_count = current_count
if current_size is not None:
self.current_size = current_size
if current is not None:
self.current = current
if original_count is not None:
self.original_count = original_count
if original_size is not None:
self.original_size = original_size
if original is not None:
self.original = original
if dropped_count is not None:
self.dropped_count = dropped_count
if dropped_size is not None:
self.dropped_size = dropped_size
if dropped is not None:
self.dropped = dropped
if state is not None:
self.state = state
@property
def id(self):
"""
Gets the id of this DropRequestDTO.
The id for this drop request.
:return: The id of this DropRequestDTO.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this DropRequestDTO.
The id for this drop request.
:param id: The id of this DropRequestDTO.
:type: str
"""
self._id = id
@property
def uri(self):
"""
Gets the uri of this DropRequestDTO.
The URI for future requests to this drop request.
:return: The uri of this DropRequestDTO.
:rtype: str
"""
return self._uri
@uri.setter
def uri(self, uri):
"""
Sets the uri of this DropRequestDTO.
The URI for future requests to this drop request.
:param uri: The uri of this DropRequestDTO.
:type: str
"""
self._uri = uri
@property
def submission_time(self):
"""
Gets the submission_time of this DropRequestDTO.
The timestamp when the query was submitted.
:return: The submission_time of this DropRequestDTO.
:rtype: str
"""
return self._submission_time
@submission_time.setter
def submission_time(self, submission_time):
"""
Sets the submission_time of this DropRequestDTO.
The timestamp when the query was submitted.
:param submission_time: The submission_time of this DropRequestDTO.
:type: str
"""
self._submission_time = submission_time
@property
def last_updated(self):
"""
Gets the last_updated of this DropRequestDTO.
The last time this drop request was updated.
:return: The last_updated of this DropRequestDTO.
:rtype: str
"""
return self._last_updated
@last_updated.setter
def last_updated(self, last_updated):
"""
Sets the last_updated of this DropRequestDTO.
The last time this drop request was updated.
:param last_updated: The last_updated of this DropRequestDTO.
:type: str
"""
self._last_updated = last_updated
@property
def percent_completed(self):
"""
Gets the percent_completed of this DropRequestDTO.
The current percent complete.
:return: The percent_completed of this DropRequestDTO.
:rtype: int
"""
return self._percent_completed
@percent_completed.setter
def percent_completed(self, percent_completed):
"""
Sets the percent_completed of this DropRequestDTO.
The current percent complete.
:param percent_completed: The percent_completed of this DropRequestDTO.
:type: int
"""
self._percent_completed = percent_completed
@property
def finished(self):
"""
Gets the finished of this DropRequestDTO.
Whether the query has finished.
:return: The finished of this DropRequestDTO.
:rtype: bool
"""
return self._finished
@finished.setter
def finished(self, finished):
"""
Sets the finished of this DropRequestDTO.
Whether the query has finished.
:param finished: The finished of this DropRequestDTO.
:type: bool
"""
self._finished = finished
@property
def failure_reason(self):
"""
Gets the failure_reason of this DropRequestDTO.
The reason, if any, that this drop request failed.
:return: The failure_reason of this DropRequestDTO.
:rtype: str
"""
return self._failure_reason
@failure_reason.setter
def failure_reason(self, failure_reason):
"""
Sets the failure_reason of this DropRequestDTO.
The reason, if any, that this drop request failed.
:param failure_reason: The failure_reason of this DropRequestDTO.
:type: str
"""
self._failure_reason = failure_reason
@property
def current_count(self):
"""
Gets the current_count of this DropRequestDTO.
The number of flow files currently queued.
:return: The current_count of this DropRequestDTO.
:rtype: int
"""
return self._current_count
@current_count.setter
def current_count(self, current_count):
"""
Sets the current_count of this DropRequestDTO.
The number of flow files currently queued.
:param current_count: The current_count of this DropRequestDTO.
:type: int
"""
self._current_count = current_count
@property
def current_size(self):
"""
Gets the current_size of this DropRequestDTO.
The size of flow files currently queued in bytes.
:return: The current_size of this DropRequestDTO.
:rtype: int
"""
return self._current_size
@current_size.setter
def current_size(self, current_size):
"""
Sets the current_size of this DropRequestDTO.
The size of flow files currently queued in bytes.
:param current_size: The current_size of this DropRequestDTO.
:type: int
"""
self._current_size = current_size
@property
def current(self):
"""
Gets the current of this DropRequestDTO.
The count and size of flow files currently queued.
:return: The current of this DropRequestDTO.
:rtype: str
"""
return self._current
@current.setter
def current(self, current):
"""
Sets the current of this DropRequestDTO.
The count and size of flow files currently queued.
:param current: The current of this DropRequestDTO.
:type: str
"""
self._current = current
@property
def original_count(self):
"""
Gets the original_count of this DropRequestDTO.
The number of flow files to be dropped as a result of this request.
:return: The original_count of this DropRequestDTO.
:rtype: int
"""
return self._original_count
@original_count.setter
def original_count(self, original_count):
"""
Sets the original_count of this DropRequestDTO.
The number of flow files to be dropped as a result of this request.
:param original_count: The original_count of this DropRequestDTO.
:type: int
"""
self._original_count = original_count
@property
def original_size(self):
"""
Gets the original_size of this DropRequestDTO.
The size of flow files to be dropped as a result of this request in bytes.
:return: The original_size of this DropRequestDTO.
:rtype: int
"""
return self._original_size
@original_size.setter
def original_size(self, original_size):
"""
Sets the original_size of this DropRequestDTO.
The size of flow files to be dropped as a result of this request in bytes.
:param original_size: The original_size of this DropRequestDTO.
:type: int
"""
self._original_size = original_size
@property
def original(self):
"""
Gets the original of this DropRequestDTO.
The count and size of flow files to be dropped as a result of this request.
:return: The original of this DropRequestDTO.
:rtype: str
"""
return self._original
@original.setter
def original(self, original):
"""
Sets the original of this DropRequestDTO.
The count and size of flow files to be dropped as a result of this request.
:param original: The original of this DropRequestDTO.
:type: str
"""
self._original = original
@property
def dropped_count(self):
"""
Gets the dropped_count of this DropRequestDTO.
The number of flow files that have been dropped thus far.
:return: The dropped_count of this DropRequestDTO.
:rtype: int
"""
return self._dropped_count
@dropped_count.setter
def dropped_count(self, dropped_count):
"""
Sets the dropped_count of this DropRequestDTO.
The number of flow files that have been dropped thus far.
:param dropped_count: The dropped_count of this DropRequestDTO.
:type: int
"""
self._dropped_count = dropped_count
@property
def dropped_size(self):
"""
Gets the dropped_size of this DropRequestDTO.
The size of flow files that have been dropped thus far in bytes.
:return: The dropped_size of this DropRequestDTO.
:rtype: int
"""
return self._dropped_size
@dropped_size.setter
def dropped_size(self, dropped_size):
"""
Sets the dropped_size of this DropRequestDTO.
The size of flow files that have been dropped thus far in bytes.
:param dropped_size: The dropped_size of this DropRequestDTO.
:type: int
"""
self._dropped_size = dropped_size
@property
def dropped(self):
"""
Gets the dropped of this DropRequestDTO.
The count and size of flow files that have been dropped thus far.
:return: The dropped of this DropRequestDTO.
:rtype: str
"""
return self._dropped
@dropped.setter
def dropped(self, dropped):
"""
Sets the dropped of this DropRequestDTO.
The count and size of flow files that have been dropped thus far.
:param dropped: The dropped of this DropRequestDTO.
:type: str
"""
self._dropped = dropped
@property
def state(self):
"""
Gets the state of this DropRequestDTO.
The current state of the drop request.
:return: The state of this DropRequestDTO.
:rtype: str
"""
return self._state
@state.setter
def state(self, state):
"""
Sets the state of this DropRequestDTO.
The current state of the drop request.
:param state: The state of this DropRequestDTO.
:type: str
"""
self._state = state
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, DropRequestDTO):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
```
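All of the NiFi DTO classes in this dump follow the same generated pattern: typed attributes declared in `swagger_types`, camelCase JSON keys in `attribute_map`, one property setter per field, and `to_dict()`/`to_str()`/`__eq__` helpers. A minimal usage sketch follows; the import path is an assumption based on the `nifi/models/...` file layout shown here (a packaged client such as nipyapi nests these under its own namespace), and the constructor is assumed to take only optional keyword arguments since none of the setters above enforce a value.

```python
# Sketch only: import path assumed from the "nifi/models/..." layout above.
from nifi.models.drop_request_dto import DropRequestDTO

drop = DropRequestDTO()          # generated fields default to None
drop.state = "COMPLETED"
drop.dropped_count = 42
drop.dropped_size = 1024

# to_dict() walks swagger_types and converts nested models/lists recursively;
# the resulting keys are the snake_case attribute names, not the JSON names.
print(drop.to_dict())

# __eq__ compares __dict__, so two DTOs with identical field values are equal.
other = DropRequestDTO()
other.state, other.dropped_count, other.dropped_size = "COMPLETED", 42, 1024
print(drop == other)             # True
```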
#### File: nifi/models/repository_usage_dto.py
```python
from pprint import pformat
from six import iteritems
import re
class RepositoryUsageDTO(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'file_store_hash': 'str',
'free_space': 'str',
'total_space': 'str',
'free_space_bytes': 'int',
'total_space_bytes': 'int',
'utilization': 'str'
}
attribute_map = {
'name': 'name',
'file_store_hash': 'fileStoreHash',
'free_space': 'freeSpace',
'total_space': 'totalSpace',
'free_space_bytes': 'freeSpaceBytes',
'total_space_bytes': 'totalSpaceBytes',
'utilization': 'utilization'
}
def __init__(self, name=None, file_store_hash=None, free_space=None, total_space=None, free_space_bytes=None, total_space_bytes=None, utilization=None):
"""
RepositoryUsageDTO - a model defined in Swagger
"""
self._name = None
self._file_store_hash = None
self._free_space = None
self._total_space = None
self._free_space_bytes = None
self._total_space_bytes = None
self._utilization = None
if name is not None:
self.name = name
if file_store_hash is not None:
self.file_store_hash = file_store_hash
if free_space is not None:
self.free_space = free_space
if total_space is not None:
self.total_space = total_space
if free_space_bytes is not None:
self.free_space_bytes = free_space_bytes
if total_space_bytes is not None:
self.total_space_bytes = total_space_bytes
if utilization is not None:
self.utilization = utilization
@property
def name(self):
"""
Gets the name of this RepositoryUsageDTO.
The name of the repository
:return: The name of this RepositoryUsageDTO.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this RepositoryUsageDTO.
The name of the repository
:param name: The name of this RepositoryUsageDTO.
:type: str
"""
self._name = name
@property
def file_store_hash(self):
"""
Gets the file_store_hash of this RepositoryUsageDTO.
A SHA-256 hash of the File Store name/path that is used to store the repository's data. This information is exposed as a hash in order to avoid exposing potentially sensitive information that is not generally relevant. What is typically relevant is whether or not multiple repositories on the same node are using the same File Store, as this indicates that the repositories are competing for the resources of the backing disk/storage mechanism.
:return: The file_store_hash of this RepositoryUsageDTO.
:rtype: str
"""
return self._file_store_hash
@file_store_hash.setter
def file_store_hash(self, file_store_hash):
"""
Sets the file_store_hash of this RepositoryUsageDTO.
A SHA-256 hash of the File Store name/path that is used to store the repository's data. This information is exposed as a hash in order to avoid exposing potentially sensitive information that is not generally relevant. What is typically relevant is whether or not multiple repositories on the same node are using the same File Store, as this indicates that the repositories are competing for the resources of the backing disk/storage mechanism.
:param file_store_hash: The file_store_hash of this RepositoryUsageDTO.
:type: str
"""
self._file_store_hash = file_store_hash
@property
def free_space(self):
"""
Gets the free_space of this RepositoryUsageDTO.
Amount of free space.
:return: The free_space of this RepositoryUsageDTO.
:rtype: str
"""
return self._free_space
@free_space.setter
def free_space(self, free_space):
"""
Sets the free_space of this RepositoryUsageDTO.
Amount of free space.
:param free_space: The free_space of this RepositoryUsageDTO.
:type: str
"""
self._free_space = free_space
@property
def total_space(self):
"""
Gets the total_space of this RepositoryUsageDTO.
Amount of total space.
:return: The total_space of this RepositoryUsageDTO.
:rtype: str
"""
return self._total_space
@total_space.setter
def total_space(self, total_space):
"""
Sets the total_space of this RepositoryUsageDTO.
Amount of total space.
:param total_space: The total_space of this RepositoryUsageDTO.
:type: str
"""
self._total_space = total_space
@property
def free_space_bytes(self):
"""
Gets the free_space_bytes of this RepositoryUsageDTO.
The number of bytes of free space.
:return: The free_space_bytes of this RepositoryUsageDTO.
:rtype: int
"""
return self._free_space_bytes
@free_space_bytes.setter
def free_space_bytes(self, free_space_bytes):
"""
Sets the free_space_bytes of this RepositoryUsageDTO.
The number of bytes of free space.
:param free_space_bytes: The free_space_bytes of this RepositoryUsageDTO.
:type: int
"""
self._free_space_bytes = free_space_bytes
@property
def total_space_bytes(self):
"""
Gets the total_space_bytes of this RepositoryUsageDTO.
The number of bytes of total space.
:return: The total_space_bytes of this RepositoryUsageDTO.
:rtype: int
"""
return self._total_space_bytes
@total_space_bytes.setter
def total_space_bytes(self, total_space_bytes):
"""
Sets the total_space_bytes of this RepositoryUsageDTO.
The number of bytes of total space.
:param total_space_bytes: The total_space_bytes of this RepositoryUsageDTO.
:type: int
"""
self._total_space_bytes = total_space_bytes
@property
def utilization(self):
"""
Gets the utilization of this RepositoryUsageDTO.
Utilization of this storage location.
:return: The utilization of this RepositoryUsageDTO.
:rtype: str
"""
return self._utilization
@utilization.setter
def utilization(self, utilization):
"""
Sets the utilization of this RepositoryUsageDTO.
Utilization of this storage location.
:param utilization: The utilization of this RepositoryUsageDTO.
:type: str
"""
self._utilization = utilization
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, RepositoryUsageDTO):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
```
#### File: nifi/models/thread_dump_dto.py
```python
from pprint import pformat
from six import iteritems
import re
class ThreadDumpDTO(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'node_id': 'str',
'node_address': 'str',
'api_port': 'int',
'stack_trace': 'str',
'thread_name': 'str',
'thread_active_millis': 'int',
'task_terminated': 'bool'
}
attribute_map = {
'node_id': 'nodeId',
'node_address': 'nodeAddress',
'api_port': 'apiPort',
'stack_trace': 'stackTrace',
'thread_name': 'threadName',
'thread_active_millis': 'threadActiveMillis',
'task_terminated': 'taskTerminated'
}
def __init__(self, node_id=None, node_address=None, api_port=None, stack_trace=None, thread_name=None, thread_active_millis=None, task_terminated=None):
"""
ThreadDumpDTO - a model defined in Swagger
"""
self._node_id = None
self._node_address = None
self._api_port = None
self._stack_trace = None
self._thread_name = None
self._thread_active_millis = None
self._task_terminated = None
if node_id is not None:
self.node_id = node_id
if node_address is not None:
self.node_address = node_address
if api_port is not None:
self.api_port = api_port
if stack_trace is not None:
self.stack_trace = stack_trace
if thread_name is not None:
self.thread_name = thread_name
if thread_active_millis is not None:
self.thread_active_millis = thread_active_millis
if task_terminated is not None:
self.task_terminated = task_terminated
@property
def node_id(self):
"""
Gets the node_id of this ThreadDumpDTO.
The ID of the node in the cluster
:return: The node_id of this ThreadDumpDTO.
:rtype: str
"""
return self._node_id
@node_id.setter
def node_id(self, node_id):
"""
Sets the node_id of this ThreadDumpDTO.
The ID of the node in the cluster
:param node_id: The node_id of this ThreadDumpDTO.
:type: str
"""
self._node_id = node_id
@property
def node_address(self):
"""
Gets the node_address of this ThreadDumpDTO.
The address of the node in the cluster
:return: The node_address of this ThreadDumpDTO.
:rtype: str
"""
return self._node_address
@node_address.setter
def node_address(self, node_address):
"""
Sets the node_address of this ThreadDumpDTO.
The address of the node in the cluster
:param node_address: The node_address of this ThreadDumpDTO.
:type: str
"""
self._node_address = node_address
@property
def api_port(self):
"""
Gets the api_port of this ThreadDumpDTO.
        The port the node is listening on for API requests.
:return: The api_port of this ThreadDumpDTO.
:rtype: int
"""
return self._api_port
@api_port.setter
def api_port(self, api_port):
"""
Sets the api_port of this ThreadDumpDTO.
        The port the node is listening on for API requests.
:param api_port: The api_port of this ThreadDumpDTO.
:type: int
"""
self._api_port = api_port
@property
def stack_trace(self):
"""
Gets the stack_trace of this ThreadDumpDTO.
The stack trace for the thread
:return: The stack_trace of this ThreadDumpDTO.
:rtype: str
"""
return self._stack_trace
@stack_trace.setter
def stack_trace(self, stack_trace):
"""
Sets the stack_trace of this ThreadDumpDTO.
The stack trace for the thread
:param stack_trace: The stack_trace of this ThreadDumpDTO.
:type: str
"""
self._stack_trace = stack_trace
@property
def thread_name(self):
"""
Gets the thread_name of this ThreadDumpDTO.
The name of the thread
:return: The thread_name of this ThreadDumpDTO.
:rtype: str
"""
return self._thread_name
@thread_name.setter
def thread_name(self, thread_name):
"""
Sets the thread_name of this ThreadDumpDTO.
The name of the thread
:param thread_name: The thread_name of this ThreadDumpDTO.
:type: str
"""
self._thread_name = thread_name
@property
def thread_active_millis(self):
"""
Gets the thread_active_millis of this ThreadDumpDTO.
The number of milliseconds that the thread has been executing in the Processor
:return: The thread_active_millis of this ThreadDumpDTO.
:rtype: int
"""
return self._thread_active_millis
@thread_active_millis.setter
def thread_active_millis(self, thread_active_millis):
"""
Sets the thread_active_millis of this ThreadDumpDTO.
The number of milliseconds that the thread has been executing in the Processor
:param thread_active_millis: The thread_active_millis of this ThreadDumpDTO.
:type: int
"""
self._thread_active_millis = thread_active_millis
@property
def task_terminated(self):
"""
Gets the task_terminated of this ThreadDumpDTO.
Indicates whether or not the user has requested that the task be terminated. If this is true, it may indicate that the thread is in a state where it will continue running indefinitely without returning.
:return: The task_terminated of this ThreadDumpDTO.
:rtype: bool
"""
return self._task_terminated
@task_terminated.setter
def task_terminated(self, task_terminated):
"""
Sets the task_terminated of this ThreadDumpDTO.
Indicates whether or not the user has requested that the task be terminated. If this is true, it may indicate that the thread is in a state where it will continue running indefinitely without returning.
:param task_terminated: The task_terminated of this ThreadDumpDTO.
:type: bool
"""
self._task_terminated = task_terminated
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, ThreadDumpDTO):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
```
#### File: registry/models/build_info.py
```python
from pprint import pformat
from six import iteritems
import re
class BuildInfo(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'build_tool': 'str',
'build_flags': 'str',
'build_branch': 'str',
'build_tag': 'str',
'build_revision': 'str',
'built': 'int',
'built_by': 'str'
}
attribute_map = {
'build_tool': 'buildTool',
'build_flags': 'buildFlags',
'build_branch': 'buildBranch',
'build_tag': 'buildTag',
'build_revision': 'buildRevision',
'built': 'built',
'built_by': 'builtBy'
}
def __init__(self, build_tool=None, build_flags=None, build_branch=None, build_tag=None, build_revision=None, built=None, built_by=None):
"""
BuildInfo - a model defined in Swagger
"""
self._build_tool = None
self._build_flags = None
self._build_branch = None
self._build_tag = None
self._build_revision = None
self._built = None
self._built_by = None
if build_tool is not None:
self.build_tool = build_tool
if build_flags is not None:
self.build_flags = build_flags
if build_branch is not None:
self.build_branch = build_branch
if build_tag is not None:
self.build_tag = build_tag
if build_revision is not None:
self.build_revision = build_revision
if built is not None:
self.built = built
if built_by is not None:
self.built_by = built_by
@property
def build_tool(self):
"""
Gets the build_tool of this BuildInfo.
The tool used to build the version of the bundle
:return: The build_tool of this BuildInfo.
:rtype: str
"""
return self._build_tool
@build_tool.setter
def build_tool(self, build_tool):
"""
Sets the build_tool of this BuildInfo.
The tool used to build the version of the bundle
:param build_tool: The build_tool of this BuildInfo.
:type: str
"""
self._build_tool = build_tool
@property
def build_flags(self):
"""
Gets the build_flags of this BuildInfo.
The flags used to build the version of the bundle
:return: The build_flags of this BuildInfo.
:rtype: str
"""
return self._build_flags
@build_flags.setter
def build_flags(self, build_flags):
"""
Sets the build_flags of this BuildInfo.
The flags used to build the version of the bundle
:param build_flags: The build_flags of this BuildInfo.
:type: str
"""
self._build_flags = build_flags
@property
def build_branch(self):
"""
Gets the build_branch of this BuildInfo.
The branch used to build the version of the bundle
:return: The build_branch of this BuildInfo.
:rtype: str
"""
return self._build_branch
@build_branch.setter
def build_branch(self, build_branch):
"""
Sets the build_branch of this BuildInfo.
The branch used to build the version of the bundle
:param build_branch: The build_branch of this BuildInfo.
:type: str
"""
self._build_branch = build_branch
@property
def build_tag(self):
"""
Gets the build_tag of this BuildInfo.
The tag used to build the version of the bundle
:return: The build_tag of this BuildInfo.
:rtype: str
"""
return self._build_tag
@build_tag.setter
def build_tag(self, build_tag):
"""
Sets the build_tag of this BuildInfo.
The tag used to build the version of the bundle
:param build_tag: The build_tag of this BuildInfo.
:type: str
"""
self._build_tag = build_tag
@property
def build_revision(self):
"""
Gets the build_revision of this BuildInfo.
The revision used to build the version of the bundle
:return: The build_revision of this BuildInfo.
:rtype: str
"""
return self._build_revision
@build_revision.setter
def build_revision(self, build_revision):
"""
Sets the build_revision of this BuildInfo.
The revision used to build the version of the bundle
:param build_revision: The build_revision of this BuildInfo.
:type: str
"""
self._build_revision = build_revision
@property
def built(self):
"""
Gets the built of this BuildInfo.
The timestamp the version of the bundle was built
:return: The built of this BuildInfo.
:rtype: int
"""
return self._built
@built.setter
def built(self, built):
"""
Sets the built of this BuildInfo.
The timestamp the version of the bundle was built
:param built: The built of this BuildInfo.
:type: int
"""
self._built = built
@property
def built_by(self):
"""
Gets the built_by of this BuildInfo.
The identity of the user that performed the build
:return: The built_by of this BuildInfo.
:rtype: str
"""
return self._built_by
@built_by.setter
def built_by(self, built_by):
"""
Sets the built_by of this BuildInfo.
The identity of the user that performed the build
:param built_by: The built_by of this BuildInfo.
:type: str
"""
self._built_by = built_by
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, BuildInfo):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
```
#### File: registry/models/bundle_version_metadata.py
```python
from pprint import pformat
from six import iteritems
import re
class BundleVersionMetadata(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'link': 'JaxbLink',
'id': 'str',
'bundle_id': 'str',
'bucket_id': 'str',
'group_id': 'str',
'artifact_id': 'str',
'version': 'str',
'timestamp': 'int',
'author': 'str',
'description': 'str',
'sha256': 'str',
'sha256_supplied': 'bool',
'content_size': 'int',
'system_api_version': 'str',
'build_info': 'BuildInfo'
}
attribute_map = {
'link': 'link',
'id': 'id',
'bundle_id': 'bundleId',
'bucket_id': 'bucketId',
'group_id': 'groupId',
'artifact_id': 'artifactId',
'version': 'version',
'timestamp': 'timestamp',
'author': 'author',
'description': 'description',
'sha256': 'sha256',
'sha256_supplied': 'sha256Supplied',
'content_size': 'contentSize',
'system_api_version': 'systemApiVersion',
'build_info': 'buildInfo'
}
def __init__(self, link=None, id=None, bundle_id=None, bucket_id=None, group_id=None, artifact_id=None, version=None, timestamp=None, author=None, description=None, sha256=None, sha256_supplied=None, content_size=None, system_api_version=None, build_info=None):
"""
BundleVersionMetadata - a model defined in Swagger
"""
self._link = None
self._id = None
self._bundle_id = None
self._bucket_id = None
self._group_id = None
self._artifact_id = None
self._version = None
self._timestamp = None
self._author = None
self._description = None
self._sha256 = None
self._sha256_supplied = None
self._content_size = None
self._system_api_version = None
self._build_info = None
if link is not None:
self.link = link
if id is not None:
self.id = id
if bundle_id is not None:
self.bundle_id = bundle_id
self.bucket_id = bucket_id
if group_id is not None:
self.group_id = group_id
if artifact_id is not None:
self.artifact_id = artifact_id
if version is not None:
self.version = version
if timestamp is not None:
self.timestamp = timestamp
if author is not None:
self.author = author
if description is not None:
self.description = description
if sha256 is not None:
self.sha256 = sha256
self.sha256_supplied = sha256_supplied
self.content_size = content_size
if system_api_version is not None:
self.system_api_version = system_api_version
self.build_info = build_info
@property
def link(self):
"""
Gets the link of this BundleVersionMetadata.
        A WebLink to this entity.
:return: The link of this BundleVersionMetadata.
:rtype: JaxbLink
"""
return self._link
@link.setter
def link(self, link):
"""
Sets the link of this BundleVersionMetadata.
        A WebLink to this entity.
:param link: The link of this BundleVersionMetadata.
:type: JaxbLink
"""
self._link = link
@property
def id(self):
"""
Gets the id of this BundleVersionMetadata.
The id of this version of the extension bundle
:return: The id of this BundleVersionMetadata.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this BundleVersionMetadata.
The id of this version of the extension bundle
:param id: The id of this BundleVersionMetadata.
:type: str
"""
self._id = id
@property
def bundle_id(self):
"""
Gets the bundle_id of this BundleVersionMetadata.
The id of the extension bundle this version is for
:return: The bundle_id of this BundleVersionMetadata.
:rtype: str
"""
return self._bundle_id
@bundle_id.setter
def bundle_id(self, bundle_id):
"""
Sets the bundle_id of this BundleVersionMetadata.
The id of the extension bundle this version is for
:param bundle_id: The bundle_id of this BundleVersionMetadata.
:type: str
"""
self._bundle_id = bundle_id
@property
def bucket_id(self):
"""
Gets the bucket_id of this BundleVersionMetadata.
The id of the bucket the extension bundle belongs to
:return: The bucket_id of this BundleVersionMetadata.
:rtype: str
"""
return self._bucket_id
@bucket_id.setter
def bucket_id(self, bucket_id):
"""
Sets the bucket_id of this BundleVersionMetadata.
The id of the bucket the extension bundle belongs to
:param bucket_id: The bucket_id of this BundleVersionMetadata.
:type: str
"""
if bucket_id is None:
raise ValueError("Invalid value for `bucket_id`, must not be `None`")
self._bucket_id = bucket_id
@property
def group_id(self):
"""
Gets the group_id of this BundleVersionMetadata.
:return: The group_id of this BundleVersionMetadata.
:rtype: str
"""
return self._group_id
@group_id.setter
def group_id(self, group_id):
"""
Sets the group_id of this BundleVersionMetadata.
:param group_id: The group_id of this BundleVersionMetadata.
:type: str
"""
self._group_id = group_id
@property
def artifact_id(self):
"""
Gets the artifact_id of this BundleVersionMetadata.
:return: The artifact_id of this BundleVersionMetadata.
:rtype: str
"""
return self._artifact_id
@artifact_id.setter
def artifact_id(self, artifact_id):
"""
Sets the artifact_id of this BundleVersionMetadata.
:param artifact_id: The artifact_id of this BundleVersionMetadata.
:type: str
"""
self._artifact_id = artifact_id
@property
def version(self):
"""
Gets the version of this BundleVersionMetadata.
The version of the extension bundle
:return: The version of this BundleVersionMetadata.
:rtype: str
"""
return self._version
@version.setter
def version(self, version):
"""
Sets the version of this BundleVersionMetadata.
The version of the extension bundle
:param version: The version of this BundleVersionMetadata.
:type: str
"""
self._version = version
@property
def timestamp(self):
"""
Gets the timestamp of this BundleVersionMetadata.
The timestamp of the create date of this version
:return: The timestamp of this BundleVersionMetadata.
:rtype: int
"""
return self._timestamp
@timestamp.setter
def timestamp(self, timestamp):
"""
Sets the timestamp of this BundleVersionMetadata.
The timestamp of the create date of this version
:param timestamp: The timestamp of this BundleVersionMetadata.
:type: int
"""
if timestamp is not None and timestamp < 1:
raise ValueError("Invalid value for `timestamp`, must be a value greater than or equal to `1`")
self._timestamp = timestamp
@property
def author(self):
"""
Gets the author of this BundleVersionMetadata.
The identity that created this version
:return: The author of this BundleVersionMetadata.
:rtype: str
"""
return self._author
@author.setter
def author(self, author):
"""
Sets the author of this BundleVersionMetadata.
The identity that created this version
:param author: The author of this BundleVersionMetadata.
:type: str
"""
self._author = author
@property
def description(self):
"""
Gets the description of this BundleVersionMetadata.
The description for this version
:return: The description of this BundleVersionMetadata.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this BundleVersionMetadata.
The description for this version
:param description: The description of this BundleVersionMetadata.
:type: str
"""
self._description = description
@property
def sha256(self):
"""
Gets the sha256 of this BundleVersionMetadata.
The hex representation of the SHA-256 digest of the binary content for this version
:return: The sha256 of this BundleVersionMetadata.
:rtype: str
"""
return self._sha256
@sha256.setter
def sha256(self, sha256):
"""
Sets the sha256 of this BundleVersionMetadata.
The hex representation of the SHA-256 digest of the binary content for this version
:param sha256: The sha256 of this BundleVersionMetadata.
:type: str
"""
self._sha256 = sha256
@property
def sha256_supplied(self):
"""
Gets the sha256_supplied of this BundleVersionMetadata.
Whether or not the client supplied a SHA-256 when uploading the bundle
:return: The sha256_supplied of this BundleVersionMetadata.
:rtype: bool
"""
return self._sha256_supplied
@sha256_supplied.setter
def sha256_supplied(self, sha256_supplied):
"""
Sets the sha256_supplied of this BundleVersionMetadata.
Whether or not the client supplied a SHA-256 when uploading the bundle
:param sha256_supplied: The sha256_supplied of this BundleVersionMetadata.
:type: bool
"""
if sha256_supplied is None:
raise ValueError("Invalid value for `sha256_supplied`, must not be `None`")
self._sha256_supplied = sha256_supplied
@property
def content_size(self):
"""
Gets the content_size of this BundleVersionMetadata.
The size of the binary content for this version in bytes
:return: The content_size of this BundleVersionMetadata.
:rtype: int
"""
return self._content_size
@content_size.setter
def content_size(self, content_size):
"""
Sets the content_size of this BundleVersionMetadata.
The size of the binary content for this version in bytes
:param content_size: The content_size of this BundleVersionMetadata.
:type: int
"""
if content_size is None:
raise ValueError("Invalid value for `content_size`, must not be `None`")
if content_size is not None and content_size < 0:
raise ValueError("Invalid value for `content_size`, must be a value greater than or equal to `0`")
self._content_size = content_size
@property
def system_api_version(self):
"""
Gets the system_api_version of this BundleVersionMetadata.
The version of the system API that this bundle version was built against
:return: The system_api_version of this BundleVersionMetadata.
:rtype: str
"""
return self._system_api_version
@system_api_version.setter
def system_api_version(self, system_api_version):
"""
Sets the system_api_version of this BundleVersionMetadata.
The version of the system API that this bundle version was built against
:param system_api_version: The system_api_version of this BundleVersionMetadata.
:type: str
"""
self._system_api_version = system_api_version
@property
def build_info(self):
"""
Gets the build_info of this BundleVersionMetadata.
The build information about this version
:return: The build_info of this BundleVersionMetadata.
:rtype: BuildInfo
"""
return self._build_info
@build_info.setter
def build_info(self, build_info):
"""
Sets the build_info of this BundleVersionMetadata.
The build information about this version
:param build_info: The build_info of this BundleVersionMetadata.
:type: BuildInfo
"""
if build_info is None:
raise ValueError("Invalid value for `build_info`, must not be `None`")
self._build_info = build_info
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, BundleVersionMetadata):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
```
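Unlike the NiFi DTOs above, this registry model marks several fields as required: the constructor assigns `bucket_id`, `sha256_supplied`, `content_size`, and `build_info` unconditionally, and their setters raise `ValueError` on `None` (and on a negative `content_size`). A small hedged sketch of that behaviour, with import paths assumed from the `registry/models/...` layout:

```python
# Assumed import paths mirroring the registry/models/*.py files above.
from registry.models.bundle_version_metadata import BundleVersionMetadata
from registry.models.build_info import BuildInfo

try:
    BundleVersionMetadata()              # bucket_id is None -> setter raises
except ValueError as err:
    print("rejected:", err)

# Supplying the required fields succeeds; content_size must also be >= 0.
meta = BundleVersionMetadata(
    bucket_id="my-bucket",
    sha256_supplied=False,
    content_size=0,
    build_info=BuildInfo(build_tool="maven"),
)
print(meta.to_dict()["bucket_id"])       # my-bucket
```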
#### File: registry/models/registry_configuration.py
```python
from pprint import pformat
from six import iteritems
import re
class RegistryConfiguration(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'supports_managed_authorizer': 'bool',
'supports_configurable_authorizer': 'bool',
'supports_configurable_users_and_groups': 'bool'
}
attribute_map = {
'supports_managed_authorizer': 'supportsManagedAuthorizer',
'supports_configurable_authorizer': 'supportsConfigurableAuthorizer',
'supports_configurable_users_and_groups': 'supportsConfigurableUsersAndGroups'
}
def __init__(self, supports_managed_authorizer=None, supports_configurable_authorizer=None, supports_configurable_users_and_groups=None):
"""
RegistryConfiguration - a model defined in Swagger
"""
self._supports_managed_authorizer = None
self._supports_configurable_authorizer = None
self._supports_configurable_users_and_groups = None
if supports_managed_authorizer is not None:
self.supports_managed_authorizer = supports_managed_authorizer
if supports_configurable_authorizer is not None:
self.supports_configurable_authorizer = supports_configurable_authorizer
if supports_configurable_users_and_groups is not None:
self.supports_configurable_users_and_groups = supports_configurable_users_and_groups
@property
def supports_managed_authorizer(self):
"""
Gets the supports_managed_authorizer of this RegistryConfiguration.
Whether this NiFi Registry supports a managed authorizer. Managed authorizers can visualize users, groups, and policies in the UI.
:return: The supports_managed_authorizer of this RegistryConfiguration.
:rtype: bool
"""
return self._supports_managed_authorizer
@supports_managed_authorizer.setter
def supports_managed_authorizer(self, supports_managed_authorizer):
"""
Sets the supports_managed_authorizer of this RegistryConfiguration.
Whether this NiFi Registry supports a managed authorizer. Managed authorizers can visualize users, groups, and policies in the UI.
:param supports_managed_authorizer: The supports_managed_authorizer of this RegistryConfiguration.
:type: bool
"""
self._supports_managed_authorizer = supports_managed_authorizer
@property
def supports_configurable_authorizer(self):
"""
Gets the supports_configurable_authorizer of this RegistryConfiguration.
Whether this NiFi Registry supports a configurable authorizer.
:return: The supports_configurable_authorizer of this RegistryConfiguration.
:rtype: bool
"""
return self._supports_configurable_authorizer
@supports_configurable_authorizer.setter
def supports_configurable_authorizer(self, supports_configurable_authorizer):
"""
Sets the supports_configurable_authorizer of this RegistryConfiguration.
Whether this NiFi Registry supports a configurable authorizer.
:param supports_configurable_authorizer: The supports_configurable_authorizer of this RegistryConfiguration.
:type: bool
"""
self._supports_configurable_authorizer = supports_configurable_authorizer
@property
def supports_configurable_users_and_groups(self):
"""
Gets the supports_configurable_users_and_groups of this RegistryConfiguration.
Whether this NiFi Registry supports configurable users and groups.
:return: The supports_configurable_users_and_groups of this RegistryConfiguration.
:rtype: bool
"""
return self._supports_configurable_users_and_groups
@supports_configurable_users_and_groups.setter
def supports_configurable_users_and_groups(self, supports_configurable_users_and_groups):
"""
Sets the supports_configurable_users_and_groups of this RegistryConfiguration.
Whether this NiFi Registry supports configurable users and groups.
:param supports_configurable_users_and_groups: The supports_configurable_users_and_groups of this RegistryConfiguration.
:type: bool
"""
self._supports_configurable_users_and_groups = supports_configurable_users_and_groups
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, RegistryConfiguration):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
```
#### File: registry/models/versioned_flow_snapshot.py
```python
from pprint import pformat
from six import iteritems
import re
class VersionedFlowSnapshot(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'snapshot_metadata': 'VersionedFlowSnapshotMetadata',
'flow_contents': 'VersionedProcessGroup',
'external_controller_services': 'dict(str, ExternalControllerServiceReference)',
'parameter_contexts': 'dict(str, VersionedParameterContext)',
'flow_encoding_version': 'str',
'flow': 'VersionedFlow',
'bucket': 'Bucket',
'latest': 'bool'
}
attribute_map = {
'snapshot_metadata': 'snapshotMetadata',
'flow_contents': 'flowContents',
'external_controller_services': 'externalControllerServices',
'parameter_contexts': 'parameterContexts',
'flow_encoding_version': 'flowEncodingVersion',
'flow': 'flow',
'bucket': 'bucket',
'latest': 'latest'
}
def __init__(self, snapshot_metadata=None, flow_contents=None, external_controller_services=None, parameter_contexts=None, flow_encoding_version=None, flow=None, bucket=None, latest=None):
"""
VersionedFlowSnapshot - a model defined in Swagger
"""
self._snapshot_metadata = None
self._flow_contents = None
self._external_controller_services = None
self._parameter_contexts = None
self._flow_encoding_version = None
self._flow = None
self._bucket = None
self._latest = None
self.snapshot_metadata = snapshot_metadata
self.flow_contents = flow_contents
if external_controller_services is not None:
self.external_controller_services = external_controller_services
if parameter_contexts is not None:
self.parameter_contexts = parameter_contexts
if flow_encoding_version is not None:
self.flow_encoding_version = flow_encoding_version
if flow is not None:
self.flow = flow
if bucket is not None:
self.bucket = bucket
if latest is not None:
self.latest = latest
@property
def snapshot_metadata(self):
"""
Gets the snapshot_metadata of this VersionedFlowSnapshot.
The metadata for this snapshot
:return: The snapshot_metadata of this VersionedFlowSnapshot.
:rtype: VersionedFlowSnapshotMetadata
"""
return self._snapshot_metadata
@snapshot_metadata.setter
def snapshot_metadata(self, snapshot_metadata):
"""
Sets the snapshot_metadata of this VersionedFlowSnapshot.
The metadata for this snapshot
:param snapshot_metadata: The snapshot_metadata of this VersionedFlowSnapshot.
:type: VersionedFlowSnapshotMetadata
"""
if snapshot_metadata is None:
raise ValueError("Invalid value for `snapshot_metadata`, must not be `None`")
self._snapshot_metadata = snapshot_metadata
@property
def flow_contents(self):
"""
Gets the flow_contents of this VersionedFlowSnapshot.
The contents of the versioned flow
:return: The flow_contents of this VersionedFlowSnapshot.
:rtype: VersionedProcessGroup
"""
return self._flow_contents
@flow_contents.setter
def flow_contents(self, flow_contents):
"""
Sets the flow_contents of this VersionedFlowSnapshot.
The contents of the versioned flow
:param flow_contents: The flow_contents of this VersionedFlowSnapshot.
:type: VersionedProcessGroup
"""
if flow_contents is None:
raise ValueError("Invalid value for `flow_contents`, must not be `None`")
self._flow_contents = flow_contents
@property
def external_controller_services(self):
"""
Gets the external_controller_services of this VersionedFlowSnapshot.
The information about controller services that exist outside this versioned flow, but are referenced by components within the versioned flow.
:return: The external_controller_services of this VersionedFlowSnapshot.
:rtype: dict(str, ExternalControllerServiceReference)
"""
return self._external_controller_services
@external_controller_services.setter
def external_controller_services(self, external_controller_services):
"""
Sets the external_controller_services of this VersionedFlowSnapshot.
The information about controller services that exist outside this versioned flow, but are referenced by components within the versioned flow.
:param external_controller_services: The external_controller_services of this VersionedFlowSnapshot.
:type: dict(str, ExternalControllerServiceReference)
"""
self._external_controller_services = external_controller_services
@property
def parameter_contexts(self):
"""
Gets the parameter_contexts of this VersionedFlowSnapshot.
The parameter contexts referenced by process groups in the flow contents. The mapping is from the name of the context to the context instance, and it is expected that any context in this map is referenced by at least one process group in this flow.
:return: The parameter_contexts of this VersionedFlowSnapshot.
:rtype: dict(str, VersionedParameterContext)
"""
return self._parameter_contexts
@parameter_contexts.setter
def parameter_contexts(self, parameter_contexts):
"""
Sets the parameter_contexts of this VersionedFlowSnapshot.
The parameter contexts referenced by process groups in the flow contents. The mapping is from the name of the context to the context instance, and it is expected that any context in this map is referenced by at least one process group in this flow.
:param parameter_contexts: The parameter_contexts of this VersionedFlowSnapshot.
:type: dict(str, VersionedParameterContext)
"""
self._parameter_contexts = parameter_contexts
@property
def flow_encoding_version(self):
"""
Gets the flow_encoding_version of this VersionedFlowSnapshot.
The optional encoding version of the flow contents.
:return: The flow_encoding_version of this VersionedFlowSnapshot.
:rtype: str
"""
return self._flow_encoding_version
@flow_encoding_version.setter
def flow_encoding_version(self, flow_encoding_version):
"""
Sets the flow_encoding_version of this VersionedFlowSnapshot.
The optional encoding version of the flow contents.
:param flow_encoding_version: The flow_encoding_version of this VersionedFlowSnapshot.
:type: str
"""
self._flow_encoding_version = flow_encoding_version
@property
def flow(self):
"""
Gets the flow of this VersionedFlowSnapshot.
The flow this snapshot is for
:return: The flow of this VersionedFlowSnapshot.
:rtype: VersionedFlow
"""
return self._flow
@flow.setter
def flow(self, flow):
"""
Sets the flow of this VersionedFlowSnapshot.
The flow this snapshot is for
:param flow: The flow of this VersionedFlowSnapshot.
:type: VersionedFlow
"""
self._flow = flow
@property
def bucket(self):
"""
Gets the bucket of this VersionedFlowSnapshot.
The bucket where the flow is located
:return: The bucket of this VersionedFlowSnapshot.
:rtype: Bucket
"""
return self._bucket
@bucket.setter
def bucket(self, bucket):
"""
Sets the bucket of this VersionedFlowSnapshot.
The bucket where the flow is located
:param bucket: The bucket of this VersionedFlowSnapshot.
:type: Bucket
"""
self._bucket = bucket
@property
def latest(self):
"""
Gets the latest of this VersionedFlowSnapshot.
:return: The latest of this VersionedFlowSnapshot.
:rtype: bool
"""
return self._latest
@latest.setter
def latest(self, latest):
"""
Sets the latest of this VersionedFlowSnapshot.
:param latest: The latest of this VersionedFlowSnapshot.
:type: bool
"""
self._latest = latest
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, VersionedFlowSnapshot):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
``` |
{
"source": "Jimvin/UsefulHadoopScripts",
"score": 3
} |
#### File: Jimvin/UsefulHadoopScripts/getHiveCounters.py
```python
import sys
import requests
import json
import kerberos
"""
Queries the YARN Job History Server (JHS) for a given jobId and returns the Hive counters
Works with unauthenticated and SPNEGO (Kerberos) authenticated API endpoints
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2016, <NAME>"
__license__ = "Apache License Version 2.0"
# Global configuration
hostname="db-secure.local" # Hostname of job history server
port="19888" # Job history server port number
# getNegotiateString
# Returns the Negotiate header payload for SPNEGO authentication
def getNegotiateString(service, hostname):
negotiate = None
__, krb_context = kerberos.authGSSClientInit("%s@%s" % (service, hostname))
kerberos.authGSSClientStep(krb_context, "")
return kerberos.authGSSClientResponse(krb_context)
# getHttpResponse
# Attempts an unauthenticated call to url, then attempts SPNEGO auth if required
# This does not attempt a Kerberos login, you need to kinit before running this script
def getHttpResponse(url):
response = requests.get(url)
# Check to see if the API endpoint requires Kerberos (SPNEGO) authentication
if (response.status_code == 401 and response.headers["www-authenticate"].startswith("Negotiate")):
# SPNEGO authentication required, let's get a HTTP ticket for the API
negotiateString = getNegotiateString("HTTP", hostname)
        if (negotiateString is None):
            sys.stderr.write("Error: Unable to get Kerberos authentication header. Did you kinit?\n")
            sys.exit(1)
# Build a new HTTP response using SPNEGO
headers = {"Authorization": "Negotiate " + negotiateString}
response = requests.get(url, headers=headers)
return response
# getHiveCounters
# Extracts the Hive counters from the JSON received from the job history server
def getHiveCounters(jData):
hiveCounters = None
for counterGroup in jData['jobCounters']['counterGroup']:
if counterGroup['counterGroupName'] == "HIVE":
hiveCounters = counterGroup['counter']
return hiveCounters
def main():
if len(sys.argv) != 2:
sys.stderr.write("Usage: get_counters.py <job_id>")
exit(1)
    # The script takes one argument: a comma-separated list of YARN jobIds for Hive jobs
    jobIds = sys.argv[1]
allMetrics = {}
allMetrics['hiveJobCounters'] = []
for jobId in jobIds.split(","):
url = 'http://%s:%s/ws/v1/history/mapreduce/jobs/%s/counters' % (hostname, port, jobId)
response = getHttpResponse(url)
# We should either have a non-secure or a SPNEGO response object at this point
if (response.status_code != 200):
# A 404 response indicates the jobId was not found on the server
if (response.status_code == 404):
sys.stderr.write("Error: jobId %s not found on job history server" % (jobId))
else:
sys.stderr.write("HTTP %d: Unable to get counters" % (response.status_code))
exit(1)
jData = json.loads(response.content)
hiveCounters = getHiveCounters(jData)
        if (hiveCounters is None):
sys.stderr.write("No Hive counters in job output, was %s really a Hive job?" % jobId)
exit(2)
metrics = {}
counters = {}
metrics['jobId'] = jobId
for counter in hiveCounters:
counters[counter['name']] = counter['totalCounterValue']
metrics['jobId'] = jobId
metrics['counters'] = counters
allMetrics['hiveJobCounters'].append(metrics)
result = "metrics=" + str(allMetrics) + "\n"
# We can log the result to a file
f = open('/tmp/output.txt', 'a')
f.write(result)
f.close()
# Alternatively we can write the result to stdout
sys.stdout.write(result)
if __name__ == '__main__': main()
``` |
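The script appends one `metrics=...` line per run to `/tmp/output.txt`, and the payload is the `str()` of a Python dict rather than JSON. Below is a hedged sketch of a consumer reading those lines back (assuming the default output path above); since `str(dict)` output is a valid Python literal, `ast.literal_eval` can reconstruct it safely. Remember to `kinit` before running the collector itself against a SPNEGO-protected Job History Server.

```python
import ast

# Parse the "metrics=..." lines written by getHiveCounters.py.
with open('/tmp/output.txt') as f:
    for line in f:
        if not line.startswith('metrics='):
            continue
        metrics = ast.literal_eval(line[len('metrics='):].strip())
        for job in metrics['hiveJobCounters']:
            print(job['jobId'])
            for name, value in job['counters'].items():
                print('  %s = %s' % (name, value))
```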
{
"source": "Jimvy/pytorch_resnet_cifar10",
"score": 4
} |
#### File: pytorch_resnet_cifar10/utils/statistics_meter.py
```python
r"""
Classes to handle statistics about values.
"""
from math import sqrt
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
class AverageStddevMeter(AverageMeter):
def __init__(self):
super().__init__()
self.sum_squared = 0
self.stddev = 0
def reset(self):
super().reset()
self.sum_squared = 0
self.stddev = 0
def update(self, val, n=1):
super().update(val, n)
self.sum_squared += n * val**2
self.stddev = sqrt((self.sum_squared / self.count) - self.avg**2)
``` |
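A short usage sketch: the meters are typically updated once per step with a value and an optional weight `n`, and expose the running weighted average (plus, for `AverageStddevMeter` updated with the default `n=1`, the standard deviation of the values seen so far). The import path follows the repository layout above.

```python
# Assumed import path based on utils/statistics_meter.py above.
from utils.statistics_meter import AverageMeter, AverageStddevMeter

losses = AverageMeter()
batch_times = AverageStddevMeter()

# e.g. inside a training loop: update with the batch value and batch size
for loss, batch_size, elapsed in [(0.9, 128, 0.051),
                                  (0.7, 128, 0.049),
                                  (0.6, 64, 0.030)]:
    losses.update(loss, batch_size)
    batch_times.update(elapsed)

print('avg loss: %.3f over %d samples' % (losses.avg, losses.count))
print('batch time: %.3f +/- %.3f s' % (batch_times.avg, batch_times.stddev))
```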
{
"source": "jimw567/proc-mem-monitor",
"score": 2
} |
#### File: proc-mem-monitor/proc_mem_monitor/proc_mem_gui.py
```python
import tkinter as tk
from tkinter import ttk, Toplevel, scrolledtext, messagebox
from tksheet import Sheet
import datetime
from proc_mem_monitor.proc_mem_handler import get_mem_usages
from proc_mem_monitor.proc_mem_plot import ProcMemPlot
from proc_mem_monitor import VERSION, LABEL_WIDTH, COMBO_WIDTH, STATUS_CODES, \
DEFAULT_REFRESH_INTERVAL, __icon__, \
SHEET_PID_COL, SHEET_CPU_COL, SHEET_RSS_COL, SHEET_CMD_COL, \
SHEET_LAST_UPDATED_COL
def show_plot_window():
global proc_mem_plot
proc_mem_plot.show_plot_window(root_window)
# refresh database every DEFAULT_REFRESH_INTERVAL seconds
def refresh_database():
global root_window
pattern = combo_pattern.get()
mem_usages = get_mem_usages(pattern)
if mem_usages is not None:
update_sheet_proc(mem_usages)
proc_mem_plot.update_history(mem_usages)
proc_mem_plot.plot_metrics()
# add refresh_database back to the eventloop
root_window.after(DEFAULT_REFRESH_INTERVAL*1000, refresh_database)
def start_gui(hostname, total_mem, patterns):
    global combo_pattern, root_window, sheet_proc, sheet_proc_last_row, proc_mem_plot
###############################################################################
# root window
###############################################################################
root_window = tk.Tk()
root_window.geometry('1200x400+20+20')
root_window.title('Process Memory Monitor ' + VERSION + ' - ' + hostname)
root_window_icon = tk.PhotoImage(file=str(__icon__))
root_window.iconphoto(True, root_window_icon)
root_window.grid_columnconfigure(0, weight=0)
root_window.grid_columnconfigure(1, weight=0)
root_window.grid_columnconfigure(2, weight=0)
root_window.grid_columnconfigure(3, weight=1)
cur_grid_row = 0
label_pattern = ttk.Label(root_window, text="Command Pattern", width=LABEL_WIDTH, anchor='w')
label_pattern.grid(row=cur_grid_row, column=0, sticky='w', padx=10, pady=10)
combo_pattern = ttk.Combobox(root_window, width=COMBO_WIDTH)
combo_pattern['values'] = []
combo_pattern.grid(row=cur_grid_row, column=1, sticky='w', pady=10)
cur_grid_row = cur_grid_row + 1
# sheet for pattern
sheet_proc = Sheet(root_window,
default_row_index="numbers", total_rows=200, total_columns=5)
sheet_proc.enable_bindings(("single_select", # "single_select" or "toggle_select"
"drag_select", # enables shift click selection as well
"column_drag_and_drop",
"row_drag_and_drop",
#"column_select",
"row_select",
"column_width_resize",
"double_click_column_resize",
"arrowkeys",
#"row_height_resize",
#"double_click_row_resize",
"right_click_popup_menu",
"rc_select",
#"rc_insert_column",
#"rc_delete_column",
#"rc_insert_row",
#"rc_delete_row",
"copy",
"cut",
"paste",
"delete",
"undo",
"edit_cell"))
sheet_proc.grid(row=cur_grid_row, columnspan=4, sticky='nswe')
root_window.grid_rowconfigure(cur_grid_row, weight=1)
sheet_proc.set_cell_data(0, SHEET_PID_COL, 'PID')
sheet_proc.set_cell_data(0, SHEET_CPU_COL, '%CPU')
sheet_proc.set_cell_data(0, SHEET_RSS_COL, 'RSS(GB)')
sheet_proc.set_cell_data(0, SHEET_CMD_COL, 'CMD')
sheet_proc.set_cell_data(0, SHEET_LAST_UPDATED_COL, 'Last Updated')
sheet_proc.column_width(column=SHEET_PID_COL, width=150)
sheet_proc.column_width(column=SHEET_CPU_COL, width=100)
sheet_proc.column_width(column=SHEET_RSS_COL, width=100)
sheet_proc.column_width(column=SHEET_CMD_COL, width=450)
sheet_proc.column_width(column=SHEET_LAST_UPDATED_COL, width=200)
cur_grid_row = cur_grid_row + 1
sheet_proc_last_row = 0
# command buttons
button_plot = ttk.Button(root_window, text="Plot", command=show_plot_window)
button_plot.grid(row=cur_grid_row, column=1, pady=10)
cur_grid_row = cur_grid_row + 1
proc_mem_plot = ProcMemPlot(hostname)
proc_mem_plot.total_mem = total_mem
combo_pattern['values'] = patterns
if len(patterns) > 0:
combo_pattern.current(0)
root_window.after(DEFAULT_REFRESH_INTERVAL*1000, refresh_database)
root_window.mainloop()
def update_sheet_proc(mem_usage):
global sheet_proc_last_row, sheet_proc
    last_updated = datetime.datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
row = 1
for k in mem_usage.keys():
sheet_proc.set_cell_data(row, SHEET_PID_COL, k)
sheet_proc.set_cell_data(row, SHEET_CPU_COL, mem_usage[k]['cpu'])
sheet_proc.set_cell_data(row, SHEET_RSS_COL, mem_usage[k]['rss'])
sheet_proc.set_cell_data(row, SHEET_CMD_COL, mem_usage[k]['cmd'])
        sheet_proc.set_cell_data(row, SHEET_LAST_UPDATED_COL, last_updated)
row = row + 1
if sheet_proc_last_row > row:
# clear contents from previous dump
for r in range(row, sheet_proc_last_row+1):
sheet_proc.set_cell_data(r, SHEET_PID_COL, '')
sheet_proc.set_cell_data(r, SHEET_CPU_COL, '')
sheet_proc.set_cell_data(r, SHEET_RSS_COL, '')
sheet_proc.set_cell_data(r, SHEET_CMD_COL, '')
sheet_proc.set_cell_data(r, SHEET_LAST_UPDATED_COL, '')
    # update the last row count
sheet_proc_last_row = row
sheet_proc.refresh()
```
#### File: proc-mem-monitor/proc_mem_monitor/proc_mem_handler.py
```python
import subprocess
import json
import re
def get_mem_usages(pattern, host='localhost'):
ps_command = ['ps', 'ax', '--sort', '-rss', '-o', 'pid,%cpu,rss,cmd']
if host == 'localhost':
# ps -o pid,rss,cmd 273899
command = ps_command
else:
command = ['ssh', host] + ps_command
#print('command=', command)
#print('pattern', pattern)
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
ps_dump = p.stdout.read().decode('utf-8')
#print(ps_dump)
ps_lines = ps_dump.strip().split('\n')
rss_total = 0
mem_usage_dict = {}
re_pattern = re.compile(pattern)
for l in ps_lines[1:]:
#print('line', l)
ps_fields = l.split()
pid = ps_fields[0]
cpu = ps_fields[1]
rss = int(ps_fields[2])
cmd = ps_fields[3]
#print(pattern, cmd, re_pattern.search(cmd))
        if re_pattern.search(cmd):
mem_usage_dict[pid] = {'cpu': cpu, 'rss': rss/1000000, 'cmd': cmd}
#print('mem_usage_dict pid ', pid, mem_usage_dict[pid])
rss_total = rss_total + rss
mem_usage_dict['rss_total'] = {'cpu': '', 'rss': rss_total/1000000, 'cmd': ''}
#print(mem_usage_dict)
return mem_usage_dict
```
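A hedged usage sketch for the helper above: it shells out to `ps` (or `ssh <host> ps ...` for a remote host), so it needs a Unix environment. The returned dict maps PID strings to `{'cpu', 'rss', 'cmd'}` entries, with RSS converted from kB to GB, plus a synthetic `rss_total` row.

```python
# Requires a local Unix `ps`; pass host='somehost' to collect over ssh instead.
from proc_mem_monitor.proc_mem_handler import get_mem_usages

usages = get_mem_usages(r'python')   # regex matched against the command column
for pid, info in usages.items():
    if pid == 'rss_total':
        continue
    print('%s  cpu=%s%%  rss=%.3f GB  cmd=%s'
          % (pid, info['cpu'], info['rss'], info['cmd']))

print('total RSS of matching processes: %.3f GB' % usages['rss_total']['rss'])
```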
#### File: proc-mem-monitor/proc_mem_monitor/proc_mem_nogui.py
```python
import time
import datetime
import select
import sys
import os
from proc_mem_monitor import VERSION, DEFAULT_REFRESH_INTERVAL
from proc_mem_monitor.proc_mem_handler import get_mem_usages
def refresh_screen(hostname, total_mem, pattern):
global rss_peak
os.system('clear')
print('Process Memory Monitor ' + VERSION + ' - ' + hostname + \
' Total system memory: ' + str(total_mem))
last_updated = datetime.datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
print('Last refreshed at ', last_updated)
print('\n')
mem_table_format = '{0:10s}|{1:10s}|{2:12s}|{3:20s}'
print(mem_table_format.format('PID', 'CPU %', 'RSS GB', 'Command'))
mem_usages = get_mem_usages(pattern)
for k in mem_usages.keys():
print(mem_table_format.format(k, str(mem_usages[k]['cpu']),
str(mem_usages[k]['rss']), mem_usages[k]['cmd']))
if k == 'rss_total' and mem_usages[k]['rss'] > rss_peak:
rss_peak = mem_usages[k]['rss']
print(mem_table_format.format('rss peak', '', str(rss_peak), ''))
print('\n\nPress "q" followed by "enter" key to exit')
def start_nogui(hostname, total_mem, patterns):
global rss_peak
rss_peak = 0
while True:
input = select.select([sys.stdin], [], [], 1)[0]
if input:
value = sys.stdin.readline().rstrip()
if (value == "q"):
sys.exit(0)
else:
refresh_screen(hostname, total_mem, patterns[0])
time.sleep(1)
``` |
{
"source": "jimw567/toolbox",
"score": 2
} |
#### File: jimw567/toolbox/log2timeline.py
```python
import plotly.figure_factory as ff
from datetime import datetime
import numpy as np
def int2dt(x):
return datetime.fromtimestamp(31536000+x*24*3600).strftime("%Y-%d-%m")
df = [dict(Task="T111111111111111", Start=int2dt(0), Finish=int2dt(1), Resource='Func1'),
dict(Task="T111111111111111", Start=int2dt(3), Finish=int2dt(4), Resource='Func2'),
dict(Task="T222222222222222", Start=int2dt(5), Finish=int2dt(6), Resource='Func1'),
dict(Task="T222222222222222", Start=int2dt(7), Finish=int2dt(8), Resource='Func2'),
]
colors = {'Func1': 'rgb(220, 0, 0)',
'Func2': 'rgb(0, 255, 100)'}
fig = ff.create_gantt(df, colors=colors, index_col='Resource', show_colorbar=True,
group_tasks=True)
num_tick_labels = np.linspace(start=0, stop=10, num=11, dtype=int)
date_ticks = [int2dt(x) for x in num_tick_labels]
fig.layout.xaxis.update({'tickvals': date_ticks, 'ticktext': num_tick_labels})
fig.show()
``` |
{
"source": "jimw567/xbutil-gui",
"score": 2
} |
#### File: xbutil-gui/xbutil_gui/xbutil_gui.py
```python
import tkinter as tk
from tkinter import ttk, messagebox, simpledialog
from tksheet import Sheet
import subprocess
import os
import json
import shutil
import datetime
import argparse
from pathlib import Path
import socket
from xbutil_gui.xbutil_top import XbutilTop
from xbutil_gui.plot_metrics import PlotMetrics
from xbutil_gui.device_manager import DeviceManager
from xbutil_gui.xbutil_handler import get_devices_compute_units, \
get_xbutil_dump, get_devices_from_lspci
from xbutil_gui import VERSION, LABEL_WIDTH, COMBO_WIDTH, STATUS_CODES, \
DEFAULT_XBUTIL_REFRESH_INTERVAL, __resource_path__, __icon__, \
SHEET_TOTAL_ROWS, SHEET_TOTAL_COLS, \
SHEET_HOST_COL, SHEET_DEVICE_COL, SHEET_CU_COL, SHEET_CU_STATUS_COL, \
SHEET_CU_USAGE_COL, SHEET_POWER_COL, SHEET_TEMP_COL, \
SHEET_LAST_UPDATED_COL
# interval in seconds between xbutil json dumps
auto_refresh_plot_seconds = 0
xbutil_top = XbutilTop()
plot_metrics = PlotMetrics()
device_manager = DeviceManager()
shadow_sheet_hosts = ['' for i in range(SHEET_TOTAL_ROWS)]
shadow_sheet_device_id_names = ['' for i in range(SHEET_TOTAL_ROWS)]
pause_sheet = 0
no_sudo_passwd = False
def get_selected_host_device():
global shadow_sheet_hosts, shadow_sheet_device_id_names
sheet_selected = sheet_cluster.get_currently_selected()
if len(sheet_selected) == 0:
messagebox.showinfo("showinfo", "Please select a cell or row on the sheet first.")
return -1, None, None
if str(sheet_selected[0]) == 'row':
selected_row = sheet_selected[1]
else:
selected_row = sheet_selected[0]
selected_host = shadow_sheet_hosts[selected_row]
selected_device_id_name = shadow_sheet_device_id_names[selected_row]
if len(selected_device_id_name) == 0:
messagebox.showinfo("showinfo", "Please wait until devices are scanned.")
return -2, None, None
return 0, selected_host, selected_device_id_name
def show_top_window():
status, selected_host, selected_device_id_name = get_selected_host_device()
if status < 0:
return
xbutil_top.show_top_window(root_window, selected_host, selected_device_id_name)
def show_plot_window():
status, selected_host, selected_device_id_name = get_selected_host_device()
if status < 0:
return
plot_metrics.show_plot_window(root_window, selected_host, selected_device_id_name)
def show_devman_window():
if not no_sudo_passwd:
sudo_password = tk.simpledialog.askstring("Password", "Enter Sudo password:", show='*')
if sudo_password is None or sudo_password == '':
return
# Save the password to a file with permission 0o600 to be secure
password_file = os.path.expanduser("~") + '/.xbutil-gui-tmp'
with open(password_file, 'w') as fh:
os.chmod(password_file, 0o600)
fh.write(sudo_password)
selected_cluster = combo_cluster.current()
selected_cluster_name = combo_cluster['values'][selected_cluster]
device_manager.show_devman_window(root_window, selected_cluster_name)
device_manager.get_devices(clusters[selected_cluster_name])
def toggle_pause_sheet():
global pause_sheet
if pause_sheet == 0:
pause_sheet = 1
button_pause_sheet['text'] = 'Resume'
else:
pause_sheet = 0
button_pause_sheet['text'] = 'Pause'
###############################################################################
# root window
###############################################################################
running_host = socket.gethostname()
root_window = tk.Tk()
root_window.geometry('1500x700+20+20')
root_window.title('Xilinx xbutil GUI ' + VERSION + ' running on ' + running_host)
root_window_icon = tk.PhotoImage(file=str(__icon__))
root_window.iconphoto(True, root_window_icon)
root_window.columnconfigure(0, weight=0, minsize=150)
root_window.columnconfigure(1, weight=0, minsize=150)
root_window.columnconfigure(2, weight=0, minsize=150)
root_window.columnconfigure(3, weight=0, minsize=150)
root_window.columnconfigure(4, weight=0, minsize=150)
root_window.columnconfigure(5, weight=1, minsize=150)
cur_grid_row = 0
label_cluster = ttk.Label(root_window, text="Cluster", width=10, anchor='w')
label_cluster.grid(row=cur_grid_row, column=0, sticky='w', pady=10)
combo_cluster = ttk.Combobox(root_window, width=COMBO_WIDTH)
combo_cluster['values'] = []
combo_cluster.grid(row=cur_grid_row, column=1, columnspan=3, sticky='w', pady=10)
cur_grid_row = cur_grid_row + 1
# sheet for cluster
sheet_cluster = Sheet(root_window,
default_row_index="numbers",
total_rows=SHEET_TOTAL_ROWS,
total_columns=SHEET_TOTAL_COLS
)
sheet_cluster.enable_bindings(("single_select", # "single_select" or "toggle_select"
"drag_select", # enables shift click selection as well
"column_drag_and_drop",
"row_drag_and_drop",
#"column_select",
"row_select",
"column_width_resize",
"double_click_column_resize",
"arrowkeys",
#"row_height_resize",
#"double_click_row_resize",
"right_click_popup_menu",
"rc_select",
#"rc_insert_column",
#"rc_delete_column",
#"rc_insert_row",
#"rc_delete_row",
"copy",
"cut",
"paste",
"delete",
"undo",
"edit_cell"))
sheet_cluster.grid(row=cur_grid_row, columnspan=6, sticky='nswe')
root_window.grid_rowconfigure(cur_grid_row, weight=1)
sheet_cluster.set_cell_data(0, SHEET_HOST_COL, 'Host')
sheet_cluster.set_cell_data(0, SHEET_DEVICE_COL, 'Device ID::Shell')
sheet_cluster.set_cell_data(0, SHEET_CU_COL, 'Compute Unit (CU)')
sheet_cluster.set_cell_data(0, SHEET_CU_STATUS_COL, 'CU Status')
sheet_cluster.set_cell_data(0, SHEET_CU_USAGE_COL, 'CU Usage')
sheet_cluster.set_cell_data(0, SHEET_POWER_COL, 'P(W)')
sheet_cluster.set_cell_data(0, SHEET_TEMP_COL, 'T(C)')
sheet_cluster.set_cell_data(0, SHEET_LAST_UPDATED_COL, 'Last Updated')
sheet_cluster.column_width(column=SHEET_HOST_COL, width=150)
sheet_cluster.column_width(column=SHEET_DEVICE_COL, width=300)
sheet_cluster.column_width(column=SHEET_CU_COL, width=300)
sheet_cluster.column_width(column=SHEET_CU_STATUS_COL, width=60)
sheet_cluster.column_width(column=SHEET_CU_USAGE_COL, width=60)
sheet_cluster.column_width(column=SHEET_POWER_COL, width=50)
sheet_cluster.column_width(column=SHEET_TEMP_COL, width=50)
sheet_cluster.column_width(column=SHEET_LAST_UPDATED_COL, width=200)
cur_grid_row = cur_grid_row + 1
sheet_cluster_last_row = 0
# command buttons for selected host
button_top = ttk.Button(root_window, text="Top", command=show_top_window)
button_top.grid(row=cur_grid_row, column=1, pady=10)
button_plot = ttk.Button(root_window, text="Plot", command=show_plot_window)
button_plot.grid(row=cur_grid_row, column=2, pady=10)
button_manage_devices = ttk.Button(root_window, text="Manage", command=show_devman_window)
button_manage_devices.grid(row=cur_grid_row, column=3, pady=10)
button_pause_sheet = ttk.Button(root_window, text="Pause", command=toggle_pause_sheet)
button_pause_sheet.grid(row=cur_grid_row, column=4, pady=10)
cur_grid_row = cur_grid_row + 1
# global variables
cur_cluster_name = ""
auto_refresh_host_idx = 0
auto_refresh_sheet_row = 0
alveo_spec_dict = {}
def update_sheet_cluster(devices_compute_units, xbutil_dump_json, selected_cluster,
refresh_host):
global auto_refresh_host_idx, auto_refresh_sheet_row, sheet_cluster_last_row
global shadow_sheet_hosts, shadow_sheet_device_id_names, alveo_spec_dict, pause_sheet
if pause_sheet == 1:
return
host_displayed = 0
last_updated = datetime.datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
for i_dn in range(len(devices_compute_units['device_id_names'])):
#refresh_host = 'host' + str(i_dn)
device_vbnv = devices_compute_units['device_vbnvs'][i_dn]
device_id_name = devices_compute_units['device_id_names'][i_dn]
last_metrics = plot_metrics.get_last_metrics(refresh_host, device_id_name)
if xbutil_dump_json is not None:
xbutil_top.generate_top_dict(xbutil_dump_json, refresh_host, device_id_name)
plot_metrics.update_history(xbutil_dump_json, refresh_host, device_id_name)
dev_displayed = 0
for cu in devices_compute_units['compute_units'][i_dn]:
if host_displayed == 0:
sheet_cluster.set_cell_data(auto_refresh_sheet_row, SHEET_HOST_COL, refresh_host)
sheet_cluster.highlight_rows([auto_refresh_sheet_row], bg='light sky blue' )
else:
sheet_cluster.set_cell_data(auto_refresh_sheet_row, SHEET_HOST_COL, '')
sheet_cluster.dehighlight_rows([auto_refresh_sheet_row])
host_displayed = 1
if dev_displayed == 0:
dev = device_id_name
p_display = last_metrics[0]
t_display = last_metrics[1]
board = alveo_spec_dict["shell_board_lut"][device_vbnv]
if t_display > alveo_spec_dict[board]['fpga_temp']['critical']:
sheet_cluster.highlight_cells(auto_refresh_sheet_row,
SHEET_TEMP_COL, bg='red')
elif t_display > alveo_spec_dict[board]['fpga_temp']['warning']:
sheet_cluster.highlight_cells(auto_refresh_sheet_row,
SHEET_TEMP_COL, bg='yellow')
else:
dev = ''
p_display = ''
t_display = ''
dev_displayed = 1
sheet_cluster.set_cell_data(auto_refresh_sheet_row, SHEET_DEVICE_COL, dev)
sheet_cluster.set_cell_data(auto_refresh_sheet_row, SHEET_CU_COL, cu['name'])
sheet_cluster.set_cell_data(auto_refresh_sheet_row, SHEET_CU_STATUS_COL, cu['status'])
sheet_cluster.set_cell_data(auto_refresh_sheet_row, SHEET_CU_USAGE_COL, cu['usage'])
sheet_cluster.set_cell_data(auto_refresh_sheet_row, SHEET_POWER_COL, p_display)
sheet_cluster.set_cell_data(auto_refresh_sheet_row, SHEET_TEMP_COL, t_display)
sheet_cluster.set_cell_data(auto_refresh_sheet_row, SHEET_LAST_UPDATED_COL, last_updated)
# save host/dev_id_name into shadow variables
shadow_sheet_hosts[auto_refresh_sheet_row] = refresh_host
shadow_sheet_device_id_names[auto_refresh_sheet_row] = device_id_name
auto_refresh_sheet_row = auto_refresh_sheet_row + 1
sheet_cluster.refresh()
auto_refresh_host_idx = auto_refresh_host_idx + 1
if auto_refresh_host_idx == len(clusters[combo_cluster['values'][selected_cluster]]):
auto_refresh_host_idx = 0
if sheet_cluster_last_row > auto_refresh_sheet_row:
# clear contents from previous full scan
sheet_cluster.dehighlight_rows(range(auto_refresh_sheet_row, sheet_cluster_last_row+1))
for r in range(auto_refresh_sheet_row, sheet_cluster_last_row+1):
for c in range(SHEET_TOTAL_COLS):
sheet_cluster.set_cell_data(r, c, '')
# update the last row count
sheet_cluster_last_row = auto_refresh_sheet_row
auto_refresh_sheet_row = 1
# dump log file
def dump_log_file():
global log_file, sheet_cluster
if log_file is None:
return
with open(log_file, 'w') as fp:
for r in range(sheet_cluster_last_row):
d = sheet_cluster.get_row_data(r)
fp.write(f'{d[0]:15}|{d[1]:50}|{d[2]:50}|{d[3]:10}|{d[4]:8}|{d[5]:10}|{d[6]:8}|{d[7]:20}')
fp.write('\n')
fp.write(f'{"-"*15}+{"-"*50}+{"-"*50}+{"-"*10}+{"-"*8}+{"-"*10}+{"-"*8}+{"-"*20}')
fp.write('\n')
# get xbutil dump from each host in round robin fashion every XBUTIL_REFRESH_INTERVAL
def refresh_database(json_file):
global auto_refresh_plot_seconds, auto_refresh_host_idx, cur_cluster_name, \
auto_refresh_sheet_row, sheet_cluster_last_row
selected_cluster = combo_cluster.current()
selected_cluster_name = combo_cluster['values'][selected_cluster]
if cur_cluster_name != selected_cluster_name:
print('INFO: switch to new cluster', selected_cluster_name)
auto_refresh_host_idx = 0
auto_refresh_sheet_row = 1
cur_cluster_name = selected_cluster_name
sheet_cluster_last_row = SHEET_TOTAL_ROWS - 1
refresh_host = clusters[selected_cluster_name][auto_refresh_host_idx]
xbutil_dump_json, lspci_dict = get_xbutil_dump(json_file, host=refresh_host)
if xbutil_dump_json is not None:
devices_compute_units = get_devices_compute_units(xbutil_dump_json)
update_sheet_cluster(devices_compute_units, xbutil_dump_json,
selected_cluster, refresh_host)
xbutil_top.show_top_info()
plot_metrics.plot_metrics(auto_refresh_plot_seconds)
elif lspci_dict:
devices_compute_units = get_devices_from_lspci(lspci_dict, alveo_spec_dict)
update_sheet_cluster(devices_compute_units, xbutil_dump_json,
selected_cluster, refresh_host)
else:
# something wrong with current refresh host. Move on to the next host
auto_refresh_host_idx = auto_refresh_host_idx + 1
if auto_refresh_host_idx == len(clusters[selected_cluster_name]):
auto_refresh_host_idx = 0
dump_log_file()
# add refresh_database back to the eventloop
auto_refresh_plot_seconds = auto_refresh_plot_seconds + DEFAULT_XBUTIL_REFRESH_INTERVAL
root_window.after(DEFAULT_XBUTIL_REFRESH_INTERVAL*1000, refresh_database, json_file)
def main():
global plot_metrics, cur_cluster_name, clusters, auto_refresh_host_idx, \
auto_refresh_sheet_row, alveo_spec_dict, no_sudo_passwd, log_file
# First check if XRT is installed
if not Path('/opt/xilinx/xrt/bin/unwrapped/xbutil2').exists():
print('ERROR: Xilinx XRT needs to be installed')
return
parser = argparse.ArgumentParser()
parser.add_argument('--json-file', dest='json_file', default=None,
help='Specify a JSON file for getting the data')
parser.add_argument('--no-sudo-passwd', action='store_true', dest='no_sudo_passwd',
help='Do not prompt for sudo password')
parser.add_argument('--log', dest='log_file', default=None, help='Specify text log file')
args = parser.parse_args()
no_sudo_passwd = args.no_sudo_passwd
log_file = args.log_file
home = os.path.expanduser("~")
user_config_file = home + '/xbutil-gui-config.json'
default_config_file = __resource_path__ / 'xbutil-gui-config.json'
cluster_names = []
if Path(user_config_file).exists():
config_file = Path(user_config_file)
elif default_config_file.exists():
config_file = default_config_file
with open(config_file, 'r') as fp:
xbutil_config_json = json.load(fp)
alveo_spec_file = __resource_path__ / 'alveo-specifications.json'
with open(alveo_spec_file, 'r') as fp:
alveo_spec_dict = json.load(fp)
device_manager.alveo_spec_dict = alveo_spec_dict
clusters = xbutil_config_json.get('clusters', [])
for k in clusters.keys():
cluster_names.append(k)
combo_cluster['values'] = cluster_names
if len(cluster_names) > 0:
combo_cluster.current(0)
# populate the cluster spreadsheet
row = 1
for host in clusters[cluster_names[0]]:
sheet_cluster.set_cell_data(row, 0, host)
row = row + 1
cur_cluster_name = cluster_names[0]
auto_refresh_host_idx = 0
auto_refresh_sheet_row = 1
root_window.after(DEFAULT_XBUTIL_REFRESH_INTERVAL*1000, refresh_database, args.json_file)
root_window.mainloop()
if __name__ == '__main__':
main()
``` |
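`main()` above reads a `clusters` mapping from `~/xbutil-gui-config.json` (falling back to the packaged default), and `refresh_database()` polls the hosts of the selected cluster in round-robin order. Below is a minimal, hypothetical sketch of that config shape as inferred from the code; the cluster and host names are placeholders and real config files may carry additional keys.
```python
# Hypothetical xbutil-gui-config.json shape inferred from main():
# a "clusters" object mapping a cluster name to a list of hostnames.
import json

sample_config = {
    "clusters": {
        "lab-cluster": ["alveo-host-1", "alveo-host-2"],   # placeholder hostnames
        "local": ["localhost"]
    }
}

print(json.dumps(sample_config, indent=2))

# refresh_database() walks the selected cluster's hosts one per refresh interval:
for host in sample_config["clusters"]["lab-cluster"]:
    print("would poll xbutil on", host)
```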
{
"source": "jimwaldo/HarvardX-Tools",
"score": 3
} |
#### File: python/checkData/checkTime.py
```python
from __future__ import division
import json
import sys
from dateutil import parser
def getCurrentTime(line):
# Format: 2014-08-10T12:50:02.849167+00:00
try:
dcl = json.loads(line)
time = dcl['time']
#datetime_format = str('%Y-%m-%dT%H:%M:%S.%f%z')
#currentTime = datetime.datetime.strptime(time, datetime_format)
currentTime = parser.parse(time)
#print "time", currentTime
except ValueError:
print "[Error]: Could not find time"
return currentTime
if __name__ == '__main__':
fileToCheck = sys.argv[1]
print fileToCheck
jfile = open(fileToCheck, 'r')
currentTime = 0
prevTime = 0
wrongTimeOrder = 0
lineCnt = 0
for line in jfile:
if (lineCnt>0):
currentTime = getCurrentTime(line)
else:
currentTime = getCurrentTime(line)
prevTime = currentTime
if (currentTime < prevTime):
wrongTimeOrder += 1
#print "[Error]: Previous Event is newer than Current Event: ", prevTime, "(Previous) vs. ", currentTime, "(Current)"
lineCnt += 1
prevTime = currentTime
percentUnordered = wrongTimeOrder / lineCnt * 100
print "Total Events: ", lineCnt
print "Total Misordered Time Events: ", wrongTimeOrder, " (", percentUnordered, "% of total)"
```
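The ordering check itself is small enough to demonstrate on in-memory JSON lines. The sketch below uses the same `dateutil` parsing; the timestamps are invented for illustration.
```python
# Stand-alone sketch of the out-of-order check with invented timestamps.
import json
from dateutil import parser

lines = [
    '{"time": "2014-08-10T12:50:02.849167+00:00"}',
    '{"time": "2014-08-10T12:50:01.000000+00:00"}',  # older than the previous event
    '{"time": "2014-08-10T12:50:05.000000+00:00"}',
]

prev = None
out_of_order = 0
for raw in lines:
    current = parser.parse(json.loads(raw)['time'])
    if prev is not None and current < prev:
        out_of_order += 1
    prev = current

print("misordered events: %d of %d" % (out_of_order, len(lines)))
```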
#### File: python/checkData/testKAnon.py
```python
import operator
import csv, sys
import utils
def buildKey(ids, dataLine):
"""
Concatenate a set of fields together to build an overall key
This is a simple approach to determining k-anonymity, in which all
of the fields of interest are concatenated as a single key. The
ids coming in should be a list of indexes into the fields in the dataLine.
These will be concatenated in order to form a new key. Note that this
currently assumes that all of the data fields are strings.
"""
retKey = ''
for i in ids:
retKey += dataLine[i]
return retKey
def makeDict(ids, infile):
"""
Create and return a dictionary keyed by a concatenation of fields with value the number
of entries containing all and only those fields.
Taking a list of indexes into a line of a (csv) file and an open csv.reader(), build a
dictionary that is keyed by the string concatenation of the fields in the index with
value the number of times a line containing just those fields in those indexes occurs. Return
the dictionary to the caller.
"""
retDict = {}
for line in infile:
keyAnon = buildKey(ids, line)
if keyAnon in retDict:
retDict[keyAnon] += 1
else:
retDict[keyAnon] = 1
return retDict
if __name__ == '__main__':
"""
When run stand-alone, this script will query for a filename and a level of anonymity
to check for the externally-connected data fields in the .csv file. The user will also
be prompted for either a summary of the anonymity level (in which case only the number
of records that fail to be at least anonymous to the level indicated) will be printed, or
a full report, in which case the concatenation of fields that allow identification finer
than the level entered will be printed. Note that the indexes of the fields that can be
linked to external properties is hard-coded at the moment; it would be good to have a more
flexible mechanism for this but finding one that is not error prone is difficult.
The id fields that could connect to the outside are 0 -> course_id, 6 -> final_cc_cname,
7 -> LoE, 8 -> YoB, 9 -> gender, and 17 -> nforum_posts]
"""
idFields = [0,6,7,8,9,17]
if len(sys.argv) < 4:
fname = utils.getFileName('data file to test')
kanon = utils.getIntVal('Enter value of k to test : ')
full = utils.getStringVal('Enter s for summary, f for full report : ', ['s', 'f'])
else:
fname = sys.argv[1]
kanon = int(sys.argv[2])
full = sys.argv[3]
fin = open(fname, 'rU')
fread = csv.reader(fin)
totals = []
for i in range(0,kanon):
totals.append(0)
fread.next()
anonDict = makeDict(idFields, fread)
sortedDict = sorted(anonDict.iteritems(), key=operator.itemgetter(1))
for k,v in sortedDict:
if v < kanon:
totals[v-1] += 1
if full == 'f':
print v, k
for i in range(0,kanon-1):
print 'Number of buckets with', i+1, 'entries is', totals[i]
```
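The k-anonymity count above boils down to concatenating the chosen columns into a key and tallying identical keys. A stand-alone sketch on a few invented rows (with an invented column selection and k value) is shown below.
```python
# Sketch of the same bucket counting on in-memory rows; values are made up.
rows = [
    ['CB22x', 'US', 'b', '1990', 'm', '3'],
    ['CB22x', 'US', 'b', '1990', 'm', '3'],
    ['CB22x', 'FR', 'm', '1985', 'f', '0'],
]
id_fields = [0, 1, 2, 3, 4, 5]
k = 2

counts = {}
for row in rows:
    key = ''.join(row[i] for i in id_fields)
    counts[key] = counts.get(key, 0) + 1

for key, n in counts.items():
    if n < k:
        print('bucket below k=%d: %s (%d record)' % (k, key, n))
```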
#### File: python/checkData/testSameEvents.py
```python
import sys
import json
from dateutil import parser
import time
def compareLogs(logOne, logTwo, outTimes=False):
mismatches = 0
dateDict = {}
diffCreated = False
for d in sorted(logOne, key=logOne.get, reverse=True):
for l in logOne[d]:
try:
if l in logTwo[d]:
logThis = "Time %s exists in first, but not second" % (d)
except KeyError:
mismatches += 1
# Find out what days
currentTime = parser.parse(d)
currentDate = currentTime.date()
ctime = currentTime.time()
if currentDate not in dateDict:
dateDict[currentDate] = 1
else:
dateDict[currentDate] += 1
logThis = "Missing Time Hack (only exists in one log file): %s" % (d)
logger(logThis)
# outTimes is true, then output to file
if outTimes:
if not diffCreated:
diffCreated = True
fname = time.strftime("testSameEvents_%m_%d_%Y-%H_%M_%S_timelist.txt")
diffFile = open(fname, 'w')
diffFile.write(d + "\n")
return mismatches, dateDict
def logger(logit):
global logCreated
global logFile
if not logCreated:
logCreated = True
fname = time.strftime("testSameEvents_%m_%d_%Y-%H_%M_%S.txt")
print "creating file: ", fname
logFile = open(fname, 'w')
logFile.write(logit + "\n")
def openAndRead(fname):
lineDictForLog = {}
lineNo = 1
fout = open(fname, 'r')
for line in fout:
try:
dcl = json.loads(line)
ts = dcl['time']
if ts not in lineDictForLog:
lineDictForLog[ts] = [line]
else:
lineDictForLog[ts].append(line)
lineNo += 1
except ValueError:
print "[Error]: Could not find time"
fout.close()
return lineDictForLog
if __name__ == '__main__':
global logCreated
global logFile
logCreated = False
logFile = 0
# Takes 2 files as input (3rd optional for outputfile)
fileOne = sys.argv[1]
fileTwo = sys.argv[2]
timeDiffs = len(sys.argv) >= 4 and sys.argv[3] == '-o'
logMsg1 = "LogOne File = %s" % (fileOne)
logMsg2 = "LogTwo File = %s" % (fileTwo)
logger(logMsg1)
logger(logMsg2)
logOneDict = {}
logTwoDict = {}
logOneDict = openAndRead(fileOne)
logTwoDict = openAndRead(fileTwo)
dailyLogMismatchOne = {}
dailyLogMismatchTwo = {}
mismatches = 0
mismatches_logOneLogTwo = 0
mismatches_logTwoLogOne = 0
# Check if fileOne events exist in fileTwo events
mismatches_logOneLogTwo, dailyLogMismatchTwo = compareLogs(logOneDict, logTwoDict, timeDiffs)
mismatches_logTwoLogOne, dailyLogMismatchOne = compareLogs(logTwoDict, logOneDict, timeDiffs)
logMsg3 = "Total Mismatches (LogOne vs. LogTwo): %s" % mismatches_logOneLogTwo
logMsg4 = "Total Mismatches (LogTwo vs. LogOne): %s" % mismatches_logTwoLogOne
logMsg5 = "# of Mismatches per Day"
print logMsg3
print logMsg4
print logMsg5
logger(logMsg1)
logger(logMsg2)
logger(logMsg3)
logger(logMsg4)
logger(logMsg5)
for o in iter(dailyLogMismatchOne):
msg = "Daily Log: %s, Count: %s" % ( o, dailyLogMismatchOne[o] )
print msg
logger(msg)
for t in iter(dailyLogMismatchTwo):
msg = "Daily Log: %s, Count: %s" % ( t, dailyLogMismatchTwo[t] )
print msg
logger(msg)
```
#### File: python/classData/buildEmailList.py
```python
from classData import user, userprofile as userp
import sys
import csv
def split(name):
"""
Splits a string of form firstname lastname into two strings, returning a list
containing those names.
"""
spIn = name.rfind(' ')
first = name[ :spIn]
last = name[spIn + 1: ]
return [first, last]
if __name__ == '__main__':
csv.field_size_limit(sys.maxsize)
ufile = csv.reader(open('users.csv','r'))
udict = user.builddict(ufile)
pfile = csv.reader(open('profiles.csv', 'r'))
pdict = userp.builddict(pfile)
outfile = csv.writer(open('mailAddresses.csv','w'))
for uid in iter(udict):
if uid in pdict:
name = pdict[uid].name
else :
name = 'Missing Profile'
[first, last] = split(name)
outfile.writerow([first, last, udict[uid].email])
```
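Because `split` uses `rfind(' ')`, everything before the last space is treated as the first name. A quick illustration with invented names:
```python
# Behaviour of the split() helper above on multi-word names.
def split(name):
    sp = name.rfind(' ')
    return [name[:sp], name[sp + 1:]]

print(split('Ada Lovelace'))        # ['Ada', 'Lovelace']
print(split('Jean van der Meer'))   # ['Jean van der', 'Meer']
```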
#### File: python/classData/certificates.py
```python
import logging
import json
class cert(object):
"""
Hold the information from the course_certificates data dump files
This object reflects all of the fields held in the course_certificates
data dump file, whether interesting or not. There is also a utility
function to build a dictionary of all of the information, keyed by
user_id, and a function that will remove all of the lines in a csv file
for the certificates that are not of the correct length.
"""
def __init__(self, uid, durl, grade, courseid, key, distinction, status, \
ver_uuid, down_uuid, name, cdate, mdate, ereason):
"""
Constructor
"""
self.uid = uid
self.download_url = durl
self.grade = grade
self.courseid = courseid
self.key = key
self.distinction = distinction
self.status = status
self.ver_uuid = ver_uuid
self.down_uuid = down_uuid
self.name = name
self.cdate = cdate
self.mdate = mdate
self.error_reason = ereason
class CertEncoder(json.JSONEncoder):
"""
Extends the default JSONEncoder to allow for json.dumps() to work on
cert objects.
By default, it will simply return a dictionary containing all the member
variables of the cert and their values
"""
def default(self, obj):
"""
Default encoder
"""
if isinstance(obj, cert):
return obj.__dict__
return json.JSONEncoder.default(self, obj)
def decodeCertJSON(dict):
"""
Decodes the JSON dictionary corresponding to a certificate
Use with json.load(file, object_hook=decodeCertJSON) or
json.loads(string, object_hook=decodeCertJSON)
Parameters
-----------
dict: dictionary of certificate attributes
"""
if "uid" in dict:
return cert(dict["uid"], dict["download_url"], dict["grade"], dict["courseid"],
dict["key"], dict["distinction"], dict["status"], dict["ver_uuid"],
dict["down_uuid"], dict["name"], dict["cdate"], dict["mdate"], dict["error_reason"])
return dict
def builddict(f):
"""
Construct a dictionary of certificate recipients, keyed by the id of the recipient
The dictionary constructed by this function will contain a certificate object, keyed
by the id of the recipient. If a line is read that is of the wrong size, a warning
is logged.
Note that the second field of the raw csv file is ignored; this field is an id that
is only meaningful in the context of this file. The id in the first field is the one
that can be used to identify the student in other data files in this course (and in
others, if they use the same edX registration).
Parameters
-----------
f: csv.reader
An open reader containing the information about the certificate recipients
"""
retdict = {}
lineno = 0
#skip the header line
f.next()
for row in f:
lineno += 1
if (len(row) != 14):
logging.warning("bad line at " + str(lineno))
else:
[uid, ignore, durl, grade, cid, key, dist, stat, vuid, duid, name, cd, md, er] = row
if uid not in retdict:
rec = cert(uid, durl, grade, cid, key, dist, stat, vuid, duid, name, \
cd, md, er)
retdict[uid] = rec
return retdict
def readdict(fin):
"""
Reconstructs a certificates dictionary from an open csv file like one written by writedict
Build a dictionary of the same form as builddict, indexed by student id, from an
open csv file that contains the contents of such a dictionary that has been previously
saved.
"""
retdict = {}
fin.next()
for uid, durl, grade, courseid, key, distinction, status, ver_uuid, down_uuid, name, cdate, mdate, ereason in fin:
ncert = cert(uid, durl, grade, courseid, key, distinction, status, ver_uuid, down_uuid, name,
cdate, mdate, ereason )
retdict[uid] = ncert
return retdict
def writedict(fout, cdict):
"""
Write the contents of a certificates dictionary to an opened csv file
Write out the contents of a certificates dictionary to a csv file so that it can be
reconstructed by readdict.
"""
fout.writerow(['Student id', 'Download URL', 'Grade', 'Course Id', 'Key', 'Distinction',
'Status', 'Verify UUID', 'Download UUID', 'Name', 'Date Created',
'Date Modified', 'Error Reason'])
for c in iter(cdict):
oc = cdict[c]
fout.writerow([oc.uid, oc.download_url, oc.grade, oc.courseid, oc.key, oc.distinction,
oc.status, oc.ver_uuid, oc.down_uuid, oc.name, oc.cdate, oc.mdate,
oc.error_reason])
```
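The `writedict`/`readdict` pair above is a plain header-plus-rows CSV round trip. The sketch below replays the same pattern on an invented two-field record so it can run without the certificates data.
```python
# Minimal CSV round-trip sketch mirroring writedict/readdict; data is invented.
import csv
import io

records = {'u1': ('0.91', 'downloadable'), 'u2': ('0.55', 'notpassing')}

buf = io.StringIO()
writer = csv.writer(buf)
writer.writerow(['Student id', 'Grade', 'Status'])   # human-readable header
for uid, (grade, status) in records.items():
    writer.writerow([uid, grade, status])

buf.seek(0)
reader = csv.reader(buf)
next(reader)                                          # skip the header on read
restored = {uid: (grade, status) for uid, grade, status in reader}
assert restored == records
print(restored)
```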
#### File: python/classData/course_enrollment.py
```python
import logging
#from convertfiles import xmltocsv
class course_enrollment(object):
"""
A representation of the state kept concerning a student's enrollment
This object encapsulates the time of enrollment for a student. There isn't
much here other than the student id, the course id, and the date of enrollment
"""
def __init__(self, uid, course_id, enroll_d):
"""
Constructor for an object containing the enrollment state
Note that the id that is only relevant within the file is not part of this
object.
"""
self.uid = uid
self.course_id = course_id
self.enroll_d = enroll_d
def builddict(f):
"""
Build a dictionary of the enrollment date for a student
The dictionary that is returned by this function is indexed by student id.
The internal id that is stored in the raw data file is dropped, as it has no
meaning outside of this file.
Parameters
-----------
f: csv.reader
An open csv reader object that contains the course enrollment data
"""
retdict = {}
lineno = 0
for line in f:
lineno += 1
if len(line) != 4:
logging.warning('bad row size at line ' + str(lineno))
continue
[oid, user_id, course_id, enrolld] = line
rec = course_enrollment(user_id, course_id, enrolld)
retdict[user_id] = rec
return retdict
def readdict(fin):
"""
Reconstruct a dictionary of enrollment information from an open .csv file previously created by writedict
Reads the contents of a csv file containing the dump of a course enrollment dictionary, and creates
a dictionary containing that enrollment data. Input is a csv.reader object.
Returns a dictionary, indexed by user id, where each line is a course enrollment object.
"""
retDict = {}
fin.next()
for [uid, cid, edate] in fin:
retDict[uid] = course_enrollment(uid, cid, edate)
return retDict
def writedict(fout, pDict):
"""
Save a dictionary or enrollment data to an open .csv file, to be written by readdict
Writes the contents of a course enrollment dictionary to an open csv file. The file will have
a human-readable header placed on it that will need to be skipped on reading.
"""
fout.writerow(['User id', 'Course id', 'Enrollment date'])
for u in iter(pDict):
fout.writerow([u, pDict[u].course_id, pDict[u].enroll_d])
# def scrubstate(f1, f2):
# """
# Clean up the state of a course enrollment csv file
#
# Reads through a csv file containing the course enrollment data, removing any
# lines that are of the wrong size. Produces a scrubbed csv file
#
# Parameters
# --------------
# f1: csv reader
# An open csv reader, containing the data to be cleaned up
# f2: csv writer
# An open csv writer, that will take all of the lines of the right
# size
# """
#
# xmltocsv.scrubcsv(f1, f2, 4)
```
#### File: python/classData/globalUserList.py
```python
import glob
import csv
import user
def addDup(inDict, key, oval, nval):
if key in inDict:
inDict[key].append(nval)
else:
inDict[key]= [oval, nval]
def readId2Name(fin):
"""
Reconstruct a global id=>username dictionary from an open csv file
Reads the globalId2Name file (or equivalent) and constructs an id=>username
dictionary. Input is an open csv.reader file such as the one constructed
by the main method of this module. Returns a dictionary of id=>username
"""
retDict = {}
fin.next()
for iden, name in fin:
retDict[iden] = name
return retDict
def readName2Id(fin):
"""
Reconstructs a global username=>id dictionary from an open csv file
Reads the globalName2Id file (or equivalent) and constructs a username=>id
dictionary. Input is an open csv.reader files such as the one constructed
by the main method of this module. Returns a dictionary of username=>id
"""
retDict= {}
fin.next()
for name, iden in fin:
retDict[name] = iden
return retDict
def buildMaps(udict, idDict, nameDict, dupNameDict, dupIdDict):
for u in iter(udict):
if u not in idDict:
idDict[u] = udict[u].username
elif idDict[u] != udict[u].username:
addDup(dupNameDict, u, idDict[u], udict[u].username)
if udict[u].username not in nameDict:
nameDict[udict[u].username] = u
elif nameDict[udict[u].username] != u:
addDup(dupIdDict, udict[u].username, nameDict[udict[u].username], u)
def main():
ulist = glob.glob('*/*/users.csv')
idDict = {}
nameDict = {}
dupNameDict = {}
dupIdDict = {}
for fname in ulist:
fin = open(fname, 'r')
fcsv = csv.reader(fin)
udict = user.builddict(fcsv)
buildMaps(udict, idDict, nameDict, dupNameDict, dupIdDict)
fin.close()
idOut = csv.writer(open('globalid2name.csv', 'w'))
idOut.writerow(['User ID', 'User Name'])
for i in iter(idDict):
idOut.writerow([i, idDict[i]])
nameOut = csv.writer(open('globalname2id.csv', 'w'))
nameOut.writerow(['User Name', 'User ID'])
for n in iter(nameDict):
nameOut.writerow([n, nameDict[n]])
if len(dupNameDict) > 0:
nameDupOut = csv.writer(open('nameDups.csv', 'w'))
for u in iter(dupNameDict):
nameDupOut.writerow([u, dupNameDict[u]])
else:
print("No duplicate names found")
if len(dupIdDict) > 0:
idDupOut = csv.writer(open('idDups.csv', 'w'))
for u in iter(dupIdDict):
idDupOut.writerow([u, dupIdDict[u]])
else:
print("No duplicate ids found")
if __name__ == '__main__':
main()
```
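The duplicate tracking in `addDup`/`buildMaps` keeps the first value alongside every later conflicting value. The sketch below replays that behaviour on an invented user id with two conflicting usernames.
```python
# Replays addDup: the first conflict records old and new, later conflicts append.
def add_dup(dups, key, old_val, new_val):
    if key in dups:
        dups[key].append(new_val)
    else:
        dups[key] = [old_val, new_val]

id_to_name = {'42': 'alice'}         # invented id/username
dup_names = {}
for new_name in ['alice2', 'alice3']:
    if id_to_name['42'] != new_name:
        add_dup(dup_names, '42', id_to_name['42'], new_name)

print(dup_names)   # {'42': ['alice', 'alice2', 'alice3']}
```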
#### File: python/classData/ipGeoloc.py
```python
import csv
import sys
def builddict(fin):
"""
Build a dictionary mapping from username to country for all classes.
Takes as input an open csv.reader on the edX supplied file that lists
classname, country, and username and returns a dictionary that maps from
username to country
"""
retdict = {}
for course, country, username in fin:
if username not in retdict:
retdict[username] = country
return retdict
def buildIdtoLoc(geoDict, idNameDict):
"""
Build a dictionary mapping from user id to country for all classes
Takes as input a dictionary mapping usernames to countries (as built by
builddict, above) and a dictionary that maps from ids to names (as is built
by globalUserList.readId2Name) and returns a dictionary that maps from
user id to country
"""
retDict = {}
for i in iter(idNameDict):
if idNameDict[i] in geoDict:
retDict[i] = geoDict[idNameDict[i]]
else:
retDict[i] = 'unknown'
return retDict
def readIdToLoc(fname):
"""
Builds a dictionary from user id => geoloc from a csv file containing the information
Takes as input the name of the csv file that was created from a call to writeIdtoLoc
"""
retDict = {}
fin = csv.reader(open(fname, 'r'))
fin.next()
for id, loc in fin:
retDict[id] = loc
return retDict
def writeIdToLoc(fname, idtoLocDict):
outf = csv.writer(open(fname, 'w'))
for i in iter(idtoLocDict):
outf.writerow([i, idtoLocDict[i]])
def readNametoLoc(fname):
"""
Builds a dictionary from username => geoloc from a csv file containing the information
Takes as input the name of the csv file that was created by a call to writeNametoLoc
"""
retDict = {}
fin = csv.reader(open(fname, 'r'))
fin.next()
for uname, loc in fin:
retDict[uname] = loc
return retDict
def writeNameToLoc(fname, ntolDict):
"""
Writes out a username=>location dictionary to a csv file
Creates a csv file containing the mapping from username=>location, which can be read
by readNametoLoc. The dictionary itself is of the sort constructed by builddict.
"""
outf = csv.writer(open(fname, 'w'))
for n in iter(ntolDict):
outf.writerow([n, ntolDict[n]])
def buildidtoNameDict(fin):
retDict = {}
fin.next()
for n, id in fin:
retDict[id] = n
return retDict
if __name__ == '__main__':
locfName = sys.argv[1]
gufName = sys.argv[2]
lin = csv.reader(open(locfName, 'r'))
uin = csv.reader(open(gufName, 'r'))
name2loc = builddict(lin)
id2name = buildidtoNameDict(uin)
id2loc = buildIdtoLoc(name2loc, id2name)
n2lout = csv.writer(open('nametolocation.csv', 'w'))
id2lout = csv.writer(open('idtolocation.csv', 'w'))
n2lout.writerow(['User name', 'Country'])
for n in iter(name2loc):
n2lout.writerow([n, name2loc[n]])
id2lout.writerow(['User id', 'Country'])
for i in iter(id2loc):
id2lout.writerow([i, id2loc[i]])
```
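`buildIdtoLoc` is just the composition of the username-to-country and id-to-username lookups, with `'unknown'` as the fallback. A tiny sketch with invented users:
```python
# Composition of the two lookups performed by buildIdtoLoc; data is invented.
name_to_country = {'alice': 'US', 'bob': 'FR'}
id_to_name = {'1': 'alice', '2': 'bob', '3': 'carol'}

id_to_country = {}
for uid, name in id_to_name.items():
    id_to_country[uid] = name_to_country.get(name, 'unknown')

print(id_to_country)   # {'1': 'US', '2': 'FR', '3': 'unknown'}
```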
#### File: python/classData/scrapeEmail.py
```python
import user
import certificates
import os
import csv
def getFileName(prompt):
while (True):
fname = raw_input("Please enter file name for " + prompt + ' : ')
if os.path.exists(fname):
return fname
else:
print ("file entered does not exist, please retry")
if __name__ == '__main__':
f1name = getFileName('Enter name of the user file')
f1 = csv.reader(open(f1name, 'r'))
f2name = getFileName('Enter the name of the certificates file')
f2 = csv.reader(open(f2name, 'r'))
udict = user.builddict(f1)
cdict = certificates.builddict(f2)
out1 = open('allmail', 'w')
out2 = open('certMail', 'w')
for u in iter(udict):
out1.write(udict[u].email + '\n')
if u in cdict:
out2.write(udict[u].email + '\n')
```
#### File: python/convertfiles/makeIdCountryFile.py
```python
from classData import user
import csv
import utils
import sys
def buildNameCountry(cfile):
"""
Take an open .csv file with format country, username and create a dictionary
indexed by username with value country
"""
retDict = {}
for [country, username] in cfile:
retDict[username] = country
return retDict
if __name__ == '__main__':
if len(sys.argv) > 3:
cFileName = sys.argv[1]
userFileName = sys.argv[2]
clName = sys.argv[3]
else:
cFileName = utils.getFileName('Name of user name to country file : ')
userFileName = utils.getFileName('Name of user file : ')
clName = utils.getNewFileName('Name of class for the output file : ')
cfile = csv.reader(open(cFileName, 'r'))
nameDict = buildNameCountry(cfile)
ufile = csv.reader(open(userFileName, 'r'))
userDict = user.builddict(ufile)
clfName = clName + '_id_country.csv'
outfile = csv.writer(open(clfName, 'w'))
users = userDict.keys()
outfile.writerow(['User id', 'Country'])
for u in users:
userName = userDict[u].username
if (userName in nameDict):
country = nameDict[userDict[u].username]
outfile.writerow([u, country])
else:
print ('unknown userName ' + userName)
```
#### File: python/demographics/demographics.py
```python
from classData import userprofile as prof
import sys
import csv
from classData import ipGeoloc as geo
import utils
def makeCountryDict(fin, filename):
idC = csv.reader(open(filename, 'r'))
return geo.buildIdtoLoc(fin, idC)
def writeAnon(outf, p):
"""
Write an anonymized version of the data into a line of a csv file
This function will create a file with the user profile data, stripped of
identifying information (the user_id, name, and address of the student)
All that is left is the gender, the year of birth, the level of education,
and the self-reported goal for taking the course.
Parameters
----------
outf: csv.writer
File in which to place the anonymized data
p: profile
The full profile information for the student
"""
outf.writerow([p.gender, p.yob, p.ledu, p.goal])
male = female = undisc = 0
countries = {}
ages = {}
edu = {'p_se':0,
'p_oth':0,
'm':0,
'b':0,
'hs':0,
'jhs':0,
'el':0,
'none':0,
'other':0,
'unk':0
}
csv.field_size_limit(1000000000)
clName = sys.argv[1]
ufile = utils.getFileName('user file')
ufin = csv.reader(open(ufile, 'r'))
profile_file = utils.getFileName('student profiles')
infile = csv.reader(open(profile_file, 'r'))
profiles = prof.builddict(infile)
countryFile = utils.getFileName('username and country file')
countryDict = makeCountryDict(ufin, countryFile)
outName = raw_input("enter file name for output; nothing for stdout : ")
if outName == '':
outp = sys.stdout.write
else:
outFile = open(outName, 'w')
outp = outFile.write
outfile = csv.writer(open('anon'+sys.argv[1], 'w'))
students = profiles.keys()
for s in students:
if profiles[s].gender == 'm':
male += 1
elif profiles[s].gender == 'f':
female += 1
else:
undisc += 1
if (s in countryDict):
where = countryDict[s]
else:
where = None
if where == None:
where = 'unknown'
if where not in countries:
countries[where] = 1
else:
countries[where] += 1
ed = profiles[s].ledu
if ed in edu:
edu[ed] += 1
else:
edu['unk'] += 1
yb = profiles[s].yob
if yb.isdigit():
age = 2013 - int(yb)
if age not in ages:
ages[age] = 0
ages[age] += 1
writeAnon(outfile, profiles[s])
outp("Demographic information for " + clName + '\n')
outp( 'Gender distribution:' + '\n')
outp( '\tMale : ' + str(male) + '\n')
outp( '\tFemale : ' + str(female) + '\n')
outp( '\tunspecified : ' + str(undisc)+ '\n')
outp( '\tTotal : ' + str(male + female + undisc) + '\n')
outp(''+ '\n')
outp( 'Reported education level'+ '\n')
outp( '\tPh.D. in science or engineering : ' + str(edu['p_se'])+ '\n')
outp( '\tPh.D. in another field : ' + str(edu['p_oth'])+ '\n')
outp( '\tMaster or professional degree : ' + str(edu['m'])+ '\n')
outp( '\tBachelor degree : ' + str(edu['b'])+ '\n')
outp( '\tSecondary/high school : ' + str(edu['hs'])+ '\n')
outp( '\tJunior high school : ' + str(edu['jhs'])+ '\n')
outp( '\tElementary school : ' + str(edu['el'])+ '\n')
outp( '\tNone : ' + str(edu['none'])+ '\n')
outp( '\tOther : ' + str(edu['other'])+ '\n')
outp(''+ '\n')
outp( 'Participants by age'+ '\n')
ca = sorted(ages.keys())
for a in ca:
outp( '\t' + str(a) + ' : ' + str(ages[a])+ '\n')
outp( ''+ '\n')
outp( 'Participants by country'+ '\n')
ct = sorted(countries.keys())
for c in ct:
outp('\t' + c + ' : ' + str(countries[c])+ '\n')
outp( ''+ '\n')
```
#### File: python/edXDump/buildClassList.py
```python
import glob
import sys
import os
def reduceName(flist):
"""
Return a list of classes whose data is being dealt with
This routine takes the file names supplied by edX and returns a
list of classes that can be used to isolate the data files by
course. The current algorithm simply removes the known institutional
prefix, and the '.mongo' extension; it assumes that it is fed
only the results of an operation that will give exemplars
of the *.mongo files supplied.
This is something of a hack. As we continue to talk with edX
about naming conventions, this should be one of the few places
that will require change.
"""
clist = []
postSlice = -11
preSlice = 0
for f in flist:
if 'HarvardX-' in f:
preSlice = 9
elif 'Harvardx-' in f:
preSlice = 9
elif 'HarvardKennedySchool' in f:
preSlice = 21
elif 'Harvard_DCE' in f:
preSlice = 12
elif 'Harvard-' in f:
preSlice = 8
elif 'Harvard_' in f:
preSlice = 8
elif 'HARVARD-' in f:
preSlice = 8
elif 'HarvardUniversityNoSpaces-' in f:
preSlice = 26
elif 'HarvardUniversityResearchX-' in f:
preSlice = 27
elif 'HarvardXPLUS-' in f:
preSlice = 13
else:
preSlice = 0
cname = f[preSlice:postSlice]
if cname not in clist:
clist.append(cname)
return clist
def writeList(of, cl):
"""
Writes a class list passed in as cl to the open file of
Saves a classlist to a file. The file must be opened for write
or append. The contents of cl will be written one entry per line
"""
for c in cl:
of.write(c + '\n')
def readList(inf):
"""
Reads a classlist from the open file inf and returns a list
Returns a list of the contents of inf, with each entry in the
list being a single line of the file. Intended to return a
classlist of the type written by writeList, with the newline removed
"""
cl = [];
for line in inf:
cl.append(line[ :-1])
return cl
if __name__ == '__main__':
if len(sys.argv) > 1:
os.chdir(sys.argv[1])
flist = glob.glob('*.mongo')
clist = reduceName(flist)
ofile = open('weeklyClassList', 'w')
writeList(ofile, clist)
```
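`reduceName` strips a known institutional prefix and the final 11 characters of each dump file name. The sketch below assumes a hypothetical name ending in `-prod.mongo` (exactly 11 characters); that suffix is a guess and may not match the real edX naming scheme.
```python
# Illustration of the slicing in reduceName(); the file name is hypothetical.
def reduce_one(fname):
    pre = 9 if fname.startswith('HarvardX-') else 0
    return fname[pre:-11]

print(reduce_one('HarvardX-CB22x-2013_Spring-prod.mongo'))   # CB22x-2013_Spring
```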
#### File: python/logs/distillUnknownLabels.py
```python
import json
import glob
def buildList(fname, rdict):
with open (fname, 'r') as fin:
for line in fin:
lstr = json.loads(line)
st = lstr['event_type']
if st not in rdict:
rdict[st] = 1
else:
rdict[st] += 1
return rdict
if __name__ == '__main__':
ukdict = {}
fname = glob.glob('*/unknown*.log')
for n in fname:
ukdict = buildList(n, ukdict)
s = sorted(ukdict.items(), key = lambda(k,v):(v,k))
for i in s:
print i
```
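The tally in `buildList` plus the sort at the end is the standard count-and-rank pattern; an equivalent stdlib sketch using `collections.Counter` on invented log lines is shown below (the `lambda(k,v)` form in the original is Python 2 only).
```python
# Equivalent event_type tally with collections.Counter; log lines are invented.
import json
from collections import Counter

lines = [
    '{"event_type": "play_video"}',
    '{"event_type": "seq_next"}',
    '{"event_type": "play_video"}',
]

counts = Counter(json.loads(l)['event_type'] for l in lines)
for event_type, n in counts.most_common():
    print(event_type, n)
```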
#### File: python/logs/trackingLogParser.py
```python
import re
import csv
import json # ujson is faster!
# list manually maintained
possible_verbs = ["annotation_create"
,"book_view"
#,"email" # unreliable and not used for now
,"forum_close"
,"forum_create"
,"forum_delete"
,"forum_downvote"
,"forum_endorse"
,"forum_flag_abuse"
,"forum_follow"
,"forum_pin"
,"forum_reply"
,"forum_search"
,"forum_unflag_abuse"
,"forum_unfollow"
,"forum_unpin"
,"forum_unvote"
,"forum_update"
,"forum_upvote"
,"forum_view"
,"forum_view_followed_threads"
,"forum_view_inline"
,"forum_view_user_profile"
,"page_view"
,"page_close"
,"poll_answer"
,"poll_view"
,"problem_check"
,"problem_save"
,"problem_view"
,"problem_show_answer"
,"seq_goto"
,"seq_next"
,"seq_prev"
,"video_change_speed"
,"video_hide_transcript"
,"video_pause"
,"video_play"
,"video_seek"
,"video_show_transcript"
,"wiki_view"]
# TODO: ADD THESE TO COURSE AXIS (?)
# top-level tabs are not available in course axes so I've hard-coded them in here;
# needs to be updated for any new courses with custom top-level pages;
# maps the page's last url slug to the tab's display name
top_level_tabs = {
# General
"info" : "Course Info"
,"progress" : "Progress"
,"notes" : "My Notes"
,"open_ended_notifications" : "Open Ended Panel"
,"open_ended_problems" : "Open Ended Problems"
,"peer_grading" : "Peer Grading"
,"instructor" : "Instructor"
,"about" : "About" # where is this page? (/courses/HarvardX/ER22x/2013_Spring/about)
# CB22x/2013_Spring
,"01356a17b5924b17a04b7fc2426a3798" : "Syllabus"
,"57e9991c0d794ff58f7defae3e042e39" : "Advice for Participants"
# ER22x/2013_Spring
,"7a8a540432444be59bd3b6a6ddf725ff" : "Weekly Forum Digest"
,"677191905f71448aab5346a3ed038f87" : "Frequently Asked Questions"
# PH278x/2013_Spring
,"1e7a1201a4214fbaa1d675393c61be5f" : "Syllabus"
,"861a584197fc40a1af55117629d087b8" : "Textbook"
,"782c7c85b9784de5a18199d1c2eaedfa" : "Readings"
,"73f5c222bdc3440bb8dd0423c86e219d" : "Solution Discussion Groups"
# PH207x/2012_Fall
,"datasets" : "Data Sets"
,"faq" : "FAQ"
# CS50x/2012
,"discuss" : "CS50 Discuss"
,"gradebook" : "CS50 Gradebook"
,"spaces" : "CS50 Spaces"
# HLS1x[A|B|C|D]/Copyright
,"Logistics" : "Logistics"
,"Syllabus" : "Syllabus"
,"Resources" : "Resources"
,"Staff" : "Course Staff and Teaching Fellows"
}
# REGEX
# global so not recompiling each time
# all verb regex should be applied to event_type unless otherwise noted
re_hash = re.compile("[0-9a-f]{24,32}")
re_video_play = re.compile("^play_video$")
re_video_pause = re.compile("^pause_video$")
re_video_show_transcript = re.compile("^show_transcript$")
re_video_hide_transcript = re.compile("^hide_transcript$")
re_video_change_speed = re.compile("^speed_change_video$")
re_video_seek = re.compile("^seek_video$")
re_seq_goto = re.compile("^seq_goto$")
re_seq_next = re.compile("^seq_next$")
re_seq_prev = re.compile("^seq_prev$")
re_poll_view = re.compile("poll_question\/[^/]+\/get_state")
re_poll_answer = re.compile("poll_question\/[^/]+\/(?!get_state).+")
re_problem_view = re.compile("problem\/[^/]+\/problem_get$")
re_problem_save_success = re.compile("^save_problem_success$")
re_problem_save_fail = re.compile("^save_problem_fail$")
re_problem_check = re.compile("^problem_check$") # we want the server event b/c contains correctness info
re_problem_check2 = re.compile("^save_problem_check$") # also needs to be a server event
re_problem_show_answer = re.compile("^showanswer$")
re_wiki_view = re.compile("wiki")
#re_email = re.compile("[^@]+@[^@]+\.[^@]+")
re_annotation_create = re.compile("notes\/api\/annotations$") # see POST for object
re_book_view = re.compile("notes\/api\/search$") # used in CB22x; not the normal textbook module
re_book_view_actual = re.compile("^book$")
re_forum_view = re.compile("discussion\/forum$") # view threads
re_forum_view_user_profile = re.compile("discussion\/forum\/users\/[^/]+$")
re_forum_view_followed_threads = re.compile("discussion\/forum\/users\/[^/]+\/followed$")
re_forum_search = re.compile("discussion\/forum\/search$") # also used when selecting from dropdown
re_forum_thread_view_inline = re.compile("discussion\/forum\/[^/]+\/inline$") # view thread from courseware
re_forum_thread_view = re.compile("discussion\/forum\/[^/]+\/threads\/[^/]+$") # retrieve_single_thread (permanent_link_thread)
re_forum_thread_create = re.compile("discussion\/[^/]+\/threads\/create$")
re_forum_thread_close = re.compile("discussion\/threads\/[^/]+/close$")
re_forum_thread_delete = re.compile("discussion\/threads\/[^/]+/delete$")
re_forum_thread_downvote = re.compile("discussion\/threads\/[^/]+/downvote$")
re_forum_thread_flag_abuse = re.compile("discussion\/threads\/[^/]+/flagAbuse$")
re_forum_thread_follow = re.compile("discussion\/threads\/[^/]+/follow$")
re_forum_thread_pin = re.compile("discussion\/threads\/[^/]+/pin$")
re_forum_thread_reply = re.compile("discussion\/threads\/[^/]+/reply$") # create comment
re_forum_thread_unflag_abuse = re.compile("discussion\/threads\/[^/]+/unFlagAbuse$")
re_forum_thread_unfollow = re.compile("discussion\/threads\/[^/]+/unfollow$")
re_forum_thread_unpin = re.compile("discussion\/threads\/[^/]+/unpin$")
re_forum_thread_unvote = re.compile("discussion\/threads\/[^/]+/unvote$")
re_forum_thread_update = re.compile("discussion\/threads\/[^/]+/update$")
re_forum_thread_upvote = re.compile("discussion\/threads\/[^/]+/upvote$")
re_forum_comment_delete = re.compile("discussion\/comments\/[^/]+/delete$")
re_forum_comment_downvote = re.compile("discussion\/comments\/[^/]+/downvote$")
re_forum_comment_endorse = re.compile("discussion\/comments\/[^/]+/endorse$")
re_forum_comment_flag_abuse = re.compile("discussion\/comments\/[^/]+/flagAbuse$")
re_forum_comment_reply = re.compile("discussion\/comments\/[^/]+/reply$")
re_forum_comment_unflag_abuse = re.compile("discussion\/comments\/[^/]+/unFlagAbuse$")
re_forum_comment_unvote = re.compile("discussion\/comments\/[^/]+/unvote$")
re_forum_comment_update = re.compile("discussion\/comments\/[^/]+/update$")
re_forum_comment_upvote = re.compile("discussion\/comments\/[^/]+/upvote$")
re_page_view_courseware = re.compile("courseware\/[^/]+([^/]+)*\/?")
re_page_view_main = re.compile("courses\/[^/]+\/[^/]+\/[^/]+\/[^/]+") # very general, run after everything else
re_page_close = re.compile("^page_close$")
# PARSER OBJECT
# parameterized by a course axis (for identifying objects)
# where axis csv line format is as follows:
# ["index","url_name","category","gformat","start","due","name","path","module_id","data"]
class LogParser:
def __init__(self, axis_csv):
"""
A Parser instance is particular to a course. Initialize by passing
the relevant Course Axis (.CSV), which is used for identifying objects.
"""
# we need to build two axis lookup dicts
self.axis_path_to_courseware_name = {} # used for page_view and page_close
self.axis_url_name_to_courseware_name = {} # used for everything else
current_chapter = ""
current_sequential = ""
current_vertical = ""
row_num = 0
for line in csv.reader(open(axis_csv)):
row_num += 1
if(row_num < 3): continue # first two rows are headers and "course"
url_name = line[1]
category = line[2]
name = line[6]
path = line[7]
courseware_name = ""
if(category == "chapter"):
current_chapter = name
courseware_name = current_chapter
elif(category == "sequential"):
current_sequential = name
courseware_name = "/".join([current_chapter, current_sequential])
elif(category == "vertical"):
current_vertical = name
courseware_name = "/".join([current_chapter, current_sequential, current_vertical])
else:
# category is a resource
# courseware_name looks like: {chapter}/{sequential}/{vertical}/{resource}
# sometimes redundant, but most reliable way of uniquely and meaningfully identifying objects
courseware_name = "/".join([current_chapter, current_sequential, current_vertical, name])
self.axis_path_to_courseware_name[path] = courseware_name
self.axis_url_name_to_courseware_name[url_name] = courseware_name
def parseActivity(self, log_item):
"""
Parses an ExperienceAPI/TinCan-esque Activity (actor, verb, object, result, meta)
from a single JSON-formatted log entry (as a string). Returns a dictionary
if activity can be parsed, None otherwise.
"""
# convert log_item string into dict
log_item_json = json.loads(log_item)
try:
event = str(log_item_json["event"]) # cast to string b/c as dict isn't consistent in logs
event_type = log_item_json["event_type"]
page = log_item_json["page"]
except Exception:
# malformed log_item
return None
try:
e = json.loads(event)
e_get = e["GET"]
e_post = e["POST"]
except ValueError:
# json object couldn't be decoded
# "event" field is truncated/malformed
pass
except KeyError:
# "event" field doesn't have a GET/POST field
pass
except TypeError:
# "event" field is just a string (no key/value pairs)
pass
### VIDEO ###
# TODO: is a specific event logged when the user changes playback speed?
# TODO: add video_duration to course axes and to the meta field here (use YouTube API)
if(re_video_play.search(event_type) or re_video_pause.search(event_type)):
# (note: video_play and video_pause are identical other than the verb name)
# event_type: [browser] "play_video"
# event: "{"id":"i4x-HarvardX-CB22x-video-39c9cccdd02846d998ae5cd894830626","code":"YTOR7kAvl7Y","currentTime":279.088,"speed":"1.0"}"
v = "video_play" if "play_video" == event_type else "video_pause"
o = self.__getCoursewareObject(event.split("video-")[1].split("\"")[0])
r = None
m = {"youtube_id": e["code"]}
try: m["playback_speed"] = e["speed"]
except KeyError: m["playback_speed"] = None
try: m["playback_position_secs"] = e["currentTime"] # sometimes this field is missing
except KeyError: m["playback_position_secs"] = None
elif(re_video_show_transcript.search(event_type) or re_video_hide_transcript.search(event_type)):
# event_type: [browser] "show_transcript" or "hide_transcript"
# event: '{"id":"i4x-HarvardX-CB22x-video-ffdbfae1bbb34cd9a610c88349c350ec","code":"IWUv8ltEJOs","currentTime":0}'
v = "video_show_transcript" if "show_transcript" == event_type else "video_hide_transcript"
o = self.__getCoursewareObject(event.split("video-")[1].split("\"")[0])
r = None
m = {"playback_position_secs": e["currentTime"]}
elif(re_video_change_speed.search(event_type)):
# event_type: [browser] "speed_change_video"
# event: '{"id":"i4x-HarvardX-CB22x-video-a4fc2d96c8354252bb3e405816308828","code":"IERh8MkASDI","current_time":334.4424743652344,"old_speed":"1.50","new_speed":"1.0"}'
v = "video_change_speed"
o = self.__getCoursewareObject(event.split("video-")[1].split("\"")[0])
r = None
m = {
"youtube_id": e["code"],
"playback_position_secs": e["current_time"], # note "current_time" is different than "currentTime"!
"new_playback_speed": e["new_speed"],
"old_playback_speed": e["old_speed"]
}
elif(re_video_seek.search(event_type)):
# event_type: [browser] "seek_video"
# event: '{"id":"i4x-HarvardX-CB22x-video-2b509bcac67b49f9bcc51b85072dcef0","code":"Ct_M-_bP81k","old_time":641.696984,"new_time":709,"type":"onSlideSeek"}'
v = "video_seek"
o = self.__getCoursewareObject(event.split("video-")[1].split("\"")[0])
r = None
m = {
"youtube_id": e["code"],
"new_playback_position_secs": e["new_time"],
"old_playback_position_secs": e["old_time"],
"type": e["type"]
}
### SEQUENTIAL ###
# TODO: give better names to the meta 'new' and 'old' fields (currently just ints)
elif(re_seq_goto.search(event_type) or re_seq_next.search(event_type) or re_seq_prev.search(event_type)):
# (note: seq_goto, seq_prev, and seq_next are identical other than the verb name)
# when a user navigates via sequential, two events are logged...
# event_type: [server] "/courses/HarvardX/ER22x/2013_Spring/modx/i4x://HarvardX/ER22x/sequential/lecture_01/goto_position"
# event_type: [browser] "seq_goto" <-- we use this one
# event: "{"old":1,"new":2,"id":"i4x://HarvardX/CB22x/sequential/fed323e44ab14407907a7f401f1bfa87"}"
v = event_type
try: o = self.__getCoursewareObject(event.split("sequential/")[1].split("\"")[0])
except IndexError:
o = self.__getCoursewareObject(event.split("videosequence/")[1].split("\"")[0]) # used in PH207x
r = None
m = {
"new": e["new"],
"id": e["id"]
}
try: m["old"] = e["old"] # sometimes the "old" field is missing
except KeyError: m["old"] = None
### POLL ###
elif(re_poll_view.search(event_type)):
# (note: polls are often wrapped by conditionals, but we don't log any events for conditionals)
# logged when a poll is loaded onscreen (whether answered or not); sometimes several at once
# event_type: [server] "/courses/HarvardX/ER22x/2013_Spring/modx/i4x://HarvardX/ER22x/poll_question/T13_poll/get_state"
v = "poll_view"
o = self.__getCoursewareObject(event_type.split("/")[-2])
r = None
m = None
elif(re_poll_answer.search(event_type)):
# logged when user clicks a poll answer; "result" field can be 'yes', 'no', or any other answer value
# event_type: [server] "/courses/HarvardX/ER22x/2013_Spring/modx/i4x://HarvardX/ER22x/poll_question/T7_poll/yes"
v = "poll_answer"
split = event_type.split("/")
o = self.__getCoursewareObject(split[-2])
r = split[-1]
m = None
### PROBLEM (CAPA) ###
elif(re_problem_view.search(event_type)):
# logged when a problem is loaded onscreen; often several at once
# event_type: [server] "/courses/HarvardX/CB22x/2013_Spring/modx/i4x://HarvardX/CB22x/problem/bb8a422a718a4788b174220ed0e9c0d7/problem_get"
v = "problem_view"
o = self.__getCoursewareObject(event_type.split("problem/")[1].split("/")[0])
r = None
m = None
elif((re_problem_check.search(event_type) or re_problem_check2.search(event_type)) and log_item_json["event_source"] == "server"):
# when a user clicks 'Check,' three events are logged...
# event_type: [browser] "problem_check"
# event_type: [server] "/courses/HarvardX/CB22x/2013_Spring/modx/i4x://HarvardX/CB22x/problem/249d6f5aa35d4c0e850ece425676eacd/problem_check"
# event_type: [server] "save_problem_check" OR "problem_check" <-- we use this one b/c event field contains correctness info
v = "problem_check"
o = self.__getCoursewareObject(event.split("problem/")[1].split("'")[0])
r = event.split("'")[3] # value of key "success"
m = None
elif(re_problem_save_success.search(event_type) or re_problem_save_fail.search(event_type)):
# when a user clicks 'Save,' three events are logged...
# event_type: [browser] "problem_save"
# event_type: [server] "/courses/HarvardX/CB22x/2013_Spring/modx/i4x://HarvardX/CB22x/problem/4c26fb3fcef14319964d818d73cc013d/problem_save";
# event_type: [server] "save_problem_success" OR "save_problem_fail" <-- we use this one to capture success
v = "problem_save"
o = self.__getCoursewareObject(event.split("problem/")[1].split("'")[0])
r = "success" if event_type.split("problem_")[1] == "save" else "fail"
m = None
elif(re_problem_show_answer.search(event_type)):
# when a user clicks 'Show Answer', three events are logged...
# event_type: [browser] "problem_show"
# event_type: [server] "/courses/HarvardX/CB22x/2013_Spring/modx/i4x://HarvardX/CB22x/problem/2a3fe3442faf4c5ab644768bdad794de/problem_show" <-- we use this
# event_type: [server] "showanswer" or "show_answer"
v = "problem_show_answer"
o = self.__getCoursewareObject(event.split("problem/")[1].split("'")[0])
r = None
m = None
### WIKI ###
# TODO: flesh this out with better object names
elif(re_wiki_view.search(event_type)):
v = "wiki_view"
o_name = event_type
o = {
"object_type" : "url",
"object_name" : o_name
}
r = None
m = None
### EMAIL ###
# TODO: not particularly reliable; leaving out for now
# problematic example: event_type = /courses/HarvardX/CB22x/2013_Spring/submission_history/[email protected]/i4x://HarvardX/CB22x/problem/6f869b8bb1e04ec5b2106afc80708c9b
# elif(re_email.search(event_type)):
# v = "email"
# o_name = event_type.split("/")[-1]
# o = {
# "object_type" : "email",
# "object_name" : o_name
# }
# r = None
# m = None
### ANNOTATION ### (only in CB22x)
# TODO: annocation_edit and annotation_delete -- requires looking at multiple events at once (difficult with current framework)
elif(re_annotation_create.search(event_type)):
# when the user 'Save's an annotation, two events are logged
# event_type: [server] https://courses.edx.org/courses/HarvardX/CB22x/2013_Spring/notes/api/annotations <-- use this one b/c has post info in event field
# event_type: [server] https://courses.edx.org/courses/HarvardX/CB22x/2013_Spring/notes/api/annotations/38650
v = "annotation_create"
try:
s = event.split("uri\\\":\\\"")[1]
uri = s.split("\\\"")[0]
o_name = uri
except Exception:
o_name = "[Unavailable]" # sometimes the annotation text is too long and the rest gets truncated
o = {
"object_type" : "asset_id",
"object_name" : o_name
}
r = None
m = None
### BOOK ###
elif(re_book_view.search(event_type)):
# we infer book view events from the annotation module, which logs the following every page load:
# event_type: [server] "/courses/HarvardX/CB22x/2013_Spring/notes/api/search"
# event: "{"POST": {}, "GET": {"limit": ["0"], "uri": ["/c4x/HarvardX/CB22x/asset/book_sourcebook_herodotus-kyrnos.html"]}}"
v = "book_view"
try:
s = event.split("uri\": [\"")[1]
uri = s.split("\"")[0]
o_name = uri
except Exception:
o_name = "[Unavailable]"
o = {
"object_type" : "asset_id",
"object_name" : o_name
}
r = None
m = None
elif(re_book_view_actual.search(event_type)):
# event_type: [browser] book
# event: '{"type":"gotopage","old":2,"new":249}' or '{"type":"nextpage","new":3}'
v = "book_view"
o = {
"object_type" : "book_page",
"object_name" : e["new"]
}
r = None
m = {"type": e["type"]}
### FORUM - TOP LEVEL ###
# TODO: clean this mess up!!
# TODO: come up with way to give forum hashes human-readable names
elif(re_forum_view.search(event_type)):
v = "forum_view"
o_name = self.__getHashPath(event_type)
if(o_name == ""): o_name = None
o = {
"object_type" : "forum_hash",
"object_name" : o_name
}
r = None
try:
m = {
"sort_key": e_get["sort_key"][0],
"sort_order": e_get["sort_order"][0],
"page": e_get["page"][0]
}
except KeyError:
m = None # sometimes there won't be anything in the "event"
elif(re_forum_view_followed_threads.search(event_type)):
v = "forum_view_followed_threads"
o_name = ("".join(event_type.split("users/")[1:]).split("/")[0]) # user id is the trailing number
o = {
"object_type" : "forum_user_id",
"object_name" : o_name
}
r = None
m = {
"sort_key": e_get["sort_key"][0],
"sort_order": e_get["sort_order"][0],
"page": e_get["page"][0]
}
try: m["group_id"] = e_get["group_id"]
except KeyError: m["group_id"] = None
elif(re_forum_view_user_profile.search(event_type)):
v = "forum_view_user_profile"
o_name = "".join(event_type.split("users/")[1:]) # user id is the trailing number
o = {
"object_type" : "forum_user_id",
"object_name" : o_name
}
r = None
m = None
elif(re_forum_search.search(event_type)):
v = "forum_search"
r = None
try:
m = {
"text": e_get["text"][0].encode("utf-8"),
"sort_key": None,
"sort_order": None,
"page": None
}
except KeyError:
m = {
"sort_key": e_get["sort_key"][0],
"sort_order": e_get["sort_order"][0],
"page": e_get["page"][0]
}
try: m["text"] = e_get["commentable_ids"][0].encode("utf-8"),
except KeyError: m["text"] = None
o_name = m["text"]
o = {
"object_type" : "search_text",
"object_name" : o_name
}
### FORUM - THREADS ###
# (note: thread and comment events are coded separately in case we want to break apart later)
elif(re_forum_thread_create.search(event_type)):
v = "forum_create"
o = self.__getForumObject(event_type)
r = None
m = None # we could put the new thread's text here...
elif(re_forum_thread_close.search(event_type)):
v = "forum_close"
o = self.__getForumObject(event_type)
r = None
m = None
elif(re_forum_thread_delete.search(event_type)):
v = "forum_delete"
o = self.__getForumObject(event_type)
r = None
m = None
elif(re_forum_thread_downvote.search(event_type)):
v = "forum_downvote"
o = self.__getForumObject(event_type)
r = None
m = None
elif(re_forum_thread_flag_abuse.search(event_type)):
v = "forum_flag_abuse"
o = self.__getForumObject(event_type)
r = None
m = None
elif(re_forum_thread_follow.search(event_type)):
v = "forum_follow"
o = self.__getForumObject(event_type)
r = None
m = None
elif(re_forum_thread_pin.search(event_type)):
v = "forum_pin"
o = self.__getForumObject(event_type)
r = None
m = None
elif(re_forum_thread_reply.search(event_type)):
v = "forum_reply"
o = self.__getForumObject(event_type)
r = None
m = None # we could put the new comment's text here...
elif(re_forum_thread_unflag_abuse.search(event_type)):
v = "forum_unflag_abuse"
o = self.__getForumObject(event_type)
r = None
m = None
elif(re_forum_thread_unfollow.search(event_type)):
v = "forum_unfollow"
o = self.__getForumObject(event_type)
r = None
m = None
elif(re_forum_thread_unpin.search(event_type)):
v = "forum_unpin"
o = self.__getForumObject(event_type)
r = None
m = None
elif(re_forum_thread_unvote.search(event_type)):
v = "forum_unvote"
o = self.__getForumObject(event_type)
r = None
m = None
elif(re_forum_thread_update.search(event_type)):
v = "forum_update"
o = self.__getForumObject(event_type)
r = None
m = None
elif(re_forum_thread_upvote.search(event_type)):
v = "forum_upvote"
o = self.__getForumObject(event_type)
r = None
m = None
elif(re_forum_thread_view_inline.search(event_type)):
v = "forum_view_inline"
o = self.__getForumObject(event_type)
r = None
m = None
elif(re_forum_thread_view.search(event_type)):
v = "forum_view"
o = self.__getForumObject(event_type)
r = None
m = None
### FORUM - COMMENTS ###
elif(re_forum_comment_delete.search(event_type)):
v = "forum_delete"
o = self.__getForumObject(event_type)
r = None
m = None
elif(re_forum_comment_downvote.search(event_type)):
v = "forum_downvote"
o = self.__getForumObject(event_type)
r = None
m = None
elif(re_forum_comment_endorse.search(event_type)):
v = "forum_endorse"
o = self.__getForumObject(event_type)
r = None
m = None
elif(re_forum_comment_flag_abuse.search(event_type)):
v = "forum_flag_abuse"
o = self.__getForumObject(event_type)
r = None
m = None
elif(re_forum_comment_reply.search(event_type)):
v = "forum_reply"
o = self.__getForumObject(event_type)
r = None
m = None # we could put the new comment's text here...
elif(re_forum_comment_unflag_abuse.search(event_type)):
v = "forum_unflag_abuse"
o = self.__getForumObject(event_type)
r = None
m = None
elif(re_forum_comment_unvote.search(event_type)):
v = "forum_unvote"
o = self.__getForumObject(event_type)
r = None
m = None
elif(re_forum_comment_update.search(event_type)):
v = "forum_update"
o = self.__getForumObject(event_type)
r = None
m = None
elif(re_forum_comment_upvote.search(event_type)):
v = "forum_upvote"
o = self.__getForumObject(event_type)
r = None
m = None
### PAGE ###
# need to make sure these objects can be found in course axes; otherwise, likely just noise/malformed urls
elif(re_page_view_courseware.search(event_type)):
# page_views inside of the courseware look like this...
# event_type: [server] /courses/HarvardX/ER22x/2013_Spring/courseware/9158300eee2e4eb7a51d5a01ee01afdd/c2dfcb30d6d2490e85b83f882544fb0f/
v = "page_view"
path = event_type.split("courseware")[1]
if(path[-1] == "/"): path = path[:-1]
try: o_name = self.axis_path_to_courseware_name[path]
except KeyError: return None # page is noise b/c not in axis
o = {
"object_type" : "courseware_name",
"object_name" : o_name
}
r = None
m = None
elif(re_page_view_main.search(event_type)):
# page_views outside of the courseware (top-level tabs) look like this...
# event_type: [server] /courses/HarvardX/CB22x/2013_Spring/info
v = "page_view"
last_item = event_type.split("/")[-1]
if(last_item == ""): # sometimes has trailing slash
last_item = event_type.split("/")[-2]
try: o_name = top_level_tabs[last_item]
except KeyError: return None # if not in our list of tabs, must be noise
o = {
"object_type" : "tab_name",
"object_name" : o_name
}
r = None
m = None
elif(re_page_close.search(event_type)):
# TODO: how reliable are page_close events within edX and across browsers?
# page: https://courses.edx.org/courses/HarvardX/CB22x/2013_Spring/courseware/74a6ab26887c474eae8a8632600d9618/7b1ef88acd3743eb922d82781a2371cc/
# or sometimes: 'https://www.edx.org/courses/HarvardX/CB22x/2013_Spring/courseware/69569a7536674a3c87d9675ddb48f100/a038464de48d45de8d8032c9b6382508/#'
v = "page_close"
try: path = page.split("courseware")[1]
except IndexError:
# print "page_close IndexError: " + page
return None # usually: https://courses.edx.org/courses/HarvardX/ER22x/2013_Spring/discussion/forum
if(len(path) > 0 and path[-1] == "#"): path = path[:-1]
if(len(path) > 0 and path[-1] == "/"): path = path[:-1]
try: o_name = self.axis_path_to_courseware_name[path]
except KeyError: return None # page is noise b/c not in axis
o = {
"object_type" : "courseware_name",
"object_name" : o_name
}
r = None
m = None
else:
return None
if(v == "page_view" and o_name == None): return None
# unicode is the worst
try: o.update((k, v.encode('utf8', 'replace')) for k, v in o.items())
except Exception: pass # NoneType
activity = {
"actor": log_item_json["username"]
,"verb": v
,"object": o
,"result": r
,"meta": m
,"time": log_item_json["time"]
,"ip": log_item_json["ip"]
,"event": event
,"event_type": event_type
,"page": page
,"agent": log_item_json["agent"]
}
for k, v in activity.items():
try: activity[k] = v.encode('utf8', 'replace')
except Exception: pass # NoneType
return activity
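# For reference, every branch above funnels into the same record shape:
# {
#   "actor":  <username from the log line>,
#   "verb":   <one of the verbs assigned above, e.g. "video_seek", "problem_check">,
#   "object": {"object_type": ..., "object_name": ...},
#   "result": <branch-specific result, or None>,
#   "meta":   <branch-specific dict, or None>,
#   "time", "ip", "event", "event_type", "page", "agent": copied from the raw log line
# }
# (Values in angle brackets are placeholders, not real log data.)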
def __getHashPath(self, s):
return "/".join(re_hash.findall(s)).encode("utf-8")
def __getForumObject(self, event_type):
# just returning raw hashes for now
# TODO: how can we return something human-readable?
o = {
"object_type" : "forum_hash",
"object_name" : self.__getHashPath(event_type)
}
return o
def __getCoursewareObject(self, url_name):
# courseware_name format is {chapter}/{sequential}/{vertical}/{resource}
try: o_name = self.axis_url_name_to_courseware_name[url_name]
except KeyError: o_name = "[Axis Lookup Failed: " + url_name + "]"
o = {
"object_type" : "courseware_name",
"object_name" : o_name
}
return o
``` |
{
"source": "jimwaschura/Automation",
"score": 2
} |
#### File: Python/GuiStepscope/connect.py
```python
from pyBitwiseAutomation import StepscopeDevice


class Connect:
    IPAddress = str("192.168.1.246")  # e.g.
    Device = StepscopeDevice()

    @staticmethod
    def getDevice() -> StepscopeDevice:
        return Connect.Device

    @staticmethod
    def getIPAddress() -> str:
        return Connect.IPAddress

    @staticmethod
    def getConnected() -> bool:
        return Connect.getDevice().getIsConnected()

    @staticmethod
    def setIPAddress(newValue: str):
        # print("Connect.setIPAddress(), newValue: [" + newValue + "]")
        Connect.IPAddress = newValue

    @staticmethod
    def Disconnect():
        # print("Connect.Disconnect()")
        try:
            Connect.getDevice().Send("quit\n".encode("utf8"))
            Connect.getDevice().Disconnect()
        except Exception as e:
            print("Problem disconnecting: " + str(e))

    @staticmethod
    def Connect():
        # print("Connect.Connect(), ipAddress=[" + Connect.getIPAddress() + "]")
        try:
            Connect.getDevice().Connect(Connect.getIPAddress())
        except Exception as e:
            print("Problem connecting: ", e)
            raise e

# EOF
``` |
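A minimal usage sketch for the `Connect` wrapper above, assuming `pyBitwiseAutomation` is installed, the module is importable as `connect`, and a Stepscope instrument is reachable at the placeholder address below.
```python
from connect import Connect

Connect.setIPAddress("192.168.1.100")    # placeholder instrument address
Connect.Connect()                        # opens the socket via StepscopeDevice.Connect()
try:
    if Connect.getConnected():
        device = Connect.getDevice()     # raw StepscopeDevice for further commands
        print("Connected to", Connect.getIPAddress(), type(device).__name__)
finally:
    Connect.Disconnect()                 # sends "quit" and closes the connection
```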
{
"source": "jimwatt/P3-BehavioralCloning",
"score": 3
} |
#### File: jimwatt/P3-BehavioralCloning/model.py
```python
import csv
import cv2
import numpy as np
#################################################################
# 1. Load and pre-process the data
#################################################################
# Specify the directories where we can find data
# data : training data provided by Udacity
# andrew : three laps counter-clockwise around track
# andrewright : one lap clockwise around the track
datadirs = ['../data/','../andrew/','../andrewright/']
# First, get every line from the log files we are going to consider, along with its image directory
samples = []
for datadir in datadirs:
print("Loading data from {} ".format(datadir))
# Read in the csv driving log file
datfile = datadir + 'driving_log.csv'
imgdir = datadir + 'IMG/'
with open(datfile, 'r') as f:
reader = csv.reader(f)
next(reader) # skip the header line
for line in reader:
samples.append((line,imgdir)) # store the line and the image directory
# split the samples into train and validation sets
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
train_samples, validation_samples = train_test_split(samples, test_size=0.2)
################################################################
# 2. Define the generator for producing data efficiently in memory
################################################################
def generator(samples, batch_size):
numsamples = len(samples)
while True:
shuffle(samples)
for offset in range(0,numsamples,batch_size): # for each batch of samples from the log files
batch_samples = samples[offset:offset+batch_size]
images = []
measurements = []
for line,imgdir in batch_samples: # for each sample in the batch
steering_center = float(line[3]) # grab the steering value
# Augment the data using the left and right side cameras.
# (Using left and right cameras helps the vehicle learn to hold to the center of the road.)
# Create adjusted steering measurements for the side camera images
correction = 0.25 # this is a parameter to tune
steering_left = steering_center + correction
steering_right = steering_center - correction
# read in images from center, left and right cameras
filename = line[0].split('/')[-1]
img_center = cv2.imread(imgdir+filename)
img_center = cv2.cvtColor(img_center, cv2.COLOR_BGR2RGB)
filename = line[1].split('/')[-1]
img_left = cv2.imread(imgdir+filename)
img_left = cv2.cvtColor(img_left, cv2.COLOR_BGR2RGB)
filename = line[2].split('/')[-1]
img_right = cv2.imread(imgdir+filename)
img_right = cv2.cvtColor(img_right, cv2.COLOR_BGR2RGB)
# add all images and angles to data set
images.append(img_center)
images.append(img_left)
images.append(img_right)
measurements.append(steering_center)
measurements.append(steering_left)
measurements.append(steering_right)
# Further data augmentation
# For every image add the flipped version of the image
augmented_images, augmented_measurements = [], []
for image,measurement in zip(images,measurements):
augmented_images.append(image)
augmented_measurements.append(measurement)
augmented_images.append(cv2.flip(image,1))
augmented_measurements.append(measurement*-1.0)
# Convert image list to numpy arrays
X_train = np.array(augmented_images)
y_train = np.array(augmented_measurements)
# Note: a batch of size batch_size actually yields 6*batch_size images: 3 camera views x 2 (original + horizontally flipped)
yield shuffle(X_train, y_train)
# Set the batch size
batch_size = 32 # We will actually have 6*batch_size images in the batch because of data augmentation
# Create the training and validation generators
train_generator = generator(train_samples, batch_size=batch_size)
validation_generator = generator(validation_samples, batch_size=batch_size)
#######################################################################
# 3. Define the model network architecture
#######################################################################
from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda, Convolution2D, MaxPooling2D, Cropping2D
model = Sequential()
# This layer simply performs image Normalization
model.add(Lambda(lambda x: x/255.0 - 0.5, input_shape=(160,320,3)))
# This layer crops the image
model.add(Cropping2D(cropping=((70,25),(0,0))))
# LeNet Network (CNNs with max-pooling followed by densely connected layers)
model.add(Convolution2D(6,5,5,activation="relu"))
model.add(MaxPooling2D())
model.add(Convolution2D(6,5,5,activation="relu"))
model.add(MaxPooling2D())
model.add(Flatten())
model.add(Dense(120))
model.add(Dense(84))
model.add(Dense(1))
###########################################################################
# 4. Train the network using the data.
###########################################################################
print("training ...")
model.compile(loss='mse', optimizer='adam') # use the Mean-square error cost, and the adam optimizer
model.fit_generator(generator=train_generator, samples_per_epoch=len(train_samples)*6,
validation_data=validation_generator, nb_val_samples=len(validation_samples)*6, nb_epoch=3)
###########################################################################
# 5. Save the model.
###########################################################################
model.save('model.h5')
``` |
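The generator above folds two augmentations into each batch: side-camera images with a ±0.25 steering correction, and horizontal flips with negated angles. Below is a stand-alone sketch of that idea for a single sample, assuming the three camera images are already loaded as RGB numpy arrays.
```python
import cv2
import numpy as np

def augment_sample(img_center, img_left, img_right, steering_center, correction=0.25):
    images = [img_center, img_left, img_right]
    angles = [steering_center,
              steering_center + correction,   # left camera: steer a bit more to the right
              steering_center - correction]   # right camera: steer a bit more to the left
    # Mirror every image and negate its angle to balance left/right turn bias.
    flipped_images = [cv2.flip(img, 1) for img in images]
    flipped_angles = [-a for a in angles]
    return np.array(images + flipped_images), np.array(angles + flipped_angles)
```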
{
"source": "jimwatt/P4-advancedlanelines",
"score": 3
} |
#### File: jimwatt/P4-advancedlanelines/calibrate.py
```python
import glob

import cv2
import numpy as np


def calibrateCamera():
    print('Calibrating camera ...')
    nx = 9
    ny = 6
    chessimgs = glob.glob('./camera_cal/*.jpg')

    # termination criteria for sub-pixel accuracy in finding chessboard corners
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

    objp = np.zeros((nx*ny, 3), np.float32)
    objp[:, :2] = np.mgrid[0:nx, 0:ny].T.reshape(-1, 2)

    objpoints = []
    imgpoints = []
    for chessimg in chessimgs:
        img = cv2.imread(chessimg)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
        img_size = (gray.shape[1], gray.shape[0])
        if ret == True:
            # If we found corners, draw them!
            # cv2.drawChessboardCorners(img, (nx, ny), corners, ret)
            # cv2.imshow('img', img)
            objpoints.append(objp)
            cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
            imgpoints.append(corners)

    return cv2.calibrateCamera(objpoints, imgpoints, img_size, None, None)
```
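`cv2.calibrateCamera` returns the RMS reprojection error, camera matrix, distortion coefficients, and per-view rotation/translation vectors. A short sketch of applying that result to undistort a frame; the import path and image paths are examples only.
```python
import cv2
from calibrate import calibrateCamera   # assumes this file is importable as calibrate

ret, mtx, dist, rvecs, tvecs = calibrateCamera()
img = cv2.imread('./test_images/straight_lines1.jpg')       # example input frame
undistorted = cv2.undistort(img, mtx, dist, None, mtx)      # remove lens distortion
cv2.imwrite('./output_images/undistorted.jpg', undistorted)
```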
#### File: jimwatt/P4-advancedlanelines/lanelines.py
```python
import numpy as np
import cv2
import matplotlib.pyplot as plt
saveplots = False
#################################################################################'
# Functionality for finding lanelines
#################################################################################
# Given an image, return the image with all pixels set to zero except those in the specified window cell.
def window_mask(width, height, img_ref, center,level):
output = np.zeros_like(img_ref)
output[int(img_ref.shape[0]-(level+1)*height):int(img_ref.shape[0]-level*height),max(0,int(center-width/2)):min(int(center+width/2),img_ref.shape[1])] = 1
return output
# Given a y value in pixels, a quadratic fit in pixels, and scale factors xmpp and ympp, compute the radius of curvature in meters
def calculateCurveRad(y,fit,xmpp,ympp):
A = fit[0]*xmpp/(ympp*ympp)
B = fit[1]*xmpp/ympp
return (1.0 + (2.0*A*y*ympp + B)**2)**1.5 / np.abs(2*A)
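# Why the coefficients are rescaled: the polynomial was fit in pixel space,
#   x_px = A_px*y_px**2 + B_px*y_px + C_px.
# Substituting x_px = x_m/xmpp and y_px = y_m/ympp gives a fit in meters with
#   A_m = A_px*xmpp/ympp**2  and  B_m = B_px*xmpp/ympp,
# which is what A and B above compute. For a curve x = f(y), the radius of
# curvature is R = (1 + (dx/dy)**2)**1.5 / |d2x/dy2|, i.e.
#   R = (1 + (2*A_m*y_m + B_m)**2)**1.5 / |2*A_m|,  evaluated at y_m = ympp*y_px.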
# Given the binary warped image containing pixels corresponding to the lane lines, compute a polynomial fit to the lanelines, and draw them on the pristine image.
def findLaneLines(pristine,binary_warped, Minv):
# Take a histogram of the (bottom) half of the image
histogram = np.sum(binary_warped, axis=0)
if(saveplots):
plt.figure(80)
plt.plot(histogram)
plt.title("Histogram of Laneline Pixels")
plt.savefig("output_images/histogram.png")
# plt.close()
# Create an output image to draw on and visualize the result
out_img = np.uint8(np.dstack((binary_warped, binary_warped, binary_warped))*255)
# Find the peak of the left and right halves of the histogram
# These will be the starting point for the left and right lines
midpoint = np.int(histogram.shape[0]/2)
leftx_base = np.argmax(histogram[:midpoint])
rightx_base = np.argmax(histogram[midpoint:]) + midpoint
# Choose the number of sliding windows
nwindows = 9
# Set height of windows
window_height = np.int(binary_warped.shape[0]/nwindows)
# Identify the x and y positions of all nonzero pixels in the image
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Current positions to be updated for each window
leftx_current = leftx_base
rightx_current = rightx_base
# Set the width of the windows +/- margin
margin = 50
# Set minimum number of pixels found to recenter window
minpix = 10
# Create empty lists to receive left and right lane pixel indices
left_lane_inds = []
right_lane_inds = []
# Step through the windows one by one
for window in range(nwindows):
# Identify window boundaries in x and y (and right and left)
win_y_low = binary_warped.shape[0] - (window+1)*window_height
win_y_high = binary_warped.shape[0] - window*window_height
win_xleft_low = leftx_current - margin
win_xleft_high = leftx_current + margin
win_xright_low = rightx_current - margin
win_xright_high = rightx_current + margin
# Draw the windows on the visualization image
cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),
(0,255,0), 2)
cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),
(0,255,0), 2)
# Identify the nonzero pixels in x and y within the window
good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
(nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
(nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
# Append these indices to the lists
left_lane_inds.append(good_left_inds)
right_lane_inds.append(good_right_inds)
# If you found > minpix pixels, recenter next window on their mean position
if len(good_left_inds) > minpix:
leftx_current = np.int(np.mean(nonzerox[good_left_inds]))
if len(good_right_inds) > minpix:
rightx_current = np.int(np.mean(nonzerox[good_right_inds]))
# Concatenate the arrays of indices
left_lane_inds = np.concatenate(left_lane_inds)
right_lane_inds = np.concatenate(right_lane_inds)
# Extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
# Fit a second order polynomial to each
left_fit = np.polyfit(lefty, leftx, 2)
lpoly = np.poly1d(left_fit)
right_fit = np.polyfit(righty, rightx, 2)
rpoly = np.poly1d(right_fit)
# Generate x and y values for plotting
ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] )
left_fitx = lpoly(ploty)
right_fitx = rpoly(ploty)
out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
if(saveplots):
plt.figure(30)
plt.imshow(out_img)
plt.plot(left_fitx, ploty, color='yellow')
plt.plot(right_fitx, ploty, color='yellow')
plt.xlim(0, 1280)
plt.ylim(720, 0)
plt.savefig("output_images/searching.png")
# plt.close()
# Create an image to draw the lines on
warp_zero = np.zeros_like(binary_warped).astype(np.uint8)
color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
# Recast the x and y points into usable format for cv2.fillPoly()
pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
pts = np.hstack((pts_left, pts_right))
# Draw the lane onto the warped blank image
cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))
# Warp the blank back to original image space using inverse perspective matrix (Minv)
newwarp = cv2.warpPerspective(color_warp, Minv, (pristine.shape[1], pristine.shape[0]))
# Combine the result with the original image
result = cv2.addWeighted(pristine, 1, newwarp, 0.3, 0)
###############################################################################S
# Determine radius of curvature, and offset from center, and write on the image
# Define conversions in x and y from pixels space to meters
ym_per_pix = 30/720 # meters per pixel in y dimension
xm_per_pix = 3.7/700 # meters per pixel in x dimension
# Define y-value where we want radius of curvature
# I'll choose the maximum y-value, corresponding to the bottom of the image
y_eval = np.max(ploty)
left_curverad = calculateCurveRad(y_eval,left_fit,xm_per_pix,ym_per_pix)
right_curverad = calculateCurveRad(y_eval,right_fit,xm_per_pix,ym_per_pix)
# Compute offset from lane center
offcenter = -( 0.5*(lpoly(y_eval)+rpoly(y_eval)) - 0.5*binary_warped.shape[1] ) * xm_per_pix
cv2.putText(img=result,text="Left : %.1f km" % (left_curverad/1000.0), org=(20,110), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=1.5, color=(255,255,255), thickness=3)
cv2.putText(img=result,text="Right : %.1f km" % (right_curverad/1000.0), org=(20,170), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=1.5, color=(255,255,255), thickness=3)
cv2.putText(img=result,text="Offset : %d cm" % np.int(offcenter*100.0), org=(20,50), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=1.5, color=(255,255,255), thickness=3)
return(result)
```
#### File: jimwatt/P4-advancedlanelines/perspective.py
```python
import cv2
import numpy as np
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
saveplots = True
def getPerspectiveTransforms():
imgrect = mpimg.imread("./test_images/straight_lines1.jpg")
# Hard-code these handselected points from the road
warpedrectvertices = np.array([
[305,650],
[525, 500],
[760, 500],
[1000,650]], dtype= np.float32)
# This is where those hand-selected points defined above ought to be in our bird's-eye view
offsetx = 160
offsety = 60
imx = imgrect.shape[1]
imy = imgrect.shape[0]
rectvertices = np.array([
[offsetx, imy-offsety],
[offsetx, offsety],
[imx-offsetx, offsety],
[imx-offsetx, imy-offsety]], dtype = np.float32)
print("warpedrectvertices : ")
print(warpedrectvertices)
print("rectvertices : ")
print(rectvertices)
# Get the perspective transform and its inverse
M = cv2.getPerspectiveTransform(warpedrectvertices, rectvertices)
Minv = cv2.getPerspectiveTransform(rectvertices, warpedrectvertices)
# Plot the perspective transform
if(saveplots):
warped = cv2.warpPerspective(imgrect, M, (imx, imy))
cv2.circle(img=imgrect,center=(warpedrectvertices[0,0],warpedrectvertices[0,1]), radius=15, color=(255,0,0), thickness=-1)
cv2.circle(img=imgrect,center=(warpedrectvertices[1,0],warpedrectvertices[1,1]), radius=15, color=(0,255,0), thickness=-1)
cv2.circle(img=imgrect,center=(warpedrectvertices[2,0],warpedrectvertices[2,1]), radius=15, color=(0,0,255), thickness=-1)
cv2.circle(img=imgrect,center=(warpedrectvertices[3,0],warpedrectvertices[3,1]), radius=15, color=(255,255,0), thickness=-1)
cv2.circle(img=warped,center=(rectvertices[0,0],rectvertices[0,1]), radius=15, color=(255,0,0), thickness=-1)
cv2.circle(img=warped,center=(rectvertices[1,0],rectvertices[1,1]), radius=15, color=(0,255,0), thickness=-1)
cv2.circle(img=warped,center=(rectvertices[2,0],rectvertices[2,1]), radius=15, color=(0,0,255), thickness=-1)
cv2.circle(img=warped,center=(rectvertices[3,0],rectvertices[3,1]), radius=15, color=(255,255,0), thickness=-1)
plt.figure(1000)
plt.imshow(imgrect)
plt.title('Perspective Image')
plt.savefig('output_images/perspective.png')
plt.close()
plt.figure(1001)
plt.imshow(warped)
plt.title('Bird\'s-Eye Image')
plt.savefig('output_images/birdseye.png')
plt.close()
return M,Minv
``` |
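A small sketch of using the returned transforms on another frame: warp the whole image to the bird's-eye view, and project a single pixel coordinate the same way. The import path, frame path, and point are examples only.
```python
import cv2
import numpy as np
from perspective import getPerspectiveTransforms   # assumes this file is importable

M, Minv = getPerspectiveTransforms()
frame = cv2.imread('./test_images/straight_lines1.jpg')
birdseye = cv2.warpPerspective(frame, M, (frame.shape[1], frame.shape[0]))

# Map one (x, y) pixel from the camera view into the bird's-eye view.
pt = np.array([[[640.0, 650.0]]], dtype=np.float32)
print(cv2.perspectiveTransform(pt, M))
```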
{
"source": "jimwayneyeh/web3py-example",
"score": 2
} |
#### File: jimwayneyeh/web3py-example/read_block.py
```python
import logging.config
import yaml
import json
import datetime
from auto.infura_ropsten import w3
with open('logging.yaml', 'r') as fd:
config = yaml.safe_load(fd.read())
logging.config.dictConfig(config)
class EthereumBlockReader:
def __init__(self, w3=None, logger=None):
self.logger = logger or logging.getLogger(__name__)
if w3 is None:
raise ValueError('No web3 instance is given.')
self.w3 = w3
'''
Get blocks mined in the last 1 minute.
'''
def get_recent_blocks(self):
now = datetime.datetime.now()
minute_ago = datetime.datetime.now() - datetime.timedelta(minutes=1)
self.logger.debug(
'Attempt to collect transactions between %s ~ %s.', minute_ago, now)
# Traverse the blockchain from the latest block and collect blocks within
# the past 1 minute.
blocks = list()
block_num = self.w3.eth.blockNumber
while 1:
block = self.w3.eth.getBlock(block_num)
block_time = datetime.datetime.fromtimestamp(block['timestamp'])
if block_time < minute_ago:
self.logger.debug('Block at %s is too old.', block_time)
break
self.logger.info('Collect block #%s at %s', block_num, block_time)
blocks.append(block)
block_num -= 1
return blocks
def get_recent_transactions(self, blocks=None):
if blocks is None:
blocks = self.get_recent_blocks()
transactions = list()
for block in blocks:
self.logger.debug('Extract transactions from block #%s...', block['number'])
for txhash in block['transactions']:
tx = self.w3.eth.getTransaction(txhash)
transactions.append(tx)
return transactions
if (__name__ == '__main__'):
logger = logging.getLogger(__name__)
block_reader = EthereumBlockReader(w3)
# Print all of the transactions mined in the recent 1 minute.
transactions = block_reader.get_recent_transactions()
for transaction in transactions:
logger.debug('Transaction: %s', transaction)
'''
Read a specific transaction by its transaction hash.
'''
tx = w3.eth.getTransaction('0xc3280e863f2d7cb2362ce70dfe03d4552768d36d612892c152fe6dd5761399ba')
logger.debug('Specific transaction: %s', tx)
# Create the contract instance in order to read the input of transaction.
# Reference: https://ethereum.stackexchange.com/questions/20897
with open("read_block.abi.json") as f:
contract_abi = json.load(f)
contract_addr = w3.toChecksumAddress('0x145b234edc704f5906d2ad0a51908ed091323098')
my_contract = w3.eth.contract(address=contract_addr, abi=contract_abi)
# Parse the input of the transaction.
decoded_input = my_contract.decode_function_input(tx['input'])
logger.debug('input: %s', decoded_input)
# The return is a class which denotes the function. For example, here it is
# class 'web3.utils.datatypes.startCrowdsale'.
func_executed = decoded_input[0]
# We can list the available members through inspect.getmembers()
# Reference: https://stackoverflow.com/questions/1911281
logger.debug('function name: %s', func_executed.fn_name)
``` |
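A sketch of building on the reader above: keep only the recent transactions addressed to the example contract and total the ether sent to it. It assumes `EthereumBlockReader` and `w3` from the script above are in scope and uses the same pre-v6 camelCase web3 API.
```python
reader = EthereumBlockReader(w3)
target = w3.toChecksumAddress('0x145b234edc704f5906d2ad0a51908ed091323098')

total_wei = 0
for tx in reader.get_recent_transactions():
    if tx['to'] == target:              # 'to' is None for contract-creation transactions
        total_wei += tx['value']

print('ether sent to the contract in the last minute:', w3.fromWei(total_wei, 'ether'))
```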
{
"source": "jimwinquist/algorithms",
"score": 4
} |
#### File: algorithms/sort/merge_sort.py
```python
def merge_sort(arr):
    if arr is None or len(arr) < 2:
        return arr
    aux = arr[:]
    return sort(arr, aux, 0, len(arr) - 1)


def sort(arr, aux, lo, hi):
    if lo < hi:
        mid = lo + (hi - lo) // 2  # integer midpoint (avoids a float index in Python 3)
        sort(arr, aux, lo, mid)
        sort(arr, aux, mid + 1, hi)
        merge(arr, aux, lo, mid, hi)
    return arr


def merge(arr, aux, lo, mid, hi):
    i = lo
    j = mid + 1
    for k in range(lo, hi + 1):
        aux[k] = arr[k]
    for k in range(lo, hi + 1):
        if i > mid:
            arr[k] = aux[j]
            j += 1
        elif j > hi:
            arr[k] = aux[i]
            i += 1
        elif aux[j] < aux[i]:
            arr[k] = aux[j]
            j += 1
        else:
            arr[k] = aux[i]
            i += 1


if __name__ == "__main__":
    assert(merge_sort([]) == [])
    assert(merge_sort(None) is None)
    assert(merge_sort([1]) == [1])
    assert(merge_sort([4, 0, 2, 5, 1, 3]) == [0, 1, 2, 3, 4, 5])
    assert(merge_sort([7, 4, 6, 5, 3, 5, 4, 2, 4, 1]) == [1, 2, 3, 4, 4, 4, 5, 5, 6, 7])
``` |
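A quick randomized check (sketch) that the implementation above agrees with Python's built-in `sorted`, assuming the functions above are in scope:
```python
import random

for _ in range(100):
    data = [random.randint(-50, 50) for _ in range(random.randint(0, 30))]
    assert merge_sort(list(data)) == sorted(data)   # pass a copy, since merge_sort sorts in place
print("randomized merge_sort check passed")
```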
{
"source": "jimwsplk/flowmill-collector",
"score": 3
} |
#### File: collector/kernel/bpf_preprocess.py
```python
import os
import sys
from pcpp.preprocessor import Preprocessor, OutputDirective, Action
import argparse
#
# Pass-through preprocessor to improve BCC's parsing
#
class PassThruPreprocessor(Preprocessor):
def __init__(self,lexer=None):
super(PassThruPreprocessor, self).__init__(lexer)
self.passthrough = False
self.current_file_line_id = 0
self.file_line_table = []
def write_debug_info(self, debugfile):
lineid = 0
debugfile.write("int g_bpf_debug_line_info[] = {\n")
for x in self.file_line_table:
debugfile.write(" {0},\n".format(x["line"]))
lineid += 1
debugfile.write("};\n")
lineid = 0
debugfile.write("const char *g_bpf_debug_file_info[] = {\n")
for x in self.file_line_table:
debugfile.write(" \"{0}\",\n".format(x["file"]))
lineid += 1
debugfile.write("};\n")
def token(self):
"""Method to return individual tokens, overriding custom macros"""
tok = super(PassThruPreprocessor, self).token()
if tok and tok.value=="__FILELINEID__":
tok.value=str(self.current_file_line_id)
self.file_line_table.append({ "file": os.path.basename(self.macros['__FILE__'].value[0].value.strip("\"")), "line": tok.lineno })
self.current_file_line_id += 1
return tok
def on_include_not_found(self, is_malformed, is_system_include, curdir, includepath):
# If includes are not found, complain
sys.stderr.write("Unable to find include file: "+str(includepath)+"\n")
sys.exit(1)
def on_unknown_macro_in_defined_expr(self, tok):
# Pass through as expanded as possible, unexpanded without complaining if not possible
return None
def on_unknown_macro_in_expr(self, tok):
# Pass through as expanded as possible, unexpanded without complaining if not possible
return None
def on_directive_handle(self, directive, toks, ifpassthru, precedingtoks):
if directive.value=="pragma":
if len(toks)>=1 and toks[0].type=="CPP_ID" and toks[0].value=="passthrough":
if len(toks)==3 and toks[1].type=="CPP_WS" and toks[2].type=="CPP_ID" and toks[2].value=="on":
# Turn on passthrough
self.passthrough = True
raise OutputDirective(Action.IgnoreAndRemove)
elif len(toks)==3 and toks[1].type=="CPP_WS" and toks[2].type=="CPP_ID" and toks[2].value=="off":
# Turn on passthrough
self.passthrough = False
raise OutputDirective(Action.IgnoreAndRemove)
sys.stderr.write("Invalid passthrough pragma\n")
sys.exit(1)
if self.passthrough:
# Pass through without execution EVERYTHING if we have used pragma passthrough
raise OutputDirective(Action.IgnoreAndPassThrough)
if directive.value=="define":
# Process and ALSO pass through as well
return None
if directive.value=="include":
if toks[0].type=="CPP_STRING":
# Process #include "" normally
return True
else:
# Always pass through #include<>
raise OutputDirective(Action.IgnoreAndPassThrough)
# Attempt to process all other directives
return True
def on_directive_unknown(self, directive, toks, ifpassthru, precedingtoks):
raise OutputDirective(Action.IgnoreAndPassThrough)
#
# Parse the BPF
#
parser = argparse.ArgumentParser(description='Preprocess BPF')
parser.add_argument("infile", type=str, help="input file")
parser.add_argument("outfile", type=str, help="output file")
parser.add_argument("debugfile", type=str, help="output debug information file")
parser.add_argument("-I", dest="includedirs", metavar="includedir", default=[], type=str, action="append", help="include directory")
parser.add_argument("-D", dest="definitions", metavar="define", default=[], type=str, action="append", help="definitions")
args = parser.parse_args()
pp = PassThruPreprocessor()
if args.infile == "-":
infile = sys.stdin
else:
infile = open(args.infile, "rt")
if args.outfile == "-":
outfile = sys.stdout
else:
outfile = open(args.outfile, "wt")
if args.debugfile == "-":
print("Can't write debug information to stdout")
sys.exit(1)
else:
debugfile = open(args.debugfile, "wt")
for includedir in args.includedirs:
pp.add_path(includedir)
extra_defs = ""
for definition in args.definitions:
d = definition.split("=",1)
if len(d)==1:
extra_defs += "#define {}\n".format(d[0])
elif len(d)==2:
extra_defs += "#define {} {}\n".format(d[0], d[1])
all_source = extra_defs + infile.read()
pp.parse(all_source, infile.name)
pp.write(outfile)
pp.write_debug_info(debugfile)
```
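A rough sketch of driving `PassThruPreprocessor` programmatically instead of through the argparse CLI above; the source string is invented for illustration, and the exact expansion details depend on the installed pcpp version.
```python
import io
import sys

src = """
#define FOO 1
#pragma passthrough on
#if LEFT_ALONE
int x = FOO;
#endif
#pragma passthrough off
int line_id = __FILELINEID__;
"""

pp = PassThruPreprocessor()      # class defined above
pp.parse(src, "example.c")

out = io.StringIO()
pp.write(out)                    # directives between the passthrough pragmas are emitted, not evaluated
print(out.getvalue())
pp.write_debug_info(sys.stdout)  # prints the g_bpf_debug_line_info / g_bpf_debug_file_info tables
```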
#### File: bpf_src/tcp-processor/tcp-processor.py
```python
from bcc import BPF #, DEBUG_PREPROCESSOR
import io, os, sys
from pcpp.preprocessor import Preprocessor, OutputDirective, Action
import ctypes as ct
if "--debug" in sys.argv:
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=('localhost', 5678), redirect_output=True)
ptvsd.wait_for_attach()
breakpoint()
#
# Wrapper for BCC that prints useful diagnostics
#
class BPFWrapper:
def __init__(self, bpf):
self._bpf = bpf
def attach_kprobe(self, event=b"", event_off=0, fn_name=b"", event_re=b""):
print("attach_kprobe: event={} fn_name={}".format(event, fn_name))
try:
self._bpf.attach_kprobe(event,event_off,fn_name,event_re)
except:
print(" failed for {}".format(event))
return
print(" succeeded for {}".format(event))
def attach_kprobe_all(self, events, event_off=0, fn_name=b"", event_re=b""):
print("attach_kprobe_all: events={} fn_name={}".format(str(events), fn_name))
for event in events:
try:
self._bpf.attach_kprobe(event,event_off,fn_name,event_re)
except:
print(" failed for {}".format(event))
continue
print(" succeeded for {}".format(event))
def attach_kretprobe(self, event=b"", fn_name=b"", event_re=b"", maxactive=0):
print("attach_kretprobe: event={} fn_name={}".format(event, fn_name))
try:
self._bpf.attach_kretprobe(event,fn_name,event_re,maxactive)
except:
print(" failed for {}".format(event))
return
print(" succeeded for {}".format(event))
def attach_kretprobe_all(self, events, fn_name=b"", event_re=b"", maxactive=0):
print("attach_kretprobe_all: events={} fn_name={}".format(str(events), fn_name))
for event in events:
try:
self._bpf.attach_kretprobe(event,fn_name,event_re, maxactive)
except:
print(" failed for {}".format(event))
continue
print(" succeeded for {}".format(event))
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
if name in self.__class__.__dict__:
return self.__class__.__dict__[name]
return getattr(self._bpf, name)
def __getitem__(self, key):
return self._bpf[key]
#
# Pass-through preprocessor to improve BCC's parsing
#
class PassThruPreprocessor(Preprocessor):
def __init__(self,lexer=None):
super(PassThruPreprocessor, self).__init__(lexer)
self.passthrough = False
self.current_file_line_id = 0
self.file_line_table = []
def write_debug_info(self, debugfile):
lineid = 0
debugfile.write("int g_bpf_debug_line_info[] = {\n")
for x in self.file_line_table:
debugfile.write(" {0},\n".format(x["line"]))
lineid += 1
debugfile.write("};\n")
lineid = 0
debugfile.write("const char *g_bpf_debug_file_info[] = {\n")
for x in self.file_line_table:
debugfile.write(" {0},\n".format(x["file"]))
lineid += 1
debugfile.write("};\n")
def token(self):
"""Method to return individual tokens, overriding custom macros"""
tok = super(PassThruPreprocessor, self).token()
if tok and tok.value=="__FILELINEID__":
tok.value=str(self.current_file_line_id)
self.file_line_table.append({ "file": self.macros['__FILE__'].value[0].value, "line": tok.lineno })
self.current_file_line_id += 1
return tok
def on_include_not_found(self, is_system_include, curdir, includepath):
# If includes are not found, complain
sys.stderr.write("Unable to find include file: "+str(includepath)+"\n")
sys.exit(1)
def on_unknown_macro_in_defined_expr(self, tok):
# Pass through as expanded as possible, unexpanded without complaining if not possible
return None
def on_unknown_macro_in_expr(self, tok):
# Pass through as expanded as possible, unexpanded without complaining if not possible
return None
def on_directive_handle(self, directive, toks, ifpassthru, precedingtoks):
if directive.value=="pragma":
if len(toks)>=1 and toks[0].type=="CPP_ID" and toks[0].value=="passthrough":
if len(toks)==3 and toks[1].type=="CPP_WS" and toks[2].type=="CPP_ID" and toks[2].value=="on":
# Turn on passthrough
self.passthrough = True
raise OutputDirective(Action.IgnoreAndRemove)
elif len(toks)==3 and toks[1].type=="CPP_WS" and toks[2].type=="CPP_ID" and toks[2].value=="off":
# Turn on passthrough
self.passthrough = False
raise OutputDirective(Action.IgnoreAndRemove)
sys.stderr.write("Invalid passthrough pragma\n")
sys.exit(1)
if self.passthrough:
# Pass through without execution EVERYTHING if we have used pragma passthrough
raise OutputDirective(Action.IgnoreAndPassThrough)
if directive.value=="define":
# Process and ALSO pass through as well
return None
if directive.value=="include":
if toks[0].type=="CPP_STRING":
# Process #include "" normally
return True
else:
# Always pass through #include<>
raise OutputDirective(Action.IgnoreAndPassThrough)
# Attempt to process all other directives
return True
def on_directive_unknown(self, directive, toks, ifpassthru, precedingtoks):
raise OutputDirective(Action.IgnoreAndPassThrough)
#
# Parse the BPF
#
pp = PassThruPreprocessor()
instr = "#define DEBUG_LOG 1\n#define ENABLE_TCP_DATA_STREAM 1\n#define _PROCESSING_BPF 1\n#define STANDALONE_TCP_PROCESSOR 1\n" + open("./bpf_tcp_processor.c","rt").read()
outfile = io.StringIO()
pp.add_path("../../../../src/")
pp.add_path("../../../../")
pp.parse(instr, source = "./bpf_tcp_processor.c")
pp.write(outfile)
preprocessed_bpf_text = outfile.getvalue()
# print("preproc: "+preprocessed_bpf_text)
print("debug info:")
pp.write_debug_info(sys.stdout)
b = BPFWrapper(BPF(text=preprocessed_bpf_text))
# Set up tail calls (mirroring bpf_handler.cc)
tail_calls = b.get_table("tail_calls")
#tail_calls[ct.c_int(int(pp.macros["TAIL_CALL_ON_UDP_SEND_SKB__2"].value[0].value))] = b.load_func("on_udp_send_skb__2", BPF.KPROBE)
#tail_calls[ct.c_int(int(pp.macros["TAIL_CALL_ON_UDP_V6_SEND_SKB__2"].value[0].value))] = b.load_func("on_udp_v6_send_skb__2", BPF.KPROBE)
#tail_calls[ct.c_int(int(pp.macros["TAIL_CALL_ON_IP_SEND_SKB__2"].value[0].value))] = b.load_func("on_ip_send_skb__2", BPF.KPROBE)
#tail_calls[ct.c_int(int(pp.macros["TAIL_CALL_ON_IP6_SEND_SKB__2"].value[0].value))] = b.load_func("on_ip6_send_skb__2", BPF.KPROBE)
#tail_calls[ct.c_int(int(pp.macros["TAIL_CALL_HANDLE_RECEIVE_UDP_SKB"].value[0].value))] = b.load_func("handle_receive_udp_skb", BPF.KPROBE)
#tail_calls[ct.c_int(int(pp.macros["TAIL_CALL_HANDLE_RECEIVE_UDP_SKB__2"].value[0].value))] = b.load_func("handle_receive_udp_skb__2", BPF.KPROBE)
tail_calls[ct.c_int(int(pp.macros["TAIL_CALL_CONTINUE_TCP_SENDMSG"].value[0].value))] = b.load_func("continue_tcp_sendmsg", BPF.KPROBE)
tail_calls[ct.c_int(int(pp.macros["TAIL_CALL_CONTINUE_TCP_RECVMSG"].value[0].value))] = b.load_func("continue_tcp_recvmsg", BPF.KPROBE)
#
# Attach probes
#
# Create/destroy
b.attach_kprobe(event="tcp_init_sock", fn_name="handle_kprobe__tcp_init_sock")
b.attach_kprobe(event="security_sk_free", fn_name="handle_kprobe__security_sk_free")
# Accept
b.attach_kprobe(event="inet_csk_accept", fn_name="handle_kprobe__inet_csk_accept")
b.attach_kretprobe(event="inet_csk_accept", fn_name="handle_kretprobe__inet_csk_accept")
# Send
b.attach_kprobe(event="tcp_sendmsg", fn_name="handle_kprobe__tcp_sendmsg")
b.attach_kretprobe(event="tcp_sendmsg", fn_name="handle_kretprobe__tcp_sendmsg")
# Receive
b.attach_kprobe(event="tcp_recvmsg", fn_name="handle_kprobe__tcp_recvmsg")
b.attach_kretprobe(event="tcp_recvmsg", fn_name="handle_kretprobe__tcp_recvmsg")
#
# Print trace output
#
class TCPEventHTTPResponse(ct.Structure):
_fields_ = [
("code", ct.c_ushort),
("__pad0", ct.c_uint8*6),
("latency", ct.c_ulonglong)
]
class TCPEventTCPData(ct.Structure):
_fields_ = [
("length", ct.c_uint),
("streamtype", ct.c_uint8),
("is_server", ct.c_uint8),
("__pad0", ct.c_uint16),
("offset", ct.c_ulonglong)
]
class TCPEventData(ct.Union):
_fields_ = [
("http_response", TCPEventHTTPResponse),
("tcp_data", TCPEventTCPData),
("__pad0", ct.c_ulonglong),
("__pad1", ct.c_ulonglong)
]
class TCPEvent(ct.Structure):
_anonymous_ = ("u",)
_fields_ = [
("type", ct.c_uint),
("pid", ct.c_uint),
("ts", ct.c_ulonglong),
("sk", ct.c_ulonglong),
("u", TCPEventData)
]
class TCPDataHeader(ct.Structure):
_pack_ = 1
_fields_ = [
("length", ct.c_ulonglong),
]
class TCPDataMessage(ct.Structure):
_pack_ = 1
_fields_ = [
("hdr", TCPDataHeader),
("data", ct.c_ubyte * 256)
]
print("sizeof(TCPDataHeader) = {}".format(ct.sizeof(TCPDataHeader)))
print("sizeof(TCPDataMessage) = {}".format(ct.sizeof(TCPDataMessage)))
def print_tcp_event(cpu, data, size):
assert size >= ct.sizeof(TCPEvent)
event = ct.cast(data, ct.POINTER(TCPEvent)).contents
if event.type == 0:
print(">>> TCP_EVENT_TYPE_HTTP_RESPONSE(pid=%u, ts=%u, sk=0x%X, code=%u, latency=%u)" % (event.pid, event.ts, event.sk, event.http_response.code, event.http_response.latency))
elif event.type == 1:
print(">>> TCP_EVENT_TYPE_TCP_DATA(pid=%u, ts=%u, sk=0x%X, length=%u, streamtype=%u, is_server=%u, offset=%u)" % (event.pid, event.ts, event.sk, event.tcp_data.length, event.tcp_data.streamtype, event.tcp_data.is_server, event.tcp_data.offset))
else:
print(">>> UNKNOWN TCP EVENT")
def process_data_channel(cpu, data, size):
assert size >= ct.sizeof(TCPDataHeader)
header = ct.cast(data, ct.POINTER(TCPDataHeader)).contents
datalen = header.length
out = "### DATA(size: {}, datalen: {}): ".format(size, datalen)
print(out)
assert size >= ct.sizeof(TCPDataHeader) + datalen
message = ct.cast(data, ct.POINTER(TCPDataMessage)).contents
out = ct.cast(message.data, ct.c_char_p).value[0:datalen]
print(out)
b[b"tcp_events"].open_perf_buffer(print_tcp_event)
b[b"data_channel"].open_perf_buffer(process_data_channel)
while 1:
try:
had_message = False
fields = b.trace_fields(True)
if fields:
(task, pid, cpu, flags, ts, msg) = fields
if msg:
had_message=True
print(msg.decode('latin-1'))
if not had_message:
# Prefer to print messages instead of polling to speed through them
b.perf_buffer_poll(10)
except ValueError:
continue
```
#### File: kernel/recorder/mock_authz_server.py
```python
import http.server


class RequestHandler(http.server.BaseHTTPRequestHandler):
    def do_GET(self):
        payload = b'{"token":"XYZ","issuedAtS":"1590626143","expirationS":"1590626743"}'
        self.send_response(200)
        self.send_header("Content-Length", len(payload))
        self.end_headers()
        self.wfile.write(payload)


server_address = ('', 8000)
httpd = http.server.HTTPServer(server_address, RequestHandler)
httpd.serve_forever()
``` |
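The handler above always returns the same token payload regardless of path; a tiny client sketch against a locally running instance:
```python
import json
import urllib.request

with urllib.request.urlopen("http://localhost:8000/") as resp:
    body = json.loads(resp.read())
print(body["token"], "expires at", body["expirationS"])
```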
{
"source": "jimwwalker/perfrunner",
"score": 2
} |
#### File: cbagent/collectors/analytics.py
```python
from typing import List

from cbagent.collectors import Collector


class AnalyticsStats(Collector):

    COLLECTOR = "analytics"

    PORT = 9110

    def __init__(self, settings, servers: List[str]):
        super().__init__(settings)
        self.servers = servers

    def update_metadata(self):
        self.mc.add_cluster()
        for server in self.servers:
            self.mc.add_server(server)

    def get_stats(self, server: str) -> dict:
        return self.get_http(path='/analytics/node/stats',
                             server=server,
                             port=self.PORT)

    def sample(self):
        for server in self.servers:
            stats = self.get_stats(server)
            self.update_metric_metadata(stats.keys(), server=server)
            self.store.append(stats,
                              cluster=self.cluster,
                              server=server,
                              collector=self.COLLECTOR)
```
#### File: cbagent/collectors/secondary_debugstats.py
```python
from typing import Iterator
from cbagent.collectors import Collector
class SecondaryDebugStats(Collector):
COLLECTOR = "secondary_debugstats"
METRICS = (
"memory_used",
"memory_used_queue",
"memory_used_storage",
"num_connections",
)
PORT = 9102
def __init__(self, settings):
super().__init__(settings)
self.index_node = settings.index_node
def parse_timings(self, value: str) -> float:
# timings attribute has 3 space separated values - count, sum, sum of
# square.
stats = [int(x) for x in value.split(" ")]
if len(stats) == 3 and stats[0] > 0:
return stats[1] / stats[0] # Compute average value as sum/count.
return 0
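# Worked example: a raw timings value such as "120 34560 9876543" parses to
# count=120, sum=34560, sum-of-squares=9876543, and the method returns the mean
# 34560 / 120 = 288. The sum of squares is ignored here; it would only be needed
# to derive a variance. (The sample numbers are illustrative, not real stats.)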
def get_stats(self) -> dict:
return self.get_http(path='/stats',
server=self.index_node,
port=self.PORT)
def _get_secondary_debugstats(self, bucket=None, index=None) -> dict:
stats = self.get_stats()
samples = dict()
for metric in self.METRICS:
_metric = bucket and "{}:{}".format(bucket, metric) or metric
_metric = index and "{}:{}:{}".format(bucket, index, metric) or _metric
if _metric in stats:
value = stats[_metric]
if metric.startswith("timings/"):
value = self.parse_timings(value)
samples[metric.replace('/', '_')] = value
return samples
def sample(self):
stats = self._get_secondary_debugstats()
if stats:
self.update_metric_metadata(self.METRICS)
self.store.append(stats, cluster=self.cluster,
collector=self.COLLECTOR)
def update_metadata(self):
self.mc.add_cluster()
class SecondaryDebugStatsBucket(SecondaryDebugStats):
COLLECTOR = "secondary_debugstats_bucket"
METRICS = (
"mutation_queue_size",
"num_nonalign_ts",
"ts_queue_size",
)
def sample(self):
for bucket in self.get_buckets():
stats = self._get_secondary_debugstats(bucket=bucket)
if stats:
self.update_metric_metadata(self.METRICS, bucket=bucket)
self.store.append(stats, cluster=self.cluster, bucket=bucket,
collector=self.COLLECTOR)
def update_metadata(self):
self.mc.add_cluster()
for bucket in self.get_buckets():
self.mc.add_bucket(bucket)
class SecondaryDebugStatsIndex(SecondaryDebugStats):
COLLECTOR = "secondary_debugstats_index"
METRICS = (
"avg_scan_latency",
"avg_scan_wait_latency",
"avg_ts_interval",
"avg_ts_items_count",
"disk_store_duration",
"flush_queue_size",
"num_compactions",
"num_completed_requests",
"num_rows_returned",
"num_rows_scanned_aggr",
"scan_cache_hit_aggr",
"timings/dcp_getseqs",
"timings/storage_commit",
"timings/storage_del",
"timings/storage_get",
"timings/storage_set",
"timings/storage_snapshot_create",
)
def get_all_indexes(self) -> Iterator:
for index in self.indexes:
yield index, self.buckets[0]
def sample(self):
for index, bucket in self.get_all_indexes():
stats = self._get_secondary_debugstats(bucket=bucket, index=index)
if stats:
_index = "{}.{}".format(bucket, index)
self.update_metric_metadata(self.METRICS, index=_index)
self.store.append(stats,
cluster=self.cluster,
index=_index,
collector=self.COLLECTOR)
def update_metadata(self):
self.mc.add_cluster()
for index, bucket in self.get_all_indexes():
self.mc.add_index("{}.{}".format(bucket, index))
```
#### File: cbagent/collectors/secondary_storage_stats.py
```python
from cbagent.collectors import Collector
class SecondaryStorageStats(Collector):
COLLECTOR = "secondary_storage_stats"
METRICS = (
"allocated",
"bytes_incoming",
"bytes_written",
"cache_hit_ratio",
"cache_hits",
"cache_misses",
"freed",
"lss_fragmentation",
"memory_size",
"num_cached_pages",
"num_pages",
"num_pages_swapin",
"num_pages_swapout",
"rcache_hit_ratio",
"rcache_hits",
"rcache_misses",
"reclaimed",
"reclaim_pending",
"resident_ratio",
"write_amp",
"mvcc_purge_ratio",
"memory_quota",
"lss_blk_read_bs",
"lss_blk_gc_reads_bs",
"lss_blk_rdr_reads_bs",
)
def __init__(self, settings):
super().__init__(settings)
self.index_node = settings.index_node
def get_all_indexes(self):
for index in self.indexes:
yield index, self.buckets[0]
def _get_secondary_storage_stats(self):
server = self.index_node
port = '9102'
uri = "/stats/storage"
samples = self.get_http(path=uri, server=server, port=port)
index_stats = dict()
for sample in samples:
stats = dict()
if "Index" not in sample:
continue
index = sample["Index"].split(":")[1]
for store in sample["Stats"]:
for metric, value in sample["Stats"][store].items():
if metric in self.METRICS:
key = store + "_" + metric
stats[key] = value
index_stats[index] = stats
return index_stats
def sample(self):
index_stats = self._get_secondary_storage_stats()
if index_stats:
for index, bucket in self.get_all_indexes():
if index in index_stats and index_stats[index]:
stats = index_stats[index]
index1 = "{}.{}".format(bucket, index)
self.update_metric_metadata(stats.keys(), index=index1)
self.store.append(stats, cluster=self.cluster,
index=index1, collector=self.COLLECTOR)
def update_metadata(self):
self.mc.add_cluster()
for index, bucket in self.get_all_indexes():
self.mc.add_index("{}.{}".format(bucket, index))
```
#### File: perfrunner/helpers/remote.py
```python
from fabric import state
from fabric.api import run, settings
from logger import logger
from perfrunner.remote.linux import RemoteLinux
from perfrunner.remote.windows import RemoteWindows
from perfrunner.settings import ClusterSpec
class RemoteHelper:
def __new__(cls, cluster_spec: ClusterSpec, verbose: bool = False):
if not cluster_spec.ssh_credentials:
return None
state.env.user, state.env.password = cluster_spec.ssh_credentials
state.output.running = verbose
state.output.stdout = verbose
os = cls.detect_os(cluster_spec)
if os == 'Cygwin':
return RemoteWindows(cluster_spec, os)
else:
return RemoteLinux(cluster_spec, os)
@staticmethod
def detect_os(cluster_spec: ClusterSpec):
logger.info('Detecting OS')
with settings(host_string=cluster_spec.servers[0]):
os = run('python -c "import platform; print platform.dist()[0]"')
if os:
return os
else:
return 'Cygwin'
```
#### File: perfrunner/helpers/rest.py
```python
import json
import time
from collections import namedtuple
from typing import Callable, Dict, Iterator, List
import requests
from decorator import decorator
from requests.exceptions import ConnectionError
from logger import logger
from perfrunner.helpers.misc import pretty_dict
from perfrunner.settings import BucketSettings, ClusterSpec
MAX_RETRY = 20
RETRY_DELAY = 10
ANALYTICS_PORT = 8095
EVENTING_PORT = 8096
@decorator
def retry(method: Callable, *args, **kwargs):
r = namedtuple('request', ['url'])('')
for _ in range(MAX_RETRY):
try:
r = method(*args, **kwargs)
except ConnectionError:
time.sleep(RETRY_DELAY * 2)
continue
if r.status_code in range(200, 203):
return r
else:
logger.warn(r.text)
logger.warn('Retrying {}'.format(r.url))
time.sleep(RETRY_DELAY)
logger.interrupt('Request {} failed after {} attempts'.format(
r.url, MAX_RETRY
))
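# Worst-case behaviour of @retry with the constants above: a call that keeps
# returning a non-2xx status sleeps RETRY_DELAY (10 s) per attempt, about 200 s
# over MAX_RETRY = 20 attempts; one that keeps raising ConnectionError sleeps
# RETRY_DELAY * 2 (20 s) per attempt, about 400 s, before logger.interrupt() fires.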
class RestHelper:
def __init__(self, cluster_spec: ClusterSpec):
self.rest_username, self.rest_password = cluster_spec.rest_credentials
self.auth = self.rest_username, self.rest_password
self.cluster_spec = cluster_spec
@retry
def get(self, **kwargs) -> requests.Response:
return requests.get(auth=self.auth, **kwargs)
def _post(self, **kwargs) -> requests.Response:
return requests.post(auth=self.auth, **kwargs)
@retry
def post(self, **kwargs) -> requests.Response:
return self._post(**kwargs)
def _put(self, **kwargs) -> requests.Response:
return requests.put(auth=self.auth, **kwargs)
@retry
def put(self, **kwargs) -> requests.Response:
return self._put(**kwargs)
def _delete(self, **kwargs) -> requests.Response:
return requests.delete(auth=self.auth, **kwargs)
def delete(self, **kwargs) -> requests.Response:
return self._delete(**kwargs)
def set_data_path(self, host: str, path: str):
logger.info('Configuring data path on {}'.format(host))
api = 'http://{}:8091/nodes/self/controller/settings'.format(host)
data = {
'path': path,
}
self.post(url=api, data=data)
def set_index_path(self, host: str, path: str):
logger.info('Configuring index path on {}'.format(host))
api = 'http://{}:8091/nodes/self/controller/settings'.format(host)
data = {
'index_path': path,
}
self.post(url=api, data=data)
def set_analytics_paths(self, host: str, paths: List[str]):
logger.info('Configuring analytics path on {}: {}'.format(host, paths))
api = 'http://{}:8091/nodes/self/controller/settings'.format(host)
data = {
'cbas_path': paths,
}
self.post(url=api, data=data)
def set_auth(self, host: str):
logger.info('Configuring cluster authentication: {}'.format(host))
api = 'http://{}:8091/settings/web'.format(host)
data = {
'username': self.rest_username, 'password': self.rest_password,
'port': 'SAME'
}
self.post(url=api, data=data)
def rename(self, host: str):
logger.info('Changing server name: {}'.format(host))
api = 'http://{}:8091/node/controller/rename'.format(host)
data = {'hostname': host}
self.post(url=api, data=data)
def set_mem_quota(self, host: str, mem_quota: str):
logger.info('Configuring data RAM quota: {} MB'.format(mem_quota))
api = 'http://{}:8091/pools/default'.format(host)
data = {'memoryQuota': mem_quota}
self.post(url=api, data=data)
def set_index_mem_quota(self, host: str, mem_quota: int):
logger.info('Configuring index RAM quota: {} MB'.format(mem_quota))
api = 'http://{}:8091/pools/default'.format(host)
data = {'indexMemoryQuota': mem_quota}
self.post(url=api, data=data)
def set_fts_index_mem_quota(self, host: str, mem_quota: int):
logger.info('Configuring FTS RAM quota: {} MB'.format(mem_quota))
api = 'http://{}:8091/pools/default'.format(host)
data = {'ftsMemoryQuota': mem_quota}
self.post(url=api, data=data)
def set_analytics_mem_quota(self, host: str, mem_quota: int):
logger.info('Configuring Analytics RAM quota: {} MB'.format(mem_quota))
api = 'http://{}:8091/pools/default'.format(host)
data = {'cbasMemoryQuota': mem_quota}
self.post(url=api, data=data)
def set_eventing_mem_quota(self, host: str, mem_quota: int):
logger.info('Configuring eventing RAM quota: {} MB'.format(mem_quota))
api = 'http://{}:8091/pools/default'.format(host)
data = {'eventingMemoryQuota': mem_quota}
self.post(url=api, data=data)
def set_query_settings(self, host: str, override_settings: dict):
api = 'http://{}:8093/admin/settings'.format(host)
settings = self.get(url=api).json()
for override, value in override_settings.items():
if override not in settings:
logger.error('Cannot change query setting {} to {}, setting invalid'
.format(override, value))
continue
settings[override] = value
logger.info('Changing {} to {}'.format(override, value))
self.post(url=api, data=json.dumps(settings))
def get_query_settings(self, host: str):
api = 'http://{}:8093/admin/settings'.format(host)
return self.get(url=api).json()
def set_index_settings(self, host: str, settings: dict):
api = 'http://{}:9102/settings'.format(host)
curr_settings = self.get_index_settings(host)
for option, value in settings.items():
if option in curr_settings:
logger.info('Changing {} to {}'.format(option, value))
self.post(url=api, data=json.dumps({option: value}))
else:
logger.warn('Skipping unknown option: {}'.format(option))
def get_index_settings(self, host: str) -> dict:
api = 'http://{}:9102/settings?internal=ok'.format(host)
return self.get(url=api).json()
def get_gsi_stats(self, host: str) -> dict:
api = 'http://{}:9102/stats'.format(host)
return self.get(url=api).json()
def create_index(self, host: str, bucket: str, name: str, field: str,
storage: str = 'memdb'):
api = 'http://{}:9102/createIndex'.format(host)
data = {
'index': {
'bucket': bucket,
'using': storage,
'name': name,
'secExprs': ['`{}`'.format(field)],
'exprType': 'N1QL',
'isPrimary': False,
'where': '',
'deferred': False,
'partitionKey': '',
'partitionScheme': 'SINGLE',
},
'type': 'create',
'version': 1,
}
logger.info('Creating index {}'.format(pretty_dict(data)))
self.post(url=api, data=json.dumps(data))
def set_services(self, host: str, services: str):
logger.info('Configuring services on {}: {}'.format(host, services))
api = 'http://{}:8091/node/controller/setupServices'.format(host)
data = {'services': services}
self.post(url=api, data=data)
def add_node(self, host: str, new_host: str, services: str = None):
logger.info('Adding new node: {}'.format(new_host))
api = 'http://{}:8091/controller/addNode'.format(host)
data = {
'hostname': new_host,
'user': self.rest_username,
'password': self.rest_password,
'services': services,
}
self.post(url=api, data=data)
def rebalance(self, host: str, known_nodes: List[str],
ejected_nodes: List[str]):
logger.info('Starting rebalance')
api = 'http://{}:8091/controller/rebalance'.format(host)
known_nodes = ','.join(map(self.get_otp_node_name, known_nodes))
ejected_nodes = ','.join(map(self.get_otp_node_name, ejected_nodes))
data = {
'knownNodes': known_nodes,
'ejectedNodes': ejected_nodes
}
self.post(url=api, data=data)
def increase_bucket_limit(self, host: str, num_buckets: int):
logger.info('increasing bucket limit to {}'.format(num_buckets))
api = 'http://{}:8091/internalSettings'.format(host)
data = {
'maxBucketCount': num_buckets
}
self.post(url=api, data=data)
def get_counters(self, host: str) -> dict:
api = 'http://{}:8091/pools/default'.format(host)
return self.get(url=api).json()['counters']
def is_not_balanced(self, host: str) -> int:
counters = self.get_counters(host)
return counters.get('rebalance_start') - counters.get('rebalance_success')
def get_failover_counter(self, host: str) -> int:
counters = self.get_counters(host)
return counters.get('failover_node')
def get_tasks(self, host: str) -> dict:
api = 'http://{}:8091/pools/default/tasks'.format(host)
return self.get(url=api).json()
def get_task_status(self, host: str, task_type: str) -> Tuple[bool, float]:
for task in self.get_tasks(host):
if task['type'] == task_type:
is_running = task['status'] == 'running'
progress = task.get('progress')
return is_running, progress
return False, 0
def delete_bucket(self, host: str, name: str):
logger.info('Deleting new bucket: {}'.format(name))
api = 'http://{host}:8091/pools/default/buckets/{bucket}'.format(host=host, bucket=name)
self.delete(url=api)
def create_bucket(self,
host: str,
name: str,
password: str,
ram_quota: int,
replica_number: int,
replica_index: int,
eviction_policy: str,
bucket_type: str,
conflict_resolution_type: str = None,
compression_mode: str = None):
logger.info('Adding new bucket: {}'.format(name))
api = 'http://{}:8091/pools/default/buckets'.format(host)
data = {
'name': name,
'bucketType': bucket_type,
'ramQuotaMB': ram_quota,
'evictionPolicy': eviction_policy,
'flushEnabled': 1,
'replicaNumber': replica_number,
'authType': 'sasl',
'saslPassword': password,
}
if bucket_type == BucketSettings.BUCKET_TYPE:
data['replicaIndex'] = replica_index
if conflict_resolution_type:
data['conflictResolutionType'] = conflict_resolution_type
if compression_mode:
data['compressionMode'] = compression_mode
logger.info('Bucket configuration: {}'.format(pretty_dict(data)))
self.post(url=api, data=data)
def flush_bucket(self, host: str, bucket: str):
logger.info('Flushing bucket: {}'.format(bucket))
api = 'http://{}:8091/pools/default/buckets/{}/controller/doFlush'.format(host, bucket)
self.post(url=api)
def configure_auto_compaction(self, host, settings):
logger.info('Applying auto-compaction settings: {}'.format(settings))
api = 'http://{}:8091/controller/setAutoCompaction'.format(host)
data = {
'databaseFragmentationThreshold[percentage]': settings.db_percentage,
'viewFragmentationThreshold[percentage]': settings.view_percentage,
'parallelDBAndViewCompaction': str(settings.parallel).lower()
}
self.post(url=api, data=data)
def get_auto_compaction_settings(self, host: str) -> dict:
api = 'http://{}:8091/settings/autoCompaction'.format(host)
return self.get(url=api).json()
def get_bucket_stats(self, host: str, bucket: str) -> dict:
api = 'http://{}:8091/pools/default/buckets/{}/stats'.format(host,
bucket)
return self.get(url=api).json()
def get_xdcr_stats(self, host: str, bucket: str) -> dict:
api = 'http://{}:8091/pools/default/buckets/@xdcr-{}/stats'.format(host,
bucket)
return self.get(url=api).json()
def add_remote_cluster(self,
local_host: str,
remote_host: str,
name: str,
secure_type: str,
certificate: str):
logger.info('Adding a remote cluster: {}'.format(remote_host))
api = 'http://{}:8091/pools/default/remoteClusters'.format(local_host)
payload = {
'name': name,
'hostname': remote_host,
'username': self.rest_username,
'password': self.rest_password,
}
if secure_type:
payload['secureType'] = secure_type
if certificate:
payload['demandEncryption'] = 1
payload['certificate'] = certificate
self.post(url=api, data=payload)
def get_remote_clusters(self, host: str) -> List[Dict]:
logger.info('Getting remote clusters')
api = 'http://{}:8091/pools/default/remoteClusters'.format(host)
return self.get(url=api).json()
def create_replication(self, host: str, params: dict):
logger.info('Starting replication with parameters {}'.format(params))
api = 'http://{}:8091/controller/createReplication'.format(host)
self.post(url=api, data=params)
def trigger_bucket_compaction(self, host: str, bucket: str):
logger.info('Triggering bucket {} compaction'.format(bucket))
api = 'http://{}:8091/pools/default/buckets/{}/controller/compactBucket'\
.format(host, bucket)
self.post(url=api)
def trigger_index_compaction(self, host: str, bucket: str, ddoc: str):
logger.info('Triggering ddoc {} compaction, bucket {}'.format(
ddoc, bucket
))
api = 'http://{}:8091/pools/default/buckets/{}/ddocs/_design%2F{}/controller/compactView'\
.format(host, bucket, ddoc)
self.post(url=api)
def create_ddoc(self, host: str, bucket: str, ddoc_name: str, ddoc: dict):
logger.info('Creating new ddoc {}, bucket {}'.format(
ddoc_name, bucket
))
api = 'http://{}:8091/couchBase/{}/_design/{}'.format(
host, bucket, ddoc_name)
data = json.dumps(ddoc)
headers = {'Content-type': 'application/json'}
self.put(url=api, data=data, headers=headers)
def query_view(self, host: str, bucket: str, ddoc_name: str,
view_name: str, params: dict):
logger.info('Querying view: {}/_design/{}/_view/{}'.format(
bucket, ddoc_name, view_name
))
api = 'http://{}:8091/couchBase/{}/_design/{}/_view/{}'.format(
host, bucket, ddoc_name, view_name)
self.get(url=api, params=params)
def get_version(self, host: str) -> str:
logger.info('Getting Couchbase Server version')
api = 'http://{}:8091/pools/'.format(host)
r = self.get(url=api).json()
return r['implementationVersion'] \
.replace('-rel-enterprise', '') \
.replace('-enterprise', '') \
.replace('-community', '')
def supports_rbac(self, host: str) -> bool:
"""Return true if the cluster supports RBAC."""
rbac_url = 'http://{}:8091/settings/rbac/roles'.format(host)
r = requests.get(auth=self.auth, url=rbac_url)
return r.status_code == requests.codes.ok
def is_community(self, host: str) -> bool:
logger.info('Getting Couchbase Server edition')
api = 'http://{}:8091/pools/'.format(host)
r = self.get(url=api).json()
return 'community' in r['implementationVersion']
def get_memcached_port(self, host: str) -> int:
logger.info('Getting memcached port from {}'.format(host))
api = 'http://{}:8091/nodes/self'.format(host)
r = self.get(url=api).json()
return r['ports']['direct']
def get_otp_node_name(self, host: str) -> str:
logger.info('Getting OTP node name from {}'.format(host))
api = 'http://{}:8091/nodes/self'.format(host)
r = self.get(url=api).json()
return r['otpNode']
def set_internal_settings(self, host: str, data: dict):
logger.info('Updating internal settings: {}'.format(data))
api = 'http://{}:8091/internalSettings'.format(host)
self.post(url=api, data=data)
def set_xdcr_cluster_settings(self, host: str, data: dict):
logger.info('Updating xdcr cluster settings: {}'.format(data))
api = 'http://{}:8091/settings/replications'.format(host)
self.post(url=api, data=data)
def run_diag_eval(self, host: str, cmd: str):
api = 'http://{}:8091/diag/eval'.format(host)
self.post(url=api, data=cmd)
def enable_auto_failover(self, host: str, failover_min: int, failover_max: int):
logger.info('Enabling auto-failover with the minimum timeout')
api = 'http://{}:8091/settings/autoFailover'.format(host)
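# Try the minimum timeout first; if the server rejects it (non-200 response), fall back to the maximum timeout.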
for timeout in failover_min, failover_max:
data = {'enabled': 'true',
'timeout': timeout,
'failoverOnDataDiskIssues[enabled]': 'true',
'failoverOnDataDiskIssues[timePeriod]': 10
}
r = self._post(url=api, data=data)
if r.status_code == 200:
break
def get_certificate(self, host: str) -> str:
logger.info('Getting remote certificate')
api = 'http://{}:8091/pools/default/certificate'.format(host)
return self.get(url=api).text
def fail_over(self, host: str, node: str):
logger.info('Failing over node: {}'.format(node))
api = 'http://{}:8091/controller/failOver'.format(host)
data = {'otpNode': self.get_otp_node_name(node)}
self.post(url=api, data=data)
def graceful_fail_over(self, host: str, node: str):
logger.info('Gracefully failing over node: {}'.format(node))
api = 'http://{}:8091/controller/startGracefulFailover'.format(host)
data = {'otpNode': self.get_otp_node_name(node)}
self.post(url=api, data=data)
def add_back(self, host: str, node: str):
logger.info('Adding node back: {}'.format(node))
api = 'http://{}:8091/controller/reAddNode'.format(host)
data = {'otpNode': self.get_otp_node_name(node)}
self.post(url=api, data=data)
def set_delta_recovery_type(self, host: str, node: str):
logger.info('Enabling delta recovery: {}'.format(node))
api = 'http://{}:8091/controller/setRecoveryType'.format(host)
data = {
'otpNode': self.get_otp_node_name(node),
'recoveryType': 'delta' # alt: full
}
self.post(url=api, data=data)
def node_statuses(self, host: str) -> dict:
api = 'http://{}:8091/nodeStatuses'.format(host)
data = self.get(url=api).json()
return {node: info['status'] for node, info in data.items()}
def node_statuses_v2(self, host: str) -> dict:
api = 'http://{}:8091/pools/default'.format(host)
data = self.get(url=api).json()
return {node['hostname']: node['status'] for node in data['nodes']}
def get_node_stats(self, host: str, bucket: str) -> Iterator:
api = 'http://{}:8091/pools/default/buckets/{}/nodes'.format(host,
bucket)
data = self.get(url=api).json()
for server in data['servers']:
api = 'http://{}:8091{}'.format(host, server['stats']['uri'])
data = self.get(url=api).json()
yield data['hostname'], data['op']['samples']
def get_vbmap(self, host: str, bucket: str) -> dict:
logger.info('Reading vbucket map: {}/{}'.format(host, bucket))
api = 'http://{}:8091/pools/default/buckets/{}'.format(host, bucket)
data = self.get(url=api).json()
return data['vBucketServerMap']['vBucketMap']
def get_server_list(self, host: str, bucket: str) -> List[str]:
api = 'http://{}:8091/pools/default/buckets/{}'.format(host, bucket)
data = self.get(url=api).json()
return [server.split(':')[0]
for server in data['vBucketServerMap']['serverList']]
def exec_n1ql_statement(self, host: str, statement: str) -> dict:
api = 'http://{}:8093/query/service'.format(host)
data = {
'statement': statement,
}
response = self.post(url=api, data=data)
return response.json()
def explain_n1ql_statement(self, host: str, statement: str):
statement = 'EXPLAIN {}'.format(statement)
return self.exec_n1ql_statement(host, statement)
def get_query_stats(self, host: str) -> dict:
logger.info('Getting query engine stats')
api = 'http://{}:8093/admin/stats'.format(host)
response = self.get(url=api)
return response.json()
def delete_fts_index(self, host: str, index: str):
logger.info('Deleting FTS index: {}'.format(index))
api = 'http://{}:8094/api/index/{}'.format(host, index)
self.delete(url=api)
def create_fts_index(self, host: str, index: str, definition: dict):
logger.info('Creating a new FTS index: {}'.format(index))
api = 'http://{}:8094/api/index/{}'.format(host, index)
headers = {'Content-Type': 'application/json'}
data = json.dumps(definition, ensure_ascii=False)
self.put(url=api, data=data, headers=headers)
def get_fts_doc_count(self, host: str, index: str) -> int:
api = 'http://{}:8094/api/index/{}/count'.format(host, index)
response = self.get(url=api).json()
return response['count']
def get_fts_stats(self, host: str) -> dict:
api = 'http://{}:8094/api/nsstats'.format(host)
response = self.get(url=api)
return response.json()
def get_elastic_stats(self, host: str) -> dict:
api = "http://{}:9200/_stats".format(host)
response = self.get(url=api)
return response.json()
def delete_elastic_index(self, host: str, index: str):
logger.info('Deleting Elasticsearch index: {}'.format(index))
api = 'http://{}:9200/{}'.format(host, index)
self.delete(url=api)
def create_elastic_index(self, host: str, index: str, definition: dict):
logger.info('Creating a new Elasticsearch index: {}'.format(index))
api = 'http://{}:9200/{}'.format(host, index)
headers = {'Content-Type': 'application/json'}
data = json.dumps(definition, ensure_ascii=False)
self.put(url=api, data=data, headers=headers)
def get_elastic_doc_count(self, host: str, index: str) -> int:
api = "http://{}:9200/{}/_count".format(host, index)
response = self.get(url=api).json()
return response['count']
def get_index_status(self, host: str) -> dict:
api = 'http://{}:9102/getIndexStatus'.format(host)
response = self.get(url=api)
return response.json()
def get_index_stats(self, hosts: List[str]) -> dict:
api = 'http://{}:9102/stats'
data = {}
for host in hosts:
host_data = self.get(url=api.format(host))
data.update(host_data.json())
return data
def get_index_num_connections(self, host: str) -> int:
api = 'http://{}:9102/stats'.format(host)
response = self.get(url=api).json()
return response['num_connections']
def get_index_storage_stats(self, host: str) -> str:
api = 'http://{}:9102/stats/storage'.format(host)
return self.get(url=api).text
def get_index_storage_stats_mm(self, host: str) -> str:
api = 'http://{}:9102/stats/storage/mm'.format(host)
return self.get(url=api).text
def get_audit_settings(self, host: str) -> dict:
logger.info('Getting current audit settings')
api = 'http://{}:8091/settings/audit'.format(host)
return self.get(url=api).json()
def enable_audit(self, host: str, disabled: List[str]):
logger.info('Enabling audit')
api = 'http://{}:8091/settings/audit'.format(host)
data = {
'auditdEnabled': 'true',
}
if disabled:
data['disabled'] = ','.join(disabled)
self.post(url=api, data=data)
def get_rbac_roles(self, host: str) -> List[dict]:
logger.info('Getting the existing RBAC roles')
api = 'http://{}:8091/settings/rbac/roles'.format(host)
return self.get(url=api).json()
def delete_rbac_user(self, host: str, bucket: str):
logger.info('Deleting an RBAC user: {}'.format(bucket))
for domain in 'local', 'builtin':
api = 'http://{}:8091/settings/rbac/users/{}/{}'.format(host,
domain,
bucket)
r = self._delete(url=api)
if r.status_code == 200:
break
def add_rbac_user(self, host: str, user: str, password: str,
roles: List[str]):
logger.info('Adding an RBAC user: {}, roles: {}'.format(user,
roles))
data = {
'password': password,
'roles': ','.join(roles),
}
for domain in 'local', 'builtin':
api = 'http://{}:8091/settings/rbac/users/{}/{}'.format(host,
domain,
user)
r = self._put(url=api, data=data)
if r.status_code == 200:
break
def analytics_node_active(self, host: str) -> bool:
logger.info('Checking if analytics node is active: {}'.format(host))
api = 'http://{}:{}/analytics/cluster'.format(host, ANALYTICS_PORT)
status = self.get(url=api).json()
return status["state"] == "ACTIVE"
def exec_analytics_statement(self, analytics_node: str,
statement: str) -> requests.Response:
api = 'http://{}:{}/analytics/service'.format(analytics_node,
ANALYTICS_PORT)
data = {
'statement': statement
}
return self.post(url=api, data=data)
def get_analytics_stats(self, analytics_node: str) -> dict:
api = 'http://{}:9110/analytics/node/stats'.format(analytics_node)
return self.get(url=api).json()
def set_analytics_logging_level(self, analytics_node: str, log_level: str):
logger.info('Setting log level \"{}\" for analytics'.format(log_level))
api = 'http://{}:{}/analytics/config/service'.format(analytics_node, ANALYTICS_PORT)
data = {
'logLevel': log_level
}
r = self.put(url=api, data=data)
if r.status_code not in (200, 202,):
logger.warning('Unexpected request status code {}'.
format(r.status_code))
def set_analytics_page_size(self, analytics_node: str, page_size: str):
logger.info('Setting buffer cache page size \"{}\" for analytics'.format(page_size))
api = 'http://{}:{}/analytics/config/service'.format(analytics_node, ANALYTICS_PORT)
data = {
'storageBuffercachePagesize': page_size
}
r = self.put(url=api, data=data)
if r.status_code not in (200, 202,):
logger.warning('Unexpected request status code {}'.
format(r.status_code))
def restart_analytics_cluster(self, analytics_node: str):
logger.info('Restarting analytics cluster')
api = 'http://{}:{}/analytics/cluster/restart'.format(analytics_node, ANALYTICS_PORT)
r = self.post(url=api)
if r.status_code not in (200, 202,):
logger.warning('Unexpected request status code {}'.
format(r.status_code))
def validate_analytics_logging_level(self, analytics_node: str, log_level: str):
logger.info('Checking that analytics log level is set to {}'.format(log_level))
api = 'http://{}:{}/analytics/config/service'.format(analytics_node, ANALYTICS_PORT)
response = self.get(url=api).json()
if "logLevel" in response:
return response["logLevel"] == log_level
return False
def deploy_function(self, node: str, func: dict, name: str):
logger.info('Deploying function on node {}: {}'.format(node, pretty_dict(func)))
api = 'http://{}:8096/api/v1/functions/{}'.format(node, name)
self.post(url=api, data=json.dumps(func))
def undeploy_function(self, node: str, func: dict, name: str):
logger.info('Un-deploying function on node {}: {}'.format(node, pretty_dict(func)))
api = 'http://{}:8096/api/v1/functions/{}/settings/'.format(node, name)
self.post(url=api, data=func)
def get_num_events_processed(self, event: str, node: str, name: str):
logger.info('get stats on node {} for {}'.format(node, name))
data = {}
all_stats = self.get_eventing_stats(node=node)
for stat in all_stats:
if name == stat["function_name"]:
data = stat["event_processing_stats"]
break
logger.info(data)
if event == "ALL":
return data
if event in data:
return data[event]
return 0
def get_apps_with_status(self, node: str, status: str):
logger.info('get apps with status {} on node {}'.format(status, node))
api = 'http://{}:{}/api/v1/status'.format(node, EVENTING_PORT)
data = self.get(url=api).json()
apps = []
for app in data["apps"]:
if app["composite_status"] == status:
apps.append(app["name"])
return apps
def get_eventing_stats(self, node: str, full_stats: bool = False) -> dict:
logger.info('get eventing stats on node {}'.format(node))
api = 'http://{}:{}/api/v1/stats'.format(node, EVENTING_PORT)
if full_stats:
api += "?type=full"
return self.get(url=api).json()
def get_active_nodes_by_role(self, master_node: str, role: str) -> List[str]:
active_nodes = self.node_statuses(master_node)
active_nodes_by_role = []
for node in self.cluster_spec.servers_by_role(role):
if node + ":8091" in active_nodes:
active_nodes_by_role.append(node)
return active_nodes_by_role
def upload_cluster_certificate(self, node: str):
logger.info("Uploading cluster certificate to {}".format(node))
api = 'http://{}:8091/controller/uploadClusterCA'.format(node)
data = open('./certificates/inbox/ca.pem', 'rb').read()
self.post(url=api, data=data)
def reload_cluster_certificate(self, node: str):
logger.info("Reloading certificate on {}".format(node))
api = 'http://{}:8091/node/controller/reloadCertificate'.format(node)
self.post(url=api)
def enable_certificate_auth(self, node: str):
logger.info("Enabling certificate-based client auth on {}".format(node))
api = 'http://{}:8091/settings/clientCertAuth'.format(node)
data = open('./certificates/inbox/config.json', 'rb').read()
self.post(url=api, data=data)
```
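As a usage illustration only, the sketch below drives a few of the `RestHelper` calls defined above to create a bucket and rebalance a new node in. The spec file path, host addresses, and bucket settings are illustrative assumptions, not values taken from this repository.
```python
from perfrunner.helpers.rest import RestHelper
from perfrunner.settings import ClusterSpec

# Hypothetical cluster spec and hosts; adjust to your environment.
cluster_spec = ClusterSpec()
cluster_spec.parse(fname='clusters/example.spec')

rest = RestHelper(cluster_spec)
rest.create_bucket(host='10.0.0.1', name='bucket-1', password='password',
                   ram_quota=1024, replica_number=1, replica_index=0,
                   eviction_policy='valueOnly', bucket_type='membase')
rest.add_node(host='10.0.0.1', new_host='10.0.0.2', services='kv')
rest.rebalance(host='10.0.0.1',
               known_nodes=['10.0.0.1', '10.0.0.2'],
               ejected_nodes=[])
```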
#### File: perfrunner/tests/fts.py
```python
import os
import shutil
from logger import logger
from perfrunner.helpers import local
from perfrunner.helpers.cbmonitor import timeit, with_stats
from perfrunner.helpers.misc import pretty_dict, read_json
from perfrunner.helpers.profiler import with_profiles
from perfrunner.helpers.worker import jts_run_task, jts_warmup_task
from perfrunner.tests import PerfTest
class JTSTest(PerfTest):
result = dict()
def __init__(self, cluster_spec, test_config, verbose):
super().__init__(cluster_spec, test_config, verbose)
self.access = self.test_config.jts_access_settings
self.showfast = self.test_config.showfast
def download_jts(self):
if self.worker_manager.is_remote:
self.remote.init_jts(repo=self.access.jts_repo,
branch=self.access.jts_repo_branch,
worker_home=self.worker_manager.WORKER_HOME,
jts_home=self.access.jts_home_dir)
else:
local.init_jts(repo=self.access.jts_repo,
branch=self.access.jts_repo_branch,
jts_home=self.access.jts_home_dir)
@with_stats
@with_profiles
def run_test(self):
self.run_phase('jts run phase', jts_run_task, self.access, self.target_iterator)
self._download_logs()
def warmup(self):
if int(self.access.warmup_query_workers) > 0:
self.run_phase('jts warmup phase', jts_warmup_task, self.access, self.target_iterator)
def _download_logs(self):
local_dir = self.access.jts_logs_dir
if self.worker_manager.is_remote:
if os.path.exists(local_dir):
shutil.rmtree(local_dir, ignore_errors=True)
os.makedirs(local_dir)
self.remote.get_jts_logs(self.worker_manager.WORKER_HOME,
self.access.jts_home_dir,
self.access.jts_logs_dir)
else:
local.get_jts_logs(self.access.jts_home_dir, local_dir)
class FTSTest(JTSTest):
def __init__(self, cluster_spec, test_config, verbose):
super().__init__(cluster_spec, test_config, verbose)
self.fts_master_node = self.fts_nodes[0]
def delete_index(self):
self.rest.delete_fts_index(self.fts_master_node,
self.access.couchbase_index_name)
def create_index(self):
definition = read_json(self.access.couchbase_index_configfile)
definition.update({
'name': self.access.couchbase_index_name,
'sourceName': self.test_config.buckets[0],
})
if self.access.couchbase_index_type:
definition["params"]["store"]["indexType"] = self.access.couchbase_index_type
logger.info('Index definition: {}'.format(pretty_dict(definition)))
self.rest.create_fts_index(self.fts_master_node,
self.access.couchbase_index_name, definition)
def wait_for_index(self):
self.monitor.monitor_fts_indexing_queue(self.fts_master_node,
self.access.couchbase_index_name,
int(self.access.test_total_docs))
def wait_for_index_persistence(self):
self.monitor.monitor_fts_index_persistence(self.fts_nodes,
self.access.couchbase_index_name)
def cleanup_and_restore(self):
self.delete_index()
self.restore()
self.wait_for_persistence()
class FTSThroughputTest(FTSTest):
COLLECTORS = {'jts_stats': True, 'fts_stats': True}
def report_kpi(self):
self.reporter.post(*self.metrics.jts_throughput())
def run(self):
self.cleanup_and_restore()
self.create_index()
self.download_jts()
self.wait_for_index()
self.wait_for_index_persistence()
self.warmup()
self.run_test()
self.report_kpi()
class FTSLatencyTest(FTSTest):
COLLECTORS = {'jts_stats': True, 'fts_stats': True}
def report_kpi(self):
self.reporter.post(*self.metrics.jts_latency(percentile=80))
self.reporter.post(*self.metrics.jts_latency(percentile=95))
def run(self):
self.cleanup_and_restore()
self.create_index()
self.download_jts()
self.wait_for_index()
self.wait_for_index_persistence()
self.warmup()
self.run_test()
self.report_kpi()
class FTSIndexTest(FTSTest):
COLLECTORS = {'fts_stats': True}
def report_kpi(self, time_elapsed: int, size: int):
self.reporter.post(
*self.metrics.fts_index(time_elapsed)
)
self.reporter.post(
*self.metrics.fts_index_size(size)
)
@with_stats
@timeit
def build_index(self):
self.create_index()
self.wait_for_index()
def calculate_index_size(self) -> int:
metric = '{}:{}:{}'.format(self.test_config.buckets[0],
self.access.couchbase_index_name,
'num_bytes_used_disk')
size = 0
for host in self.fts_nodes:
stats = self.rest.get_fts_stats(host)
size += stats[metric]
return size
def run(self):
self.cleanup_and_restore()
time_elapsed = self.build_index()
self.wait_for_index_persistence()
size = self.calculate_index_size()
self.report_kpi(time_elapsed, size)
```
#### File: perfrunner/tests/kv.py
```python
from logger import logger
from perfrunner.helpers.cbmonitor import timeit, with_stats
from perfrunner.helpers.worker import (
pillowfight_data_load_task,
pillowfight_task,
)
from perfrunner.tests import PerfTest
from perfrunner.workloads.pathoGen import PathoGen
from perfrunner.workloads.tcmalloc import WorkloadGen
class KVTest(PerfTest):
@with_stats
def access(self, *args):
super().access(*args)
def run(self):
self.load()
self.wait_for_persistence()
self.hot_load()
self.reset_kv_stats()
self.access()
self.report_kpi()
class ReadLatencyTest(KVTest):
"""Enable reporting of GET latency."""
COLLECTORS = {'latency': True}
def _report_kpi(self):
self.reporter.post(
*self.metrics.kv_latency(operation='get')
)
class MixedLatencyTest(ReadLatencyTest):
"""Enable reporting of GET and SET latency."""
def _report_kpi(self):
for operation in ('get', 'set'):
self.reporter.post(
*self.metrics.kv_latency(operation=operation)
)
class DGMTest(KVTest):
COLLECTORS = {'disk': True, 'net': False}
class DGMCompactionTest(DGMTest):
def run(self):
self.load()
self.wait_for_persistence()
self.hot_load()
self.reset_kv_stats()
self.compact_bucket(wait=False)
self.access()
self.report_kpi()
class DGMCompactedTest(DGMTest):
def run(self):
self.load()
self.wait_for_persistence()
self.compact_bucket()
self.hot_load()
self.reset_kv_stats()
self.access()
self.report_kpi()
class ReadLatencyDGMTest(KVTest):
COLLECTORS = {'disk': True, 'latency': True, 'net': False}
@with_stats
def custom_load(self, *args):
super().load(*args)
def load(self, *args):
self.COLLECTORS["latency"] = False
self.custom_load()
self.COLLECTORS["latency"] = True
def _report_kpi(self):
self.reporter.post(
*self.metrics.kv_latency(operation='get')
)
class MixedLatencyDGMTest(ReadLatencyDGMTest):
def _report_kpi(self):
for operation in ('get', 'set'):
self.reporter.post(
*self.metrics.kv_latency(operation=operation)
)
class ReadLatencyDGMCompactionTest(DGMCompactionTest):
COLLECTORS = {'disk': True, 'latency': True, 'net': False}
def _report_kpi(self):
self.reporter.post(
*self.metrics.kv_latency(operation='get')
)
class ReadLatencyDGMCompactedTest(DGMCompactedTest):
COLLECTORS = {'disk': True, 'latency': True, 'net': False}
def _report_kpi(self):
for percentile in 99.9, 99.99:
self.reporter.post(
*self.metrics.kv_latency(operation='get', percentile=percentile)
)
class DurabilityTest(KVTest):
"""Enable reporting of persistTo=1 and replicateTo=1 latency."""
COLLECTORS = {'durability': True}
def _report_kpi(self):
for operation in ('replicate_to', 'persist_to'):
self.reporter.post(
*self.metrics.kv_latency(operation=operation,
collector='durability')
)
class SubDocTest(MixedLatencyTest):
"""Enable reporting of SubDoc latency."""
COLLECTORS = {'latency': True}
class XATTRTest(MixedLatencyTest):
"""Enable reporting of XATTR latency."""
COLLECTORS = {'latency': True}
def run(self):
self.load()
self.xattr_load()
self.wait_for_persistence()
self.access()
self.report_kpi()
class DrainTest(DGMCompactionTest):
"""Enable reporting of average disk write queue size."""
def _report_kpi(self):
self.reporter.post(
*self.metrics.avg_disk_write_queue()
)
class InitialLoadTest(DrainTest):
@with_stats
def load(self, *args, **kwargs):
super().load(*args, **kwargs)
def run(self):
self.load()
self.report_kpi()
class IngestionTest(KVTest):
COLLECTORS = {'disk': True, 'net': False}
@with_stats
def access(self, *args, **kwargs):
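# Call PerfTest.access directly: KVTest.access is itself wrapped in @with_stats, and this method is already decorated with it.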
super(KVTest, self).access(*args, **kwargs)
self.wait_for_persistence()
def _report_kpi(self):
self.reporter.post(
*self.metrics.avg_total_queue_age()
)
class WarmupTest(PerfTest):
"""Measure the time it takes to perform cluster warm up."""
COLLECTORS = {'net': False}
@with_stats
def warmup(self):
self.remote.stop_server()
self.remote.drop_caches()
return self._warmup()
@timeit
def _warmup(self):
self.remote.start_server()
for master in self.cluster_spec.masters:
for bucket in self.test_config.buckets:
self.monitor.monitor_warmup(self.memcached, master, bucket)
def _report_kpi(self, time_elapsed):
self.reporter.post(
*self.metrics.elapsed_time(time_elapsed)
)
def run(self):
self.load()
self.wait_for_persistence()
self.access()
self.wait_for_persistence()
time_elapsed = self.warmup()
self.report_kpi(time_elapsed)
class FragmentationTest(PerfTest):
"""Implement the append-only workload.
Scenario:
1. Single node.
2. Load X items, 700-1400 bytes, average 1KB (11-22 fields).
3. Append data
3.1. Mark first 80% of items as working set.
3.2. Randomly update 75% of items in working set by adding 1 field at a time (62 bytes).
3.3. Mark first 40% of items as working set.
3.4. Randomly update 75% of items in working set by adding 1 field at a time (62 bytes).
3.5. Mark first 20% of items as working set.
3.6. Randomly update 75% of items in working set by adding 1 field at a time (62 bytes).
4. Repeat step #3 5 times.
See workloads/tcmalloc.py for details.
The scenario described above makes it possible to spot issues with memory/allocator fragmentation.
"""
COLLECTORS = {'net': False}
@with_stats
def load_and_append(self):
password = self.test_config.bucket.password
WorkloadGen(self.test_config.load_settings.items,
self.master_node, self.test_config.buckets[0],
password).run()
def calc_fragmentation_ratio(self) -> float:
ratios = []
for target in self.target_iterator:
port = self.rest.get_memcached_port(target.node)
stats = self.memcached.get_stats(target.node, port, target.bucket,
stats='memory')
ratio = int(stats[b'mem_used']) / int(stats[b'total_heap_bytes'])
ratios.append(ratio)
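# With a single node where mem_used / total_heap_bytes is 0.8, the value computed below is
# 100 * (1 - 0.8) = 20.0, i.e. the percentage of the allocator heap not holding live data.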
ratio = 100 * (1 - sum(ratios) / len(ratios))
ratio = round(ratio, 1)
logger.info('Fragmentation: {}'.format(ratio))
return ratio
def _report_kpi(self):
ratio = self.calc_fragmentation_ratio()
self.reporter.post(
*self.metrics.fragmentation_ratio(ratio)
)
def run(self):
self.load_and_append()
self.report_kpi()
class FragmentationLargeTest(FragmentationTest):
@with_stats
def load_and_append(self):
password = self.test_config.bucket.password
WorkloadGen(self.test_config.load_settings.items,
self.master_node, self.test_config.buckets[0], password,
small=False).run()
class PathoGenTest(FragmentationTest):
@with_stats
def access(self, *args):
for target in self.target_iterator:
pg = PathoGen(num_items=self.test_config.load_settings.items,
num_workers=self.test_config.load_settings.workers,
num_iterations=self.test_config.load_settings.iterations,
frozen_mode=False,
host=target.node, port=8091,
bucket=target.bucket, password=<PASSWORD>)
pg.run()
def _report_kpi(self):
self.reporter.post(
*self.metrics.avg_memcached_rss()
)
self.reporter.post(
*self.metrics.max_memcached_rss()
)
def run(self):
self.access()
self.report_kpi()
class PathoGenFrozenTest(PathoGenTest):
@with_stats
def access(self):
for target in self.target_iterator:
pg = PathoGen(num_items=self.test_config.load_settings.items,
num_workers=self.test_config.load_settings.workers,
num_iterations=self.test_config.load_settings.iterations,
frozen_mode=True,
host=target.node, port=8091,
bucket=target.bucket, password=<PASSWORD>)
pg.run()
class ThroughputTest(KVTest):
def _measure_curr_ops(self) -> int:
ops = 0
for bucket in self.test_config.buckets:
for server in self.cluster_spec.servers:
port = self.rest.get_memcached_port(server)
stats = self.memcached.get_stats(server, port, bucket)
for stat in b'cmd_get', b'cmd_set':
ops += int(stats[stat])
return ops
def _report_kpi(self):
total_ops = self._measure_curr_ops()
self.reporter.post(
*self.metrics.kv_throughput(total_ops)
)
class EvictionTest(KVTest):
COLLECTORS = {'net': False}
def reset_kv_stats(self):
pass
def _measure_ejected_items(self) -> int:
ejected_items = 0
for bucket in self.test_config.buckets:
for hostname, _ in self.rest.get_node_stats(self.master_node,
bucket):
host = hostname.split(':')[0]
port = self.rest.get_memcached_port(host)
stats = self.memcached.get_stats(host, port, bucket)
ejected_items += int(stats[b'vb_active_auto_delete_count'])
ejected_items += int(stats[b'vb_pending_auto_delete_count'])
ejected_items += int(stats[b'vb_replica_auto_delete_count'])
return ejected_items
def _report_kpi(self):
ejected_items = self._measure_ejected_items()
self.reporter.post(
*self.metrics.kv_throughput(ejected_items)
)
class PillowFightTest(PerfTest):
"""Use cbc-pillowfight from libcouchbase to drive cluster."""
ALL_BUCKETS = True
def load(self, *args):
PerfTest.load(self, task=pillowfight_data_load_task)
@with_stats
def access(self, *args):
self.download_certificate()
PerfTest.access(self, task=pillowfight_task)
def _report_kpi(self, *args):
self.reporter.post(
*self.metrics.max_ops()
)
def run(self):
self.load()
self.wait_for_persistence()
self.access()
self.report_kpi()
class CompressionTest(PillowFightTest):
COLLECTORS = {'iostat': False, 'net': False}
@with_stats
@timeit
def wait_for_compression(self):
for master in self.cluster_spec.masters:
for bucket in self.test_config.buckets:
self.monitor.monitor_compression(self.memcached, master, bucket)
def _report_kpi(self, time_elapsed: float):
self.reporter.post(
*self.metrics.compression_throughput(time_elapsed)
)
def run(self):
self.load()
time_elapsed = self.wait_for_compression()
self.report_kpi(time_elapsed)
class CompactionTest(KVTest):
COLLECTORS = {'net': False}
@with_stats
@timeit
def compact(self):
self.compact_bucket()
def _report_kpi(self, time_elapsed):
self.reporter.post(
*self.metrics.elapsed_time(time_elapsed)
)
def run(self):
self.load()
self.wait_for_persistence()
self.hot_load()
self.access_bg()
time_elapsed = self.compact()
self.report_kpi(time_elapsed)
class MemoryOverheadTest(PillowFightTest):
COLLECTORS = {'iostat': False, 'net': False}
PF_KEY_SIZE = 20
def _report_kpi(self):
self.reporter.post(
*self.metrics.memory_overhead(key_size=self.PF_KEY_SIZE)
)
@with_stats
def access(self, *args):
self.sleep()
class CpuUtilizationTest(KVTest):
def _report_kpi(self, *args, **kwargs):
self.reporter.post(
*self.metrics.cpu_utilization()
)
class KVImport(PerfTest):
@with_stats
def load(self, *args):
self.restore_local()
self.wait_for_persistence()
def run(self):
self.load()
```
#### File: perfrunner/utils/install.py
```python
from argparse import ArgumentParser
from collections import namedtuple
from typing import Iterator
import requests
import validators
from requests.exceptions import ConnectionError
from logger import logger
from perfrunner.helpers.remote import RemoteHelper
from perfrunner.settings import ClusterSpec
LOCATIONS = (
'http://172.23.120.24/builds/latestbuilds/couchbase-server/mad-hatter/{build}/',
'http://172.23.120.24/builds/latestbuilds/couchbase-server/alice/{build}/',
'http://172.23.120.24/builds/latestbuilds/couchbase-server/vulcan/{build}/',
'http://172.23.120.24/builds/latestbuilds/couchbase-server/spock/{build}/',
'http://172.23.120.24/builds/latestbuilds/couchbase-server/watson/{build}/',
'http://172.23.120.24/builds/releases/{release}/',
'http://172.23.120.24/builds/releases/{release}/ce/',
)
PKG_PATTERNS = {
'rpm': (
'couchbase-server-{edition}-{release}-{build}-centos{os}.x86_64.rpm',
'couchbase-server-{edition}-{release}-centos{os}.x86_64.rpm',
'couchbase-server-{edition}-{release}-centos6.x86_64.rpm',
),
'deb': (
'couchbase-server-{edition}_{release}-{build}-ubuntu{os}_amd64.deb',
'couchbase-server-{edition}_{release}-ubuntu{os}_amd64.deb',
),
'exe': (
'couchbase-server-{edition}_{release}-{build}-windows_amd64.msi',
'couchbase-server-{edition}_{release}-{build}-windows_amd64.exe',
'couchbase-server-{edition}_{release}-windows_amd64.exe',
),
}
Build = namedtuple('Build', ['filename', 'url'])
class CouchbaseInstaller:
def __init__(self, cluster_spec, options):
self.remote = RemoteHelper(cluster_spec, options.verbose)
self.options = options
@property
def url(self) -> str:
if validators.url(self.options.version):
return self.options.version
else:
return self.find_package(edition=self.options.edition)
@property
def release(self) -> str:
return self.options.version.split('-')[0]
@property
def build(self) -> str:
split = self.options.version.split('-')
if len(split) > 1:
return split[1]
def find_package(self, edition: str) -> str:
for url in self.url_iterator(edition):
if self.is_exist(url):
return url
logger.interrupt('Target build not found')
def url_iterator(self, edition: str) -> Iterator[str]:
os_release = None
if self.remote.package == 'rpm':
os_release = self.remote.detect_centos_release()
elif self.remote.package == 'deb':
os_release = self.remote.detect_ubuntu_release()
for pkg_pattern in PKG_PATTERNS[self.remote.package]:
for loc_pattern in LOCATIONS:
url = loc_pattern + pkg_pattern
yield url.format(release=self.release, build=self.build,
edition=edition, os=os_release)
@staticmethod
def is_exist(url):
try:
status_code = requests.head(url).status_code
except ConnectionError:
return False
if status_code == 200:
return True
return False
def download(self):
"""Download and save a copy of the specified package."""
if self.remote.package == 'rpm':
logger.info('Saving a local copy of {}'.format(self.url))
with open('couchbase.rpm', 'wb') as fh:
resp = requests.get(self.url)
fh.write(resp.content)
else:
logger.interrupt('Unsupported package format')
def kill_processes(self):
self.remote.kill_processes()
def uninstall_package(self):
self.remote.uninstall_couchbase()
def clean_data(self):
self.remote.clean_data()
def install_package(self):
logger.info('Using this URL: {}'.format(self.url))
self.remote.upload_iss_files(self.release)
self.remote.install_couchbase(self.url)
def install(self):
self.kill_processes()
self.uninstall_package()
self.clean_data()
self.install_package()
def get_args():
parser = ArgumentParser()
parser.add_argument('-v', '--version', '--url',
required=True,
help='the build version or the HTTP URL to a package')
parser.add_argument('-c', '--cluster',
required=True,
help='the path to a cluster specification file')
parser.add_argument('-e', '--edition',
choices=['enterprise', 'community'],
default='enterprise',
help='the cluster edition')
parser.add_argument('--verbose',
action='store_true',
help='enable verbose logging')
parser.add_argument('--local-copy',
action='store_true',
help='save a local copy of a package')
return parser.parse_args()
def main():
args = get_args()
cluster_spec = ClusterSpec()
cluster_spec.parse(fname=args.cluster)
installer = CouchbaseInstaller(cluster_spec, args)
installer.install()
if args.local_copy:
installer.download()
if __name__ == '__main__':
main()
```
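A minimal sketch of invoking the installer programmatically, mirroring `main()` above; the spec file path and build version are illustrative assumptions.
```python
from argparse import Namespace

from perfrunner.settings import ClusterSpec
from perfrunner.utils.install import CouchbaseInstaller

# Attribute names mirror get_args(); the values are examples only.
options = Namespace(version='6.5.0-4960', cluster='clusters/example.spec',
                    edition='enterprise', verbose=False, local_copy=False)

cluster_spec = ClusterSpec()
cluster_spec.parse(fname=options.cluster)

installer = CouchbaseInstaller(cluster_spec, options)
installer.install()
if options.local_copy:
    installer.download()
```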
#### File: perfrunner/workloads/jts.py
```python
from perfrunner.helpers.local import run_custom_cmd
from perfrunner.settings import PhaseSettings, TargetSettings
CMD = " -test_duration {test_duration}" \
" -test_total_docs {test_total_docs}" \
" -test_query_workers {test_query_workers}" \
" -test_kv_workers {test_kv_workers}" \
" -test_kv_throughput_goal {test_kv_throughput_goal}" \
" -test_data_file {test_data_file}" \
" -test_driver {test_driver}" \
" -test_stats_limit {test_stats_limit}" \
" -test_stats_aggregation_step {test_stats_aggregation_step}" \
" -test_debug {test_debug}" \
" -test_query_type {test_query_type} " \
" -test_query_limit {test_query_limit}" \
" -test_query_field {test_query_field}" \
" -test_mutation_field {test_mutation_field}" \
" -test_worker_type {test_worker_type}" \
" -couchbase_index_name {couchbase_index_name}" \
" -couchbase_cluster_ip {couchbase_cluster_ip}" \
" -couchbase_bucket {couchbase_bucket}" \
" -couchbase_user {couchbase_user}" \
" -couchbase_password {<PASSWORD>}"
def jts_run(workload_settings: PhaseSettings, target: TargetSettings,
timer: int, worker_id: int):
settings = workload_settings
params = CMD.format(test_duration=settings.time,
test_total_docs=settings.test_total_docs,
test_query_workers=settings.test_query_workers,
test_kv_workers=settings.test_kv_workers,
test_kv_throughput_goal=settings.test_kv_throughput_goal,
test_data_file=settings.test_data_file,
test_driver=settings.test_driver,
test_stats_limit=settings.test_stats_limit,
test_stats_aggregation_step=settings.test_stats_aggregation_step,
test_debug=settings.test_debug,
test_query_type=settings.test_query_type,
test_query_limit=settings.test_query_limit,
test_query_field=settings.test_query_field,
test_mutation_field=settings.test_mutation_field,
test_worker_type=settings.test_worker_type,
couchbase_index_name=settings.couchbase_index_name,
couchbase_cluster_ip=target.node,
couchbase_bucket=target.bucket,
couchbase_user=target.bucket,
couchbase_password=target.password)
run_custom_cmd(settings.jts_home_dir, settings.jts_run_cmd, params)
def jts_warmup(workload_settings: PhaseSettings, target: TargetSettings,
timer: int, worker_id: int):
settings = workload_settings
params = CMD.format(test_duration=settings.warmup_time,
test_total_docs=settings.test_total_docs,
test_query_workers=settings.warmup_query_workers,
test_kv_workers="0",
test_kv_throughput_goal="0",
test_data_file=settings.test_data_file,
test_driver=settings.test_driver,
test_stats_limit=settings.test_stats_limit,
test_stats_aggregation_step=settings.test_stats_aggregation_step,
test_debug=settings.test_debug,
test_query_type=settings.test_query_type,
test_query_limit=settings.test_query_limit,
test_query_field=settings.test_query_field,
test_mutation_field=settings.test_mutation_field,
test_worker_type="warmup",
couchbase_index_name=settings.couchbase_index_name,
couchbase_cluster_ip=target.node,
couchbase_bucket=target.bucket,
couchbase_user=target.bucket,
couchbase_password=target.password)
run_custom_cmd(settings.jts_home_dir, settings.jts_run_cmd, params)
``` |
{
"source": "JimXiongGM/BLINK",
"score": 3
} |
#### File: blink/candidate_ranking/bert_reranking.py
```python
import torch
import os
import numpy as np
from pytorch_transformers.modeling_bert import (
BertPreTrainedModel,
BertConfig,
BertModel,
)
from pytorch_transformers.tokenization_bert import BertTokenizer
from torch.utils.data import DataLoader, SequentialSampler, TensorDataset
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from tqdm import tqdm
from pytorch_transformers.optimization import AdamW, WarmupLinearSchedule
from pytorch_transformers.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
class BertForReranking(BertPreTrainedModel):
r"""
Inputs:
**input_ids**: ``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
Indices of input sequence tokens in the vocabulary.
The second dimension of the input (`num_choices`) indicates the number of choices to score.
To match pre-training, BERT input sequence should be formatted with [CLS] and [SEP] tokens as follows:
(a) For sequence pairs:
``tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]``
``token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1``
(b) For single sequences:
``tokens: [CLS] the dog is hairy . [SEP]``
``token_type_ids: 0 0 0 0 0 0 0``
Indices can be obtained using :class:`pytorch_transformers.BertTokenizer`.
See :func:`pytorch_transformers.PreTrainedTokenizer.encode` and
:func:`pytorch_transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
**token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
Segment token indices to indicate first and second portions of the inputs.
The second dimension of the input (`num_choices`) indicates the number of choices to score.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
(see `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ for more details).
**attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
Mask to avoid performing attention on padding token indices.
The second dimension of the input (`num_choices`) indicates the number of choices to score.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
**head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
**labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for computing the multiple choice classification loss.
Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension
of the input tensors. (see `input_ids` above)
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Classification loss.
**classification_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices)`` where `num_choices` is the size of the second dimension
of the input tensors. (see `input_ids` above).
Classification scores (before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForMultipleChoice.from_pretrained('bert-base-uncased')
choices = ["Hello, my dog is cute", "Hello, my cat is amazing"]
input_ids = torch.tensor([tokenizer.encode(s) for s in choices]).unsqueeze(0) # Batch size 1, 2 choices
labels = torch.tensor(1).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=labels)
loss, classification_scores = outputs[:2]
"""
def __init__(self, config):
super(BertForReranking, self).__init__(config)
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.init_weights()
def forward(
self,
input_ids,
token_type_ids=None,
attention_mask=None,
labels=None,
position_ids=None,
head_mask=None,
entity_mask=None,
):
num_choices = input_ids.shape[1]
# from batch_size x cands x tokens -> (batch_size x cands) x tokens
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
flat_token_type_ids = (
token_type_ids.view(-1, token_type_ids.size(-1))
if token_type_ids is not None
else None
)
flat_attention_mask = (
attention_mask.view(-1, attention_mask.size(-1))
if attention_mask is not None
else None
)
flat_position_ids = (
position_ids.view(-1, position_ids.size(-1))
if position_ids is not None
else None
)
outputs = self.bert(
flat_input_ids,
position_ids=flat_position_ids,
token_type_ids=flat_token_type_ids,
attention_mask=flat_attention_mask,
head_mask=head_mask,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
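# entity_mask is 1 for real candidates and 0 for padded slots; padded slots get a large
# negative logit below so they can never be selected.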
entity_mask = (1.0 - entity_mask) * -1000.0
reshaped_logits = reshaped_logits + entity_mask
outputs = (reshaped_logits,)
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
outputs = (loss,) + outputs
return outputs
class BertReranker:
def __init__(self, parameters):
if "path_to_model" not in parameters:
parameters["path_to_model"] = parameters["bert_model"]
self.parameters = parameters
self.device = torch.device(
"cuda" if torch.cuda.is_available() and not parameters["no_cuda"] else "cpu"
)
self.n_gpu = torch.cuda.device_count()
# Load the fine-tuned model and the tokenizer used by it
self.model = BertReranker.get_model(parameters)
self.model.to(self.device)
self.tokenizer = BertReranker.get_tokenizer(parameters)
print("The reranking model is loaded")
def rerank(self, mentions, sentences):
model = self.model
tokenizer = self.tokenizer
p = self.parameters
device = self.device
data, tensor_data = BertReranker._process_mentions_for_model(
p["context_key"],
mentions,
tokenizer,
p["max_seq_length"],
p["top_k"],
p["silent"],
sentences=sentences,
)
sampler = SequentialSampler(tensor_data)
dataloader = DataLoader(
tensor_data, sampler=sampler, batch_size=p["evaluation_batch_size"]
)
softmax = torch.nn.Softmax(dim=1)
for input_ids, input_mask, segment_ids, mention_ids, entity_mask in tqdm(
dataloader, desc="Inferring"
):
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
mention_ids = mention_ids.numpy()
entity_mask = entity_mask.to(device)
with torch.no_grad():
logits = self.model(
input_ids, segment_ids, input_mask, entity_mask=entity_mask
)[0]
probs = softmax(logits)
logits = logits.detach().cpu().numpy()
probs = probs.detach().cpu().numpy()
predictions = np.argmax(logits, axis=1)
for idx, mention_idx in enumerate(mention_ids):
pred = predictions[idx].item()
mentions[mention_idx]["predicted_candidate_idx"] = pred
mentions[mention_idx]["prob_assigned_to_candidate"] = probs[idx][
pred
].item()
return mentions
def get_scheduler_and_optimizer(self, parameters, train_tensor_data, logger):
model = self.model
num_train_optimization_steps = (
int(
len(train_tensor_data)
/ parameters["train_batch_size"]
/ parameters["gradient_accumulation_steps"]
)
* parameters["num_train_epochs"]
)
num_warmup_steps = int(
num_train_optimization_steps * parameters["warmup_proportion"]
)
param_optimizer = list(model.named_parameters())
param_optimizer = [n for n in param_optimizer]
no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [
p for n, p in param_optimizer if not any(nd in n for nd in no_decay)
],
"weight_decay": 0.01,
},
{
"params": [
p for n, p in param_optimizer if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
optimizer = AdamW(
optimizer_grouped_parameters,
lr=parameters["learning_rate"],
correct_bias=False,
)
scheduler = WarmupLinearSchedule(
optimizer,
warmup_steps=num_warmup_steps,
t_total=num_train_optimization_steps,
)
logger.info(" Num optimization steps = %d", num_train_optimization_steps)
logger.info(" Num warmup steps = %d", num_warmup_steps)
return optimizer, scheduler
@staticmethod
def get_model(parameters):
model = BertForReranking.from_pretrained(
parameters["path_to_model"],
num_labels=parameters["top_k"],
cache_dir=os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), "local"),
)
if parameters["dataparallel_bert"]:
model.bert = torch.nn.DataParallel(model.bert)
print("Data parallel Bert")
return model
@staticmethod
def get_tokenizer(parameters):
tokenizer = BertTokenizer.from_pretrained(
parameters["path_to_model"], do_lower_case=parameters["lowercase_flag"]
)
return tokenizer
@staticmethod
def _get_candidate_representation(
context_tokens, candidate_desc, tokenizer, max_seq_length, max_sub_seq_length
):
"""Tokenizes and truncates description; combines it with the tokenized context and generates one input sample for bert"""
candidate_desc_tokens = tokenizer.tokenize(candidate_desc)
candidate_desc_tokens = candidate_desc_tokens[:max_sub_seq_length]
tokens = (
["[CLS]"] + context_tokens + ["[SEP]"] + candidate_desc_tokens + ["[SEP]"]
)
segment_ids = [0] * (len(context_tokens) + 2) + [1] * (
len(candidate_desc_tokens) + 1
)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
return {
"tokens": tokens,
"input_ids": input_ids,
"input_mask": input_mask,
"segment_ids": segment_ids,
}
@staticmethod
def _get_mention_context_end2end(mention, sentences):
"""Given a mention and a list of sentences that follow the blink conventions, it returns a left and right context for the mention"""
sent_idx = mention["sent_idx"]
# Neighbouring sentences are deliberately not used as context here; only the
# sentence containing the mention contributes. To include them again, restore:
# prev_sent = sentences[sent_idx - 1] if sent_idx > 0 else ""
# next_sent = sentences[sent_idx + 1] if sent_idx + 1 < len(sentences) else ""
prev_sent = ""
next_sent = ""
sent = sentences[sent_idx]
curr_sent_prev = sent[: mention["start_pos"]].strip()
curr_sent_next = sent[mention["end_pos"] :].strip()
left_context = "{} {}".format(prev_sent, curr_sent_prev).strip()
right_context = "{} {}".format(curr_sent_next, next_sent).strip()
return (left_context, right_context)
@staticmethod
def _select_field(samples, field):
"""Helper function that returns a list of lists, each of which contains the information for all candidates for each sample"""
return [
[cand[field] for cand in sample["candidate_features"]] for sample in samples
]
@staticmethod
def _get_context_token_representation(
context_key,
sample,
tokenizer,
max_sub_seq_length,
start_token,
end_token,
mention_text_key="text",
tagged=True,
):
"""Tags the mention, trims the context and concatenates everything to form the context representation"""
mention_tokens = (
[start_token] + tokenizer.tokenize(sample[mention_text_key]) + [end_token]
)
max_sub_seq_length = (max_sub_seq_length - len(mention_tokens)) // 2
context_left, context_right = sample[context_key]
context_left = tokenizer.tokenize(context_left)
context_right = tokenizer.tokenize(context_right)
if len(context_left) > max_sub_seq_length:
context_left = context_left[-max_sub_seq_length:]
if len(context_right) > max_sub_seq_length:
context_right = context_right[:max_sub_seq_length]
context_tokens = context_left + mention_tokens + context_right
return context_tokens
@staticmethod
def _process_mentions_for_model(
context_key,
mentions,
tokenizer,
max_seq_length,
top_k,
silent,
start_token="[unused0]",
end_token="[unused1]",
debug=False,
tagged=True,
sentences=None,
candidates_key="candidates",
gold_key="gold_pos",
logger=None,
):
processed_mentions = []
if debug:
mentions = mentions[:200]
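        # Reserve 3 positions for [CLS] and the two [SEP] tokens, then split the
        # remainder between the context and the candidate description.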
max_sub_seq_length = (max_seq_length - 3) // 2
if silent:
iter_ = mentions
else:
iter_ = tqdm(mentions)
for idx, mention in enumerate(iter_):
            # if sentences is not None, we are processing end-to-end data for inference
if sentences is not None:
mention[context_key] = BertReranker._get_mention_context_end2end(
mention, sentences
)
context_tokens = BertReranker._get_context_token_representation(
context_key,
mention,
tokenizer,
max_sub_seq_length,
start_token,
end_token,
)
candidates = mention[candidates_key]
candidate_features = []
for candidate in candidates[:top_k]:
candidate_desc = " ".join(candidate["sentences"])
candidate_obj = BertReranker._get_candidate_representation(
context_tokens,
candidate_desc,
tokenizer,
max_seq_length,
max_sub_seq_length,
)
candidate_features.append(candidate_obj)
entity_mask = [1] * len(candidate_features) + [0] * (
top_k - len(candidate_features)
)
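            # entity_mask marks real candidates with 1 and padding slots with 0
            # so that padded candidates can be ignored when scoring.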
if len(candidates) < top_k:
candidate_desc = ""
padding_candidate_obj = BertReranker._get_candidate_representation(
context_tokens,
candidate_desc,
tokenizer,
max_seq_length,
max_sub_seq_length,
)
for _ in range(top_k - len(candidates)):
candidate_features.append(padding_candidate_obj)
assert len(candidate_features) == top_k
assert len(entity_mask) == top_k
if sentences is not None:
processed_mentions.append(
{
"candidate_features": candidate_features,
"mention_idx": idx,
"entity_mask": entity_mask,
}
)
else:
label = mention[gold_key] - 1
processed_mentions.append(
{
"candidate_features": candidate_features,
"label": label,
"entity_mask": entity_mask,
}
)
all_input_ids = torch.tensor(
BertReranker._select_field(processed_mentions, "input_ids"),
dtype=torch.long,
)
all_input_mask = torch.tensor(
BertReranker._select_field(processed_mentions, "input_mask"),
dtype=torch.long,
)
all_segment_ids = torch.tensor(
BertReranker._select_field(processed_mentions, "segment_ids"),
dtype=torch.long,
)
all_entity_masks = torch.tensor(
[s["entity_mask"] for s in processed_mentions], dtype=torch.float
)
data = {
"all_input_ids": all_input_ids,
"all_input_mask": all_input_mask,
"all_segment_ids": all_segment_ids,
"all_entity_masks": all_entity_masks,
}
if sentences is not None:
all_mention_indices = torch.tensor(
[s["mention_idx"] for s in processed_mentions], dtype=torch.long
)
data["all_mention_indices"] = all_mention_indices
tensor_data = TensorDataset(
all_input_ids,
all_input_mask,
all_segment_ids,
all_mention_indices,
all_entity_masks,
)
else:
all_label = torch.tensor(
[s["label"] for s in processed_mentions], dtype=torch.long
)
data["all_label"] = all_label
tensor_data = TensorDataset(
all_input_ids,
all_input_mask,
all_segment_ids,
all_label,
all_entity_masks,
)
        if logger is not None:
logger.info("all_input_ids shape: {}".format(all_input_ids.shape))
logger.info("all_input_mask shape: {}".format(all_input_mask.shape))
logger.info("all_segment_ids shape: {}".format(all_segment_ids.shape))
logger.info("all_entity_masks shape: {}".format(all_entity_masks.shape))
if sentences is not None:
logger.info(
"all_mention_indices shape: {}".format(all_mention_indices.shape)
)
else:
logger.info("all_label shape: {}".format(all_label.shape))
return data, tensor_data
```
#### File: blink/candidate_retrieval/data_ingestion.py
```python
import argparse
import pysolr
import pickle
import emoji
import time
import os
parser = argparse.ArgumentParser()
parser.add_argument(
"--processed_data_file_path",
type=str,
help="The full path to the data file",
required=True,
)
parser.add_argument(
"--collection_name",
type=str,
help="The solr collection name, in which the ingestion should be performed",
required=True,
)
parser.add_argument(
"--add_sentence_data", dest="add_sentence_data", action="store_true"
)
parser.set_defaults(add_sentence_data=False)
parser.add_argument(
"--remove_disambiguation_pages",
dest="remove_disambiguation_pages",
action="store_true",
)
parser.set_defaults(remove_disambiguation_pages=False)
parser.add_argument("--min_tokens", type=int, default=0)
args = parser.parse_args()
processed_data_path = args.processed_data_file_path
collection_name = args.collection_name
# processed_data_path = "/scratch/martinjosifoski/data/en-wiki-filtered-wikidata"
def remove_all_docs():
solr.delete(q="*:*")
def load_data():
return pickle.load(open(processed_data_path, "rb"))
def get_data_for_key(data, title):
obj = {}
obj["id"] = data[title]["wikipedia_id"]
obj["title"] = title
if ("wikidata_info" in data[title]) and (
data[title]["wikidata_info"]["wikidata_id"] is not None
):
obj["wikidata_id"] = data[title]["wikidata_info"]["wikidata_id"]
else:
obj["wikidata_id"] = data[title]["wikidata_id_from_index"]
description = data[title]["intro_concatenated"]
obj["desc"] = description
if "wikidata_info" in data[title]:
if "description" in data[title]["wikidata_info"]:
wikidata_description = data[title]["wikidata_info"]["description"]
else:
wikidata_description = ""
if ("aliases" in data[title]["wikidata_info"]) and (
data[title]["wikidata_info"]["aliases"]
) is not None:
aliases = " ".join(
[
'"{}"'.format(alias)
for alias in data[title]["wikidata_info"]["aliases"]
if alias not in emoji.UNICODE_EMOJI
]
)
else:
aliases = ""
else:
aliases = ""
wikidata_description = ""
obj["aliases"] = aliases
obj["wikidata_desc"] = wikidata_description
obj["num_tokens"] = data[title]["num_tokens"]
obj["num_incoming_links"] = data[title].get("num_incoming_links", 0)
if args.add_sentence_data:
for k in range(0, 10):
key = "sent_desc_{}".format(k + 1)
obj[key] = data[title].get(key, "")
return obj
print("Loading data")
title2data = load_data()
for key in title2data:
title2data[key]["intro_concatenated"] = " ".join(
[line for line in title2data[key]["intro_lines"] if line != ""]
)
# Filter out documents with fewer than `args.min_tokens` tokens
if args.min_tokens != 0:
    print("Removing documents with fewer than {} tokens".format(args.min_tokens))
print("Number of docs BEFORE removal:", len(title2data))
title2data = {
key: value
for key, value in title2data.items()
if value["num_tokens"] >= args.min_tokens
}
print("Number of docs AFTER removal:", len(title2data))
print("")
# Remove disambiguation pages
if args.remove_disambiguation_pages:
print("Remove disambiguation pages")
print("Number of docs BEFORE removal:", len(title2data))
titles_to_delete = []
for title in title2data:
parsed_obj = title2data[title]
if ("disambiguation" in title) or ("Disambiguation" in title):
titles_to_delete.append(title)
else:
if (parsed_obj.get("wikidata_info", None) is not None) and (
parsed_obj["wikidata_info"].get("description", None) is not None
):
wikidata_info = parsed_obj["wikidata_info"]
if ("disambiguation page" in wikidata_info["description"]) or (
"Disambiguation page" in wikidata_info["description"]
):
titles_to_delete.append(title)
for title in titles_to_delete:
del title2data[title]
print("Number of docs AFTER removal:", len(title2data))
print("Number of removed docs:", len(titles_to_delete))
print("")
ingestion_data = [get_data_for_key(title2data, key) for key in title2data]
print("Starting ingestion")
wall_start = time.time()
l = 0
r = step = 10000
solr = pysolr.Solr(
"http://localhost:8983/solr/{}".format(collection_name),
always_commit=True,
timeout=100,
)
c = 0
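# Ingest documents into Solr in batches of `step` documents; the final
# partial batch is sent after the loop.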
for r in range(r, len(ingestion_data), step):
c += 1
if (c % 10) == 0:
print("Processed", c, "batches")
temp_data = ingestion_data[l:r]
solr.add(temp_data, commit=True)
l = r
solr.add(ingestion_data[l : len(ingestion_data)], commit=True)
solr.commit()
print("The processing took:", (time.time() - wall_start) / 60, " minutes")
```
#### File: blink/common/params.py
```python
import argparse
import importlib
import os
import sys
import datetime
ENT_START_TAG = "[unused0]"
ENT_END_TAG = "[unused1]"
ENT_TITLE_TAG = "[unused2]"
class BlinkParser(argparse.ArgumentParser):
"""
    Provide an opt-producer and CLI argument parser.
    More options can be added for a specific script by passing this object and
    calling ``add_arg()`` or ``add_argument()`` on it.
:param add_blink_args:
(default True) initializes the default arguments for BLINK package.
:param add_model_args:
(default False) initializes the default arguments for loading models,
including initializing arguments from the model.
"""
def __init__(
self,
add_blink_args=True,
add_model_args=False,
description="BLINK parser",
):
super().__init__(
description=description,
allow_abbrev=False,
conflict_handler="resolve",
formatter_class=argparse.HelpFormatter,
add_help=add_blink_args,
)
self.blink_home = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
)
os.environ["BLINK_HOME"] = self.blink_home
self.add_arg = self.add_argument
self.overridable = {}
if add_blink_args:
self.add_blink_args()
if add_model_args:
self.add_model_args()
def add_blink_args(self, args=None):
"""
Add common BLINK args across all scripts.
"""
parser = self.add_argument_group("Common Arguments")
parser.add_argument(
"--silent", action="store_true", help="Whether to print progress bars."
)
parser.add_argument(
"--debug",
action="store_true",
help="Whether to run in debug mode with only 200 samples.",
)
parser.add_argument(
"--data_parallel",
action="store_true",
help="Whether to distributed the candidate generation process.",
)
parser.add_argument(
"--no_cuda",
action="store_true",
help="Whether not to use CUDA when available",
)
parser.add_argument("--top_k", default=10, type=int)
parser.add_argument(
"--seed", type=int, default=52313, help="random seed for initialization"
)
parser.add_argument(
"--zeshel",
default=True,
type=bool,
help="Whether the dataset is from zeroshot.",
)
def add_model_args(self, args=None):
"""
Add model args.
"""
parser = self.add_argument_group("Model Arguments")
parser.add_argument(
"--max_seq_length",
default=256,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.",
)
parser.add_argument(
"--max_context_length",
default=128,
type=int,
help="The maximum total context input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.",
)
parser.add_argument(
"--max_cand_length",
default=128,
type=int,
help="The maximum total label input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.",
)
parser.add_argument(
"--path_to_model",
default=None,
type=str,
required=False,
help="The full path to the model to load.",
)
parser.add_argument(
"--bert_model",
default="bert-base-uncased",
type=str,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.",
)
parser.add_argument(
"--pull_from_layer",
type=int,
default=-1,
help="Layers to pull from BERT",
)
parser.add_argument(
"--lowercase",
action="store_false",
help="Whether to lower case the input text. True for uncased models, False for cased models.",
)
parser.add_argument("--context_key", default="context", type=str)
parser.add_argument(
"--out_dim",
type=int,
default=1,
help="Output dimention of bi-encoders.",
)
parser.add_argument(
"--add_linear",
action="store_true",
help="Whether to add an additonal linear projection on top of BERT.",
)
parser.add_argument(
"--data_path",
default="data/zeshel",
type=str,
help="The path to the train data.",
)
parser.add_argument(
"--output_path",
default=None,
type=str,
required=True,
help="The output directory where generated output file (model, etc.) is to be dumped.",
)
def add_training_args(self, args=None):
"""
Add model training args.
"""
parser = self.add_argument_group("Model Training Arguments")
parser.add_argument(
"--evaluate", action="store_true", help="Whether to run evaluation."
)
parser.add_argument(
"--output_eval_file",
default=None,
type=str,
help="The txt file where the the evaluation results will be written.",
)
parser.add_argument(
"--train_batch_size",
default=8,
type=int,
help="Total batch size for training.",
)
parser.add_argument("--max_grad_norm", default=1.0, type=float)
parser.add_argument(
"--learning_rate",
default=3e-5,
type=float,
help="The initial learning rate for Adam.",
)
parser.add_argument(
"--num_train_epochs",
default=1,
type=int,
help="Number of training epochs.",
)
parser.add_argument(
"--print_interval",
type=int,
default=10,
help="Interval of loss printing",
)
parser.add_argument(
"--eval_interval",
type=int,
default=100,
help="Interval for evaluation during training",
)
parser.add_argument(
"--save_interval", type=int, default=1, help="Interval for model saving"
)
parser.add_argument(
"--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumualte before performing a backward/update pass.",
)
parser.add_argument(
"--type_optimization",
type=str,
default="all_encoder_layers",
help="Which type of layers to optimize in BERT",
)
parser.add_argument(
"--shuffle",
type=bool,
default=False,
help="Whether to shuffle train data",
)
def add_eval_args(self, args=None):
"""
Add model evaluation args.
"""
parser = self.add_argument_group("Model Evaluation Arguments")
parser.add_argument(
"--eval_batch_size",
default=8,
type=int,
help="Total batch size for evaluation.",
)
parser.add_argument(
"--mode",
default="valid",
type=str,
help="Train / validation / test",
)
parser.add_argument(
"--save_topk_result",
action="store_true",
help="Whether to save prediction results.",
)
parser.add_argument(
"--encode_batch_size", default=8, type=int, help="Batch size for encoding."
)
parser.add_argument(
"--cand_pool_path",
default=None,
type=str,
help="Path for cached candidate pool (id tokenization of candidates)",
)
parser.add_argument(
"--cand_encode_path",
default=None,
type=str,
help="Path for cached candidate encoding",
)
```
#### File: BLINK/elq/build_faiss_index.py
```python
import argparse
import logging
import numpy
import os
import time
import torch
from index.faiss_indexer import (
DenseFlatIndexer,
DenseIVFFlatIndexer,
DenseHNSWFlatIndexer,
)
import candidate_ranking.utils as utils
logger = utils.get_logger()
def main(params):
output_path = params["output_path"]
logger.info(
"Loading candidate encoding from path: %s" % params["candidate_encoding"]
)
candidate_encoding = torch.load(params["candidate_encoding"])
vector_size = candidate_encoding.size(1)
index_buffer = params["index_buffer"]
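    # "hnsw" builds an approximate graph-based index, "ivfflat" an inverted-file
    # index, and the default "flat" index performs exact brute-force search.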
if params["faiss_index"] == "hnsw":
logger.info("Using HNSW index in FAISS")
index = DenseHNSWFlatIndexer(vector_size, index_buffer)
elif params["faiss_index"] == "ivfflat":
logger.info("Using IVF Flat index in FAISS")
index = DenseIVFFlatIndexer(vector_size, 75, 100)
else:
logger.info("Using Flat index in FAISS")
index = DenseFlatIndexer(vector_size, index_buffer)
logger.info("Building index.")
index.index_data(candidate_encoding.numpy())
logger.info("Done indexing data.")
if params.get("save_index", None):
index.serialize(output_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--output_path",
required=True,
type=str,
help="output file path",
)
parser.add_argument(
"--candidate_encoding",
default="models/all_entities_large.t7",
type=str,
help="file path for candidte encoding.",
)
parser.add_argument(
"--faiss_index",
type=str,
choices=["hnsw", "flat", "ivfflat"],
help="Which faiss index to use",
)
parser.add_argument(
"--save_index",
action="store_true",
help="If enabled, save index",
)
parser.add_argument(
"--index_buffer",
type=int,
default=50000,
help="Temporal memory data buffer size (in samples) for indexer",
)
params = parser.parse_args()
params = params.__dict__
main(params)
``` |
{
"source": "Jimx-/tsdb-fork",
"score": 2
} |
#### File: tsdb-fork/scripts/run_tsbs.py
```python
import argparse
import os
import subprocess
import sys
import shutil
import psutil
import time
#DATASETS = ('s1d-i10m-10000', 's1d-i1h-20000', 's10m-i1m-100000',
# 's1d-i1h-100000', 's10h-i1h-1000000')
#DATASETS_N = (100000, 200000, 1000000, 1000000, 10000000)
DATASETS = ('s1d-i1h-20000', 's1d-i1h-100000', 's10h-i1h-1000000')
DATASETS_N = (200000, 1000000, 10000000)
DATASETS_IDX = (2, 4, 5)
def run_benchmark(benchmark_path, dataset_path, full_db):
result_path = os.path.join(benchmark_path, 'bench-results')
try:
os.makedirs(result_path)
except FileExistsError:
pass
# Normal fresh insert.
retval = run_insert_fresh(benchmark_path, dataset_path, result_path,
full_db)
if retval != 0:
return retval
if not full_db:
# Normal query.
retval = run_query(benchmark_path, dataset_path, result_path, full_db)
if retval != 0:
return retval
# Randomized time range query.
if not full_db:
retval = run_query(benchmark_path, dataset_path, result_path, full_db,
False, True)
if retval != 0:
return retval
# Bitmap-only fresh insert.
retval = run_insert_fresh(benchmark_path, dataset_path, result_path,
full_db, True)
if retval != 0:
return retval
# retval = run_query_resource(benchmark_path, dataset_path, result_path,
# full_db, True)
if not full_db:
# Bitmap-only query.
retval = run_query(benchmark_path, dataset_path, result_path, full_db,
True)
if retval != 0:
return retval
# Randomized time range query.
# if not full_db:
# retval = run_query(benchmark_path, dataset_path, result_path, full_db,
# True, True)
# if retval != 0:
# return retval
# Normal re-insert.
retval = run_insert(benchmark_path, dataset_path, result_path, full_db)
if retval != 0:
return retval
# Bitmap-only mixed workload.
retval = run_mixed(benchmark_path, dataset_path, result_path, full_db)
if retval != 0:
return retval
return 0
def make_data_path(benchmark_path, dataset, full_db, bitmap_only):
return os.path.join(
benchmark_path, 'bench',
dataset + (full_db and '-full' or '') + (bitmap_only and '-bm' or ''))
def make_option(full_db, bitmap_only, randomize=False):
opt = ''
if full_db:
opt += '-f'
if bitmap_only:
opt += ' -b'
if randomize:
opt += ' -m'
return opt
def make_label(full_db, bitmap_only, randomize=False):
label = full_db and 'fulldb' or 'hybrid'
return label + (bitmap_only and '-bm' or '') + (randomize and '-rand'
or '')
def run_insert_fresh(benchmark_path,
dataset_path,
result_path,
full_db,
bitmap_only=False):
retval = 0
for dataset in DATASETS:
data_path = make_data_path(benchmark_path, dataset, full_db,
bitmap_only)
if os.path.exists(data_path):
shutil.rmtree(data_path)
os.makedirs(data_path)
cmd = '{}/tsbs -w insert -r {} -d {} {}'.format(
benchmark_path, data_path,
os.path.join(dataset_path, 'prometheus-data-cpu-only-' + dataset),
make_option(full_db, bitmap_only))
with open(
os.path.join(
result_path, '{}-{}-fresh-insert.txt'.format(
dataset, make_label(full_db, bitmap_only))), 'w') as f:
print('Benchmarking fresh insert for {} {}...'.format(
dataset, make_label(full_db, bitmap_only)))
code = subprocess.call(cmd.split(' '), stdout=f)
if code != 0:
print('Non-zero return code for {}: {}'.format(dataset, code))
retval = code
return retval
def run_insert(benchmark_path, dataset_path, result_path, full_db):
retval = 0
for dataset in DATASETS:
data_path = make_data_path(benchmark_path, dataset, full_db, False)
backup_path = data_path + '_backup'
if not os.path.exists(backup_path):
shutil.copytree(data_path, backup_path)
cmd = '{}/tsbs -w insert -r {} -d {} {}'.format(
benchmark_path, data_path,
os.path.join(dataset_path, 'prometheus-data-cpu-only-' + dataset),
make_option(full_db, False))
with open(
os.path.join(
result_path,
'{}-{}-insert.txt'.format(dataset,
make_label(full_db, False))),
'w') as f:
print('Benchmarking insert for {} {}...'.format(
dataset, make_label(full_db, False)))
code = subprocess.call(cmd.split(' '), stdout=f)
if code != 0:
print('Non-zero return code for {}: {}'.format(dataset, code))
retval = code
return retval
def run_query(benchmark_path,
dataset_path,
result_path,
full_db,
bitmap_only=False,
randomize=False):
retval = 0
for s, dataset in enumerate(DATASETS, 1):
data_path = make_data_path(benchmark_path, dataset, full_db,
bitmap_only)
for q in range(1, 9):
cmd = '{}/tsbs -w query -r {} -q {} -g {} {}'.format(
benchmark_path, data_path, q, DATASETS_IDX[s - 1],
make_option(full_db, bitmap_only, randomize))
with open(
os.path.join(
result_path, 's{}-q{}-{}-query.txt'.format(
DATASETS_IDX[s - 1], q,
make_label(full_db, bitmap_only, randomize))),
'w') as f:
print('Benchmarking query for s{}-q{} {}...'.format(
s, q, make_label(full_db, bitmap_only, randomize)))
code = subprocess.call(cmd.split(' '), stdout=f)
if code != 0:
print('Non-zero return code for s{}-q{}: {}'.format(
s, q, code))
retval = code
return retval
def run_mixed(benchmark_path, dataset_path, result_path, full_db):
retval = 0
for dataset, N in zip(DATASETS, DATASETS_N):
data_path = os.path.join(
benchmark_path, 'bench',
dataset + '-mixed' + (full_db and '-full' or ''))
backup_path = make_data_path(benchmark_path, dataset, full_db, True)
for ratio in (0.0, 0.3, 0.7, 1.0):
for size in (0.1, 0.4, 0.7, 1.0):
if os.path.exists(data_path):
shutil.rmtree(data_path)
shutil.copytree(backup_path, data_path)
cmd = '{}/tsbs -w mixed -r {} -d {} -a 1.5 -s {} -t {} {} -p'.format(
benchmark_path, data_path,
os.path.join(dataset_path,
'prometheus-data-cpu-only-' + dataset),
int(size * N), ratio, make_option(full_db, True))
with open(
os.path.join(
result_path, '{}-{}-mixed-{}-{}.txt'.format(
dataset, full_db and 'fulldb' or 'hybrid',
ratio, size)), 'w') as f:
print(
'Benchmarking mixed workload for {} {} size={} ratio={}...'
.format(dataset, full_db and 'fulldb' or 'hybrid',
size, ratio))
code = subprocess.call(cmd.split(' '), stdout=f)
if code != 0:
print('Non-zero return code for {}: {}'.format(
dataset, code))
retval = code
return retval
def run_insert_checkpoint(benchmark_path, dataset_path, result_path):
retval = 0
dataset = DATASETS[-1]
# No checkpoints.
data_path = make_data_path(benchmark_path, dataset, False,
False) + '-no-checkpoint'
try:
os.makedirs(data_path)
except FileExistsError:
pass
cmd = '{}/tsbs -w insert -r {} -d {} -c disabled'.format(
benchmark_path, data_path,
os.path.join(dataset_path, 'prometheus-data-cpu-only-' + dataset))
with open(
os.path.join(
result_path, '{}-{}-fresh-insert-no-checkpoint.txt'.format(
dataset, make_label(False, False))), 'w') as f:
print('Benchmarking fresh insert for {} no checkpoint...'.format(
dataset))
code = subprocess.call(cmd.split(' '), stdout=f)
if code != 0:
print('Non-zero return code for {}: {}'.format(dataset, code))
return code
# Checkpoints.
data_path = make_data_path(benchmark_path, dataset, False,
False) + '-with-checkpoint'
try:
os.makedirs(data_path)
except FileExistsError:
pass
cmd = '{}/tsbs -w insert -r {} -d {} -c print'.format(
benchmark_path, data_path,
os.path.join(dataset_path, 'prometheus-data-cpu-only-' + dataset))
with open(
os.path.join(
result_path, '{}-{}-fresh-insert-with-checkpoint.txt'.format(
dataset, make_label(False, False))), 'w') as f:
with open(
os.path.join(
result_path, '{}-{}-fresh-insert-checkpoints.txt'.format(
dataset, make_label(False, False))), 'w') as ferr:
            print('Benchmarking fresh insert for {} with checkpoint...'.format(
                dataset))
code = subprocess.call(cmd.split(' '), stdout=f, stderr=ferr)
if code != 0:
print('Non-zero return code for {}: {}'.format(dataset, code))
retval = code
return retval
def run_query_resource(benchmark_path,
dataset_path,
result_path,
full_db,
bitmap_only=False,
randomize=False):
retval = 0
for s, (dataset, N) in enumerate(zip(DATASETS, DATASETS_N), 1):
data_path = make_data_path(benchmark_path, dataset, full_db,
bitmap_only)
for q in range(1, 3):
cmd = '{}/tsbs -w query -r {} -q {} -g {} {} -k {} -s {}'.format(
benchmark_path, data_path, q, DATASETS_IDX[s - 1],
make_option(full_db, bitmap_only, randomize), q <= 2 and 10000
or 4, int(N * 0.1))
with open(
os.path.join(
result_path, 's{}-q{}-{}-query-resource.txt'.format(
DATASETS_IDX[s - 1], q,
make_label(full_db, bitmap_only, randomize))),
'w') as f:
print('Benchmarking query for s{}-q{} {}...'.format(
s, q, make_label(full_db, bitmap_only, randomize)))
p = subprocess.Popen(cmd.split(' '))
proc = psutil.Process(p.pid)
f.write('cpu,rss,vms\n')
code = p.poll()
                while code is None:
cpu_percent = proc.cpu_percent()
mem_info = proc.memory_info()
f.write(f'{cpu_percent},{mem_info.rss},{mem_info.vms}\n')
time.sleep(1)
code = p.poll()
if code != 0:
print('Non-zero return code for s{}-q{}: {}'.format(
s, q, code))
retval = code
return retval
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--benchmark-path',
type=str,
help='Path to benchmark data')
parser.add_argument('--dataset-path', type=str, help='Path to datasets')
args = parser.parse_args()
retval = run_benchmark(args.benchmark_path, args.dataset_path, False)
if retval != 0:
sys.exit(retval)
retval = run_benchmark(args.benchmark_path, args.dataset_path, True)
sys.exit(retval)
``` |
{
"source": "jimy-byerley/glcontext",
"score": 3
} |
#### File: glcontext/glcontext/__init__.py
```python
import os
__version__ = '2.3.4'
def default_backend():
"""Get default backend based on the detected platform.
Supports detecting an existing context and standalone contexts.
    If no context is found for the platform we return the linux backend.
Example::
        # Get the available backend
        backend = default_backend()
# Create an opengl 3.3 context or detect the currently active
# context requiring at least opengl 3.3 support.
ctx = backend(330)
Returns:
A backend object for creating and/or detecting context
"""
PLATFORMS = {'windows', 'linux', 'darwin'}
import platform
target = platform.system().lower()
for known in PLATFORMS:
if target.startswith(known):
target = known
if target not in PLATFORMS:
target = 'linux'
if target == 'windows':
return _wgl()
if target == 'linux':
return _x11()
if target == 'darwin':
return _darwin()
raise ValueError("Cannot find suitable default backend")
def get_backend_by_name(name: str):
"""Request a specific backend by name"""
if name == 'egl':
return _egl()
raise ValueError("Cannot find supported backend: '{}'".format(name))
def _wgl():
"""Create wgl backend"""
from glcontext import wgl
def create(*args, **kwargs):
_apply_env_var(kwargs, 'glversion', 'GLCONTEXT_GLVERSION', arg_type=int)
_apply_env_var(kwargs, 'libgl', 'GLCONTEXT_WIN_LIBGL')
kwargs = _strip_kwargs(kwargs, ['glversion', 'mode', 'libgl'])
return wgl.create_context(**kwargs)
return create
def _x11():
"""Create x11 backend"""
from glcontext import x11
from ctypes.util import find_library
def create(*args, **kwargs):
if not kwargs.get('libgl'):
kwargs['libgl'] = find_library('GL')
if not kwargs.get('libx11'):
kwargs['libx11'] = find_library("X11")
_apply_env_var(kwargs, 'glversion', 'GLCONTEXT_GLVERSION', arg_type=int)
_apply_env_var(kwargs, 'libgl', 'GLCONTEXT_LINUX_LIBGL')
_apply_env_var(kwargs, 'libx11', 'GLCONTEXT_LINUX_LIBX11')
kwargs = _strip_kwargs(kwargs, ['glversion', 'mode', 'libgl', 'libx11'])
return x11.create_context(**kwargs)
return create
def _darwin():
"""Create darwin/cgl context"""
from glcontext import darwin
def create(*args, **kwargs):
return darwin.create_context(**_strip_kwargs(kwargs, ['mode']))
return create
def _egl():
from glcontext import egl
from ctypes.util import find_library
def create(*args, **kwargs):
if not kwargs.get('libgl'):
kwargs['libgl'] = find_library('GL')
if not kwargs.get('libegl'):
kwargs['libegl'] = find_library('EGL')
_apply_env_var(kwargs, 'device_index', 'GLCONTEXT_DEVICE_INDEX', arg_type=int)
_apply_env_var(kwargs, 'glversion', 'GLCONTEXT_GLVERSION', arg_type=int)
_apply_env_var(kwargs, 'libgl', 'GLCONTEXT_LINUX_LIBGL')
_apply_env_var(kwargs, 'libegl', 'GLCONTEXT_LINUX_LIBEGL')
kwargs = _strip_kwargs(kwargs, ['glversion', 'mode', 'libgl', 'libegl', 'device_index'])
return egl.create_context(**kwargs)
return create
def _strip_kwargs(kwargs: dict, supported_args: list):
"""Strips away unwanted keyword arguments.
The backends are using ``PyArg_ParseTupleAndKeywords`` to
parse the incoming ``kwargs`` data. It's not well suited
to handle additional arguments.
- Removes None key arguments
- Removes unsupported arguments
"""
return {k: v for k, v in kwargs.items() if v is not None and k in supported_args}
def _apply_env_var(kwargs, arg_name, env_name, arg_type=str):
"""Injects an environment variable into the arg dict if present"""
value = os.environ.get(env_name, kwargs.get(arg_name))
if value:
kwargs[arg_name] = arg_type(value)
``` |
{
"source": "JimYeung/Self-Driving-Car",
"score": 3
} |
#### File: JimYeung/Self-Driving-Car/train.py
```python
import os
import numpy as np
import matplotlib.pyplot as plt
from sklearn import utils
from sklearn.model_selection import train_test_split
import matplotlib.image as mpimg
from imgaug import augmenters as iaa
import cv2
import pandas as pd
import ntpath #edit the path
import random
import tensorflow as tf
from arch_collection import nvidia_model
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
config = tf.config.experimental.set_memory_growth(physical_devices[0], True)
def path_leaf(path):
#Triming the data by keeping the tail only, i.e. center/right/left_***.jpg
head, tail = ntpath.split(path)
return tail
def balance_data(thres, num_bins, data):
#Balancing the data
remove_list = []
hist, bins = np.histogram(data['steering'], num_bins) #bins = categories of steering angle
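    # Cap every steering-angle bin at `thres` samples so that the many
    # near-zero steering angles do not dominate the training data.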
for j in range(num_bins):
list_ = []
for i in range(len(data['steering'])):
if data['steering'][i] >= bins[j] and data['steering'][i] <= bins[j+1]:
list_.append(i) #categorizing the steering angle
random.shuffle(list_)
#print(len(list_))
list_ = list_[thres: ] #eject samples that is beyond the threshold
remove_list.extend(list_) #listing the unwanted data
data.drop(data.index[remove_list], inplace=True) #removing the unwanted data by accessing their index
return data, remove_list
def load_img_steering(datadir, df): #df = dataframe
image_path = []
steering = []
for i in range(len(df)):
indexed_data = df.iloc[i] #iloc: select data via index i
center, left, right = indexed_data[0], indexed_data[1], indexed_data[2]
image_path.append(os.path.join(datadir, center.strip()))#strip() to remove any spaces
steering.append(float(indexed_data[3]))
image_paths = np.asarray(image_path)
steerings = np.asarray(steering)
return image_paths, steerings
def zoom(image):
zoom = iaa.Affine(scale=(1,1.3))
image = zoom.augment_image(image)
return image
def pan(image):
pan = iaa.Affine(translate_percent={'x': (-0.1,0.1), 'y': (-0.1,0.1)})
image = pan.augment_image(image)
return image
def img_random_brightness(image):
brightness = iaa.Multiply((0.2,1.2))
image = brightness.augment_image(image)
return image
def img_random_flip(image, steering_angle):
image = cv2.flip(image, 1)
steering_angle = -steering_angle
return image, steering_angle
def img_preprocess(img):
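    # Crop away the sky and car hood, convert to YUV, blur, resize to the
    # 200x66 input expected by the NVIDIA architecture, and scale to [0, 1].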
img = img[60:135,:,:]
img = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)
img = cv2.GaussianBlur(img, (3,3), 0)
img = cv2.resize(img, (200,66)) #Resize to fit NVidia data architecture
return img/255
def random_augment(image, steering_angle):
image = mpimg.imread(image)
if np.random.rand() < 0.5:
image = pan(image)
if np.random.rand() < 0.5:
image = zoom(image)
if np.random.rand() < 0.5:
image = img_random_brightness(image)
if np.random.rand() < 0.5:
image, steering_angle = img_random_flip(image, steering_angle)
return image, steering_angle
def batch_generator(image_paths, steering_ang, batch_size, istraining):
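    # Infinite generator: each iteration yields one batch of preprocessed
    # (and, during training, randomly augmented) images with their steering
    # angles, in the form expected by Keras' fit() with a generator.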
while True:
batch_img = []
batch_steering = []
for i in range(batch_size):
random_index = random.randint(0, len(image_paths)-1)
if istraining:
im, steering = random_augment(image_paths[random_index], steering_ang[random_index])
else:
im = mpimg.imread(image_paths[random_index])
steering = steering_ang[random_index]
im = img_preprocess(im)
batch_img.append(im)
batch_steering.append(steering)
yield (np.array(batch_img), np.asarray(batch_steering))
def main():
#Retrieve the data
dir_path = os.path.dirname(os.path.realpath(__file__))
columns = ['center', 'left', 'right', 'steering', 'throttle', 'reverse', 'speed']
data = pd.read_csv(os.path.join(dir_path, 'data/driving_log.csv'), names = columns)
pd.set_option('display.max_colwidth', None)
data['center'] = data['center'].apply(path_leaf)
data['left'] = data['left'].apply(path_leaf)
data['right'] = data['right'].apply(path_leaf)
print(data.head()) #Check-pt
#Balancing the data
print("\nBalancing the data...." )
samples_per_bin = 500 #Threshold for uniforming the samples
num_bins = 25 #no. of categories of steering angles
print('total data: ', len(data))
balanced_data, remove_list = balance_data(samples_per_bin, num_bins, data)
print('removed: {} remaining: {}'.format(len(remove_list), len(data)))
    # Loading the image paths and steering angles
    image_paths, steerings = load_img_steering(os.path.join(dir_path, 'data', 'IMG'), balanced_data)
#Splitting Data
print("\nSplitting the Data....")
X_train, X_valid, y_train, y_valid = train_test_split(image_paths, steerings, test_size=0.2, random_state=6)
print('Training Samples: {} Valid Samples: {}'. format(len(X_train), len(X_valid)))
#Data Augmentation
'''
# Check point
image = image_paths[100]
print(image)
original_image = mpimg.imread(image)
preprocessed_image = zoom(original_image)
fig, axs = plt.subplots(1, 2, figsize=(15,10))
fig.tight_layout()
axs[0].imshow(original_image)
axs[0].set_title('original_image')
axs[1].imshow(preprocessed_image)
axs[1].set_title('preprocessed_image')
plt.show()
'''
#Batch Generator
'''
# Check Point
X_train_gen, y_train_gen = next(batch_generator(X_train, y_train, 2, 1))
X_valid_gen, y_valid_gen = next(batch_generator(X_valid, y_valid, 2, 0))
fig, axs = plt.subplots(1, 2, figsize=(15,10))
fig.tight_layout()
axs[0].imshow(X_train_gen[0])
axs[0].set_title('Training_image')
axs[1].imshow(X_valid_gen[0])
axs[1].set_title('Valid_image')
plt.show()
'''
#Import Model
model = nvidia_model()
print(model.summary())
history = model.fit(batch_generator(X_train, y_train,130, 1),
steps_per_epoch=350,
epochs=10,
validation_data=batch_generator(X_valid, y_valid,130,0),
validation_steps=250,
verbose=1, shuffle=1)
save = input("Would like to save trained model as model.h5? y/n")
if save == "y":
model.save('model.h5')
print("model is saved as 'model.h5'")
else:
print("model is not saved.")
pass
if __name__== "__main__":
main()
``` |
{
"source": "JimyMa/UniformBSGaussianUE",
"score": 3
} |
#### File: udntools/bs/band_constrain_bs.py
```python
from abc import abstractmethod
from . import BaseBS
class BandConstrainBS(BaseBS):
def __init__(self,
bs_number,
layer=1,
power=1.0,
bs_distribution="uniform"):
super(BandConstrainBS, self).__init__(bs_number, layer, power, bs_distribution)
@abstractmethod
def set_bs_to_region(self):
# Get initialized BS Position
pass
@abstractmethod
def set_uniform_bs_to_region(self):
pass
@abstractmethod
def select_ue(self):
pass
```
#### File: udntools/region/base_region.py
```python
import numpy as np
class BaseRegion(object):
_atom = 0.3
def __init__(self,
x_min,
x_max,
y_min,
y_max):
self.x_min = x_min
self.x_max = x_max
self.y_min = y_min
self.y_max = y_max
        # ground_position_: (x_range/atom) x (y_range/atom) x 2 array of
        # grid coordinates covering the region.
self.ground_position_ = self.get_ground()
def get_ground(self):
x = np.arange(self.x_min, self.x_max + self._atom, self._atom)
y = np.arange(self.y_min, self.y_max + self._atom, self._atom)
ground_x, ground_y = np.meshgrid(x, y)
ground_x = ground_x[:, :, np.newaxis]
ground_y = ground_y[:, :, np.newaxis]
ground_position = np.concatenate([ground_x, ground_y], axis=2)
return ground_position
def set_atom(self, atom):
self._atom = atom
def get_atom(self):
return self._atom
```
#### File: udntools/utils/dim2_distance.py
```python
import numpy as np
def dim2_distance(matrix_a, matrix_b):
"""
    Pairwise Euclidean distances between two sets of 2-D points
    :param matrix_a: num_samplesA * 2 matrix
    :param matrix_b: 2 * num_samplesB matrix
:return: num_samplesA * num_samplesB matrix
"""
distance_vector = matrix_a[:, :, np.newaxis] - matrix_b[np.newaxis, :, :]
distance = np.sqrt(distance_vector[:, 0, :] ** 2.0
+ distance_vector[:, 1, :] ** 2.0)
# distance: num_samplesA * num_samplesB matrix
return distance
``` |
{
"source": "JimySheepman/hands-on-project",
"score": 3
} |
#### File: hands-on-project/keylog_timer/keylog_timer.py
```python
from pynput.keyboard import Listener
import logging
import sys
import time
class Colors:
CRED2 = "\33[91m"
CBLUE2 = "\33[94m"
ENDC = "\033[0m"
banner = ("""
██╗ ██╗███████╗██╗ ██╗██╗ ██████╗ ██████╗ ████████╗██╗███╗ ███╗███████╗██████╗
██║ ██╔╝██╔════╝╚██╗ ██╔╝██║ ██╔═══██╗██╔════╝ ╚══██╔══╝██║████╗ ████║██╔════╝██╔══██╗
█████╔╝ █████╗ ╚████╔╝ ██║ ██║ ██║██║ ███╗ ██║ ██║██╔████╔██║█████╗ ██████╔╝
██╔═██╗ ██╔══╝ ╚██╔╝ ██║ ██║ ██║██║ ██║ ██║ ██║██║╚██╔╝██║██╔══╝ ██╔══██╗
██║ ██╗███████╗ ██║ ███████╗╚██████╔╝╚██████╔╝ ██║ ██║██║ ╚═╝ ██║███████╗██║ ██║
╚═╝ ╚═╝╚══════╝ ╚═╝ ╚══════╝ ╚═════╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝
v1.0 """)
for col in banner:
print(Colors.CRED2 + col, end="")
sys.stdout.flush()
time.sleep(0.0025)
x = ("""
Author: <NAME> | JimySheepman
Github: https://github.com/JimySheepman \n """)
for col in x:
print(Colors.CBLUE2 + col, end="")
sys.stdout.flush()
time.sleep(0.0040)
y = "\n\t\tPress Ctrl + c to terminate the program...\n"
for col in y:
print(Colors.CRED2 + col, end="")
sys.stdout.flush()
time.sleep(0.0040)
z = "\n"
for col in z:
print(Colors.ENDC + col, end="")
sys.stdout.flush()
time.sleep(0.4)
logging.basicConfig(filename=("time_log.txt"), level=logging.DEBUG, format='%(asctime)s: %(message)s')
def on_press(key):
logging.info(str(key))
with Listener(on_press=on_press) as listener:
listener.join()
```
#### File: hands-on-project/WebCrawler/spider.py
```python
from urllib.request import urlopen #used in python to connect to webpage
from link_finder import LinkFinder #import link finder in link_finder.py
from crawler import *  # import everything from crawler.py
class Spider:
#class variables (shared among all spiders instances)
    project_name = ''
base_url = ''
domain_name = ''
queue_file = '' #saved inside text file, for resuming later
crawled_file = '' #saved inside text file, for resuming later
queue = set() #using this to stored in ram .
crawled = set() #using this to stored in ram.
def __init__(self, project_name, base_url, domain_name):
Spider.project_name = project_name
Spider.base_url = base_url
Spider.domain_name = domain_name
Spider.queue_file = Spider.project_name + '/queue.txt'
Spider.crawled_file = Spider.project_name + '/crawled.txt'
self.boot()
self.crawl_page('First Spider', Spider.base_url)
@staticmethod
def boot():
create_project_dir(Spider.project_name)
create_data_files(Spider.project_name, Spider.base_url)
#both queue and crawled are retrieved from file and saved in ram for faster operation.
Spider.queue = file_to_set(Spider.queue_file)
Spider.crawled = file_to_set(Spider.crawled_file)
@staticmethod
def crawl_page(thread_name, page_url):
if page_url not in Spider.crawled:
print(thread_name + ' now crawling ' + page_url)
#print('Queue: ' + str(len(Spider.queue) + ' | Crawled: ' + str(len(Spider.crawled))))
print('Queue: {} | Crawled: {}'.format(len(Spider.queue), len(Spider.crawled)))
Spider.add_links_to_queue(Spider.gather_links(page_url))
#remove from queue list after completed.
Spider.queue.remove(page_url)
#add links into crawled list after crawled.
Spider.crawled.add(page_url)
#call both sets : file_to_set and set_to_file and convert them to file
Spider.update_files()
@staticmethod
def gather_links(page_url):
html_string = ''
#using error catching on networking
try:
response = urlopen(page_url)
#make sure its a html page and not some pdf format
if 'text/html' in response.getheader('Content-Type'):
#python read in html bytes format
html_bytes = response.read()
#convert into human readable character (utf-8)
html_string = html_bytes.decode('utf-8')
#create a linkfinder object
finder = LinkFinder(Spider.base_url, page_url)
#feed in the html strings
finder.feed(html_string)
except:
print('Error: cannot crawl page!')
return set()
return finder.page_links()
@staticmethod
def add_links_to_queue(links):
for url in links:
            # skip if url is already in the queue
if url in Spider.queue:
continue
#checks if url was crawled
if url in Spider.crawled:
continue
            # check that the url belongs to the target site; pages may link out
            # to Google, FB, Insta, etc., and this makes sure we only crawl the
            # domain we told the spider to crawl, e.g. thenewboston.com/******
if Spider.domain_name not in url:
continue
#add into queue
Spider.queue.add(url)
@staticmethod
def update_files():
set_to_file(Spider.queue, Spider.queue_file)
set_to_file(Spider.crawled, Spider.crawled_file)
``` |
{
"source": "jimzers/ray",
"score": 2
} |
#### File: serve/tests/test_fastapi.py
```python
from fastapi import FastAPI
import requests
import pytest
import inspect
from ray import serve
from ray.serve.utils import make_fastapi_class_based_view
def test_fastapi_function(serve_instance):
client = serve_instance
app = FastAPI()
@app.get("/{a}")
def func(a: int):
return {"result": a}
@serve.ingress(app)
class FastAPIApp:
pass
client.deploy("f", FastAPIApp)
resp = requests.get(f"http://localhost:8000/f/100")
assert resp.json() == {"result": 100}
resp = requests.get(f"http://localhost:8000/f/not-number")
assert resp.status_code == 422 # Unprocessable Entity
assert resp.json()["detail"][0]["type"] == "type_error.integer"
def test_ingress_prefix(serve_instance):
client = serve_instance
app = FastAPI()
@app.get("/{a}")
def func(a: int):
return {"result": a}
@serve.ingress(app, path_prefix="/api")
class App:
pass
client.deploy("f", App)
resp = requests.get(f"http://localhost:8000/api/100")
assert resp.json() == {"result": 100}
def test_class_based_view(serve_instance):
client = serve_instance
app = FastAPI()
@app.get("/other")
def hello():
return "hello"
@serve.ingress(app)
class A:
def __init__(self):
self.val = 1
@app.get("/calc/{i}")
def b(self, i: int):
return i + self.val
@app.post("/calc/{i}")
def c(self, i: int):
return i - self.val
client.deploy("f", A)
resp = requests.get(f"http://localhost:8000/f/calc/41")
assert resp.json() == 42
resp = requests.post(f"http://localhost:8000/f/calc/41")
assert resp.json() == 40
resp = requests.get(f"http://localhost:8000/f/other")
assert resp.json() == "hello"
def test_make_fastapi_cbv_util():
app = FastAPI()
class A:
@app.get("/{i}")
def b(self, i: int):
pass
# before, "self" is treated as a query params
assert app.routes[-1].endpoint == A.b
assert app.routes[-1].dependant.query_params[0].name == "self"
assert len(app.routes[-1].dependant.dependencies) == 0
make_fastapi_class_based_view(app, A)
# after, "self" is treated as a dependency instead of query params
assert app.routes[-1].endpoint == A.b
assert len(app.routes[-1].dependant.query_params) == 0
assert len(app.routes[-1].dependant.dependencies) == 1
self_dep = app.routes[-1].dependant.dependencies[0]
assert self_dep.name == "self"
assert inspect.isfunction(self_dep.call)
assert "get_current_servable" in str(self_dep.call)
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", "-s", __file__]))
``` |
{
"source": "jimzers/youtube_tts_data_generator",
"score": 3
} |
#### File: youtube_tts_data_generator/youtube_tts_data_generator/text_cleaner.py
```python
import re
from unidecode import unidecode
from .number_cleaner import normalize_numbers
class Cleaner(object):
def __init__(self):
self._whitespace_re = re.compile(r"\s+")
self._abbreviations = [
(re.compile("\\b%s\\." % x[0], re.IGNORECASE), x[1])
for x in [
("mrs", "misess"),
("mr", "mister"),
("dr", "doctor"),
("st", "saint"),
("co", "company"),
("jr", "junior"),
("maj", "major"),
("gen", "general"),
("drs", "doctors"),
("rev", "reverend"),
("lt", "lieutenant"),
("hon", "honorable"),
("sgt", "sergeant"),
("capt", "captain"),
("esq", "esquire"),
("ltd", "limited"),
("col", "colonel"),
("ft", "fort"),
("rs", "rupees"),
]
]
def expand_abbreviations(self, text):
for regex, replacement in self._abbreviations:
text = re.sub(regex, replacement, text)
return text
def collapse_whitespace(self, text):
return re.sub(self._whitespace_re, " ", text)
def clean_english_text(self, text):
"""Pipeline for English text, including number and abbreviation expansion."""
text = normalize_numbers(text)
text = unidecode(text)
text = text.lower()
text = self.expand_abbreviations(text)
text = self.collapse_whitespace(text)
return text
``` |
{
"source": "jimzhong/udp-on-xbee",
"score": 3
} |
#### File: udp-on-xbee/udponxbee/frame.py
```python
from struct import pack, unpack, calcsize
from enum import Enum
import logging
START_DELIMITER = 0x7E
class XBeeOutFrame(object):
def __bytes__(self):
raise NotImplementedError("Subclass should implement this method")
@staticmethod
def calc_checksum(partial_frame):
'''
        partial_frame does not include the first 3 bytes (start delimiter + length) or the trailing checksum byte
'''
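        # XBee API checksum: 0xFF minus the low byte of the sum of all bytes
        # between the length field and the checksum itself.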
return pack("!B", 0xff - (sum(partial_frame) & 0xff))
class XBeeInFrame(object):
AT_RESPONSE = 0x88
MODEM_STATUS = 0x8A
TX_STATUS = 0x8B
RX_PACKET = 0x90
@staticmethod
def verify_frame(frame):
val = sum(frame[3:]) & 0xff
return val == 0xff
@classmethod
def from_bytes(cls, data):
decoder = {
cls.AT_RESPONSE: XBeeATResponse,
cls.MODEM_STATUS: XBeeModemStatus,
cls.TX_STATUS: XBeeTXStatus,
cls.RX_PACKET: XBeeRXPacket,
}
if data[0] != START_DELIMITER:
raise ValueError("Delimiter is incorrect.")
if cls.verify_frame(data) == False:
raise ValueError("Frame is corrupted.")
if data[3] in decoder:
return decoder[data[3]](data)
else:
raise ValueError("Unknown frame of type 0x{:x}".format(data[3]))
class XBeeATResponse(XBeeInFrame):
def __init__(self, data):
'''
value is a bytearray
'''
assert data[3] == self.AT_RESPONSE
self.frame_id = data[4]
self.status = data[7]
self.key = data[5:7].decode()
self.value = data[8:-1]
def __str__(self):
return "ATResponse: {} = {}".format(self.key, self.value)
class XBeeRXPacket(XBeeInFrame):
def __init__(self, frame):
assert frame[3] == self.RX_PACKET
self.addr64 = int.from_bytes(frame[4:12], 'big')
self.addr16 = int.from_bytes(frame[12:14], 'big')
self.data = frame[15:-1]
def __str__(self):
return "RXPacket from {:x} of {} bytes".format(self.addr64, len(self.data))
class XBeeTXStatus(XBeeInFrame):
class DeliveryStatus(Enum):
SUCCESS = 0
MAC_ACK_FAILURE = 0x01
CCA_FAILURE = 0x02
INVALID_DEST_ENDPOINT = 0x15
NETWORK_ACK_FAILURE = 0x21
NOT_JOINED = 0x22
SELF_ADDRESSED = 0x23
ADDRESS_NOT_FOUND = 0x24
ROUTE_NOT_FOUND = 0x25
BROADCAST_SOURCE_FAIL = 0x26
INVALID_BINDING_TABLE_INDEX = 0x2B
RESOURCE_BUSY_1 = 0x2c
ATTEMPT_BROADCAST_WITH_APS = 0x2d
ATTEMPT_UNICAST_WITH_APS_BUT_EE00 = 0x2e
RESOURCE_BUSY_2 = 0x32
DATA_PAYLOAD_TOO_LARGE = 0x74
INDIRECT_MESSAGE_UNREQ = 0x75
def __init__(self, frame):
assert frame[3] == self.TX_STATUS
self.frame_id = frame[4]
self.addr16 = int.from_bytes(frame[5:7], 'big')
self.delivery_status = self.DeliveryStatus(frame[8])
self.discovery_status = frame[9]
def __str__(self):
return "TXStatus: delivery={}, discovery={}, frame={}".format(
self.delivery_status, self.discovery_status, self.frame_id)
class XBeeModemStatus(XBeeInFrame):
class Status(Enum):
HW_RESET= 0
WDT_RESET = 1
JOIN = 2
DISASSOC = 3
COORDINATOR_START = 6
KEY_UPDATE = 7
def __init__(self, frame):
assert frame[3] == self.MODEM_STATUS
self.status = self.Status(frame[4])
def __str__(self):
return "ModemStatus: {}".format(self.status)
class XBeeTXRequest(XBeeOutFrame):
TX_REQUEST_CMD = 0x10
TX_REQ_HEADER_FMT = "!BBQHBB"
TX_REQ_HEADER_SIZE = calcsize(TX_REQ_HEADER_FMT)
def __init__(self, addr64, *data, **kwargs):
self.data = b''.join(data)
if isinstance(addr64, bytes):
self.addr64 = int.from_bytes(addr64, 'big')
if isinstance(addr64, str):
self.addr64 = int(addr64, 16)
elif isinstance(addr64, int):
self.addr64 = addr64
else:
raise TypeError("Addr64 should be bytes, string or int")
self.frame_id = kwargs.get("frame_id", 0)
def __bytes__(self):
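        # Serialized frame layout: 0x7E start delimiter, 16-bit length, then the
        # inner header (frame type 0x10, frame id, 64-bit dest, 16-bit dest,
        # broadcast radius, options), the payload, and a 1-byte checksum.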
length = len(self.data) + self.TX_REQ_HEADER_SIZE
ohdr = pack("!BH", 0x7e, length)
ihdr = pack(self.TX_REQ_HEADER_FMT, self.TX_REQUEST_CMD, self.frame_id, self.addr64, 0xfffe, 0, 0)
checksum = 0xff - ((sum(ihdr) + sum(self.data)) & 0xff)
checksum = pack("!B", checksum)
return b"".join([ohdr, ihdr, self.data, checksum])
def __str__(self):
return "TXRequest to {:x} of {} bytes".format(self.addr64, len(self.data))
class XBeeATRequest(XBeeOutFrame):
AT_REQUEST_CMD = 0x08
AT_HEADER_FMT = "!BB2s"
AT_HEADER_SIZE = calcsize(AT_HEADER_FMT)
def __init__(self, key, value=b'', frame_id=1):
'''
value should be a hex string
'''
self.key = key
self.value = value
self.frame_id = frame_id
def __bytes__(self):
length = len(self.value) + self.AT_HEADER_SIZE
ohdr = pack("!BH", START_DELIMITER, length)
ihdr = pack(self.AT_HEADER_FMT, self.AT_REQUEST_CMD, self.frame_id, self.key.encode())
checksum = 0xff - ((sum(ihdr) + sum(self.value)) & 0xff)
checksum = pack("!B", checksum)
return b"".join([ohdr, ihdr, self.value, checksum])
def __str__(self):
return ("ATRequest {} = {}".format(self.key, self.value))
if __name__ == "__main__":
from binascii import hexlify, unhexlify
frame = XBeeTXRequest("eeeeee", b'TxData1B')
frame = XBeeATRequest("NI")
frame = XBeeInFrame.from_bytes(unhexlify("7e00028a066f"))
# frame = XBeeInFrame.from_bytes(unhexlify("7e00058801424400f0"))
# frame = XBeeInFrame.from_bytes(unhexlify("7e0011900013a20040522baa7d84015278446174610d"))
print(frame)
``` |
{
"source": "jimzhu/OpenCTR-benchmarks",
"score": 3
} |
#### File: libs/CollMetric/utils.py
```python
from collections import defaultdict
import numpy as np
from scipy.sparse import dok_matrix, lil_matrix
from tqdm import tqdm
def citeulike(tag_occurence_thres=10):
user_dict = defaultdict(set)
for u, item_list in enumerate(open("citeulike-t/users.dat").readlines()):
items = item_list.strip().split(" ")
# ignore the first element in each line, which is the number of items the user liked.
for item in items[1:]:
user_dict[u].add(int(item))
n_users = len(user_dict)
n_items = max([item for items in user_dict.values() for item in items]) + 1
user_item_matrix = dok_matrix((n_users, n_items), dtype=np.int32)
for u, item_list in enumerate(open("citeulike-t/users.dat").readlines()):
items = item_list.strip().split(" ")
# ignore the first element in each line, which is the number of items the user liked.
for item in items[1:]:
user_item_matrix[u, int(item)] = 1
n_features = 0
for l in open("citeulike-t/tag-item.dat").readlines():
items = l.strip().split(" ")
if len(items) >= tag_occurence_thres:
n_features += 1
print("{} features over tag_occurence_thres ({})".format(n_features, tag_occurence_thres))
features = dok_matrix((n_items, n_features), dtype=np.int32)
feature_index = 0
for l in open("citeulike-t/tag-item.dat").readlines():
items = l.strip().split(" ")
if len(items) >= tag_occurence_thres:
features[[int(i) for i in items], feature_index] = 1
feature_index += 1
return user_item_matrix, features
def split_data(user_item_matrix, split_ratio=(3, 1, 1), seed=1):
# set the seed to have deterministic results
np.random.seed(seed)
train = dok_matrix(user_item_matrix.shape)
validation = dok_matrix(user_item_matrix.shape)
test = dok_matrix(user_item_matrix.shape)
# convert it to lil format for fast row access
user_item_matrix = lil_matrix(user_item_matrix)
for user in tqdm(range(user_item_matrix.shape[0]), desc="Split data into train/valid/test"):
items = list(user_item_matrix.rows[user])
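        # Only users with at least 5 interactions are split; sparser users are
        # left out of all three sets.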
if len(items) >= 5:
np.random.shuffle(items)
train_count = int(len(items) * split_ratio[0] / sum(split_ratio))
valid_count = int(len(items) * split_ratio[1] / sum(split_ratio))
for i in items[0: train_count]:
train[user, i] = 1
for i in items[train_count: train_count + valid_count]:
validation[user, i] = 1
for i in items[train_count + valid_count:]:
test[user, i] = 1
print("{}/{}/{} train/valid/test samples".format(
len(train.nonzero()[0]),
len(validation.nonzero()[0]),
len(test.nonzero()[0])))
return train, validation, test
```
#### File: daisy/utils/metrics.py
```python
import numpy as np
def precision_at_k(r, k):
"""
Precision calculation method
Parameters
----------
r : List, list of the rank items
k : int, top-K number
Returns
-------
pre : float, precision value
"""
assert k >= 1
r = np.asarray(r)[:k] != 0
if r.size != k:
raise ValueError('Relevance score length < k')
# return np.mean(r)
pre = sum(r) / len(r)
return pre
def recall_at_k(rs, test_ur, k):
"""
Recall calculation method
Parameters
----------
rs : Dict, {user : rank items} for test set
test_ur : Dict, {user : items} for test set ground truth
k : int, top-K number
Returns
-------
rec : float recall value
"""
assert k >= 1
res = []
for user in test_ur.keys():
r = np.asarray(rs[user])[:k] != 0
if r.size != k:
raise ValueError('Relevance score length < k')
if len(test_ur[user]) == 0:
raise KeyError(f'Invalid User Index: {user}')
res.append(sum(r) / len(test_ur[user]))
rec = np.mean(res)
return rec
def mrr_at_k(rs, k):
"""
Mean Reciprocal Rank calculation method
Parameters
----------
rs : Dict, {user : rank items} for test set
k : int, topK number
Returns
-------
mrr : float, MRR value
"""
assert k >= 1
res = 0
for r in rs.values():
r = np.asarray(r)[:k] != 0
for index, item in enumerate(r):
if item == 1:
res += 1 / (index + 1)
mrr = res / len(rs)
return mrr
def ap(r):
"""
Average precision calculation method
Parameters
----------
r : List, Relevance scores (list or numpy) in rank order (first element is the first item)
Returns
-------
a_p : float, Average precision value
"""
r = np.asarray(r) != 0
out = [precision_at_k(r, k + 1) for k in range(r.size) if r[k]]
if not out:
return 0.
a_p = np.sum(out) / len(r)
return a_p
def map_at_k(rs):
"""
Mean Average Precision calculation method
Parameters
----------
rs : Dict, {user : rank items} for test set
Returns
-------
m_a_p : float, MAP value
"""
    m_a_p = np.mean([ap(r) for r in rs.values()])
return m_a_p
def dcg_at_k(r, k):
"""
Discounted Cumulative Gain calculation method
Parameters
----------
r : List, Relevance scores (list or numpy) in rank order
(first element is the first item)
k : int, top-K number
Returns
-------
dcg : float, DCG value
"""
assert k >= 1
r = np.asfarray(r)[:k] != 0
if r.size:
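        # DCG@k = sum_i (2^{rel_i} - 1) / log2(i + 1) over 1-based ranks i.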
dcg = np.sum(np.subtract(np.power(2, r), 1) / np.log2(np.arange(2, r.size + 2)))
return dcg
return 0.
def ndcg_at_k(r, k):
"""
Normalized Discounted Cumulative Gain calculation method
Parameters
----------
r : List, Relevance scores (list or numpy) in rank order
(first element is the first item)
k : int, top-K number
Returns
-------
ndcg : float, NDCG value
"""
assert k >= 1
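    # NDCG@k = DCG@k / IDCG@k, where IDCG@k is the DCG of the ideal
    # (relevance-sorted) ranking.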
idcg = dcg_at_k(sorted(r, reverse=True), k)
if not idcg:
return 0.
ndcg = dcg_at_k(r, k) / idcg
return ndcg
def hr_at_k(rs, test_ur):
"""
Hit Ratio calculation method
Parameters
----------
rs : Dict, {user : rank items} for test set
test_ur : (Deprecated) Dict, {user : items} for test set ground truth
Returns
-------
hr : float, HR value
"""
# another way for calculating hit rate
# numer, denom = 0., 0.
# for user in test_ur.keys():
# numer += np.sum(rs[user])
# denom += len(test_ur[user])
# return numer / denom
uhr = 0
for r in rs.values():
if np.sum(r) != 0:
uhr += 1
hr = uhr / len(rs)
return hr
def auc_at_k(rs):
"""
Area Under Curve calculation method
Parameters
----------
rs : Dict, {user : rank items} for test set
Returns
-------
m_auc : float, AUC value
"""
uauc = 0.
for user in rs.keys():
label_all = rs[user]
pos_num = len(list(filter(lambda x: x == 1, label_all)))
neg_num = len(label_all) - pos_num
pos_rank_num = 0
        for j in range(len(label_all)):
if label_all[j] == 1:
pos_rank_num += j + 1
auc = (pos_rank_num - pos_num * (pos_num + 1) / 2) / (pos_num * neg_num)
uauc += auc
m_auc = uauc / len(rs)
return m_auc
def f1_at_k(rs, test_ur):
"""
F1-score calculation method
Parameters
----------
rs : Dict, {user : rank items} for test set
test_ur : Dict, {user : items} for test set ground truth
Returns
-------
fs : float, F1-score value
"""
uf1 = 0.
for user in rs.keys():
r = rs[user]
r = np.asarray(r) != 0
# start calculate precision
prec_k = sum(r) / len(r)
# start calculate recall
if len(test_ur[user]) == 0:
raise KeyError(f'Invalid User Index: {user}')
rec_k = sum(r) / len(test_ur[user])
# start calculate f1-score
f1_k = 2 * prec_k * rec_k / (rec_k + prec_k)
uf1 += f1_k
fs = uf1 / len(rs)
return fs
def RecallPrecision_ATk(test_data, r, k):
"""
    test_data : list of per-user ground-truth item lists (users may have
        different numbers of positive items)
    r : binary relevance matrix of shape (test_batch, k); rows must be
        pre-sorted by predicted score
k : top-k
"""
right_pred = r[:, :k].sum(1)
precis_n = k
recall_n = np.array([len(test_data[i]) for i in range(len(test_data))])
for i in range(len(recall_n)):
if recall_n[i] == 0:
recall_n[i] = 1
recall = np.sum(right_pred/recall_n)
precis = np.sum(right_pred)/precis_n
# print('recall')
# embed()
return {'recall': recall, 'precision': precis}
def MRRatK_r(r, k):
"""
Mean Reciprocal Rank
"""
pred_data = r[:, :k]
scores = np.log2(1./np.arange(1, k+1))
pred_data = pred_data/scores
pred_data = pred_data.sum(1)
return np.sum(pred_data)
def NDCGatK_r(test_data,r,k):
"""
Normalized Discounted Cumulative Gain
rel_i = 1 or 0, so 2^{rel_i} - 1 = 1 or 0
"""
assert len(r) == len(test_data)
pred_data = r[:, :k]
test_matrix = np.zeros((len(pred_data), k))
for i, items in enumerate(test_data):
length = k if k <= len(items) else len(items)
test_matrix[i, :length] = 1
max_r = test_matrix
idcg = np.sum(max_r * 1./np.log2(np.arange(2, k + 2)), axis=1)
dcg = pred_data*(1./np.log2(np.arange(2, k + 2)))
dcg = np.sum(dcg, axis=1)
idcg[idcg == 0.] = 1.
ndcg = dcg/idcg
ndcg[np.isnan(ndcg)] = 0.
# print('NDCG')
# embed()
return np.sum(ndcg)
def HRK_r(r,k):
pred_data = r[:, :k]
hits = np.sum(np.sum(pred_data, axis = 1) != 0)
return hits
```
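A quick usage sketch for the ranking metrics above (the relevance lists and ground-truth items below are toy data of my own, only to illustrate the call signatures; `rs` maps each user to a 0/1 relevance vector in rank order):
```python
import numpy as np

rs = {0: [1, 0, 1, 0, 0], 1: [0, 0, 0, 1, 1]}   # user -> ranked relevance flags
test_ur = {0: [10, 22], 1: [3, 7, 9]}           # user -> ground-truth item ids (hypothetical)

print(hr_at_k(rs, test_ur))                     # 1.0: both users have at least one hit
print(np.mean([ndcg_at_k(r, 5) for r in rs.values()]))
print(f1_at_k(rs, test_ur))
```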
#### File: evaluator/python/evaluate_loo.py
```python
import itertools
import numpy as np
from concurrent.futures import ThreadPoolExecutor
import sys
import heapq
def argmax_top_k(a, top_k=50):
ele_idx = heapq.nlargest(top_k, zip(a, itertools.count()))
return np.array([idx for ele, idx in ele_idx], dtype=np.intc)
def hit(rank, ground_truth):
last_idx = sys.maxsize
for idx, item in enumerate(rank):
if item == ground_truth:
last_idx = idx
break
result = np.zeros(len(rank), dtype=np.float32)
result[last_idx:] = 1.0
return result
def ndcg(rank, ground_truth):
last_idx = sys.maxsize
for idx, item in enumerate(rank):
if item == ground_truth:
last_idx = idx
break
result = np.zeros(len(rank), dtype=np.float32)
result[last_idx:] = 1.0/np.log2(last_idx+2)
return result
def mrr(rank, ground_truth):
last_idx = sys.maxsize
for idx, item in enumerate(rank):
if item == ground_truth:
last_idx = idx
break
result = np.zeros(len(rank), dtype=np.float32)
result[last_idx:] = 1.0/(last_idx+1)
return result
def eval_score_matrix_loo(score_matrix, test_items, top_k=50, thread_num=None):
def _eval_one_user(idx):
scores = score_matrix[idx] # all scores of the test user
test_item = test_items[idx] # all test items of the test user
ranking = argmax_top_k(scores, top_k) # Top-K items
result = []
result.extend(hit(ranking, test_item))
result.extend(ndcg(ranking, test_item))
result.extend(mrr(ranking, test_item))
result = np.array(result, dtype=np.float32).flatten()
return result
with ThreadPoolExecutor(max_workers=thread_num) as executor:
batch_result = executor.map(_eval_one_user, range(len(test_items)))
result = list(batch_result) # generator to list
return np.array(result) # list to ndarray
```
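A minimal sketch of calling the leave-one-out evaluator above, assuming the functions are in scope; the score matrix and held-out items are random placeholders. Each row of the result concatenates the HR, NDCG and MRR curves up to `top_k`:
```python
import numpy as np

scores = np.random.rand(4, 100).astype(np.float32)   # 4 test users x 100 candidate items
test_items = [3, 17, 42, 99]                         # one held-out item per user (made up)
res = eval_score_matrix_loo(scores, test_items, top_k=10, thread_num=2)
hit, ndcg_block, mrr_block = np.split(res, 3, axis=1)  # each block is (4, 10)
print(hit[:, -1].mean(), ndcg_block[:, -1].mean(), mrr_block[:, -1].mean())
```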
#### File: libs/LR-GCCF/data_utils.py
```python
import numpy as np
import pandas as pd
import scipy.sparse as sp
import torch.utils.data as data
import pdb
from torch.autograd import Variable
import torch
import math
import random
def load_all(test_num=100):
""" We load all the three file here to save time in each epoch. """
train_data = pd.read_csv(
'../data/ml-1m.train.rating',
sep='\t', header=None, names=['user', 'item'],
usecols=[0, 1], dtype={0: np.int32, 1: np.int32})
user_num = train_data['user'].max() + 1
item_num = train_data['item'].max() + 1
train_data = train_data.values.tolist()
# load ratings as a dok matrix
train_mat = sp.dok_matrix((user_num, item_num), dtype=np.float32)
for x in train_data:
train_mat[x[0], x[1]] = 1.0
test_data = []
with open('../data/ml-1m.test.rating', 'r') as fd:
line = fd.readline()
while line != None and line != '':
arr = line.split('\t')
u = eval(arr[0])[0]
test_data.append([u, eval(arr[0])[1]])
for i in arr[1:]:
test_data.append([u, int(i)])
line = fd.readline()
return train_data, test_data, user_num, item_num, train_mat
class BPRData(data.Dataset):
def __init__(self,train_dict=None,num_item=0, num_ng=1, is_training=None, data_set_count=0,all_rating=None):
super(BPRData, self).__init__()
self.num_item = num_item
self.train_dict = train_dict
self.num_ng = num_ng
self.is_training = is_training
self.data_set_count = data_set_count
self.all_rating=all_rating
self.set_all_item=set(range(num_item))
def ng_sample(self):
# assert self.is_training, 'no need to sampling when testing'
# print('ng_sample----is----call-----')
self.features_fill = []
for user_id in self.train_dict:
positive_list=self.train_dict[user_id]#self.train_dict[user_id]
all_positive_list=self.all_rating[user_id]
#item_i: positive item ,,item_j:negative item
# temp_neg=list(self.set_all_item-all_positive_list)
# random.shuffle(temp_neg)
# count=0
# for item_i in positive_list:
# for t in range(self.num_ng):
# self.features_fill.append([user_id,item_i,temp_neg[count]])
# count+=1
for item_i in positive_list:
for t in range(self.num_ng):
item_j=np.random.randint(self.num_item)
while item_j in all_positive_list:
item_j=np.random.randint(self.num_item)
self.features_fill.append([user_id,item_i,item_j])
def __len__(self):
return self.num_ng*self.data_set_count#return self.num_ng*len(self.train_dict)
def __getitem__(self, idx):
features = self.features_fill
user = features[idx][0]
item_i = features[idx][1]
item_j = features[idx][2]
return user, item_i, item_j
class resData(data.Dataset):
def __init__(self,train_dict=None,batch_size=0,num_item=0,all_pos=None):
super(resData, self).__init__()
self.train_dict = train_dict
self.batch_size = batch_size
self.all_pos_train=all_pos
self.features_fill = []
for user_id in self.train_dict:
self.features_fill.append(user_id)
self.set_all=set(range(num_item))
def __len__(self):
        return math.ceil(len(self.train_dict)*1.0/self.batch_size)  # here self.data_set_count == batch_size
def __getitem__(self, idx):
user_test=[]
item_test=[]
split_test=[]
        for i in range(self.batch_size):  # here self.data_set_count == batch_size
index_my=self.batch_size*idx+i
if index_my == len(self.train_dict):
break
user = self.features_fill[index_my]
item_i_list = list(self.train_dict[user])
item_j_list = list(self.set_all-self.all_pos_train[user])
# pdb.set_trace()
u_i=[user]*(len(item_i_list)+len(item_j_list))
user_test.extend(u_i)
item_test.extend(item_i_list)
item_test.extend(item_j_list)
split_test.append([(len(item_i_list)+len(item_j_list)),len(item_j_list)])
        # in practice only half of this is actually used in the computation; the item_j entries are not needed.
return torch.from_numpy(np.array(user_test)), torch.from_numpy(np.array(item_test)), split_test
``` |
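A hedged sketch of driving the `BPRData` sampler above with a toy interaction dictionary (all ids and counts below are made up; note that `data_set_count` must equal the total number of positive user–item pairs):
```python
import torch.utils.data as data

train_dict = {0: {1, 2}, 1: {0, 3}}     # user -> positive items (toy)
all_rating = {0: {1, 2}, 1: {0, 3}}     # user -> every known positive, used to reject negatives
dataset = BPRData(train_dict=train_dict, num_item=10, num_ng=2,
                  is_training=True, data_set_count=4, all_rating=all_rating)
dataset.ng_sample()                     # re-draw negative samples before each epoch
loader = data.DataLoader(dataset, batch_size=2, shuffle=True)
user, item_i, item_j = next(iter(loader))
print(user, item_i, item_j)
```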
{
"source": "jimzucker/aws-forecast",
"score": 2
} |
#### File: jimzucker/aws-forecast/get_forecast.py
```python
import sys
import logging
import boto3
import os
import datetime
from dateutil.relativedelta import relativedelta
from botocore.exceptions import ClientError
import json
from urllib.request import Request, urlopen
from urllib.error import URLError, HTTPError
from base64 import b64decode
# Initialize you log configuration using the base class
logging.basicConfig(level = logging.INFO)
logger = logging.getLogger()
AWSGENIE_SECRET_MANAGER="awsgenie_secret_manager"
SLACK_SECRET_KEY_NAME="slack_url"
SNS_SECRET_KEY_NAME="sns_arn"
AWS_LAMBDA_FUNCTION_NAME = ""
try:
AWS_LAMBDA_FUNCTION_NAME = os.environ['AWS_LAMBDA_FUNCTION_NAME']
except Exception as e:
logger.info("Not running as lambda")
def get_secret(sm_client,secret_key_name):
# if AWS_LAMBDA_FUNCTION_NAME == "":
try:
text_secret_data = ""
get_secret_value_response = sm_client.get_secret_value( SecretId=AWSGENIE_SECRET_MANAGER )
except ClientError as e:
if e.response['Error']['Code'] == 'ResourceNotFoundException':
logger.error("The requested secret " + secret_name + " was not found")
elif e.response['Error']['Code'] == 'InvalidRequestException':
logger.error("The request was invalid due to:", e)
elif e.response['Error']['Code'] == 'InvalidParameterException':
logger.error("The request had invalid params:", e)
# Secrets Manager decrypts the secret value using the associated KMS CMK
# Depending on whether the secret was a string or binary, only one of these fields will be populated
if 'SecretString' in get_secret_value_response:
text_secret_data = json.loads(get_secret_value_response['SecretString']).get(secret_key_name)
else:
#binary_secret_data = get_secret_value_response['SecretBinary']
logger.error("Binary Secrets not supported")
# Your code goes here.
return text_secret_data
# else:
# return ""
def send_slack(slack_url, message):
#make it a NOP if URL is NULL
if slack_url == "":
return
slack_message = {
'text': message
}
req = Request(slack_url, json.dumps(slack_message).encode('utf-8'))
try:
response = urlopen(req)
response.read()
logger.debug("Message posted to slack")
except HTTPError as e:
logger.error("Request failed: %d %s", e.code, e.reason)
logger.error("SLACK_URL= %s", slack_url)
except URLError as e:
logger.error("Server connection failed: %s", e.reason)
logger.error("slack_url= %s", slack_url)
def send_sns(boto3_session, sns_arn, message):
#make it a NOP if URL is NULL
if sns_arn == "":
return
try:
sns_client = boto3_session.client('sns')
response = sns_client.publish(
TopicArn=sns_arn,
Message=message
)
except Exception as e:
logger.error("SNS publish request failed ARN: %s", sns_arn)
logger.error(e)
def display_output(boto3_session, message):
secrets_manager_client = boto3_session.client('secretsmanager')
try:
slack_url='https://' + get_secret(secrets_manager_client, SLACK_SECRET_KEY_NAME)
send_slack(slack_url, message)
except Exception as e:
logger.info("Disabling Slack, URL not found")
try:
sns_arn=get_secret(secrets_manager_client, SNS_SECRET_KEY_NAME)
send_sns(boto3_session, sns_arn, message)
except Exception as e:
logger.info("Disabling SNS, Arn not found")
print(message)
def calc_forecast(boto3_session):
#create the clients we need for ce & org
ce = boto3_session.client('ce')
org = boto3_session.client('organizations')
sts = boto3_session.client('sts')
#initialize the standard filter
not_filter= {
"Not": {
"Dimensions": {
"Key": "RECORD_TYPE",
"Values": [ "Credit", "Refund" ]
}
}
}
utcnow = datetime.datetime.utcnow()
today = utcnow.strftime('%Y-%m-%d')
first_day_of_month = utcnow.strftime('%Y-%m') + "-01"
first_day_next_month = (utcnow + relativedelta(months=1)).strftime("%Y-%m-01")
first_day_prior_month = (utcnow + relativedelta(months=-1)).strftime("%Y-%m-01")
logger.debug("today=",today)
logger.debug("first_day_of_month=",first_day_of_month)
logger.debug("first_day_next_month=",first_day_next_month)
logger.debug("first_day_prior_month=",first_day_prior_month)
#Get total cost_and_usage
results = []
data = ce.get_cost_and_usage(
TimePeriod={'Start': first_day_of_month, 'End': first_day_next_month}
, Granularity='MONTHLY', Metrics=['UnblendedCost'], Filter=not_filter
)
results = data['ResultsByTime']
amount_usage = float(results[0]['Total']['UnblendedCost']['Amount'])
try:
data = ce.get_cost_and_usage(
TimePeriod={'Start': first_day_prior_month, 'End': first_day_of_month}
, Granularity='MONTHLY', Metrics=['UnblendedCost'], Filter=not_filter
)
results = data['ResultsByTime']
amount_usage_prior_month = float(results[0]['Total']['UnblendedCost']['Amount'])
except Exception as e:
amount_usage_prior_month = 0
#Total Forecast
try:
data = ce.get_cost_forecast(
TimePeriod={'Start': today, 'End': first_day_next_month}
, Granularity='MONTHLY', Metric='UNBLENDED_COST', Filter=not_filter
)
amount_forecast = float(data['Total']['Amount'])
except Exception as e:
amount_forecast = amount_usage
forecast_variance = 100
if amount_usage_prior_month > 0 :
forecast_variance = (amount_forecast-amount_usage_prior_month) / amount_usage_prior_month *100
result = {
"account_name": 'Total',
"amount_usage": amount_usage,
"amount_forecast": amount_forecast,
"forecast_variance": forecast_variance
}
output=[]
output.append(result)
    #Get usage cost for all accounts
results = []
next_page_token = None
while True:
if next_page_token:
kwargs = {'NextPageToken': next_page_token}
else:
kwargs = {}
data = ce.get_cost_and_usage(
TimePeriod={'Start': first_day_of_month, 'End': first_day_next_month}
, Granularity='MONTHLY', Metrics=['UnblendedCost'], Filter=not_filter
, GroupBy=[{'Type': 'DIMENSION', 'Key': 'LINKED_ACCOUNT'}]
, **kwargs)
results += data['ResultsByTime']
next_page_token = data.get('NextPageToken')
if not next_page_token:
break
# Print each account
for result_by_time in results:
for group in result_by_time['Groups']:
amount_usage = float(group['Metrics']['UnblendedCost']['Amount'])
linked_account = group['Keys'][0]
#create filter
linked_account_filter = {
"And": [
{
"Dimensions": {
"Key": "LINKED_ACCOUNT",
"Values": [
linked_account
]
}
},
not_filter
]
}
#get prior-month usage, it may not exist
try:
data = ce.get_cost_and_usage(
TimePeriod={'Start': first_day_prior_month, 'End': first_day_of_month}
, Granularity='MONTHLY', Metrics=['UnblendedCost'], Filter=linked_account_filter
)
results = data['ResultsByTime']
amount_usage_prior_month = float(results[0]['Total']['UnblendedCost']['Amount'])
except Exception as e:
amount_usage_prior_month = 0
#Forecast, there maybe insuffcient data on a new account
try:
data = ce.get_cost_forecast(
TimePeriod={'Start': today, 'End': first_day_next_month}
, Granularity='MONTHLY', Metric='UNBLENDED_COST', Filter=linked_account_filter
)
amount_forecast = float(data['Total']['Amount'])
except Exception as e:
amount_forecast = amount_usage
variance = 100
if amount_usage_prior_month > 0 :
variance = (amount_forecast-amount_usage_prior_month) / amount_usage_prior_month *100
try:
account_name=org.describe_account(AccountId=linked_account)['Account']['Name']
            except org.exceptions.AWSOrganizationsNotInUseException:
account_name=linked_account
result = {
"account_name": account_name,
"amount_usage": amount_usage,
"amount_forecast": amount_forecast,
"forecast_variance": variance
}
output.append(result)
return output
def format_rows(output,account_width):
#print the heading
mtd_width=8
forecast_width=8
change_width=6
output_rows=[]
row = {
"Account": 'Account'.ljust(account_width),
"MTD": 'MTD'.rjust(mtd_width),
"Forecast": 'Forecast'.rjust(forecast_width),
"Change": 'Change'.rjust(change_width)
}
output_rows.append(row)
#print in decending order by forecast
lines = sorted(output, key=lambda k: k.get('amount_forecast'), reverse=True)
for line in lines :
if len(lines) == 2 and line.get('account_name') == 'Total':
continue
change = "{0:,.1f}%".format(line.get('forecast_variance'))
row = {
"Account": line.get('account_name')[:account_width].ljust(account_width),
"MTD": "${0:,.0f}".format(line.get('amount_usage')).rjust(mtd_width),
"Forecast": "${0:,.0f}".format(line.get('amount_forecast')).rjust(forecast_width),
"Change": change.rjust(change_width)
}
output_rows.append(row)
return output_rows
def publish_forecast(boto3_session) :
#read params
columns_displayed = ["Account", "MTD", "Forecast", "Change"]
if 'GET_FORECAST_COLUMNS_DISPLAYED' in os.environ:
columns_displayed=os.environ['GET_FORECAST_COLUMNS_DISPLAYED']
columns_displayed = columns_displayed.split(',')
account_width=17
if 'GET_FORECAST_ACCOUNT_COLUMN_WIDTH' in os.environ:
        account_width = int(os.environ['GET_FORECAST_ACCOUNT_COLUMN_WIDTH'])
output = calc_forecast(boto3_session)
formated_rows = format_rows(output, account_width)
message=""
for line in formated_rows :
formated_line=""
for column in columns_displayed :
if formated_line != "" :
formated_line += " "
formated_line += line.get(column)
message += formated_line.rstrip() + "\n"
display_output(boto3_session, message)
def lambda_handler(event, context):
try:
publish_forecast(boto3)
except Exception as e:
print(e)
raise Exception("Cannot connect to Cost Explorer with boto3")
def main():
try:
boto3_session = boto3.session.Session()
if 'GET_FORECAST_AWS_PROFILE' in os.environ:
profile_name=os.environ['GET_FORECAST_AWS_PROFILE']
logger.info("Setting AWS Proflie ="+profile_name)
boto3_session = boto3.session.Session(profile_name=profile_name)
try:
publish_forecast(boto3_session)
except Exception as e:
raise e
except Exception as e:
logger.error(e);
sys.exit(1)
sys.exit(0)
if __name__ == '__main__':
main()
``` |
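For local testing, the script above can also be driven from an ordinary boto3 session instead of Lambda. A hedged sketch (the profile name is a placeholder, and valid Cost Explorer permissions are required; Slack/SNS delivery only fires if the corresponding secrets exist):
```python
import os
import boto3

os.environ["GET_FORECAST_COLUMNS_DISPLAYED"] = "Account,MTD,Forecast,Change"
session = boto3.session.Session(profile_name="my-billing-profile")  # placeholder profile
publish_forecast(session)   # prints the MTD/forecast table to stdout
```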
{
"source": "jimzucker/gitreflog",
"score": 2
} |
#### File: jimzucker/gitreflog/githubreflog.py
```python
import sys, json, requests, subprocess, errno, re, getopt
from colorama import Fore, Back, Style
from subprocess import Popen, PIPE
#from giturlparse import parse
#----------------------------------------------------------------------------------------------------------------------
SHACOLW=7
EVENTCOLW=15
BRANCHCOLW=20
MSGCOLW=60
DFLTCOLW=10
HANDLEDEVENTS=["CreateEvent","DeleteEvent","PushEvent","CommitCommentEvent"]
#SKIPEVENTS=["ForkEvent","IssuesEvent","WatchEvent","IssueCommentEvent","PullRequestEvent","PullRequestReviewCommentEvent"]
#Function to print the output
def filter(filterPattern, p, sha, type, repo, branch, login, message, filler):
if filterPattern == "":
printIt( p, sha, type, repo, branch, login, message, filler)
else:
#apply filter
pattern = re.compile(".*" + filterPattern + ".*")
tempString = sha + " " + type + " " + repo + " " + branch + " " + login + " " + message
match = pattern.search(tempString)
if match is not None:
printIt( p, sha, type, repo, branch, login, message, filler)
return
def printIt( p, sha, type, repo, branch, login, message, filler):
#clean up comments that have \n in them
message = message.replace("\n"," ")
#print Fore.RED+sha[:SHACOLW].ljust(SHACOLW," ")+Fore.RESET,type[:EVENTCOLW].ljust(EVENTCOLW,filler),repo[:DFLTCOLW].ljust(DFLTCOLW,filler),branch[:BRANCHCOLW].ljust(BRANCHCOLW,filler),Fore.BLUE+login.ljust(DFLTCOLW," ")+Fore.RESET,message.ljust(MSGCOLW," ")
line = Fore.RED+sha[:SHACOLW].ljust(SHACOLW," ")+Fore.RESET
line += " " + type[:EVENTCOLW].ljust(EVENTCOLW,filler)
line += " " + repo[:DFLTCOLW].ljust(DFLTCOLW,filler)
line += " " + branch[:BRANCHCOLW].ljust(BRANCHCOLW,filler)
line += " " + Fore.BLUE+login.ljust(DFLTCOLW," ")+Fore.RESET
line += " " + message.ljust(MSGCOLW," ")
line += "\n"
try:
p.stdin.write(line)
except:
# Stop loop on "Invalid pipe" or "Invalid argument".
# No sense in continuing with broken pipe.
exit(1)
return
#----------------------------------------------------------------------------------------------------------------------
#process arguments
gitRemote=subprocess.check_output(["git", "config", "--local", "remote.origin.url"])
urlAttributes = re.match('((git|ssh|http(s)?)|(git@[\w\.]+))(:(//)?)([\w\.@\:/\-~]+)(\.git)(/)?', gitRemote)
#this code will handle parsing all git url formats, but we dont need that right now
#urlAttributes = parse(gitRemote)
#print(json.dumps(urlAttributes, indent=4))
#get the default ID/Pwd
gitUser=""
gitRepo=""
filterPattern=""
try:
if urlAttributes.groups()[0] == "[email protected]" :
gitUser=urlAttributes.groups()[6].split('/')[0]
gitRepo=urlAttributes.groups()[6].split('/')[1]
else:
if urlAttributes.groups()[6].split('/')[0] == "github.com":
gitUser=urlAttributes.groups()[6].split('/')[1]
gitRepo=urlAttributes.groups()[6].split('/')[2]
else:
print urlAttributes.groups()
print 'ERROR: Not a GITHUB repo, other remote repos not currently supported'
sys.exit(2)
except:
pass
#get params
try:
opts, args = getopt.getopt(sys.argv[1:],'u:r:f:h')
except:
print 'githubreflog.py [-u <github user>] [-r <github repo>] [-f <filter pattern>]'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'githubreflog.py [-u <github user>] [-r <github repo>] [-f <filter pattern>]'
sys.exit()
elif opt == '-u':
gitUser = arg
elif opt == '-r':
gitRepo = arg
elif opt == '-f':
filterPattern = arg
#
#build rest call to github
#
url="https://api.github.com/repos/" + gitUser + "/" + gitRepo + "/events?per_page=100"
response = requests.get(url, stream=True)
#open a pipe
p = Popen('less', stdin=PIPE)
#get the total # of pages from the header
try:
numPages = int( response.headers["link"].split(",")[1].split("?")[1].split("&")[1].split("=")[1].split(">")[0])
except:
numPages = 1
for i in range(1,numPages+1):
printIt(p, "sha", "type", "repo", "branch", "login", "message", " ")
if i > 1:
response = requests.get(url+'&page='+str(i), stream=True)
rows=json.loads(response.text)
for r in rows:
type = r.get("type","")
if type not in HANDLEDEVENTS:
continue
repo = r.get("repo","").get("name","").split("/")[1][:DFLTCOLW]
login = r["actor"]["login"][:DFLTCOLW]
#set message to Date/Time for event lines
message = r.get("created_at")
payload = r.get("payload")
if payload :
#CommitCommentEvent has a different payload
if type == "CommitCommentEvent":
comment = payload.get("comment")
sha = comment.get("commit_id")
branch = ""
filter(filterPattern, p, sha, type, repo, branch, login, message, " ")
else:
#Enhance message for a Create Event to show the source branch
if type == "CreateEvent":
message = message + " " + payload.get("ref_type","") + ":" + payload.get("master_branch","")
sha = payload.get("head","")[:SHACOLW]
branch = payload.get("ref")
if branch:
refsplit = branch.split("/")
if len(refsplit) == 3:
branch = refsplit[2]
else:
branch = ""
filter(filterPattern, p, sha, type, repo, branch, login, message," ")
commits = payload.get("commits")
if commits:
for c in commits:
if c:
shaCommit = c["sha"][:SHACOLW]
message = c["message"][:MSGCOLW]
filter(filterPattern, p, shaCommit,"", "", "", login,message,".")
else:
commits = ""
else:
sha = ""
branch = ""
filter(filterPattern, p, sha, type, repo, branch, login, message," ")
#clean up the pipe
p.stdin.close()
p.wait()
exit(0)
``` |
{
"source": "JimzyLui/Network-Survival-Kit",
"score": 3
} |
#### File: JimzyLui/Network-Survival-Kit/clientBrowser.py
```python
import argparse
import urllib.request
import dataCollector
def run(url, verbose_tf):
dataCollector.start()
url = check_url(url)
req = urllib.request.Request(url)
with urllib.request.urlopen(req) as obj_response:
#print(html)
url_real = obj_response.geturl()
info = obj_response.info()
statuscode = obj_response.getcode()
header = obj_response.getheaders()
# build data summary
data = f"URL: {url}\n"
        data += f'Real URL: {url_real}\n'
        data += f'Request Status: {statuscode}\n'
        data += f'Request Header Info: {header}\n'
        data += f'Server Info: {info}\n'
html = obj_response.read(9000).decode('utf-8')
data += f"HTML:\n{html}"
dataCollector.collect(data)
print(data)
def check_url(url):
    if url[:4] == 'http':
return url
    elif url[:3] == 'ftp':
return url
    elif url[:3] == 'udp':
return url
else:
url = 'http://' + url
return url
def print_header(hdr):
# print(hdr)
print('Header: ')
for h in hdr:
print(" {h}".format(h))
def print_rpt_line(port, status):
print("Port {}: {}".format(port, status))
if __name__ == "__main__":
# create parser
parser = argparse.ArgumentParser(
prog="clientBrowser",
description="Pulls html from any domain",
add_help=True
)
parser.add_argument("url", help="Domain or IP address")
parser.add_argument("--verbose", action='store_true', help="Show processing details")
args = parser.parse_args()
verbose = args.verbose
if verbose:
print('url: {}'.format(args.url))
run(args.url, verbose)
```
#### File: JimzyLui/Network-Survival-Kit/portScanner.py
```python
import argparse
import socket
# import sys
import ipMapping
import dataCollector
def run(domain_or_ip, portfrom, portto, verbose_tf):
dataCollector.start()
ip = ipMapping.run(domain_or_ip)
if portto=='':
portto = portfrom
portfrom = 1
summary = ''
for port in range(int(portfrom), int(portto)):
# check the port
rpt_line = scan_port(ip, port, verbose_tf)
summary += rpt_line
dataCollector.collect(summary)
def scan_port(ip, port, verbose_tf):
if verbose_tf:
print('...scanning port {}'.format(port))
# info = socket.getaddrinfo(ip, port)
# set the default timeout to 1 sec
socket.setdefaulttimeout(1)
# create socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# get socket to connect
response_code = s.connect_ex((ip,port))
rpt_line = ''
if response_code==0:
rpt_line = print_rpt_line(port, 'Open')
s.close()
return rpt_line
def print_rpt_line(port, status):
rpt_line = f"Port {port}: {status}"
print(rpt_line)
return rpt_line
if __name__ == "__main__":
# create parser
parser = argparse.ArgumentParser(
prog="portScanner",
description="Scans ports on a host or ip address",
add_help=True
)
parser.add_argument("domain_or_ip", help="Domain or IP address")
parser.add_argument("--portfrom", default='5', help="Starting port or port range starting at 1")
# parser.add_argument("-from", default='5', help="Starting port or port range starting at 1")
parser.add_argument("--portto", default='', help="upper port in port range")
# parser.add_argument("-to", default='', help="upper port in port range")
parser.add_argument("--verbose", action='store_true', help="Show processing details")
args = parser.parse_args()
verbose = args.verbose
if verbose:
print('domain: {}'.format(args.domain_or_ip))
print('portfrom: {}'.format(args.portfrom))
print('portto: {}'.format(args.portto))
# print(args)
run(args.domain_or_ip, args.portfrom, args.portto, verbose)
``` |
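The core check in `scan_port` above is just `connect_ex`; a minimal standalone sketch of the same pattern, scanning only localhost with a short timeout of my choosing:
```python
import socket

socket.setdefaulttimeout(0.5)
for port in (22, 80, 443):
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        if s.connect_ex(("127.0.0.1", port)) == 0:   # 0 means the TCP connect succeeded
            print(f"Port {port}: Open")
```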
{
"source": "JimzyLui/pyattck",
"score": 3
} |
#### File: JimzyLui/pyattck/platformstats.py
```python
import argparse
import sys
import pyattck
def run():
global platformTally
platformTally = {}
att = pyattck.Attck()
for malware in att.malwares:
arrPlatforms = malware.platforms
if arrPlatforms is None:
arrPlatforms = ['None']
tallyPlatformList(arrPlatforms)
# print(platformTally)
printStats(platformTally)
# print out the stats
def printStats(platformTally):
print(f' NUMBER OF')
print(f'PLATFORM MALWARE THREATS')
print(f'________ _______________')
for keys, values in platformTally.items():
print(f"{keys:<12} {values:>6}")
def tallyPlatformList(arrPlatforms):
global platformTally
# arrPlatforms = ['a', 'b', 'c', 'b', 'd', 'c']
# keep track of the number of potential threats to each platform
# platformTally = {}
for p in arrPlatforms:
if p not in platformTally:
platformTally[p] = 1
else:
platformTally[p] += 1
```
#### File: JimzyLui/pyattck/research.py
```python
import argparse
import pyattck
def research(platform, verbose):
platform = platform.lower()
print('entered: ', platform)
att = pyattck.Attck()
# print(att.malwares[0].platforms[0].title()) // this works
for x in att.malwares:
if x.platforms is not None:
arr_platforms = list(map(str.lower, x.platforms))
if arr_platforms.count(platform) > 0:
print('Malware Name: ', x.name)
print('Platforms:', x.platforms)
if verbose:
print('Description: ', x.description, '\n')
if __name__ == "__main__":
# create parser
parser = argparse.ArgumentParser(prog="research", description="")
parser.add_argument("platform",
help="the platform that the malware attacks")
parser.add_argument("--verbose", action="store_true", help="Show details")
args = parser.parse_args()
verbose = args.verbose
if verbose:
        print(f'Running: {parser.prog}')
research(args.platform, verbose)
``` |
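Assuming the `pyattck` package and its ATT&CK data are installed, the module above can be called directly; a one-line sketch:
```python
# Lists malware whose MITRE ATT&CK platform list includes Windows (case-insensitive match).
research("windows", verbose=False)
```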
{
"source": "JimzyLui/twitterScraper",
"score": 3
} |
#### File: JimzyLui/twitterScraper/downloadImage.py
```python
import urllib.request
import os
import sys
def downloadImages(strQueryString, arrUrls):
for url in arrUrls:
downloadImage(strQueryString, url)
def downloadImage(strQueryString, url):
try:
strPath = setup(strQueryString)
print(f"Downloading {url} to {strQueryString}")
image_name = str(url).split('/')[-1]
download_jpg(url, strPath, image_name)
except Exception as error:
print(error)
def download_jpg(url, filePath, fileName):
fullPath = f"{filePath}/{fileName}"
urllib.request.urlretrieve(url, fullPath)
def setup(strQueryString):
dirName = 'images_' + strQueryString.replace(' ', '_')
strPath = f"{dirName}"
try:
os.mkdir(strPath)
except:
pass
return strPath
```
#### File: JimzyLui/twitterScraper/searchForTwitterImages.py
```python
import argparse
import requests
from bs4 import BeautifulSoup
import sys
import moment
from datetime import datetime
import re
import downloadImage
def searchForTwitterImages_interactive(processDownload=False, strMsgIgnore=''):
print(f"** Interactive mode enabled ** .{strMsgIgnore}\r\n")
strQueryString = input('Please enter Twitter search terms: ')
arrSearchTerms = strQueryString.split()
searchForTwitterImages(arrSearchTerms, processDownload)
def searchForTwitterImages(arrSearchTerms, processDownload=False):
url = 'https://twitter.com/search'
userAgent = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36"
strQueryString = ' '.join(arrSearchTerms)
if len(arrSearchTerms) == 0:
print(f"No search terms entered. Exiting program.\r\n")
return
print(f"Searching on: {strQueryString}...\r\n")
response = requests.get(url, params={'q': strQueryString}, headers={
'user-agent': userAgent})
response.encoding = 'utf-8'
soup = BeautifulSoup(response.text, 'html.parser')
arrImages = []
for img in soup.select('div.AdaptiveMedia-photoContainer img'):
if img:
arrImages.append(img['src'])
iResults = len(arrImages)
if iResults > 0:
fileName = print_image_urls(strQueryString, arrImages)
print(f"{len(arrImages)} results found and written to {fileName}\r\n")
if processDownload:
downloadImage.downloadImages(strQueryString, arrImages)
else:
print(f"No results found.")
def print_image_urls(searchTerms, listUrls):
try:
fileName = generateFileName(searchTerms)
strTimestamp = moment.now().format('YYYYMMDD HH:mm')
with open(fileName, 'a') as f:
f.write(
f"[{strTimestamp}] {len(listUrls)} results for Twitter search on: {searchTerms}\r\n")
for url in listUrls:
f.write(f" {url}\r\n")
f.write(f"\r\n")
except IOError as e:
print(f"IO Error: \r\n{e}")
except:
print(f"Unexpected error: {sys.exc_info()[0]}")
raise
finally:
return fileName
def generateFileName(strSearchTerms):
strYYYYMMDD = moment.now().format('YYYYMMDD')
strSearch = strSearchTerms.replace(' ', '_')
fileName = f"{strYYYYMMDD}.search.{strSearch}.txt"
return fileName
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Get profile image URLs from Twitter.')
parser.add_argument('-i', '--interactive',
dest='i',
action='store_true',
help='makes this program interactive where it will prompt the user for input')
parser.add_argument('-s', '--search_terms',
dest='search',
nargs='+',
help='search terms upon which to query twitter')
parser.add_argument('searchDefault', nargs=argparse.REMAINDER)
args = parser.parse_args()
if args.i:
strIgnore = ''
if args.search and len(args.search) > 0:
strIgnore = ' Ignoring command line search criteria.'
        searchForTwitterImages_interactive(strMsgIgnore=strIgnore)
else:
print(f"Search terms: {args}")
if args.search:
searchForTwitterImages(args.search)
elif args.searchDefault:
searchForTwitterImages(args.searchDefault)
else:
print(f"No search terms given. Switching to interactive mode.")
searchForTwitterImages_interactive()
``` |
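A hedged example of calling the scraper above directly (needs network access, and since twitter.com now renders search results with JavaScript, the static HTML may contain no image tags):
```python
searchForTwitterImages(["open", "source"], processDownload=False)
print(generateFileName("open source"))   # e.g. 20240101.search.open_source.txt
```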
{
"source": "Jin02/SOCEngine",
"score": 2
} |
#### File: SOCEngine/Script/FBX-Conv-Launcher.py
```python
import sys
import os, os.path
import subprocess
if sys.version_info < (3,):
range = xrange
def CheckParameter():
fbxConvPath = None
outputType = None
runDir = None
count = len(sys.argv)-1
if count >= 6:
for i in range(1, count):
if sys.argv[i] == "-FBXConvPath":
fbxConvPath = sys.argv[i+1]
elif sys.argv[i] == "-OutputType":
outputType = sys.argv[i+1]
elif sys.argv[i] == "-RunStartDir":
runDir = sys.argv[i+1]
else:
i-=1
i+=1
OutputTypes = ['json', 'binary']
if not outputType in OutputTypes:
outputType = None
result = (fbxConvPath is not None) and (outputType is not None) and (runDir is not None)
return result, fbxConvPath, outputType, runDir
def Dump():
print ("Paramater Error!!\n")
print ("-FBXConvPath \"Your Fbx-conv exe path\" -OutputType json or binary -RunStartDir \"Your Path\"\n")
print ('Example 1 :')
print ("-FBXConvPath ../Tool/fbx-conv.exe -OuputType json -RunStartDir ../../SOCTestProj/Resources\n")
print ("OutputTypes only support the json, binary\n")
return
CONSOLE_LINE = "***********************************************"
print (CONSOLE_LINE + '\n')
print ("SOC Framework FBX-Conv Launcher\n")
result, fbxConvPath, outputType, runStartDir = CheckParameter()
if result == False:
Dump()
print (CONSOLE_LINE)
exit()
if outputType == 'json':
outputType = 'g3dj'
elif outputType == 'binary':
outputType = 'g3db'
else:
assert("invalid output type.")
supported3DFileFormats = ['.obj', '.fbx', '.dae']
targetDir = os.path.normpath(runStartDir)
for (path, dirs, files) in os.walk(targetDir):
for fileNameWithExtension in files:
fileExtension = fileNameWithExtension[fileNameWithExtension.rfind('.'):]
if not (fileExtension.lower() in supported3DFileFormats):
continue
fileName = fileNameWithExtension[:fileNameWithExtension.find('.')]
fileFullPath = path + "\\" + fileNameWithExtension
outputFileNameWithExtension = fileName + "_" + fileExtension[1:] + '.' + outputType
outputFilePath = fileFullPath[:fileFullPath.rfind('\\')] + '\\' + outputFileNameWithExtension
if os.path.isfile(outputFilePath):
continue
command = fbxConvPath + " -b 1024 " " -f " + fileFullPath + ' ' + outputFilePath
print (command)
res = subprocess.run(command)
print ("Generated File : " + outputFileNameWithExtension)
print ("Done!\n")
print (CONSOLE_LINE)
```
#### File: SOCEngine/Script/HeaderOrganizer.py
```python
import sys
import os, os.path
import shutil
if sys.version_info < (3,):
range = xrange
def CheckParameter():
outputPath = None
searchStartDir = None
isIncludeFolder = None
excludePaths = None
count = len(sys.argv)-1
if count >= 8:
for i in range(1, count):
if sys.argv[i] == "-OutputPath":
outputPath = os.path.abspath(sys.argv[i+1])
elif sys.argv[i] == "-SearchStartDir":
searchStartDir = os.path.abspath(sys.argv[i+1])
elif sys.argv[i] == "-IsIncludeFolder":
isIncludeFolder = sys.argv[i+1]
elif sys.argv[i] == "-ExcludePaths":
excludePaths = sys.argv[i+1]
else:
i-=1
i+=1
if isIncludeFolder == "True":
isIncludeFolder = True
elif isIncludeFolder == "False":
isIncludeFolder = False
if excludePaths is not None:
excludePaths = excludePaths.split(',')
if len(excludePaths) == 1 and excludePaths[0].lower() is 'null':
excludePaths = None
else:
for i in list(range(0, len(excludePaths))):
excludePaths[i] = os.path.abspath(excludePaths[i])
result = (outputPath is not None) and (searchStartDir is not None) and (isIncludeFolder is not None)
return result, outputPath, searchStartDir, isIncludeFolder, excludePaths
def Dump():
print ("Paramater Error!!\n")
print ("-OutputPath \'outputpath\' -SearchStartDir \'searchstartDir\' -IsIncludeFolder \'True or False\' -ExcludePaths excludepath\n")
print ('Example 1 :')
print ("-OutputPath ../../Output -SearchStartDir ./Engine -IsIncludeFolder False -ExcludePaths ./Engine/ShaderCodes,./Engine/Scripts \n")
return
CONSOLE_LINE = "***********************************************"
print (CONSOLE_LINE + '\n')
print ("SOC Framework HeaderOrganizer\n")
result, outputPath, searchStartDir, isIncludeFolder, excludePaths = CheckParameter()
if result == False:
Dump()
print (CONSOLE_LINE)
exit()
headerFormat = ['.h', '.hpp', '.inl']
def MakeDirectoryPiramid(path):
folders = path.split('\\')
folders.reverse()
for i in list(range(1, len(folders))):
invIdx = len(folders) - i
folders[invIdx - 1] = folders[invIdx] + '\\' + folders[invIdx - 1]
folders.reverse()
return folders
# Clear Output Header Folder
if os.path.exists(outputPath):
shutil.rmtree(outputPath, ignore_errors=True)
os.makedirs(outputPath)
targetDir = os.path.normpath(searchStartDir)
for (path, dirs, files) in os.walk(targetDir):
for fileNameWithExtension in files:
        if excludePaths and path in excludePaths:
continue
fileExtension = fileNameWithExtension[fileNameWithExtension.rfind('.'):]
if not (fileExtension.lower() in headerFormat):
continue
fileFullPath = path + "\\" + fileNameWithExtension
saveFilePath = ""
if isIncludeFolder:
relativePath = path[len(searchStartDir)+1:]
saveFolderPath = outputPath + '\\' + relativePath
saveFilePath = saveFolderPath + '\\' + fileNameWithExtension
# print saveFolderPath
folders = MakeDirectoryPiramid(saveFolderPath)
for folderPath in folders:
if not os.path.exists(folderPath):
os.makedirs(folderPath)
shutil.copy(fileFullPath, saveFilePath)
else:
saveFilePath = outputPath + '\\' + fileNameWithExtension
shutil.copy(fileFullPath, saveFilePath)
print (fileFullPath + " -> " + saveFilePath)
print ("\nDone!\n")
print (CONSOLE_LINE)
```
#### File: SOCEngine/Script/ShaderFactoryCodeGenerator.py
```python
import sys
import os, os.path
import json
import platform
if sys.version_info < (3,):
range = xrange
def tap(num):
return '\t'*num
def nextLine(num):
return '\n'*num
class ShaderFactory:
originFileDir = None
saveDir = None
addCodeBeginCommand = "/** Script Begin **/"
addCodeEndCommand = "/** Script End **/"
addCodeFullPathBeginCommand = "/** FullPath Script Begin **/"
addCodeFullPathEndCommand = "/** FullPath Script End **/"
def __init__(self, originFileDir, saveDir):
self.originFileDir = originFileDir
self.saveDir = saveDir
return
def Run(self, code, className, fullPaths):
factoryFile = open(self.originFileDir, 'r', encoding='utf-8')
source = factoryFile.read()
factoryFile.close()
source = source.replace("[ClassName]", className)
begin = source.find(self.addCodeBeginCommand) + len(self.addCodeBeginCommand)
end = source.find(self.addCodeEndCommand)
source = source[:begin] + code + tap(4) + source[end:]
begin = source.find(self.addCodeFullPathBeginCommand) + len(self.addCodeFullPathBeginCommand)
end = source.find(self.addCodeFullPathEndCommand)
pathCode = "\n"
isFirst = True
for component in fullPaths:
pathCode += tap(4)
if isFirst:
isFirst = False
else:
pathCode += "else "
pathCode += "if(fileName == \"" + component["fileName"] + "\")" + nextLine(1)
pathCode += tap(5) + "out = \"" + component["fullPath"] + "\";" + nextLine(1)
source = source[:begin] + pathCode + tap(4) + source[end:]
factoryFile = open(self.saveDir, 'w')
factoryFile.write(source)
factoryFile.close()
def isInt(s):
try:
int(s)
return True
except ValueError:
return False
def readJson(filename):
f = open(filename, 'r', encoding='utf-8')
js = json.loads(f.read())
f.close()
return js
def QuotationMarks(code):
code = "\"" + code +"\""
return code
def CheckParameter():
originFactory = None
saveFactory = None
runDir = None
className = None
count = len(sys.argv)-1
if count >= 6:
for i in range(1, count):
if sys.argv[i] == "-OriginalShaderFactoryFile":
originFactory = sys.argv[i+1]
elif sys.argv[i] == "-SaveShaderFactoryFile":
saveFactory = sys.argv[i+1]
elif sys.argv[i] == "-ScriptRunStartDir":
runDir = sys.argv[i+1]
elif sys.argv[i] == "-ClassName":
className = sys.argv[i+1]
else:
i-=1
i+=1
if className == None:
className = "Factory"
result = (originFactory != None and saveFactory != None and runDir != None)
return result, originFactory, saveFactory, runDir, className
def Dump():
print ("\nParamater Error!!\n")
print ('Example 1 :')
print ("-OriginalShaderFactoryFile ./origin.hpp -SaveShaderFactoryFile ./save.hpp -ScriptRunStartDir ./Shader -ClassName Factory\n")
CONSOLE_LINE = "***********************************************"
print (CONSOLE_LINE + '\n')
print ("SOC Framework ShaderFactoryCode Generator")
result, originalShaderFactoryFileDir, saveShaderFactoryFileDir, scriptRunStartDir, className = CheckParameter()
if result == False:
Dump()
print (CONSOLE_LINE)
exit()
code = ""
targetDir = os.path.normpath(scriptRunStartDir)
fullPaths = []
for (path, dirs, files) in os.walk(targetDir):
for fileNameWithExtension in files:
dirToken = '/'
if platform.system() == 'Windows':
dirToken = '\\'
fileFullPath = path + dirToken + fileNameWithExtension
extensionPos = fileNameWithExtension.rfind('.')
fileExtension = fileNameWithExtension[extensionPos:]
fileName = fileNameWithExtension[:extensionPos]
if fileExtension != ".metadata":
continue
path = path.replace("\\", "/")
component = {"fileName" : fileName, "fullPath" : path + "/" + fileName + ".hlsl"}
fullPaths.append(component)
print ("Conveeeerting : " + fileName)
jsonData = readJson(fileFullPath)
if len(jsonData) == 0:
code += nextLine(2) + tap(4) + "if(shaderName == \"" + fileName + "\")" + nextLine(1)
code += tap(4) + "{" + nextLine(1)
code += tap(5) + "folderPath = \""+ path +"/\";" + nextLine(1)
code += tap(5) + "isOnlyHasPath = true;" + nextLine(1)
code += tap(4) + "}" + nextLine(1)
continue
mainFuncs = dict()
for structName in jsonData["SemanticStructure"]:
mainFuncs[structName] = list()
for mainFunc in jsonData["MainFunctions"]:
structName = jsonData["MainFunctions"][mainFunc]
if (structName in mainFuncs) == True:
mainFuncs[structName].append(mainFunc)
#write Code
        #write Code
code += tap(5) + "folderPath = \""+ path +"/\";" + nextLine(1)
for struct in jsonData["SemanticStructure"]:
            if struct not in mainFuncs:
continue
count = len(mainFuncs[struct])
if count == 0:
continue
code += tap(5) + "if(mainVSFuncName == " + QuotationMarks(mainFuncs[struct][0])
for i in range(1, count):
code += "|| mainVSFuncName == " + QuotationMarks(mainFuncs[struct][i])
code += ")" + nextLine(1)
code += tap(5) + "{" + nextLine(1)
for idx in range(0, len(jsonData["SemanticStructure"][struct])):
element = jsonData["SemanticStructure"][struct][str(idx)]
# Make InputSlotClass
inputSlotClass = "D3D11_INPUT_PER_"
inputSlot = 0
instanceDataStepRate = 0
if element["UsingType"] == "VERTEX":
inputSlotClass += "VERTEX_DATA"
#inputSlot = 0, instanceDataStepRate = 0
elif element["UsingType"] == "INSTANCE":
inputSlotClass += "INSTANCE_DATA"
inputSlot = 1
instanceDataStepRate = 1
# end
# Make Foramt
formatSize = element["Format"][len(element["Format"])-1]
format = "DXGI_FORMAT_"
if formatSize == '1' or isInt(formatSize) == False:
format += "R32_"
elif formatSize == '2':
format += "R32G32_"
elif formatSize == '3':
format += "R32G32B32_"
elif formatSize == '4':
format += "R32G32B32A32_"
dataType = element["Format"][0]
if dataType == 'f': #float
format += "FLOAT"
elif dataType == 'u': #uint
format += "UINT"
elif dataType == 'i': #int
format += "INT"
#end
#name, index, format, offset, slotClass, slot, steprate
code += tap(6)
code += "AddInputElementDesc(" + '\"' + element["SemanticName"] + '\"' + ','
code += str(element["SemanticIndex"]) + ',' + format + ',' + str(element["AlignedByteOffset"]) + ','
code += inputSlotClass + ',' + str(inputSlot) + ',' + str(instanceDataStepRate) + ");"
code += nextLine(1)
code += tap(5) + '}' + nextLine(1)
code += tap(4) + '}' + nextLine(1) + tap(4)
for structName in jsonData["SemanticStructure"]:
del mainFuncs[structName]
del mainFuncs
shaderFactory = ShaderFactory(originalShaderFactoryFileDir, saveShaderFactoryFileDir)
shaderFactory.Run(code, className, fullPaths)
print ("Success!\n")
print (CONSOLE_LINE)
``` |
{
"source": "Jin0331/D-SCRIPT",
"score": 2
} |
#### File: dscript/legacy/contact_legacy.py
```python
import torch
import torch.nn as nn
import torch.functional as F
class FullyConnected(nn.Module):
"""
Performs part 1 of Contact Prediction Module. Takes embeddings from Projection module and produces broadcast tensor.
Input embeddings of dimension :math:`d` are combined into a :math:`2d` length MLP input :math:`z_{cat}`, where :math:`z_{cat} = [z_0 \\ominus z_1 | z_0 \\odot z_1]`
:param embed_dim: Output dimension of `dscript.models.embedding <#module-dscript.models.embedding>`_ model :math:`d` [default: 100]
:type embed_dim: int
:param hidden_dim: Hidden dimension :math:`h` [default: 50]
:type hidden_dim: int
:param activation: Activation function for broadcast tensor [default: torch.nn.ReLU()]
:type activation: torch.nn.Module
"""
def __init__(self, embed_dim, hidden_dim, activation=nn.ReLU()):
super(FullyConnected, self).__init__()
self.D = embed_dim
self.H = hidden_dim
self.conv = nn.Conv2d(2 * self.D, self.H, 1)
self.batchnorm = nn.BatchNorm2d(self.H)
self.activation = activation
def forward(self, z0, z1):
"""
:param z0: Projection module embedding :math:`(b \\times N \\times d)`
:type z0: torch.Tensor
:param z1: Projection module embedding :math:`(b \\times M \\times d)`
:type z1: torch.Tensor
:return: Predicted broadcast tensor :math:`(b \\times N \\times M \\times h)`
:rtype: torch.Tensor
"""
# z0 is (b,N,d), z1 is (b,M,d)
z0 = z0.transpose(1, 2)
z1 = z1.transpose(1, 2)
# z0 is (b,d,N), z1 is (b,d,M)
z_dif = torch.abs(z0.unsqueeze(3) - z1.unsqueeze(2))
z_mul = z0.unsqueeze(3) * z1.unsqueeze(2)
z_cat = torch.cat([z_dif, z_mul], 1)
b = self.conv(z_cat)
b = self.activation(b)
b = self.batchnorm(b)
return b
class ContactCNN(nn.Module):
"""
Residue Contact Prediction Module. Takes embeddings from Projection module and produces contact map, output of Contact module.
:param embed_dim: Output dimension of `dscript.models.embedding <#module-dscript.models.embedding>`_ model :math:`d` [default: 100]
:type embed_dim: int
:param hidden_dim: Hidden dimension :math:`h` [default: 50]
:type hidden_dim: int
:param width: Width of convolutional filter :math:`2w+1` [default: 7]
:type width: int
:param activation: Activation function for final contact map [default: torch.nn.Sigmoid()]
:type activation: torch.nn.Module
"""
def __init__(
self, embed_dim=100, hidden_dim=50, width=7, activation=nn.Sigmoid()
):
super(ContactCNN, self).__init__()
self.hidden = FullyConnected(embed_dim, hidden_dim)
self.conv = nn.Conv2d(hidden_dim, 1, width, padding=width // 2)
self.batchnorm = nn.BatchNorm2d(1)
self.activation = activation
self.clip()
def clip(self):
"""
Force the convolutional layer to be transpose invariant.
:meta private:
"""
w = self.conv.weight
self.conv.weight.data[:] = 0.5 * (w + w.transpose(2, 3))
def forward(self, z0, z1):
"""
:param z0: Projection module embedding :math:`(b \\times N \\times d)`
:type z0: torch.Tensor
:param z1: Projection module embedding :math:`(b \\times M \\times d)`
:type z1: torch.Tensor
:return: Predicted contact map :math:`(b \\times N \\times M)`
:rtype: torch.Tensor
"""
B = self.broadcast(z0, z1)
return self.predict(B)
def broadcast(self, z0, z1):
"""
Calls `dscript.models.contact.FullyConnected <#module-dscript.models.contact.FullyConnected>`_.
:param z0: Projection module embedding :math:`(b \\times N \\times d)`
:type z0: torch.Tensor
:param z1: Projection module embedding :math:`(b \\times M \\times d)`
:type z1: torch.Tensor
:return: Predicted contact broadcast tensor :math:`(b \\times N \\times M \\times h)`
:rtype: torch.Tensor
"""
B = self.hidden(z0, z1)
return B
def predict(self, B):
"""
Predict contact map from broadcast tensor.
:param B: Predicted contact broadcast :math:`(b \\times N \\times M \\times h)`
:type B: torch.Tensor
:return: Predicted contact map :math:`(b \\times N \\times M)`
:rtype: torch.Tensor
"""
C = self.conv(B)
C = self.batchnorm(C)
C = self.activation(C)
return C
```
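A minimal shape check for the legacy contact module above, using random tensors in place of real language-model embeddings:
```python
import torch

model = ContactCNN(embed_dim=100, hidden_dim=50, width=7)
z0 = torch.randn(2, 30, 100)   # batch of 2, first protein with 30 residues
z1 = torch.randn(2, 45, 100)   # batch of 2, second protein with 45 residues
C = model(z0, z1)
print(C.shape)                 # torch.Size([2, 1, 30, 45]) -> one contact map per pair
```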
#### File: D-SCRIPT/dscript/utils.py
```python
from __future__ import print_function, division
import torch
import torch.utils.data
import numpy as np
import pandas as pd
import subprocess as sp
import sys
import gzip as gz
from datetime import datetime
def log(m, file=None, timestamped=True, print_also=False):
curr_time = f"[{datetime.now().strftime('%Y-%m-%d-%H:%M:%S')}] "
log_string = f"{curr_time if timestamped else ''}{m}"
if file is None:
print(log_string)
else:
print(log_string, file=file)
if print_also:
print(log_string)
def RBF(D, sigma=None):
"""
Convert distance matrix into similarity matrix using Radial Basis Function (RBF) Kernel.
:math:`RBF(x,x') = \\exp{\\frac{-(x - x')^{2}}{2\\sigma^{2}}}`
:param D: Distance matrix
:type D: np.ndarray
:param sigma: Bandwith of RBF Kernel [default: :math:`\\sqrt{\\text{max}(D)}`]
:type sigma: float
:return: Similarity matrix
:rtype: np.ndarray
"""
sigma = sigma or np.sqrt(np.max(D))
return np.exp(-1 * (np.square(D) / (2 * sigma ** 2)))
class PairedDataset(torch.utils.data.Dataset):
"""
Dataset to be used by the PyTorch data loader for pairs of sequences and their labels.
:param X0: List of first item in the pair
:param X1: List of second item in the pair
:param Y: List of labels
"""
def __init__(self, X0, X1, Y):
self.X0 = X0
self.X1 = X1
self.Y = Y
assert len(X0) == len(X1), (
"X0: "
+ str(len(X0))
+ " X1: "
+ str(len(X1))
+ " Y: "
+ str(len(Y))
)
assert len(X0) == len(Y), (
"X0: "
+ str(len(X0))
+ " X1: "
+ str(len(X1))
+ " Y: "
+ str(len(Y))
)
def __len__(self):
return len(self.X0)
def __getitem__(self, i):
return self.X0[i], self.X1[i], self.Y[i]
def collate_paired_sequences(args):
"""
Collate function for PyTorch data loader.
"""
x0 = [a[0] for a in args]
x1 = [a[1] for a in args]
y = [a[2] for a in args]
return x0, x1, torch.stack(y, 0)
``` |
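Toy usage of the helpers above (all data below is invented, purely to show the shapes and call pattern):
```python
import numpy as np
import torch
from torch.utils.data import DataLoader

S = RBF(np.random.rand(5, 5) * 10)   # distance matrix -> similarities in (0, 1]

pairs = PairedDataset(["A", "B", "C"], ["X", "Y", "Z"], torch.tensor([1.0, 0.0, 1.0]))
loader = DataLoader(pairs, batch_size=2, collate_fn=collate_paired_sequences)
for x0, x1, y in loader:
    print(x0, x1, y)
```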
{
"source": "jin0g/soundset",
"score": 3
} |
#### File: soundset/soundset/core.py
```python
import numpy as np
import random
# from .helper import default_path
def default_path(filename):
import os
return os.path.dirname(os.path.abspath(__file__)) + '/' + filename
center = 60
class score:
# generator
@classmethod
def random(cls,length,tempo=80,beat=8,chord=1,pitch=3,register=25,random_state=None,minnote=None,maxnote=None):
# minnote <= note < maxnote
if minnote is None: minnote = 12 + 12*pitch
if maxnote is None: maxnote = minnote + register
# note candidates in register
candidates = list( range(minnote, maxnote) )
# generate notes
random.seed(random_state)
notes = [sorted(random.sample(candidates, chord)) for _ in range(length)]
# create class and return
return cls(notes,base=minnote,high=maxnote,tempo=tempo,beat=beat)
def __init__(self, notes, base, high, tempo, beat):
self.notes = notes
self.base = base # including
self.high = high # excluding
self.tempo = tempo
self.beat = beat
def to_roll(self, ignore_out_of_range=False):
# zero array
roll = np.zeros((len(self.notes), 128), int)
# pin flg to roll
for i, ns in enumerate(self.notes):
roll[i][ns] = 1
# mask
if ignore_out_of_range:
roll = roll[:, self.base:self.high]
# return
return roll
# create wave data
# policy: notes -prettymidi-> midi -fluidsynth-> wav -scipy.waveform-> numpy array
def to_wave(self, instrument,font=None,stereo=False,rate=44100,mono_dim2=False,clip=True):
# find default soundfont if needed
if font is None: font = default_path('TimGM6mb.sf2')
assert 0<=instrument and instrument<128
# 1.create midi file
from pretty_midi import PrettyMIDI, Instrument, Note
midi = PrettyMIDI(resolution=960, initial_tempo=self.tempo)
inst = Instrument(instrument)
reso = 60/self.tempo*4/self.beat
for i,ns in enumerate(self.notes):
for n in ns:
inst.notes.append(Note(velocity=100, pitch=n, start=i*reso, end=i*reso+reso))
midi.instruments.append(inst)
midi.write('temp.mid')
# 2.create wave file
from midi2audio import FluidSynth
fs = FluidSynth(font,sample_rate=rate)
fs.midi_to_audio('temp.mid', 'temp.wav')
# 3.import wav file
from scipy.io import wavfile
_, wave = wavfile.read('temp.wav')
# clip
if clip:
le = len(self.notes)
wave = wave[:int(rate*reso*le)]
wave = wave.astype(float) / abs(wave).max() * 0.9
return wave
# import random
# from scipy.io import wavfile
# import numpy as np
# import os
# class score
# instruments = {
# 'piano': [ 0, 'piano', 'C4', 24],
# 'oboe': [69, 'oboe', 'C4', 24],
# 'guitar': [24, 'guitar', 'C3', 24],
# 'base': [34, 'base', 'C2', 24],
# }
# # with instrument name
# def get_wave(inst, tempo, beat):
# number, filename, lo_code, ncode = instruments[inst]
# return load_instrument(filename, ncode)
# # return rate, np.array(code, time, 2-channel)
# def load_instrument(filename, ncode=24):
# absfname = os.path.abspath(os.path.dirname(__file__)) + '/instruments/' + filename
# rate, wave = wavfile.read(absfname)
# wave = wave[:rate*24].reshape(ncode, rate, 2)
# return rate, wave
# # synthesis score
# # tempo=120 beat=4 fixed
# def synthesis(wave, score, tempo=120, beat=4):
# _,rate,channel = wave.shape
# output = np.zeros((int(rate * (len(score) + 1) / 2), channel))
# for i, code in enumerate(score):
# output[int(rate*i/2):int(rate*(1+i/2))] = wave[code].sum(axis=0)
# return output[:int(rate * len(score) / 2)]
# # transeform score to piano roll
# # return (time, key)
# def piano_roll(score, ncode=24):
# roll = np.zeros((len(score),ncode))
# for i, code in enumerate(score):
# roll[i][code] = 1
# return roll
# # generate random score
# def random_score(length, nmin=1, nmax=3, ncode=24):
# codes = range(ncode)
# score = [random.sample(codes, random.randint(nmin, nmax)) for _ in range(length)]
# return score
# def random_score_possible_melody(length, ncode=24):
# pass
# def random_score_possible_chord(length, ncode=24):
# pass
``` |
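A short sketch of the public surface above: generate a random two-note-chord score and inspect its piano roll (rendering audio via `to_wave` additionally needs FluidSynth and a soundfont installed):
```python
sc = score.random(length=8, chord=2, random_state=42)
roll = sc.to_roll(ignore_out_of_range=True)
print(roll.shape)   # (8, 25): 8 steps over the default 25-note register
```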
{
"source": "jin10086/browser-environment-test",
"score": 3
} |
#### File: jin10086/browser-environment-test/getEnvironment.py
```python
from selenium import webdriver
import asyncio
from pyppeteer import launch
import time
def selenium_html():
browser = webdriver.Chrome()
browser.get("http://localhost/")
time.sleep(100)
async def pyppeteer_html():
browser = await launch({"headless": False})
page = await browser.newPage()
await page.goto("http://localhost/")
await asyncio.sleep(100)
if __name__ == "__main__":
# asyncio.run(pyppeteer_html())
selenium_html()
``` |
{
"source": "jin10086/fcoinBackup",
"score": 3
} |
#### File: fcoin/spiders/zendesk.py
```python
import scrapy
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
import os
class ZendeskSpider(CrawlSpider):
name = "zendesk"
allowed_domains = ["fcoin.zendesk.com"]
start_urls = ["https://fcoin.zendesk.com/hc/zh-cn"]
rules = (
Rule(LinkExtractor(allow=("categories",), deny=("en-us",))),
Rule(LinkExtractor(allow=("articles",)), callback="parse_detail"),
)
def parse_detail(self, response):
title = response.css("title::text").get()
fname = os.path.join("rawHtml", title + ".html")
with open(fname, "wb") as f:
f.write(response.body)
``` |
{
"source": "jin10086/ftx-monitor",
"score": 2
} |
#### File: src/eth/eventMonitor.py
```python
from web3 import Web3, WebsocketProvider
import json
from sendMail import sendMail
from diskcache import Index
result = Index("data/result")
import os, sys, time
w3 = Web3(
WebsocketProvider(
"wss://mainnet.infura.io/ws/v3/cd42b3642f1441629f66000f8e544d5d",
websocket_timeout=30,
)
)
with open("erc20.json") as f:
erc20abi = json.loads(f.read())
comp = w3.eth.contract(
address="0xc00e94Cb662C3520282E6f5717214004A7f26888", abi=erc20abi
)
def go():
a1 = comp.events.Transfer.createFilter(fromBlock="latest", toBlock="pending")
print("开始检测大于500的comp转账")
while True:
c = a1.get_new_entries()
for i in c:
amount = i["args"]["amount"]
amount = w3.fromWei(amount, "ether")
if amount < 500: # 大于1000 alarm
continue
f = i["args"]["from"]
if f == "0x8248C5709b0835366821d0cAe83bdB7e2cf66a53":
continue
to = i["args"]["to"]
txhash = w3.toHex(i["transactionHash"])
msg = f"""发送者:{f}
接收者:{to}
金额:{amount}
txhash:https://cn.etherscan.com/tx/{txhash}
"""
print("发送邮件中...")
if "txs" in result:
txhashs = result["txs"]
if txhash not in txhashs:
txhashs.append(txhash)
# sendMail(
# "发现超过500COMP的转账!", msg, ["igao<EMAIL>", "<EMAIL>"]
# )
print(msg)
else:
txhashs = [txhash]
result["txs"] = txhashs
# sendMail(
# "发现超过500COMP的转账!", msg, ["<EMAIL>", "<EMAIL>"]
# )
print(msg)
def main():
print("AutoRes is starting")
go()
executable = sys.executable
args = sys.argv[:]
print(args)
args.insert(0, sys.executable)
time.sleep(1)
print("Respawning")
os.execvp(executable, args)
if __name__ == "__main__":
main()
```
#### File: ftx-monitor/src/sendMail.py
```python
import yagmail
from conf import emailUser, emailPassword
yag = yagmail.SMTP(user=emailUser, password=<PASSWORD>, host="smtp.163.com")
def sendMail(subject, contents, to):
yag.send(to, subject, contents)
if __name__ == "__main__":
pass
``` |
{
"source": "jin10086/ico-spider",
"score": 2
} |
#### File: icospider/spiders/contract.py
```python
import json
import logging
import os
import re
import scrapy
from scrapy.shell import inspect_response
logger = logging.getLogger(__name__)
class ContractSpider(scrapy.Spider):
name = "contracts"
addressUrl = "https://etherscan.io/address/{}"
allowed_domains = ["etherscan.io"]
current_path = os.path.abspath(__file__)
father_path = os.path.abspath(os.path.dirname(current_path) + os.path.sep + ".")
p = os.path.join(father_path, "contractAddress-20180922-100358.json")
start_urls = []
with open(p) as f:
for i in f.readlines():
start_urls.append(addressUrl.format(json.loads(i)["address"]))
custom_settings = {
"AUTOTHROTTLE_ENABLED": True,
"LOG_LEVEL": "INFO",
"COOKIES_ENABLED": False,
}
addressR = re.compile(r"address/(\S+)")
def parse(self, response):
"Sorry, You have reached your maximum request limit for this resource "
if "maximum request limit" in response.body_as_unicode():
# inspect_response(response, self)
# 重试.
request = response.request
if "retry_times_etherscan" in request.meta:
retry_times_etherscan = request.meta["retry_times_etherscan"]
else:
retry_times_etherscan = 0
retryreq = request.copy()
retryreq.dont_filter = True
retryreq.meta["retry_times_etherscan"] = retry_times_etherscan + 1
retryreq.priority = request.priority + -1
logger.info(
"Retrying %(request)s (failed %(retries)d times)",
{"request": request, "retries": retry_times_etherscan + 1},
)
return retryreq
address = response.url.split("address/")[-1]
name = response.xpath('//a[@data-placement="bottom"]/text()').extract_first()
code = response.css("#editor::text").extract_first()
yield {"name": name, "address": address, "code": code}
``` |
{
"source": "jin10086/pools",
"score": 3
} |
#### File: pools/spider/wayi.py
```python
import requests
from .ut import logger
import time, json
s = requests.Session()
s.headers = {
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36"
}
merchant = "wayi"
@logger.catch
def getdata():
url = "https://api.wayi.cn/hash/getHashPlan/multi"
logger.info(f"get url {url}")
pageNO = 1
ret_data = []
while True:
logger.info(f"get page {pageNO}")
reqData = {
"pageNO": pageNO,
"pageSize": 10,
"search": '{"coinType":"2","status":1,"productType":"110","hash_status":1}',
}
z1 = s.post(url, json=reqData)
data = z1.json()["body"]
if z1.json()["type"]:
ret_data.extend(data["result"].copy())
if data["pageTotal"] > data["pageNum"]:
pageNO = data["pageNum"] + 1
continue
break
return ret_data
if __name__ == "__main__":
data = getdata()
with open(f"demo/{merchant}.json", "w") as f:
f.write(json.dumps(data))
``` |
{
"source": "jin10086/py-evm",
"score": 2
} |
#### File: fixtures/fillers/_utils.py
```python
import copy
import random
from eth_utils import (
int_to_big_endian,
)
from eth.db.backends.memory import MemoryDB
from eth.utils.db import (
apply_state_dict,
)
from eth.utils.padding import (
pad32,
)
from eth_keys import keys
def wrap_in_list(item):
return [item]
def add_transaction_to_group(group, transaction):
for key in ["gasPrice", "nonce", "secretKey", "to"]:
if key in transaction and transaction[key] != group[key]:
raise ValueError("Can't add transaction as it differs in {}".format(key))
new_group = copy.deepcopy(group)
indexes = {}
for key, index_key in [("data", "data"), ("gasLimit", "gas"), ("value", "value")]:
if key in group:
if key not in transaction:
if len(new_group[key]) != 1:
raise ValueError("Can't add transaction as {} is ambiguous".format(key))
index = 0
else:
if transaction[key] not in new_group[key]:
new_group[key].append(transaction[key])
index = new_group[key].index(transaction[key])
indexes[index_key] = index
else:
assert key not in transaction
return new_group, indexes
def calc_state_root(state, account_db_class):
account_db = account_db_class(MemoryDB())
apply_state_dict(account_db, state)
return account_db.state_root
def generate_random_keypair():
key_object = keys.PrivateKey(pad32(int_to_big_endian(random.getrandbits(8 * 32))))
return key_object.to_bytes(), key_object.public_key.to_canonical_address()
def generate_random_address():
_, address = generate_random_keypair()
return address
```
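For readers unfamiliar with these filler helpers, here is a small illustrative sketch of what `add_transaction_to_group` returns; the group and transaction values below are invented for the example and are not taken from the fixtures.

```python
# Hypothetical group and transaction; only "data" differs from what the group already holds.
group = {
    "gasPrice": 1, "nonce": 0, "secretKey": b"\x00" * 32, "to": b"\x01" * 20,
    "data": [b""], "gasLimit": [21000], "value": [0],
}
transaction = {
    "gasPrice": 1, "nonce": 0, "secretKey": b"\x00" * 32, "to": b"\x01" * 20,
    "data": b"\x01", "gasLimit": 21000, "value": 0,
}

new_group, indexes = add_transaction_to_group(group, transaction)
assert new_group["data"] == [b"", b"\x01"]           # unseen payload gets appended
assert indexes == {"data": 1, "gas": 0, "value": 0}  # positions of this transaction's values
```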
#### File: fixtures/fillers/vm.py
```python
from eth.tools.fixtures.helpers import (
get_test_name,
)
from eth.tools.fixtures.normalization import (
normalize_bytes,
normalize_call_creates,
normalize_environment,
normalize_execution,
normalize_int,
normalize_logs,
normalize_state,
)
from eth.tools._utils.hashing import hash_log_entries
from eth.tools._utils.mappings import deep_merge
def fill_vm_test(
filler,
*,
call_creates=None,
gas_price=None,
gas_remaining=0,
logs=None,
output=b""
):
test_name = get_test_name(filler)
test = filler[test_name]
environment = normalize_environment(test["env"])
pre_state = normalize_state(test["pre"])
execution = normalize_execution(test["exec"])
assert len(test["expect"]) == 1
expect = test["expect"][0]
assert "network" not in test
assert "indexes" not in test
result = normalize_state(expect["result"])
post_state = deep_merge(pre_state, result)
call_creates = normalize_call_creates(call_creates or [])
gas_remaining = normalize_int(gas_remaining)
output = normalize_bytes(output)
logs = normalize_logs(logs or [])
log_hash = hash_log_entries(logs)
return {
test_name: {
"env": environment,
"pre": pre_state,
"exec": execution,
"post": post_state,
"callcreates": call_creates,
"gas": gas_remaining,
"output": output,
"logs": log_hash,
}
}
```
#### File: tools/_utils/hashing.py
```python
from eth_hash.auto import keccak
import rlp
from eth.rlp.logs import Log
def hash_log_entries(log_entries):
"""
Helper function for computing the RLP hash of the logs from transaction
execution.
"""
logs = [Log(*entry) for entry in log_entries]
encoded_logs = rlp.encode(logs)
logs_hash = keccak(encoded_logs)
return logs_hash
```
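A minimal usage sketch of the helper above, assuming each entry is an `(address, topics, data)` tuple matching the `Log` sedes (the values are invented):

```python
from eth.tools._utils.hashing import hash_log_entries

# One made-up log entry: 20-byte address, a single integer topic, empty data.
entries = [(b"\x00" * 20, [0], b"")]
log_hash = hash_log_entries(entries)
assert len(log_hash) == 32  # keccak-256 digest of the RLP-encoded log list
```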
#### File: vm/logic/comparison.py
```python
from eth import constants
from eth.utils.numeric import (
signed_to_unsigned,
unsigned_to_signed,
)
def lt(computation):
"""
Lesser Comparison
"""
left, right = computation.stack_pop(num_items=2, type_hint=constants.UINT256)
if left < right:
result = 1
else:
result = 0
computation.stack_push(result)
def gt(computation):
"""
Greater Comparison
"""
left, right = computation.stack_pop(num_items=2, type_hint=constants.UINT256)
if left > right:
result = 1
else:
result = 0
computation.stack_push(result)
def slt(computation):
"""
Signed Lesser Comparison
"""
left, right = map(
unsigned_to_signed,
computation.stack_pop(num_items=2, type_hint=constants.UINT256),
)
if left < right:
result = 1
else:
result = 0
computation.stack_push(signed_to_unsigned(result))
def sgt(computation):
"""
Signed Greater Comparison
"""
left, right = map(
unsigned_to_signed,
computation.stack_pop(num_items=2, type_hint=constants.UINT256),
)
if left > right:
result = 1
else:
result = 0
computation.stack_push(signed_to_unsigned(result))
def eq(computation):
"""
Equality
"""
left, right = computation.stack_pop(num_items=2, type_hint=constants.UINT256)
if left == right:
result = 1
else:
result = 0
computation.stack_push(result)
def iszero(computation):
"""
Is Zero
"""
value = computation.stack_pop(type_hint=constants.UINT256)
if value == 0:
result = 1
else:
result = 0
computation.stack_push(result)
def and_op(computation):
"""
Bitwise And
"""
left, right = computation.stack_pop(num_items=2, type_hint=constants.UINT256)
result = left & right
computation.stack_push(result)
def or_op(computation):
"""
Bitwise Or
"""
left, right = computation.stack_pop(num_items=2, type_hint=constants.UINT256)
result = left | right
computation.stack_push(result)
def xor(computation):
"""
Bitwise XOr
"""
left, right = computation.stack_pop(num_items=2, type_hint=constants.UINT256)
result = left ^ right
computation.stack_push(result)
def not_op(computation):
"""
Not
"""
value = computation.stack_pop(type_hint=constants.UINT256)
result = constants.UINT_256_MAX - value
computation.stack_push(result)
def byte_op(computation):
"""
Byte (retrieve a single byte from a 256-bit word)
"""
position, value = computation.stack_pop(num_items=2, type_hint=constants.UINT256)
if position >= 32:
result = 0
else:
result = (value // pow(256, 31 - position)) % 256
computation.stack_push(result)
```
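The signed opcodes above lean on a two's complement reinterpretation of 256-bit words, and `byte_op` indexes bytes from the most-significant end. A plain-Python sketch of both conventions, with the constants inlined here instead of imported from `eth.constants`:

```python
UINT_256_CEILING = 2 ** 256

def unsigned_to_signed_demo(value: int) -> int:
    # Words with the top bit set represent negative numbers in two's complement.
    return value if value <= 2 ** 255 - 1 else value - UINT_256_CEILING

# SLT: 2**256 - 1 reads as -1 when interpreted as a signed word, so it is less than 1.
assert unsigned_to_signed_demo(2 ** 256 - 1) == -1

# BYTE: position 31 addresses the least-significant byte of the 256-bit word.
value, position = 0xAABB, 31
assert (value // pow(256, 31 - position)) % 256 == 0xBB
```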
#### File: core/merkle-utils/test_merkle_trees.py
```python
import pytest
from eth_utils import (
ValidationError,
)
from eth_hash.auto import (
keccak,
)
from eth.utils.merkle import (
calc_merkle_root,
calc_merkle_tree,
get_root,
get_merkle_proof,
verify_merkle_proof,
)
@pytest.mark.parametrize("leaves,tree", [
(
(b"single leaf",),
(
(keccak(b"single leaf"),),
),
),
(
(b"left", b"right"),
(
(keccak(keccak(b"left") + keccak(b"right")),),
(keccak(b"left"), keccak(b"right")),
),
),
(
(b"1", b"2", b"3", b"4"),
(
(
keccak(
keccak(
keccak(b"1") + keccak(b"2")
) + keccak(
keccak(b"3") + keccak(b"4")
)
),
),
(
keccak(
keccak(b"1") + keccak(b"2")
),
keccak(
keccak(b"3") + keccak(b"4")
),
),
(
keccak(b"1"),
keccak(b"2"),
keccak(b"3"),
keccak(b"4"),
),
),
),
])
def test_merkle_tree_calculation(leaves, tree):
calculated_tree = calc_merkle_tree(leaves)
assert calculated_tree == tree
assert get_root(tree) == tree[0][0]
assert calc_merkle_root(leaves) == get_root(tree)
@pytest.mark.parametrize("leave_number", [0, 3, 5, 6, 7, 9])
def test_invalid_merkle_root_calculation(leave_number):
with pytest.raises(ValidationError):
calc_merkle_root((b"",) * leave_number)
@pytest.mark.parametrize("leaves,index,proof", [
(
(b"1", b"2"),
0,
(keccak(b"2"),),
),
(
(b"1", b"2"),
1,
(keccak(b"1"),),
),
(
(b"1", b"2", b"3", b"4"),
0,
(keccak(b"2"), keccak(keccak(b"3") + keccak(b"4"))),
),
(
(b"1", b"2", b"3", b"4"),
1,
(keccak(b"1"), keccak(keccak(b"3") + keccak(b"4"))),
),
(
(b"1", b"2", b"3", b"4"),
2,
(keccak(b"4"), keccak(keccak(b"1") + keccak(b"2"))),
),
(
(b"1", b"2", b"3", b"4"),
3,
(keccak(b"3"), keccak(keccak(b"1") + keccak(b"2"))),
),
])
def test_merkle_proofs(leaves, index, proof):
tree = calc_merkle_tree(leaves)
root = get_root(tree)
item = leaves[index]
calculated_proof = get_merkle_proof(tree, index)
assert calculated_proof == proof
assert verify_merkle_proof(root, item, index, calculated_proof)
assert not verify_merkle_proof(b"\x00" * 32, item, index, proof)
assert not verify_merkle_proof(root, b"\x00" * 32, index, proof)
assert not verify_merkle_proof(root, item, (index + 1) % len(leaves), proof)
for replaced_index in range(len(proof)):
altered_proof = proof[:replaced_index] + (b"\x00" * 32,) + proof[replaced_index + 1:]
assert not verify_merkle_proof(root, item, index, altered_proof)
def test_single_element_merkle_proof():
leaves = (b"1",)
tree = calc_merkle_tree(leaves)
root = get_root(tree)
assert get_merkle_proof(tree, 0) == ()
assert verify_merkle_proof(root, b"1", 0, ())
assert not verify_merkle_proof(b"\x00" * 32, b"1", 0, ())
assert not verify_merkle_proof(root, b"2", 0, ())
assert not verify_merkle_proof(root, b"1", 0, (b"\x00" * 32,))
@pytest.mark.parametrize("leaves", [
(b"1",),
(b"1", b"2"),
(b"1", b"2", b"3", b"4"),
])
def test_proof_generation_index_validation(leaves):
tree = calc_merkle_tree(leaves)
for invalid_index in [-1, len(leaves)]:
with pytest.raises(ValidationError):
get_merkle_proof(tree, invalid_index)
```
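Condensing the parametrized cases above into one end-to-end usage sketch of the merkle helpers:

```python
from eth.utils.merkle import (
    calc_merkle_root,
    calc_merkle_tree,
    get_merkle_proof,
    verify_merkle_proof,
)

leaves = (b"1", b"2", b"3", b"4")
tree = calc_merkle_tree(leaves)
root = calc_merkle_root(leaves)          # equals tree[0][0]

proof = get_merkle_proof(tree, 2)        # proof for leaf b"3"
assert verify_merkle_proof(root, b"3", 2, proof)
```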
#### File: core/message-object/test_message_object.py
```python
from __future__ import unicode_literals
import pytest
from eth_utils import (
to_normalized_address,
ValidationError,
)
from eth.vm.message import (
Message,
)
from eth.constants import (
CREATE_CONTRACT_ADDRESS,
)
ADDRESS_A = b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0"
ADDRESS_B = b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf1"
ADDRESS_C = b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf2"
def _create_message(gas=1,
to=ADDRESS_A,
sender=ADDRESS_B,
value=0,
data=b"",
code=b"",
**kwargs):
return Message(
gas=gas,
to=to,
sender=sender,
value=value,
data=data,
code=code,
**kwargs
)
@pytest.mark.parametrize(
"init_kwargs,is_valid",
(
({}, True),
({'gas': True}, False),
({'gas': -1}, False),
({'gas': 1.0}, False),
({'gas': '1'}, False),
({'value': True}, False),
({'value': -1}, False),
({'value': 1.0}, False),
({'value': '1'}, False),
({'sender': to_normalized_address(ADDRESS_A)}, False),
({'to': to_normalized_address(ADDRESS_A)}, False),
({'create_address': to_normalized_address(ADDRESS_A)}, False),
({'code_address': to_normalized_address(ADDRESS_A)}, False),
({'should_transfer_value': 1}, False),
({'should_transfer_value': 0}, False),
)
)
def test_parameter_validation(init_kwargs, is_valid):
if is_valid:
_create_message(**init_kwargs)
else:
with pytest.raises(ValidationError):
_create_message(**init_kwargs)
def test_code_address_defaults_to_to_address():
message = _create_message()
assert message.code_address == message.to
def test_code_address_uses_provided_address():
message = _create_message(code_address=ADDRESS_C)
assert message.code_address == ADDRESS_C
def test_storage_address_defaults_to_to_address():
message = _create_message()
assert message.storage_address == message.to
def test_storage_address_uses_provided_address():
message = _create_message(create_address=ADDRESS_C)
assert message.storage_address == ADDRESS_C
def test_is_create_computed_property():
create_message = _create_message(to=CREATE_CONTRACT_ADDRESS)
assert create_message.is_create is True
not_create_message = _create_message(to=ADDRESS_B)
assert not_create_message.is_create is False
```
#### File: core/vm/test_vm_state.py
```python
import pytest
from eth.db.backends.memory import MemoryDB
from eth.exceptions import StateRootNotFound
from eth.vm.forks.frontier.state import FrontierState
@pytest.fixture
def state(chain_without_block_validation):
return chain_without_block_validation.get_vm().state
def test_block_properties(chain_without_block_validation):
chain = chain_without_block_validation
vm = chain.get_vm()
block, _, _ = chain.import_block(vm.mine_block())
assert vm.state.coinbase == block.header.coinbase
assert vm.state.timestamp == block.header.timestamp
assert vm.state.block_number == block.header.block_number
assert vm.state.difficulty == block.header.difficulty
assert vm.state.gas_limit == block.header.gas_limit
def test_missing_state_root():
context = None
state = FrontierState(MemoryDB(), context, b'\x0f' * 32)
with pytest.raises(StateRootNotFound):
state.apply_transaction(None)
```
#### File: tests/database/test_journal_db.py
```python
import pytest
from eth.db.backends.memory import MemoryDB
from eth.db.journal import JournalDB
@pytest.fixture
def memory_db():
return MemoryDB()
@pytest.fixture
def journal_db(memory_db):
return JournalDB(memory_db)
def test_delete_removes_data_from_underlying_db_after_persist(journal_db, memory_db):
memory_db.set(b'1', b'test-a')
assert memory_db.exists(b'1') is True
journal_db.delete(b'1')
assert memory_db.exists(b'1') is True
journal_db.persist()
assert memory_db.exists(b'1') is False
def test_snapshot_and_revert_with_set(journal_db):
journal_db.set(b'1', b'test-a')
assert journal_db.get(b'1') == b'test-a'
changeset = journal_db.record()
journal_db.set(b'1', b'test-b')
assert journal_db.get(b'1') == b'test-b'
journal_db.discard(changeset)
assert journal_db.get(b'1') == b'test-a'
def test_snapshot_and_revert_with_delete(journal_db):
journal_db.set(b'1', b'test-a')
assert journal_db.exists(b'1') is True
assert journal_db.get(b'1') == b'test-a'
changeset = journal_db.record()
journal_db.delete(b'1')
assert journal_db.exists(b'1') is False
journal_db.discard(changeset)
assert journal_db.exists(b'1') is True
assert journal_db.get(b'1') == b'test-a'
def test_revert_clears_reverted_journal_entries(journal_db):
journal_db.set(b'1', b'test-a')
assert journal_db.get(b'1') == b'test-a'
changeset_a = journal_db.record()
journal_db.set(b'1', b'test-b')
journal_db.delete(b'1')
journal_db.set(b'1', b'test-c')
assert journal_db.get(b'1') == b'test-c'
changeset_b = journal_db.record()
journal_db.set(b'1', b'test-d')
journal_db.delete(b'1')
journal_db.set(b'1', b'test-e')
assert journal_db.get(b'1') == b'test-e'
journal_db.discard(changeset_b)
assert journal_db.get(b'1') == b'test-c'
journal_db.delete(b'1')
assert journal_db.exists(b'1') is False
journal_db.discard(changeset_a)
assert journal_db.get(b'1') == b'test-a'
def test_revert_removes_journal_entries(journal_db):
changeset_a = journal_db.record() # noqa: F841
assert len(journal_db.journal.journal_data) == 2
changeset_b = journal_db.record()
assert len(journal_db.journal.journal_data) == 3
# Forget *latest* changeset and prove it's the only one removed
journal_db.discard(changeset_b)
assert len(journal_db.journal.journal_data) == 2
changeset_b2 = journal_db.record()
assert len(journal_db.journal.journal_data) == 3
changeset_c = journal_db.record() # noqa: F841
assert len(journal_db.journal.journal_data) == 4
changeset_d = journal_db.record() # noqa: F841
assert len(journal_db.journal.journal_data) == 5
# Forget everything from b2 (inclusive) and what follows
journal_db.discard(changeset_b2)
assert len(journal_db.journal.journal_data) == 2
assert journal_db.journal.has_changeset(changeset_b2) is False
def test_commit_merges_changeset_into_previous(journal_db):
changeset = journal_db.record()
assert len(journal_db.journal.journal_data) == 2
journal_db.set(b'1', b'test-a')
assert journal_db.get(b'1') == b'test-a'
journal_db.commit(changeset)
assert len(journal_db.journal.journal_data) == 1
assert journal_db.journal.has_changeset(changeset) is False
def test_committing_middle_changeset_merges_in_subsequent_changesets(journal_db):
journal_db.set(b'1', b'test-a')
changeset_a = journal_db.record()
assert len(journal_db.journal.journal_data) == 2
journal_db.set(b'1', b'test-b')
changeset_b = journal_db.record()
assert len(journal_db.journal.journal_data) == 3
journal_db.set(b'1', b'test-c')
changeset_c = journal_db.record()
assert len(journal_db.journal.journal_data) == 4
journal_db.commit(changeset_b)
assert journal_db.get(b'1') == b'test-c'
assert len(journal_db.journal.journal_data) == 2
assert journal_db.journal.has_changeset(changeset_a)
assert journal_db.journal.has_changeset(changeset_b) is False
assert journal_db.journal.has_changeset(changeset_c) is False
def test_persist_writes_to_underlying_db(journal_db, memory_db):
changeset = journal_db.record() # noqa: F841
journal_db.set(b'1', b'test-a')
assert journal_db.get(b'1') == b'test-a'
assert memory_db.exists(b'1') is False
changeset_b = journal_db.record() # noqa: F841
journal_db.set(b'1', b'test-b')
assert journal_db.get(b'1') == b'test-b'
assert memory_db.exists(b'1') is False
journal_db.persist()
assert len(journal_db.journal.journal_data) == 1
assert memory_db.get(b'1') == b'test-b'
def test_journal_restarts_after_write(journal_db, memory_db):
journal_db.set(b'1', b'test-a')
journal_db.persist()
assert memory_db.get(b'1') == b'test-a'
journal_db.set(b'1', b'test-b')
journal_db.persist()
assert memory_db.get(b'1') == b'test-b'
def test_returns_key_from_underlying_db_if_missing(journal_db, memory_db):
changeset = journal_db.record() # noqa: F841
memory_db.set(b'1', b'test-a')
assert memory_db.exists(b'1')
assert journal_db.get(b'1') == b'test-a'
```
#### File: core/chain-management/test_initialize_data_dir.py
```python
import pytest
import os
from trinity.chains import (
is_data_dir_initialized,
initialize_data_dir,
)
from trinity.config import (
ChainConfig,
)
@pytest.fixture
def chain_config():
return ChainConfig(network_id=1, max_peers=1)
@pytest.fixture
def data_dir(chain_config):
os.makedirs(chain_config.data_dir, exist_ok=True)
assert os.path.exists(chain_config.data_dir)
return chain_config.data_dir
@pytest.fixture
def database_dir(chain_config, data_dir):
os.makedirs(chain_config.database_dir, exist_ok=True)
assert os.path.exists(chain_config.database_dir)
return chain_config.database_dir
@pytest.fixture
def nodekey(chain_config, data_dir):
with open(chain_config.nodekey_path, 'wb') as nodekey_file:
nodekey_file.write(b'\x01' * 32)
return chain_config.nodekey_path
def test_initializing_data_dir_from_nothing(chain_config):
assert not os.path.exists(chain_config.data_dir)
assert not is_data_dir_initialized(chain_config)
initialize_data_dir(chain_config)
assert is_data_dir_initialized(chain_config)
def test_initializing_data_dir_from_empty_data_dir(chain_config, data_dir):
assert not os.path.exists(chain_config.database_dir)
assert not is_data_dir_initialized(chain_config)
initialize_data_dir(chain_config)
assert is_data_dir_initialized(chain_config)
def test_initializing_data_dir_with_missing_nodekey(chain_config, data_dir, database_dir):
assert not os.path.exists(chain_config.nodekey_path)
assert not is_data_dir_initialized(chain_config)
initialize_data_dir(chain_config)
assert is_data_dir_initialized(chain_config)
```
#### File: sync/full/hexary_trie.py
```python
import bisect
from typing import (
Awaitable,
Callable,
Dict,
List,
Tuple,
)
from eth_utils import (
encode_hex,
)
from eth_typing import (
Hash32
)
from eth.db.backends.base import BaseDB
from eth.tools.logging import TraceLogger
from trie.constants import (
NODE_TYPE_BLANK,
NODE_TYPE_BRANCH,
NODE_TYPE_EXTENSION,
NODE_TYPE_LEAF,
)
from trie.utils.nodes import (
decode_node,
get_node_type,
is_blank_node,
)
from trinity.db.base import AsyncBaseDB
from trinity.exceptions import SyncRequestAlreadyProcessed
class SyncRequest:
def __init__(
self, node_key: Hash32, parent: 'SyncRequest', depth: int,
leaf_callback: Callable[[bytes, 'SyncRequest'], Awaitable[None]],
is_raw: bool = False) -> None:
"""Create a new SyncRequest for a given HexaryTrie node.
:param node_key: The node's key.
:param parent: The node's parent.
:param depth: The node's depth in the trie.
:param leaf_callback: A callback called when for all leaf children of this node.
:param is_raw: If True, HexaryTrieSync will simply store the node's data in the db,
without decoding and scheduling requests for children. This is needed to fetch contract
code when doing a state sync.
"""
self.node_key = node_key
self.parents: List[SyncRequest] = []
if parent is not None:
self.parents = [parent]
self.depth = depth
self.leaf_callback = leaf_callback
self.is_raw = is_raw
self.dependencies = 0
self.data: bytes = None
def __lt__(self, other: 'SyncRequest') -> bool:
return self.depth < other.depth
def __repr__(self) -> str:
return "SyncRequest(%s, depth=%d)" % (encode_hex(self.node_key), self.depth)
def _get_children(node: Hash32, depth: int
) -> Tuple[List[Tuple[int, Hash32]], List[bytes]]:
"""Return all children of the node with the given hash.
:rtype: A two-tuple with one list containing the children that reference other nodes and
another containing the leaf children.
"""
node_type = get_node_type(node)
references = []
leaves = []
if node_type == NODE_TYPE_BLANK:
pass
elif node_type == NODE_TYPE_LEAF:
leaves.append(node[1])
elif node_type == NODE_TYPE_EXTENSION:
if isinstance(node[1], bytes) and len(node[1]) == 32:
references.append((depth + 1, node[1]))
elif isinstance(node[1], list):
# the rlp encoding of the node is < 32 so rather than a 32-byte
# reference, the actual rlp encoding of the node is inlined.
sub_references, sub_leaves = _get_children(node[1], depth + 1)
references.extend(sub_references)
leaves.extend(sub_leaves)
else:
raise Exception("Invariant")
elif node_type == NODE_TYPE_BRANCH:
for sub_node in node[:16]:
if isinstance(sub_node, bytes) and len(sub_node) == 32:
# this is a reference to another node.
references.append((depth + 1, sub_node))
else:
sub_references, sub_leaves = _get_children(sub_node, depth)
references.extend(sub_references)
leaves.extend(sub_leaves)
# The last item in a branch may contain a value.
if not is_blank_node(node[16]):
leaves.append(node[16])
return references, leaves
class HexaryTrieSync:
def __init__(self,
root_hash: Hash32,
db: AsyncBaseDB,
nodes_cache: BaseDB,
logger: TraceLogger) -> None:
# Nodes that haven't been requested yet.
self.queue: List[SyncRequest] = []
# Nodes that have been requested to a peer, but not yet committed to the DB, either
# because we haven't processed a reply containing them or because some of their children
# haven't been retrieved/committed yet.
self.requests: Dict[Hash32, SyncRequest] = {}
self.db = db
self.root_hash = root_hash
self.logger = logger
# A cache of node hashes we know to exist in our DB, used to avoid querying the DB
# unnecessarily as that's the main bottleneck when dealing with a large DB like for
# ethereum's mainnet/ropsten.
self.nodes_cache = nodes_cache
self.committed_nodes = 0
if root_hash in self.db:
self.logger.info("Root node (%s) already exists in DB, nothing to do", root_hash)
else:
self._schedule(root_hash, parent=None, depth=0, leaf_callback=self.leaf_callback)
async def leaf_callback(self, data: bytes, parent: SyncRequest) -> None:
"""Called when we reach a leaf node.
Should be implemented by subclasses that need to perform special handling of leaves.
"""
pass
@property
def has_pending_requests(self) -> bool:
return len(self.requests) > 0
def next_batch(self, n: int = 1) -> List[SyncRequest]:
"""Return the next requests that should be dispatched."""
if len(self.queue) == 0:
return []
batch = list(reversed((self.queue[-n:])))
self.queue = self.queue[:-n]
return batch
async def schedule(self, node_key: Hash32, parent: SyncRequest, depth: int,
leaf_callback: Callable[[bytes, 'SyncRequest'], Awaitable[None]],
is_raw: bool = False) -> None:
"""Schedule a request for the node with the given key."""
if node_key in self.nodes_cache:
self.logger.trace("Node %s already exists in db", encode_hex(node_key))
return
if await self.db.coro_exists(node_key):
self.nodes_cache[node_key] = b''
self.logger.trace("Node %s already exists in db", encode_hex(node_key))
return
self._schedule(node_key, parent, depth, leaf_callback, is_raw)
def _schedule(self, node_key: Hash32, parent: SyncRequest, depth: int,
leaf_callback: Callable[[bytes, 'SyncRequest'], Awaitable[None]],
is_raw: bool = False) -> None:
if parent is not None:
parent.dependencies += 1
existing = self.requests.get(node_key)
if existing is not None:
self.logger.trace(
"Already requesting %s, will just update parents list", node_key)
existing.parents.append(parent)
return
request = SyncRequest(node_key, parent, depth, leaf_callback, is_raw)
# Requests get added to both self.queue and self.requests; the former is used to keep
# track which requests should be sent next, and the latter is used to avoid scheduling a
# request for a given node multiple times.
self.logger.trace("Scheduling retrieval of %s", encode_hex(request.node_key))
self.requests[request.node_key] = request
bisect.insort(self.queue, request)
async def process(self, results: List[Tuple[Hash32, bytes]]) -> None:
"""Process request results.
:param results: A list of two-tuples containing the node's key and data.
"""
for node_key, data in results:
request = self.requests.get(node_key)
if request is None:
# This may happen if we resend a request for a node after waiting too long,
# and then eventually get two responses with it.
self.logger.trace(
"No SyncRequest found for %s, maybe we got more than one response for it",
encode_hex(node_key))
return
if request.data is not None:
raise SyncRequestAlreadyProcessed("%s has been processed already" % request)
request.data = data
if request.is_raw:
await self.commit(request)
continue
node = decode_node(request.data)
references, leaves = _get_children(node, request.depth)
for depth, ref in references:
await self.schedule(ref, request, depth, request.leaf_callback)
if request.leaf_callback is not None:
for leaf in leaves:
await request.leaf_callback(leaf, request)
if request.dependencies == 0:
await self.commit(request)
async def commit(self, request: SyncRequest) -> None:
"""Commit the given request's data to the database.
The request's data attribute must be set (done by the process() method) before this can be
called.
"""
self.committed_nodes += 1
await self.db.coro_set(request.node_key, request.data)
self.nodes_cache[request.node_key] = b''
self.requests.pop(request.node_key)
for ancestor in request.parents:
ancestor.dependencies -= 1
if ancestor.dependencies == 0:
await self.commit(ancestor)
``` |
{
"source": "jin10086/scrapy-fake-useragent",
"score": 2
} |
#### File: scrapy-fake-useragent/scrapy_fake_useragent/middleware.py
```python
import logging
from fake_useragent import UserAgent
logger = logging.getLogger(__name__)
class RandomUserAgentMiddleware(object):
def __init__(self, crawler):
super(RandomUserAgentMiddleware, self).__init__()
self.ua = UserAgent()
self.per_proxy = crawler.settings.get('RANDOM_UA_PER_PROXY', False)
self.proxy2ua = {}
@classmethod
def from_crawler(cls, crawler):
return cls(crawler)
def process_request(self, request, spider):
if self.per_proxy:
proxy = request.meta.get('proxy')
if proxy not in self.proxy2ua:
self.proxy2ua[proxy] = self.ua.random
logger.debug('Assign User-Agent %s to Proxy %s'
% (self.proxy2ua[proxy], proxy))
request.headers.setdefault('User-Agent', self.proxy2ua[proxy])
else:
_ = self.ua.random
logger.debug('Assign User-Agent to %s' % _)
request.headers.setdefault('User-Agent', _)
``` |
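For reference, this middleware is typically wired up in a project's `settings.py` roughly as the project README suggests; the priority value below is illustrative:

```python
# settings.py of a Scrapy project
DOWNLOADER_MIDDLEWARES = {
    # Disable Scrapy's built-in UserAgentMiddleware so it cannot overwrite the random header.
    'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
    'scrapy_fake_useragent.middleware.RandomUserAgentMiddleware': 400,
}

# Optional: keep one User-Agent per proxy, as read by the middleware above.
RANDOM_UA_PER_PROXY = True
```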
{
"source": "jin-2/jin2gram",
"score": 2
} |
#### File: jin2gram/images/serializers.py
```python
from rest_framework import serializers
from taggit_serializer.serializers import (TagListSerializerField,
TaggitSerializer)
from . import models
from jin2gram.users import models as users_model
class FeedUserSerializer(serializers.ModelSerializer):
class Meta:
model = users_model.User
fields = (
'name',
'profile_image'
)
class CommentSerializer(serializers.ModelSerializer):
creator = FeedUserSerializer(read_only=True)
class Meta:
model = models.Comment
fields = (
'id',
'message',
'creator',
)
class LikeSerializer(serializers.ModelSerializer):
class Meta:
model = models.Like
fields = '__all__'
class ImageSerializer(TaggitSerializer, serializers.ModelSerializer):
comments = CommentSerializer(many=True)
creator = FeedUserSerializer()
tags = TagListSerializerField()
is_liked = serializers.SerializerMethodField()
class Meta:
model = models.Image
fields = (
'id',
'file',
'location',
'caption',
'comments',
'likes_count',
'creator',
'tags',
'natural_time',
'is_liked',
)
def get_is_liked(self, obj):
if 'request' in self.context:
request = self.context['request']
try:
models.Like.objects.get(creator__id=request.user.id, img__id=obj.id)
return True
except models.Like.DoesNotExist:
return False
return False
class UserProfileImageSerializer(serializers.ModelSerializer):
class Meta:
model = models.Image
fields = (
'id',
'file',
'likes_count',
'comments_count',
)
class SmallImage(serializers.ModelSerializer):
class Meta:
model = models.Image
fields = (
'id',
'file'
)
class InputImageSerializer(serializers.ModelSerializer):
class Meta:
model = models.Image
fields = (
'file',
'location',
'caption',
)
```
#### File: jin2gram/images/views.py
```python
from rest_framework.views import APIView
from rest_framework import status
from rest_framework.response import Response
from . import models, serializers
from jin2gram.users import models as user_models
from jin2gram.users import serializers as user_serializers
from jin2gram.notifications import views as notification_views
class Images(APIView):
def get(self, request, format=None):
user = request.user
following_users = user.following.all()
image_list = []
# images from the users I follow
for following_user in following_users:
# fetch only two images per user
user_image = following_user.images.all()[:2]
for image in user_image:
image_list.append(image)
# my own images
my_images = user.images.all()[:2]
for image in my_images:
image_list.append(image)
# sorted_list = sorted(image_list, key=get_key, reverse=True)
# sort newest first using a lambda key
sorted_list = sorted(image_list, key=lambda image: image.created_at, reverse=True)
serializer = serializers.ImageSerializer(sorted_list, many=True, context={'request': request})
return Response(serializer.data)
def post(self, request, format=None):
user = request.user
serializer = serializers.InputImageSerializer(data=request.data)
if serializer.is_valid():
serializer.save(creator=user)
return Response(data=serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(data=serializer.data, status=status.HTTP_400_BAD_REQUEST)
# def get_key(image):
# return image.created_at
class LikeImage(APIView):
def get(self, request, image_id, format=None):
likes = models.Like.objects.filter(img__id=image_id)
like_creator_ids = likes.values('creator_id')
users = user_models.User.objects.filter(id__in=like_creator_ids)
serializer = user_serializers.ListUserSerializer(users, many=True, context={"request": request})
return Response(data=serializer.data, status=status.HTTP_200_OK)
def post(self, request, image_id, format=None):
user = request.user
try:
found_image = models.Image.objects.get(id=image_id)
except models.Image.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
try:
preexisting_like = models.Like.objects.get(
creator=user,
img=found_image
)
return Response(status=status.HTTP_304_NOT_MODIFIED)
except models.Like.DoesNotExist:
new_like = models.Like.objects.create(
creator=user,
img=found_image
)
new_like.save()
notification_views.create_notification(user, found_image.creator, 'like', found_image)
return Response(status=status.HTTP_201_CREATED)
class UnLikeImage(APIView):
def delete(self, request, image_id, format=None):
user = request.user
try:
found_image = models.Image.objects.get(id=image_id)
except models.Image.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
try:
preexisting_like = models.Like.objects.get(
creator=user,
img=found_image
)
preexisting_like.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
except models.Like.DoesNotExist:
# nothing to delete, so report that no change was made
return Response(status=status.HTTP_304_NOT_MODIFIED)
class CommentImage(APIView):
def post(self, request, image_id, format=None):
user = request.user
try:
found_image = models.Image.objects.get(id=image_id)
except models.Image.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
serializer = serializers.CommentSerializer(data=request.data)
if serializer.is_valid():
serializer.save(
creator=user,
img=found_image
)
# notification_views.create_notification(user, found_image.creator, 'comment', found_image, request.data['message'])
notification_views.create_notification(
user, found_image.creator, 'comment', found_image, serializer.data['message'])
return Response(data=serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class Comment(APIView):
def delete(self, request, comment_id, format=None):
user = request.user
try:
comment = models.Comment.objects.get(id=comment_id, creator=user)
comment.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
except models.Comment.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
class ModerateComment(APIView):
# delete a comment left on an image that I uploaded
def delete(self, request, image_id, comment_id, format=None):
user = request.user
try:
delete_comment = models.Comment.objects.get(id=comment_id, img__id=image_id, img__creator=user)
delete_comment.delete()
except models.Comment.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
return Response(status=status.HTTP_204_NO_CONTENT)
class Search(APIView):
def get(self, request, format=None):
hashtags = request.query_params.get('hashtags', None)
if hashtags is not None:
hashtags = hashtags.split(',')
images = models.Image.objects.filter(tags__name__in=hashtags).distinct()
serializer = serializers.UserProfileImageSerializer(images, many=True)
return Response(data=serializer.data, status=status.HTTP_200_OK)
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
class DetailImage(APIView):
def found_own_image(self, image_id, user):
try:
image = models.Image.objects.get(id=image_id, creator=user)
return image
except models.Image.DoesNotExist:
return None
def get(self, request, image_id, format=None):
try:
image = models.Image.objects.get(id=image_id)
except models.Image.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
serializer = serializers.ImageSerializer(image, context={'request': request})
return Response(data=serializer.data, status=status.HTTP_200_OK)
def put(self, request, image_id, format=None):
user = request.user
image = self.found_own_image(image_id, user)
if image is None:
return Response(status=status.HTTP_400_BAD_REQUEST)
serializer = serializers.InputImageSerializer(image, data=request.data, partial=True)
if serializer.is_valid():
serializer.save(creator=user)
return Response(data=serializer.data, status=status.HTTP_200_OK)
else:
return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, image_id, format=None):
user = request.user
image = self.found_own_image(image_id, user)
if image is None:
return Response(status=status.HTTP_400_BAD_REQUEST)
image.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
``` |
{
"source": "jin530/pytorch-widedeep",
"score": 3
} |
#### File: pytorch_widedeep/utils/text_utils.py
```python
import numpy as np
import os
from ..wdtypes import *
from .fastai_transforms import Tokenizer, Vocab
from gensim.utils import tokenize
__all__ = ["simple_preprocess", "get_texts", "pad_sequences", "build_embeddings_matrix"]
def simple_preprocess(
doc: str,
lower: bool = False,
deacc: bool = False,
min_len: int = 2,
max_len: int = 15,
) -> List[str]:
r"""
Gensim's simple_preprocess with an added 'lower' param to indicate whether or not to
lower-case all the tokens in the texts.
For more information see: https://radimrehurek.com/gensim/utils.html
"""
tokens = [
token
for token in tokenize(doc, lower=lower, deacc=deacc, errors="ignore")
if min_len <= len(token) <= max_len and not token.startswith("_")
]
return tokens
def get_texts(texts: List[str]) -> List[List[str]]:
r"""
Uses fastai's Tokenizer because it does a series of very convenients things
during the tokenization process
See here: https://docs.fast.ai/text.transform.html#Tokenizer
"""
processed_texts = [" ".join(simple_preprocess(t)) for t in texts]
tok = Tokenizer().process_all(processed_texts)
return tok
def pad_sequences(
seq: List[int], maxlen: int, pad_first: bool = True, pad_idx: int = 1
) -> List[List[int]]:
r"""
Given a List of tokenized and 'numericalised' sequences it will return padded sequences
according to the input parameters maxlen, pad_first and pad_idx
Parameters
----------
seq: List
List of int tokens
maxlen: Int
Maximum length of the padded sequences
pad_first: Boolean. Default=True
Indicates whether the padding index will be added at the beginning or the
end of the sequences
pad_idx: Int. Default=1
padding index. Fastai's Tokenizer leaves 0 for the 'unknown' token.
Returns:
res: List
Padded sequences
"""
if len(seq) >= maxlen:
res = np.array(seq[-maxlen:]).astype("int32")
return res
else:
res = np.zeros(maxlen, dtype="int32") + pad_idx
if pad_first:
res[-len(seq) :] = seq
else:
res[: len(seq)] = seq
return res
def build_embeddings_matrix(
vocab: Vocab, word_vectors_path: str, min_freq: int, verbose: int = 1
) -> np.ndarray:
r"""
Build the embedding matrix using pretrained word vectors
Parameters
----------
vocab: Fastai's Vocab object
see: https://docs.fast.ai/text.transform.html#Vocab
word_vectors_path:str
path to the pretrained word embeddings
min_freq: Int
minimum frequency required for a word to be in the vocabulary
verbose: Int. Default=1
Returns
-------
embedding_matrix: np.ndarray
pretrained word embeddings. If a word in our vocabulary is not among the
pretrained embeddings it will be assigned the mean pretrained
word-embeddings vector
"""
if not os.path.isfile(word_vectors_path):
raise FileNotFoundError("{} not found".format(word_vectors_path))
if verbose:
print("Indexing word vectors...")
embeddings_index = {}
f = open(word_vectors_path)
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype="float32")
embeddings_index[word] = coefs
f.close()
if verbose:
print("Loaded {} word vectors".format(len(embeddings_index)))
print("Preparing embeddings matrix...")
mean_word_vector = np.mean(list(embeddings_index.values()), axis=0)
embedding_dim = len(list(embeddings_index.values())[0])
num_words = len(vocab.itos)
embedding_matrix = np.zeros((num_words, embedding_dim))
found_words = 0
for i, word in enumerate(vocab.itos):
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
found_words += 1
else:
embedding_matrix[i] = mean_word_vector
if verbose:
print(
"{} words in the vocabulary had {} vectors and appear more than {} times".format(
found_words, word_vectors_path, min_freq
)
)
return embedding_matrix.astype("float32")
```
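A quick usage sketch of `pad_sequences`; the token ids are invented and the default padding index of 1 (fastai's convention, as in the signature above) is assumed:

```python
from pytorch_widedeep.utils.text_utils import pad_sequences

padded = pad_sequences([5, 12, 7], maxlen=5)            # pads at the front by default
assert list(padded) == [1, 1, 5, 12, 7]

truncated = pad_sequences(list(range(10)), maxlen=5)    # keeps only the last `maxlen` tokens
assert list(truncated) == [5, 6, 7, 8, 9]
```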
#### File: tests/test_data_utils/test_du_deep_dense.py
```python
import numpy as np
import pandas as pd
import pytest
from pytorch_widedeep.utils.dense_utils import label_encoder
from pytorch_widedeep.preprocessing import DeepPreprocessor
def create_test_dataset(input_type, input_type_2=None):
df = pd.DataFrame()
col1 = list(np.random.choice(input_type, 3))
if input_type_2 is not None:
col2 = list(np.random.choice(input_type_2, 3))
else:
col2 = list(np.random.choice(input_type, 3))
df["col1"], df["col2"] = col1, col2
return df
some_letters = ["a", "b", "c", "d", "e"]
some_numbers = [1, 2, 3, 4, 5]
df_letters = create_test_dataset(some_letters)
df_numbers = create_test_dataset(some_numbers)
###############################################################################
# Simple test of functionality: testing the label_encoder function
###############################################################################
df_letters_le, letters_enconding_dict = label_encoder(df_letters, ["col1", "col2"])
df_numbers_le, numbers_enconding_dict = label_encoder(df_numbers, ["col1", "col2"])
@pytest.mark.parametrize(
"input_df, encoding_dict, output_df",
[
(df_letters, letters_enconding_dict, df_letters_le),
(df_numbers, numbers_enconding_dict, df_numbers_le),
],
)
def test_label_encoder(input_df, encoding_dict, output_df):
tmp_df = input_df.copy()
for c in input_df.columns:
tmp_df[c] = tmp_df[c].map(encoding_dict[c])
assert tmp_df.equals(output_df)
################################################################################
# Same as before, but testing behaviour when passed a custom encoding dict
###############################################################################
encoding_dict_1 = {
c: {k: v for v, k in enumerate(sorted(df_letters[c].unique()))}
for c in df_letters.columns
}
encoding_dict_2 = {
c: {k: v for v, k in enumerate(sorted(df_numbers[c].unique()))}
for c in df_numbers.columns
}
df_letters_le, letters_enconding_dict = label_encoder(
df_letters, cols=["col1", "col2"], val_to_idx=encoding_dict_1
)
df_numbers_le, numbers_enconding_dict = label_encoder(
df_numbers, cols=["col1", "col2"], val_to_idx=encoding_dict_2
)
@pytest.mark.parametrize(
"input_df, encoding_dict, output_df",
[
(df_letters, encoding_dict_1, df_letters_le),
(df_numbers, encoding_dict_2, df_numbers_le),
],
)
def test_label_encoder_with_custom_encoder(input_df, encoding_dict, output_df):
tmp_df = input_df.copy()
for c in input_df.columns:
tmp_df[c] = tmp_df[c].map(encoding_dict[c])
assert tmp_df.equals(output_df)
################################################################################
# Test the DeepPreprocessor: only categorical columns to be represented with
# embeddings
###############################################################################
cat_embed_cols = [("col1", 5), ("col2", 5)]
preprocessor1 = DeepPreprocessor(cat_embed_cols)
X_letters = preprocessor1.fit_transform(df_letters)
embed_input_letters = preprocessor1.embeddings_input
decoding_dict_letters = {
c: {k: v for v, k in preprocessor1.encoding_dict[c].items()}
for c in preprocessor1.encoding_dict.keys()
}
preprocessor2 = DeepPreprocessor(cat_embed_cols)
X_numbers = preprocessor2.fit_transform(df_numbers)
embed_input_numbers = preprocessor2.embeddings_input
decoding_dict_numbers = {
c: {k: v for v, k in preprocessor2.encoding_dict[c].items()}
for c in preprocessor2.encoding_dict.keys()
}
errors = []
@pytest.mark.parametrize(
"input_df, X_deep, embed_input, decoding_dict, error_list",
[
(df_letters, X_letters, embed_input_letters, decoding_dict_letters, errors),
(df_numbers, X_numbers, embed_input_numbers, decoding_dict_numbers, errors),
],
)
def test_prepare_deep_without_continous_columns(
input_df, X_deep, embed_input, decoding_dict, error_list
):
for i, c in enumerate(input_df.columns):
if (
input_df[c].nunique() != embed_input[i][1]
or cat_embed_cols[i][1] != embed_input[i][2]
):
error_list.append(
"error: the setup output does not match the intended input"
)
tmp_df = pd.DataFrame({"col1": X_deep[:, 0], "col2": X_deep[:, 1]})
for c in input_df.columns:
tmp_df[c] = tmp_df[c].map(decoding_dict[c])
if not tmp_df.equals(input_df):
error_list.append("error: the decoding does not match the encoding")
assert not error_list, "errors occurred:\n{}".format("\n".join(error_list))
################################################################################
# Test the DeepPreprocessor: only continouos columns
###############################################################################
def test_prepare_deep_without_embedding_columns():
errors = []
df_randint = pd.DataFrame(np.random.choice(np.arange(100), (100, 2)))
df_randint.columns = ["col1", "col2"]
preprocessor3 = DeepPreprocessor(continuous_cols=["col1", "col2"])
try:
X_randint = preprocessor3.fit_transform(df_randint)
except:
errors.append("Fundamental Error")
out_booleans = []
means, stds = np.mean(X_randint, axis=0), np.std(X_randint, axis=0)
for mean, std in zip(means, stds):
out_booleans.append(np.isclose(mean, 0.0))
out_booleans.append(np.isclose(std, 1.0))
if not np.all(out_booleans):
errors.append("There is something going on with the scaler")
assert not errors, "errors occurred:\n{}".format("\n".join(errors))
```
#### File: tests/test_data_utils/test_du_deep_image.py
```python
import numpy as np
import pandas as pd
import os
from pytorch_widedeep.preprocessing import ImagePreprocessor
full_path = os.path.realpath(__file__)
path = os.path.split(full_path)[0]
df = pd.DataFrame({"galaxies": ["galaxy1.png", "galaxy2.png"]})
img_col = "galaxies"
imd_dir = os.path.join(path, "images")
processor = ImagePreprocessor(img_col=img_col, img_path=imd_dir)
X_imgs = processor.fit_transform(df)
###############################################################################
# There is not much to test here, since I only resize.
###############################################################################
def test_sizes():
img_width = X_imgs.shape[1]
img_height = X_imgs.shape[2]
assert np.all((img_width == processor.width, img_height == processor.height))
``` |
{
"source": "jin530/spotify_recSys_challenge_2018",
"score": 3
} |
#### File: jin530/spotify_recSys_challenge_2018/data_generator.py
```python
import sys
import json
import os
import numpy as np
import argparse
from utils.spotify_reader import *
def fullpaths_generator(path):
filenames = os.listdir(path)
fullpaths = []
for filename in filenames:
fullpath = os.sep.join((path, filename))
fullpaths.append(fullpath)
return fullpaths
if __name__ == '__main__':
args = argparse.ArgumentParser(description="args")
args.add_argument('--datadir', type=str, default='./data', help="directory where the outputs are stored")
args.add_argument('--mpd_tr', type=str, default='./mpd_train', help="train mpd path")
args.add_argument('--mpd_te', type=str, default='./mpd_test', help="test mpd path")
args.add_argument('--mpd_ch', type=str, default='./challenge', help="challenge set path")
args.add_argument('--mincount_trk', type=int, default=5, help='minimum count of tracks')
args.add_argument('--mincount_art', type=int, default=3, help='minimum count of artists')
args.add_argument('--divide_ch', type=str, default='0-1,5,10-100,25-100r')
args = args.parse_args()
train_fullpaths = fullpaths_generator(args.mpd_tr)
train_fold = Spotify_train(train_fullpaths, args.mincount_trk, args.mincount_art, True, args.datadir)
if args.mpd_te != 'NULL':
test_fullpaths = fullpaths_generator(args.mpd_te)
for test_seed_num in [0, 1, 5, 10, 25, 100]:
test_fold = Spotify_test(test_fullpaths, args.datadir+'/train', test_seed_num, args.datadir, False)
del test_fold
for test_seed_num in [25, 100]:
test_fold = Spotify_test(test_fullpaths, args.datadir+'/train', test_seed_num, args.datadir, True)
del test_fold
if args.mpd_ch != 'NULL':
challenge_fullpaths = fullpaths_generator(args.mpd_ch)
divide_ranges = [rg for rg in args.divide_ch.split(',')]
for rg in divide_ranges:
is_in_order = True
if 'r' in rg:
is_in_order = False
rg = rg.replace("r","")
from_to = [int(num) for num in rg.split('-')]
from_to = list(range(from_to[0],from_to[-1]+1))
challenge_fold = Spotify_challenge(challenge_fullpaths, args.datadir + '/train',
args.datadir, from_to, is_in_order)
```
#### File: spotify_recSys_challenge_2018/utils/metrics.py
```python
import math
import numpy as np
def get_r_precision(answer, cand):
set_answer = set(answer)
r = len(set_answer&set(cand[:len(answer)])) / len(answer)
return r
def get_ndcg(answer, cand):
cand_len = len(cand)
idcg=1
idcg_idx=2
dcg=0
if cand[0] in answer: dcg=1
for i in range(1,cand_len):
if cand[i] in answer:
dcg += (1/math.log(i+1,2))
idcg += (1/math.log(idcg_idx,2))
idcg_idx+=1
return dcg/idcg
def get_rsc(answer, cand):
cand_len = len(cand)
for i in range(cand_len):
if cand[i] in answer:
return i//10
return 51
def get_metrics(answer,cand):
r_precision = get_r_precision(answer,cand)
ndcg = get_ndcg(answer,cand)
rsc = get_rsc(answer,cand)
return r_precision,ndcg,rsc
def single_eval(scores, seed, answer):
cand = np.argsort(-1*scores)
cand = cand.tolist()
#print("sort:",np.sort(-1*scores)[:10])
#print("cand:",cand[:10])
for i in seed:
try:
cand.remove(i)
except:
pass
cand = cand[:500]
rprecision, ndcg, rsc = get_metrics(answer,cand)
return rprecision,ndcg,rsc
``` |
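A tiny worked example of the metrics above, with invented track ids (the import assumes the `utils/metrics.py` layout shown in the heading):

```python
from utils.metrics import get_r_precision, get_ndcg, get_rsc

answer = [1, 2, 3]        # ground-truth tracks
cand = [1, 9, 2, 8, 7]    # recommendations, best first

# R-precision: 2 of the first len(answer)=3 candidates are relevant.
assert get_r_precision(answer, cand) == 2 / 3

# NDCG rewards hits near the top; for this example it comes out around 0.82.
print(get_ndcg(answer, cand))

# Clicks metric: the first hit sits at index 0, i.e. within the first block of 10 -> 0 clicks.
assert get_rsc(answer, cand) == 0
```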
{
"source": "jina-ai/benchmark",
"score": 2
} |
#### File: benchmark/src/document_array_append.py
```python
import pytest
from faker import Faker
from jina import Document, DocumentArray, DocumentArrayMemmap
from .pages import Pages
from .utils.benchmark import benchmark_time
fake = Faker()
Faker.seed(42)
NUM_DOCS = 10000
@pytest.fixture
def docs():
return [Document(text=fake.text()) for _ in range(NUM_DOCS)]
def test_da_append(docs, json_writer):
def _append(da):
for doc in docs:
da.append(doc)
def _setup(**kwargs):
return (), dict(da=DocumentArray())
result = benchmark_time(setup=_setup, func=_append)
json_writer.append(
page=Pages.DA_APPEND,
result=result,
metadata=dict(num_docs_append=NUM_DOCS),
)
@pytest.mark.parametrize('flush', [True, False])
def test_dam_append(docs, flush, json_writer, ephemeral_tmpdir):
def _append(da):
for doc in docs:
da.append(doc, flush=flush)
def _setup(**kwargs):
return (), dict(da=DocumentArrayMemmap(f'{str(ephemeral_tmpdir)}/memmap'))
def _teardown():
import shutil
shutil.rmtree(f'{str(ephemeral_tmpdir)}/memmap')
result = benchmark_time(setup=_setup, func=_append, teardown=_teardown)
json_writer.append(
page=Pages.DA_APPEND,
result=result,
metadata=dict(num_docs_append=NUM_DOCS, flush=flush),
)
```
#### File: benchmark/src/document_array_embeddings.py
```python
import numpy as np
import pytest
from jina import Document, DocumentArray, DocumentArrayMemmap
from .pages import Pages
from .utils.benchmark import benchmark_time
NUM_REPETITIONS = 10
@pytest.mark.parametrize(
'num_docs,num_feat', [(100, 128), (10_000, 128), (10_000, 256)]
)
def test_da_embeddings(num_docs, num_feat, json_writer):
def _setup():
da = DocumentArray(
[Document(embedding=np.random.random(num_feat)) for i in range(num_docs)]
)
return (), dict(da=da)
def _da_embeddings(da):
embeddings = da.embeddings
result = benchmark_time(
setup=_setup,
func=_da_embeddings,
n=NUM_REPETITIONS,
)
json_writer.append(
page=Pages.DA_GET_ATTRIBUTES,
result=result,
metadata=dict(num_docs=num_docs, num_feat=num_feat),
)
@pytest.mark.parametrize(
'num_docs,num_feat', [(100, 128), (10_000, 128), (10_000, 256)]
)
def test_dam_embeddings(num_docs, num_feat, json_writer, ephemeral_tmpdir):
def _setup():
dam = DocumentArrayMemmap((f'{str(ephemeral_tmpdir)}/memmap'))
dam.extend(
[Document(embedding=np.random.rand(num_feat)) for i in range(num_docs)]
)
return (), dict(dam=dam)
def _dam_embeddings(dam):
embeddings = dam.embeddings
def _teardown():
import shutil
shutil.rmtree(f'{str(ephemeral_tmpdir)}/memmap')
result = benchmark_time(
setup=_setup,
func=_dam_embeddings,
teardown=_teardown,
n=NUM_REPETITIONS,
)
json_writer.append(
page=Pages.DA_GET_ATTRIBUTES,
result=result,
metadata=dict(num_docs=num_docs, num_feat=num_feat),
)
```
#### File: benchmark/src/document_array_insert.py
```python
import pytest
from jina import Document, DocumentArray
from .pages import Pages
from .utils.benchmark import benchmark_time
NUM_REPETITIONS = 10
@pytest.mark.parametrize('num_docs', [100, 10_000])
def test_da_insert(num_docs, json_writer):
def _setup():
docs = [Document(text=f'doc{i}') for i in range(num_docs)]
da = DocumentArray()
return (), dict(da=da, docs=docs)
def _insert_in_da(da, docs):
for doc in docs:
da.insert(index=0, doc=doc)
result = benchmark_time(
setup=_setup,
func=_insert_in_da,
n=NUM_REPETITIONS,
)
json_writer.append(
page=Pages.DA_INSERT,
result=result,
metadata=dict(num_docs=num_docs),
)
```
#### File: benchmark/src/document_array_sort.py
```python
import random
import string
import pytest
from jina import Document, DocumentArray
from .pages import Pages
from .utils.benchmark import benchmark_time
NUM_REPETITIONS = 25
NUM_DOCS = 1000
CHARS = tuple(string.ascii_uppercase + string.digits)
def _get_docs(num_docs):
return [Document(scores={'cosine': random.random()}) for _ in range(num_docs)]
@pytest.mark.parametrize('num_docs', [100, 100_000])
def test_da_sort(num_docs, json_writer):
def _sort(da):
da.sort(key=lambda x: x.scores['cosine'].value)
def _build_da(**kwargs):
docs = kwargs.get('docs')
da = DocumentArray(docs)
return (), dict(da=da)
result = benchmark_time(
setup=_build_da,
func=_sort,
n=NUM_REPETITIONS,
kwargs=dict(docs=_get_docs(num_docs)),
)
json_writer.append(
page=Pages.DA_SORT,
result=result,
metadata=dict(num_docs=num_docs),
)
```
#### File: benchmark/src/document_matches.py
```python
import pytest
from jina import Document
from .pages import Pages
from .utils.benchmark import benchmark_time
@pytest.mark.parametrize("num_docs", [100, 1000, 10_000])
def test_document_document_matches(num_docs, json_writer):
def _input_docs():
doc = Document(text="d1")
doc.matches = [Document(text=f"d{i}") for i in range(num_docs)]
return ((), {"doc": doc})
def _get_matches(doc):
return doc.matches
result = benchmark_time(setup=_input_docs, func=_get_matches)
json_writer.append(
page=Pages.DOCUMENT_RECURSIVE,
result=result,
metadata=dict(num_docs=num_docs),
)
```
#### File: src/utils/benchmark.py
```python
from collections import namedtuple
from contextlib import ExitStack
from statistics import mean, stdev
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple
from .profiler import Profiler, merge_profiles
from .timecontext import TimeContext
BenchmarkResult = namedtuple(
'BenchmarkResult', ['mean', 'std', 'iterations', 'profiles']
)
def benchmark_time(
func: Callable[[Any], Any],
n: int = 5,
setup: Optional[Callable[[Any], Optional[Tuple[Iterable, Dict[str, Any]]]]] = None,
teardown: Optional[Callable[[None], None]] = None,
profile_cls: Optional[List[type]] = [],
args: Optional[Tuple] = None,
kwargs: Optional[Dict] = None,
):
"""Get average time and std by benchmarking a function multiple times
:param func: The function to benchmark
:param setup: A setup function that can perform setup before running
the ``func``. It should take as inputs the ``args`` and ``kwargs``
that you provided, and return a tuple of an iterable, which will
be used to provide ``args`` to ``func``, and a dictionary, which
will be used to provide ``kwargs`` to ``func``.
:param teardown: A teardown function that can perform teardown/cleanup after running
the ``func``.
:param profile_cls: A list of the classes that want to be profiled
:param n: Number of repetitions
:param args: Positional arguments to pass to ``func`` (or ``setup``)
:param kwargs: Keyword arguments to pass to ``func`` (or ``setup``)
"""
results = []
args = args if args is not None else ()
kwargs = kwargs if kwargs is not None else {}
profiles_by_cls = {_cls: [] for _cls in profile_cls}
with TimeContext() as test_timer:
while test_timer.time_since_start() < 1e9 or len(results) < n:
if setup is not None:
new_args, new_kwargs = setup(*args, **kwargs)
else:
new_args, new_kwargs = args, kwargs
ctx_manager = ExitStack()
profiles = [ctx_manager.enter_context(Profiler(cls)) for cls in profile_cls]
with ctx_manager:
with TimeContext() as t:
func(*new_args, **new_kwargs)
for p in profiles:
profiles_by_cls[p._cls].append(p.profile)
if teardown is not None:
teardown()
results.append(t.duration)
mean_profiles = []
for profile_list in profiles_by_cls.values():
mean_profiles.append(merge_profiles(profile_list))
m = int(mean(results))
s = int(stdev(results)) if len(results) > 1 else None
print(
f'----> mean_time={round(m,3)}, std_time={round(s,3)}, iterations={len(results)}'
)
return BenchmarkResult(m, s, len(results), mean_profiles)
```
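A minimal sketch of how `benchmark_time` is driven; the timed function is invented and the import assumes the `src/utils/benchmark.py` layout above:

```python
from src.utils.benchmark import benchmark_time

def _setup():
    # Build fresh inputs for every repetition: (positional args, keyword args).
    return (), dict(values=list(range(10_000)))

def _work(values):
    sorted(values, reverse=True)

result = benchmark_time(setup=_setup, func=_work, n=3)
print(result.mean, result.std, result.iterations)  # nanoseconds, nanoseconds, repetition count
```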
#### File: src/utils/timecontext.py
```python
import time
class TimeContext:
"""Timing a code snippet with a context manager."""
def __enter__(self):
self.start = time.time_ns()
return self
def __exit__(self, typ, value, traceback):
self.duration = self.time_since_start()
def time_since_start(self):
return time.time_ns() - self.start
``` |
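Usage is straightforward; `duration` is reported in nanoseconds (the import path assumes the layout above):

```python
from src.utils.timecontext import TimeContext

with TimeContext() as t:
    sum(range(1_000_000))

print(f'took {t.duration / 1e6:.2f} ms')
```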
{
"source": "jina-ai/bert-as-service",
"score": 3
} |
#### File: client/clip_client/client.py
```python
import mimetypes
import os
import time
import warnings
from typing import (
overload,
TYPE_CHECKING,
Optional,
Union,
Iterator,
Generator,
Iterable,
Dict,
)
from urllib.parse import urlparse
if TYPE_CHECKING:
import numpy as np
from docarray import DocumentArray, Document
class Client:
def __init__(self, server: str):
"""Create a Clip client object that connects to the Clip server.
Server scheme is in the format of `scheme://netloc:port`, where
- scheme: one of grpc, websocket, http, grpcs, websockets, https
- netloc: the server ip address or hostname
- port: the public port of the server
:param server: the server URI
"""
try:
r = urlparse(server)
_port = r.port
_scheme = r.scheme
if not _scheme:
raise
except:
raise ValueError(f'{server} is not a valid scheme')
_tls = False
if _scheme in ('grpcs', 'https', 'wss'):
_scheme = _scheme[:-1]
_tls = True
if _scheme == 'ws':
_scheme = 'websocket' # temp fix for the core
if _scheme in ('grpc', 'http', 'websocket'):
_kwargs = dict(host=r.hostname, port=_port, protocol=_scheme, tls=_tls)
from jina import Client
self._client = Client(**_kwargs)
self._async_client = Client(**_kwargs, asyncio=True)
else:
raise ValueError(f'{server} is not a valid scheme')
@overload
def encode(
self,
content: Iterable[str],
*,
batch_size: Optional[int] = None,
show_progress: bool = False,
) -> 'np.ndarray':
"""Encode images and texts into embeddings where the input is an iterable of raw strings.
Each image and text must be represented as a string. The following strings are acceptable:
- local image filepath, will be considered as an image
- remote image http/https, will be considered as an image
- a dataURI, will be considered as an image
- plain text, will be considered as a sentence
:param content: an iterator of image URIs or sentences, each element is an image or a text sentence as a string.
:param batch_size: the number of elements in each request when sending ``content``
:param show_progress: if set, show a progress bar
:return: the embedding in a numpy ndarray with shape ``[N, D]``. ``N`` is in the same length of ``content``
"""
...
@overload
def encode(
self,
content: Union['DocumentArray', Iterable['Document']],
*,
batch_size: Optional[int] = None,
show_progress: bool = False,
) -> 'DocumentArray':
"""Encode images and texts into embeddings where the input is an iterable of :class:`docarray.Document`.
:param content: an iterable of :class:`docarray.Document`, each Document must be filled with `.uri`, `.text` or `.blob`.
:param batch_size: the number of elements in each request when sending ``content``
:param show_progress: if set, show a progress bar
:return: the embedding in a numpy ndarray with shape ``[N, D]``. ``N`` is in the same length of ``content``
"""
...
def encode(self, content, **kwargs):
if isinstance(content, str):
raise TypeError(
f'content must be an Iterable of [str, Document], try `.encode(["{content}"])` instead'
)
self._prepare_streaming(
not kwargs.get('show_progress'),
total=len(content) if hasattr(content, '__len__') else None,
)
with self._pbar:
self._client.post(
**self._get_post_payload(content, kwargs), on_done=self._gather_result
)
return self._unboxed_result
def _gather_result(self, r):
from rich import filesize
if not self._results:
self._pbar.start_task(self._r_task)
r = r.data.docs
self._results.extend(r)
self._pbar.update(
self._r_task,
advance=len(r),
total_size=str(
filesize.decimal(int(os.environ.get('JINA_GRPC_RECV_BYTES', '0')))
),
)
@property
def _unboxed_result(self):
if self._results.embeddings is None:
raise ValueError(
'empty embedding returned from the server. '
'This often due to a mis-config of the server, '
'restarting the server or changing the serving port number often solves the problem'
)
return self._results.embeddings if self._return_plain else self._results
def _iter_doc(self, content) -> Generator['Document', None, None]:
from rich import filesize
from docarray import Document
self._return_plain = True
if hasattr(self, '_pbar'):
self._pbar.start_task(self._s_task)
for c in content:
if isinstance(c, str):
self._return_plain = True
_mime = mimetypes.guess_type(c)[0]
if _mime and _mime.startswith('image'):
yield Document(uri=c).load_uri_to_blob()
else:
yield Document(text=c)
elif isinstance(c, Document):
if c.content_type in ('text', 'blob'):
self._return_plain = False
yield c
elif not c.blob and c.uri:
c.load_uri_to_blob()
self._return_plain = False
yield c
elif c.tensor is not None:
yield c
else:
raise TypeError(f'unsupported input type {c!r} {c.content_type}')
else:
raise TypeError(f'unsupported input type {c!r}')
if hasattr(self, '_pbar'):
self._pbar.update(
self._s_task,
advance=1,
total_size=str(
filesize.decimal(
int(os.environ.get('JINA_GRPC_SEND_BYTES', '0'))
)
),
)
def _get_post_payload(self, content, kwargs):
return dict(
on='/',
inputs=self._iter_doc(content),
request_size=kwargs.get('batch_size', 8),
total_docs=len(content) if hasattr(content, '__len__') else None,
)
def profile(self, content: Optional[str] = '') -> Dict[str, float]:
"""Profiling a single query's roundtrip including network and computation latency. Results is summarized in a table.
:param content: the content to be sent for profiling. By default it sends an empty Document
that helps you understand the network latency.
:return: the latency report in a dict.
"""
st = time.perf_counter()
r = self._client.post('/', self._iter_doc([content]), return_responses=True)
ed = (time.perf_counter() - st) * 1000
route = r[0].routes
gateway_time = (
route[0].end_time.ToMilliseconds() - route[0].start_time.ToMilliseconds()
)
clip_time = (
route[1].end_time.ToMilliseconds() - route[1].start_time.ToMilliseconds()
)
network_time = ed - gateway_time
server_network = gateway_time - clip_time
from rich.table import Table
def make_table(_title, _time, _percent):
table = Table(show_header=False, box=None)
table.add_row(
_title, f'[b]{_time:.0f}[/b]ms', f'[dim]{_percent * 100:.0f}%[/dim]'
)
return table
from rich.tree import Tree
t = Tree(make_table('Roundtrip', ed, 1))
t.add(make_table('Client-server network', network_time, network_time / ed))
t2 = t.add(make_table('Server', gateway_time, gateway_time / ed))
t2.add(
make_table(
'Gateway-CLIP network', server_network, server_network / gateway_time
)
)
t2.add(make_table('CLIP model', clip_time, clip_time / gateway_time))
from rich import print
print(t)
return {
'Roundtrip': ed,
'Client-server network': network_time,
'Server': gateway_time,
'Gateway-CLIP network': server_network,
'CLIP model': clip_time,
}
@overload
async def aencode(
self,
content: Iterator[str],
*,
batch_size: Optional[int] = None,
show_progress: bool = False,
) -> 'np.ndarray':
...
@overload
async def aencode(
self,
content: Union['DocumentArray', Iterable['Document']],
*,
batch_size: Optional[int] = None,
show_progress: bool = False,
) -> 'DocumentArray':
...
async def aencode(self, content, **kwargs):
from rich import filesize
self._prepare_streaming(
not kwargs.get('show_progress'),
total=len(content) if hasattr(content, '__len__') else None,
)
async for da in self._async_client.post(
**self._get_post_payload(content, kwargs)
):
if not self._results:
self._pbar.start_task(self._r_task)
self._results.extend(da)
self._pbar.update(
self._r_task,
advance=len(da),
total_size=str(
filesize.decimal(int(os.environ.get('JINA_GRPC_RECV_BYTES', '0')))
),
)
return self._unboxed_result
def _prepare_streaming(self, disable, total):
if total is None:
total = 500
warnings.warn(
'the length of the input is unknown, the progressbar would not be accurate.'
)
from docarray.array.mixins.io.pbar import get_pbar
self._pbar = get_pbar(disable)
os.environ['JINA_GRPC_SEND_BYTES'] = '0'
os.environ['JINA_GRPC_RECV_BYTES'] = '0'
self._s_task = self._pbar.add_task(
':arrow_up: Send', total=total, total_size=0, start=False
)
self._r_task = self._pbar.add_task(
':arrow_down: Recv', total=total, total_size=0, start=False
)
from docarray import DocumentArray
self._results = DocumentArray()
@staticmethod
def _prepare_single_doc(d: 'Document'):
if d.content_type in ('text', 'blob'):
return d
elif not d.blob and d.uri:
d.load_uri_to_blob()
return d
elif d.tensor is not None:
return d
else:
raise TypeError(f'unsupported input type {d!r} {d.content_type}')
@staticmethod
def _prepare_rank_doc(d: 'Document', _source: str = 'matches'):
_get = lambda d: getattr(d, _source)
if not _get(d):
raise ValueError(f'`.rank()` requires every doc to have `.{_source}`')
d = Client._prepare_single_doc(d)
setattr(d, _source, [Client._prepare_single_doc(c) for c in _get(d)])
return d
def _iter_rank_docs(
self, content, _source='matches'
) -> Generator['Document', None, None]:
from rich import filesize
from docarray import Document
self._return_plain = True
if hasattr(self, '_pbar'):
self._pbar.start_task(self._s_task)
for c in content:
if isinstance(c, Document):
yield self._prepare_rank_doc(c, _source)
else:
raise TypeError(f'unsupported input type {c!r}')
if hasattr(self, '_pbar'):
self._pbar.update(
self._s_task,
advance=1,
total_size=str(
filesize.decimal(
int(os.environ.get('JINA_GRPC_SEND_BYTES', '0'))
)
),
)
def _get_rank_payload(self, content, kwargs):
return dict(
on='/rank',
inputs=self._iter_rank_docs(
content, _source=kwargs.get('source', 'matches')
),
request_size=kwargs.get('batch_size', 8),
total_docs=len(content) if hasattr(content, '__len__') else None,
)
def rank(self, docs: Iterable['Document'], **kwargs) -> 'DocumentArray':
"""Rank image-text matches according to the server CLIP model.
Given a Document with nested matches, where the root is image/text and the matches is in another modality, i.e.
text/image; this method ranks the matches according to the CLIP model.
Each match now has a new score inside ``clip_score`` and matches are sorted descendingly according to this score.
More details can be found in: https://github.com/openai/CLIP#usage
:param docs: the input Documents
:return: the ranked Documents in a DocumentArray.
"""
self._prepare_streaming(
not kwargs.get('show_progress'),
total=len(docs),
)
with self._pbar:
self._client.post(
**self._get_rank_payload(docs, kwargs), on_done=self._gather_result
)
return self._results
async def arank(self, docs: Iterable['Document'], **kwargs) -> 'DocumentArray':
from rich import filesize
self._prepare_streaming(
not kwargs.get('show_progress'),
total=len(docs),
)
async for da in self._async_client.post(**self._get_rank_payload(docs, kwargs)):
if not self._results:
self._pbar.start_task(self._r_task)
self._results.extend(da)
self._pbar.update(
self._r_task,
advance=len(da),
total_size=str(
filesize.decimal(int(os.environ.get('JINA_GRPC_RECV_BYTES', '0')))
),
)
return self._results
```
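A hedged sketch of the client API defined above; the server address is an assumption (a CLIP server must already be listening there), and the image URL is the same one used in the tests of this repo.
```python
# Sketch only: assumes a CLIP server is already serving on this address.
from clip_client import Client

c = Client('grpc://0.0.0.0:51000')

# encode plain sentences and image URIs in one call
emb = c.encode(['a photo of a cat', 'https://docarray.jina.ai/_static/favicon.png'])
print(emb.shape)   # (2, D) numpy array

# profile a single round trip (prints a latency tree and returns a dict)
report = c.profile()
print(report['Roundtrip'])
```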
#### File: clip_server/executors/clip_onnx.py
```python
import os
import warnings
from multiprocessing.pool import ThreadPool
from typing import Optional, Dict
import onnxruntime as ort
from clip_server.executors.helper import (
split_img_txt_da,
preproc_image,
preproc_text,
set_rank,
)
from clip_server.model import clip
from clip_server.model.clip_onnx import CLIPOnnxModel
from jina import Executor, requests, DocumentArray, monitor
class CLIPEncoder(Executor):
def __init__(
self,
name: str = 'ViT-B/32',
device: Optional[str] = None,
num_worker_preprocess: int = 4,
minibatch_size: int = 16,
**kwargs,
):
super().__init__(**kwargs)
self._preprocess_tensor = clip._transform_ndarray(clip.MODEL_SIZE[name])
self._pool = ThreadPool(processes=num_worker_preprocess)
self._minibatch_size = minibatch_size
self._model = CLIPOnnxModel(name)
import torch
if not device:
self._device = 'cuda' if torch.cuda.is_available() else 'cpu'
else:
self._device = device
# define the priority order for the execution providers
providers = ['CPUExecutionProvider']
# prefer CUDA Execution Provider over CPU Execution Provider
if self._device.startswith('cuda'):
providers.insert(0, 'CUDAExecutionProvider')
sess_options = ort.SessionOptions()
# Enables all available optimizations including layout optimizations
sess_options.graph_optimization_level = (
ort.GraphOptimizationLevel.ORT_ENABLE_ALL
)
if not self._device.startswith('cuda') and (
'OMP_NUM_THREADS' not in os.environ
and hasattr(self.runtime_args, 'replicas')
):
replicas = getattr(self.runtime_args, 'replicas', 1)
num_threads = max(1, torch.get_num_threads() // replicas)
if num_threads < 2:
warnings.warn(
f'Too many replicas ({replicas}) vs too few threads {num_threads} may result in '
f'sub-optimal performance.'
)
# Run the operators in the graph in parallel (not support the CUDA Execution Provider)
sess_options.execution_mode = ort.ExecutionMode.ORT_PARALLEL
# The number of threads used to parallelize the execution of the graph (across nodes)
sess_options.inter_op_num_threads = 1
sess_options.intra_op_num_threads = max(num_threads, 1)
self._model.start_sessions(sess_options=sess_options, providers=providers)
@monitor(name='preprocess_images_seconds')
def _preproc_images(self, docs: 'DocumentArray'):
return preproc_image(
docs, preprocess_fn=self._preprocess_tensor, return_np=True
)
@monitor(name='preprocess_texts_seconds')
def _preproc_texts(self, docs: 'DocumentArray'):
return preproc_text(docs, return_np=True)
@monitor(name='encode_images_seconds')
def _encode_images(self, docs: 'DocumentArray'):
docs.embeddings = self._model.encode_image(docs.tensors)
@monitor(name='encode_texts_seconds')
def _encode_texts(self, docs: 'DocumentArray'):
docs.embeddings = self._model.encode_text(docs.tensors)
@requests(on='/rank')
async def rank(self, docs: 'DocumentArray', parameters: Dict, **kwargs):
await self.encode(docs['@r,m'])
set_rank(docs)
@requests
async def encode(self, docs: 'DocumentArray', **kwargs):
_img_da = DocumentArray()
_txt_da = DocumentArray()
for d in docs:
split_img_txt_da(d, _img_da, _txt_da)
# for image
if _img_da:
for minibatch, _contents in _img_da.map_batch(
self._preproc_images,
batch_size=self._minibatch_size,
pool=self._pool,
):
self._encode_images(minibatch)
# recover original content
try:
_ = iter(_contents)
for _d, _ct in zip(minibatch, _contents):
_d.content = _ct
except TypeError:
pass
# for text
if _txt_da:
for minibatch, _contents in _txt_da.map_batch(
self._preproc_texts,
batch_size=self._minibatch_size,
pool=self._pool,
):
self._encode_texts(minibatch)
# recover original content
try:
_ = iter(_contents)
for _d, _ct in zip(minibatch, _contents):
_d.content = _ct
except TypeError:
pass
# drop tensors
docs.tensors = None
return docs
```
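A hedged sketch of serving the ONNX encoder above with a Jina Flow; the port and model name are illustrative assumptions.
```python
# Sketch only: serve the CLIPEncoder above locally; port and model name are assumptions.
from jina import Flow

f = Flow(port=51000).add(uses=CLIPEncoder, uses_with={'name': 'ViT-B/32'})
with f:
    f.block()  # keep the Flow alive until interrupted
```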
#### File: clip_server/executors/helper.py
```python
from typing import Tuple, List, Callable, Any
import numpy as np
from docarray import Document, DocumentArray
from docarray.math.distance.numpy import cosine
from clip_server.model import clip
def numpy_softmax(x: 'np.ndarray', axis: int = -1) -> 'np.ndarray':
max = np.max(x, axis=axis, keepdims=True)
e_x = np.exp(x - max)
div = np.sum(e_x, axis=axis, keepdims=True)
f_x = e_x / div
return f_x
def preproc_image(
da: 'DocumentArray',
preprocess_fn: Callable,
device: str = 'cpu',
return_np: bool = False,
) -> Tuple['DocumentArray', List[Any]]:
contents = da.contents
for d in da:
if d.blob:
d.convert_blob_to_image_tensor()
elif d.tensor is None and d.uri:
# in case user uses HTTP protocol and send data via curl not using .blob (base64), but in .uri
d.load_uri_to_image_tensor()
d.tensor = preprocess_fn(d.tensor).detach()
if return_np:
da.tensors = da.tensors.cpu().numpy().astype(np.float32)
else:
da.tensors = da.tensors.to(device)
return da, contents
def preproc_text(
da: 'DocumentArray', device: str = 'cpu', return_np: bool = False
) -> Tuple['DocumentArray', List[Any]]:
contents = da.contents
da.tensors = clip.tokenize(contents).detach()
if return_np:
da.tensors = da.tensors.cpu().numpy().astype(np.int64)
else:
da.tensors = da.tensors.to(device)
da[:, 'mime_type'] = 'text'
return da, contents
def split_img_txt_da(doc: 'Document', img_da: 'DocumentArray', txt_da: 'DocumentArray'):
if doc.uri:
img_da.append(doc)
elif doc.blob or (doc.tensor is not None):
img_da.append(doc)
elif doc.text:
txt_da.append(doc)
def set_rank(docs, _logit_scale=np.exp(4.60517)):
queries = docs
candidates = docs['@m']
query_embeddings = queries.embeddings # Q X D
candidate_embeddings = candidates.embeddings # C = Sum(C_q1, C_q2, C_q3,...) x D
cosine_scores = 1 - cosine(
query_embeddings, candidate_embeddings
    ) # Q x C block matrix
start_idx = 0
for q, _cosine_scores in zip(docs, cosine_scores):
_candidates = q.matches
end_idx = start_idx + len(_candidates)
_candidate_cosines = _cosine_scores[start_idx:end_idx]
_candidate_softmaxs = numpy_softmax(_logit_scale * _candidate_cosines)
for c, _c_score, _s_score in zip(
_candidates, _candidate_cosines, _candidate_softmaxs
):
c.scores['clip_score'].value = _s_score
c.scores['clip_score'].op_name = 'softmax'
c.scores['clip_score_cosine'].value = _c_score
c.scores['clip_score_cosine'].op_name = 'cosine'
start_idx = end_idx
_candidates.embeddings = None # remove embedding to save bandwidth
final = sorted(
_candidates, key=lambda _m: _m.scores['clip_score'].value, reverse=True
)
q.matches = final
```
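A quick sanity check of ``numpy_softmax`` defined above: every row of the output sums to one.
```python
# Rows of the softmax output sum to 1.
import numpy as np

logits = np.array([[2.0, 1.0, 0.1],
                   [0.5, 0.5, 0.5]])
probs = numpy_softmax(logits)
print(probs.sum(axis=-1))   # -> [1. 1.]
```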
#### File: clip_server/model/clip_onnx.py
```python
import os
from .clip import _download, available_models
_S3_BUCKET = 'https://clip-as-service.s3.us-east-2.amazonaws.com/models/onnx/'
_MODELS = {
'RN50': ('RN50/textual.onnx', 'RN50/visual.onnx'),
'RN101': ('RN101/textual.onnx', 'RN101/visual.onnx'),
'RN50x4': ('RN50x4/textual.onnx', 'RN50x4/visual.onnx'),
'RN50x16': ('RN50x16/textual.onnx', 'RN50x16/visual.onnx'),
'RN50x64': ('RN50x64/textual.onnx', 'RN50x64/visual.onnx'),
'ViT-B/32': ('ViT-B-32/textual.onnx', 'ViT-B-32/visual.onnx'),
'ViT-B/16': ('ViT-B-16/textual.onnx', 'ViT-B-16/visual.onnx'),
'ViT-L/14': ('ViT-L-14/textual.onnx', 'ViT-L-14/visual.onnx'),
'ViT-L/14@336px': ('ViT-L-14@336px/textual.onnx', 'ViT-L-14@336px/visual.onnx'),
}
class CLIPOnnxModel:
def __init__(self, name: str = None):
if name in _MODELS:
cache_dir = os.path.expanduser(f'~/.cache/clip/{name.replace("/", "-")}')
self._textual_path = _download(
_S3_BUCKET + _MODELS[name][0], cache_dir, with_resume=True
)
self._visual_path = _download(
_S3_BUCKET + _MODELS[name][1], cache_dir, with_resume=True
)
else:
raise RuntimeError(
f'Model {name} not found; available models = {available_models()}'
)
def start_sessions(
self,
**kwargs,
):
import onnxruntime as ort
self._visual_session = ort.InferenceSession(self._visual_path, **kwargs)
self._visual_session.disable_fallback()
self._textual_session = ort.InferenceSession(self._textual_path, **kwargs)
self._textual_session.disable_fallback()
def encode_image(self, onnx_image):
onnx_input_image = {self._visual_session.get_inputs()[0].name: onnx_image}
(visual_output,) = self._visual_session.run(None, onnx_input_image)
return visual_output
def encode_text(self, onnx_text):
onnx_input_text = {self._textual_session.get_inputs()[0].name: onnx_text}
(textual_output,) = self._textual_session.run(None, onnx_input_text)
return textual_output
```
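A hedged sketch of using the ONNX wrapper above directly; the model files are downloaded on first use, the zero token tensor is only a placeholder, and the 512-dim output is what ViT-B/32 is expected to produce.
```python
# Sketch only: downloads the ONNX weights on first run; zero tokens are placeholders.
import numpy as np

model = CLIPOnnxModel('ViT-B/32')
model.start_sessions(providers=['CPUExecutionProvider'])

tokens = np.zeros((1, 77), dtype=np.int64)   # CLIP uses a 77-token context
print(model.encode_text(tokens).shape)       # (1, 512) for ViT-B/32
```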
#### File: clip_server/model/clip_trt.py
```python
import os
try:
import tensorrt as trt
from tensorrt.tensorrt import Logger, Runtime
from clip_server.model.trt_utils import load_engine, build_engine, save_engine
except ImportError:
raise ImportError(
"It seems that TensorRT is not yet installed. "
"It is required when you declare TensorRT backend."
"Please find installation instruction on "
"https://docs.nvidia.com/deeplearning/tensorrt/install-guide/index.html"
)
from .clip import _download, MODEL_SIZE
_S3_BUCKET = 'https://clip-as-service.s3.us-east-2.amazonaws.com/models/tensorrt/'
_MODELS = {
'RN50': ('RN50/textual.trt', 'RN50/visual.trt'),
'RN101': ('RN101/textual.trt', 'RN101/visual.trt'),
'RN50x4': ('RN50x4/textual.trt', 'RN50x4/visual.trt'),
# 'RN50x16': ('RN50x16/textual.trt', 'RN50x16/visual.trt'),
# 'RN50x64': ('RN50x64/textual.trt', 'RN50x64/visual.trt'),
'ViT-B/32': ('ViT-B-32/textual.trt', 'ViT-B-32/visual.trt'),
'ViT-B/16': ('ViT-B-16/textual.trt', 'ViT-B-16/visual.trt'),
'ViT-L/14': ('ViT-L-14/textual.trt', 'ViT-L-14/visual.trt'),
}
class CLIPTensorRTModel:
def __init__(
self,
name: str = None,
):
if name in _MODELS:
cache_dir = os.path.expanduser(f'~/.cache/clip/{name.replace("/", "-")}')
self._textual_path = _download(_S3_BUCKET + _MODELS[name][0], cache_dir)
self._visual_path = _download(_S3_BUCKET + _MODELS[name][1], cache_dir)
else:
raise RuntimeError(
                f'Model {name} not found or does not support the Nvidia TensorRT backend; available models = {list(_MODELS.keys())}'
)
self._name = name
def start_engines(self):
import torch
trt_logger: Logger = trt.Logger(trt.Logger.ERROR)
runtime: Runtime = trt.Runtime(trt_logger)
compute_capacity = torch.cuda.get_device_capability()
if compute_capacity != (8, 6):
            print(
                f'The pre-built engine plan targets compute capability 8.6, but this device '
                f'reports compute capability {compute_capacity}; rebuilding the TensorRT engine.'
            )
from clip_server.model.clip_onnx import CLIPOnnxModel
onnx_model = CLIPOnnxModel(self._name)
visual_engine = build_engine(
runtime=runtime,
onnx_file_path=onnx_model._visual_path,
logger=trt_logger,
min_shape=(1, 3, MODEL_SIZE[self._name], MODEL_SIZE[self._name]),
optimal_shape=(
768,
3,
MODEL_SIZE[self._name],
MODEL_SIZE[self._name],
),
max_shape=(
1024,
3,
MODEL_SIZE[self._name],
MODEL_SIZE[self._name],
),
workspace_size=10000 * 1024 * 1024,
fp16=False,
int8=False,
)
save_engine(visual_engine, self._visual_path)
text_engine = build_engine(
runtime=runtime,
onnx_file_path=onnx_model._textual_path,
logger=trt_logger,
min_shape=(1, 77),
optimal_shape=(768, 77),
max_shape=(1024, 77),
workspace_size=10000 * 1024 * 1024,
fp16=False,
int8=False,
)
save_engine(text_engine, self._textual_path)
self._textual_engine = load_engine(runtime, self._textual_path)
self._visual_engine = load_engine(runtime, self._visual_path)
def encode_image(self, onnx_image):
(visual_output,) = self._visual_engine({'input': onnx_image})
return visual_output
def encode_text(self, onnx_text):
(textual_output,) = self._textual_engine({'input': onnx_text})
return textual_output
``` |
{
"source": "jina-ai/clip-as-service",
"score": 2
} |
#### File: clip-as-service/tests/test_ranker.py
```python
import os
import pytest
from clip_client import Client
from clip_server.executors.clip_torch import CLIPEncoder as TorchCLIPEncoder
from clip_server.executors.clip_onnx import CLIPEncoder as ONNXCLILPEncoder
from docarray import DocumentArray, Document
@pytest.mark.asyncio
@pytest.mark.parametrize('encoder_class', [TorchCLIPEncoder, ONNXCLILPEncoder])
async def test_torch_executor_rank_img2texts(encoder_class):
ce = encoder_class()
da = DocumentArray.from_files(
f'{os.path.dirname(os.path.abspath(__file__))}/**/*.jpg'
)
for d in da:
d.matches.append(Document(text='hello, world!'))
d.matches.append(Document(text='goodbye, world!'))
await ce.rank(da, {})
print(da['@m', 'scores__clip_score__value'])
for d in da:
for c in d.matches:
assert c.scores['clip_score'].value is not None
@pytest.mark.asyncio
@pytest.mark.parametrize('encoder_class', [TorchCLIPEncoder, ONNXCLILPEncoder])
async def test_torch_executor_rank_text2imgs(encoder_class):
ce = encoder_class()
db = DocumentArray(
[Document(text='hello, world!'), Document(text='goodbye, world!')]
)
for d in db:
d.matches.extend(
DocumentArray.from_files(
f'{os.path.dirname(os.path.abspath(__file__))}/**/*.jpg'
)
)
await ce.rank(db, {})
print(db['@m', 'scores__clip_score__value'])
for d in db:
for c in d.matches:
assert c.scores['clip_score'].value is not None
@pytest.mark.parametrize(
'd',
[
Document(
uri='https://docarray.jina.ai/_static/favicon.png',
matches=[Document(text='hello, world'), Document(text='goodbye, world')],
),
Document(
text='hello, world',
matches=[
Document(uri='https://docarray.jina.ai/_static/favicon.png'),
Document(
uri=f'{os.path.dirname(os.path.abspath(__file__))}/img/00000.jpg'
),
],
),
],
)
def test_docarray_inputs(make_flow, d):
c = Client(server=f'grpc://0.0.0.0:{make_flow.port}')
r = c.rank([d])
assert isinstance(r, DocumentArray)
rv = r['@m', 'scores__clip_score__value']
for v in rv:
assert v is not None
assert v > 0
@pytest.mark.parametrize(
'd',
[
Document(
uri='https://docarray.jina.ai/_static/favicon.png',
matches=[Document(text='hello, world'), Document(text='goodbye, world')],
),
Document(
text='hello, world',
matches=[
Document(uri='https://docarray.jina.ai/_static/favicon.png'),
Document(
uri=f'{os.path.dirname(os.path.abspath(__file__))}/img/00000.jpg'
),
],
),
],
)
@pytest.mark.asyncio
async def test_async_arank(make_flow, d):
c = Client(server=f'grpc://0.0.0.0:{make_flow.port}')
r = await c.arank([d])
assert isinstance(r, DocumentArray)
rv = r['@m', 'scores__clip_score__value']
for v in rv:
assert v is not None
assert v > 0
```
#### File: clip-as-service/tests/test_tensorrt.py
```python
import os
import pytest
from docarray import Document, DocumentArray
from jina import Flow
from clip_client.client import Client
@pytest.mark.gpu
@pytest.mark.parametrize(
'inputs',
[
[Document(text='hello, world'), Document(text='goodbye, world')],
DocumentArray([Document(text='hello, world'), Document(text='goodbye, world')]),
lambda: (Document(text='hello, world') for _ in range(10)),
DocumentArray(
[
Document(uri='https://docarray.jina.ai/_static/favicon.png'),
Document(
uri=f'{os.path.dirname(os.path.abspath(__file__))}/img/00000.jpg'
),
Document(text='hello, world'),
Document(
uri=f'{os.path.dirname(os.path.abspath(__file__))}/img/00000.jpg'
).load_uri_to_image_tensor(),
]
),
DocumentArray.from_files(
f'{os.path.dirname(os.path.abspath(__file__))}/**/*.jpg'
),
],
)
def test_docarray_inputs(make_trt_flow, inputs):
c = Client(server=f'grpc://0.0.0.0:{make_trt_flow.port}')
r = c.encode(inputs if not callable(inputs) else inputs())
assert isinstance(r, DocumentArray)
assert r.embeddings.shape
@pytest.mark.gpu
@pytest.mark.asyncio
@pytest.mark.parametrize(
'd',
[
Document(
uri='https://docarray.jina.ai/_static/favicon.png',
matches=[Document(text='hello, world'), Document(text='goodbye, world')],
),
Document(
text='hello, world',
matches=[
Document(uri='https://docarray.jina.ai/_static/favicon.png'),
Document(
uri=f'{os.path.dirname(os.path.abspath(__file__))}/img/00000.jpg'
),
],
),
],
)
async def test_async_arank(make_trt_flow, d):
c = Client(server=f'grpc://0.0.0.0:{make_trt_flow.port}')
r = await c.arank([d])
assert isinstance(r, DocumentArray)
rv = r['@m', 'scores__clip_score__value']
for v in rv:
assert v is not None
assert v > 0
``` |
{
"source": "jina-ai/docarray",
"score": 2
} |
#### File: storage/elastic/backend.py
```python
import copy
import uuid
from dataclasses import dataclass, field
from typing import (
Dict,
Optional,
TYPE_CHECKING,
Union,
List,
Iterable,
Any,
Mapping,
)
import numpy as np
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk
from ..base.backend import BaseBackendMixin
from .... import Document
from ....helper import dataclass_from_dict
if TYPE_CHECKING:
from ....typing import (
DocumentArraySourceType,
)
from ....typing import DocumentArraySourceType, ArrayType
@dataclass
class ElasticConfig:
n_dim: int # dims in elastic
distance: str = 'cosine' # similarity in elastic
hosts: Union[
str, List[Union[str, Mapping[str, Union[str, int]]]], None
] = 'http://localhost:9200'
index_name: Optional[str] = None
es_config: Dict[str, Any] = field(default_factory=dict)
index_text: bool = False
tag_indices: List[str] = field(default_factory=list)
batch_size: int = 64
ef_construction: Optional[int] = None
m: Optional[int] = None
class BackendMixin(BaseBackendMixin):
"""Provide necessary functions to enable this storage backend."""
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
config: Optional[Union[ElasticConfig, Dict]] = None,
**kwargs,
):
config = copy.deepcopy(config)
if not config:
raise ValueError('Empty config is not allowed for Elastic storage')
elif isinstance(config, dict):
config = dataclass_from_dict(ElasticConfig, config)
if config.index_name is None:
self._persist = False
id = uuid.uuid4().hex
config.index_name = 'index_name__' + id
else:
self._persist = True
self._index_name_offset2id = 'offset2id__' + config.index_name
self._config = config
self.n_dim = self._config.n_dim
self._client = self._build_client()
self._build_offset2id_index()
# Note super()._init_storage() calls _load_offset2ids which calls _get_offset2ids_meta
super()._init_storage()
if _docs is None:
return
elif isinstance(_docs, Iterable):
self.extend(_docs)
else:
if isinstance(_docs, Document):
self.append(_docs)
def _build_offset2id_index(self):
if not self._client.indices.exists(index=self._index_name_offset2id):
self._client.indices.create(index=self._index_name_offset2id, ignore=[404])
def _build_schema_from_elastic_config(self, elastic_config):
da_schema = {
'mappings': {
'dynamic': 'true',
'_source': {'enabled': 'true'},
'properties': {
'embedding': {
'type': 'dense_vector',
'dims': elastic_config.n_dim,
'index': 'true',
'similarity': elastic_config.distance,
},
'text': {'type': 'text', 'index': elastic_config.index_text},
},
}
}
if elastic_config.tag_indices:
for index in elastic_config.tag_indices:
da_schema['mappings']['properties'][index] = {
'type': 'text',
'index': True,
}
if self._config.m or self._config.ef_construction:
index_options = {
'type': 'hnsw',
'm': self._config.m or 16,
'ef_construction': self._config.ef_construction or 100,
}
da_schema['mappings']['properties']['embedding'][
'index_options'
] = index_options
return da_schema
def _build_client(self):
client = Elasticsearch(
hosts=self._config.hosts,
**self._config.es_config,
)
schema = self._build_schema_from_elastic_config(self._config)
if not client.indices.exists(index=self._config.index_name):
client.indices.create(
index=self._config.index_name, mappings=schema['mappings']
)
client.indices.refresh(index=self._config.index_name)
return client
def _send_requests(self, request):
bulk(self._client, request)
def _refresh(self, index_name):
self._client.indices.refresh(index=index_name)
def _doc_id_exists(self, doc_id):
return self._client.exists(index=self._config.index_name, id=doc_id)
def _update_offset2ids_meta(self):
"""Update the offset2ids in elastic"""
if self._client.indices.exists(index=self._index_name_offset2id):
requests = [
{
'_op_type': 'index',
'_id': offset_, # note offset goes here because it's what we want to get by
'_index': self._index_name_offset2id,
'blob': f'{id_}',
} # id here
for offset_, id_ in enumerate(self._offset2ids.ids)
]
r = bulk(self._client, requests)
self._client.indices.refresh(index=self._index_name_offset2id)
def _get_offset2ids_meta(self) -> List:
"""Return the offset2ids stored in elastic
:return: a list containing ids
:raises ValueError: error is raised if index _client is not found or no offsets are found
"""
if not self._client:
raise ValueError('Elastic client does not exist')
n_docs = self._client.count(index=self._index_name_offset2id)["count"]
if n_docs != 0:
            offsets = list(range(n_docs))
resp = self._client.mget(index=self._index_name_offset2id, ids=offsets)
ids = [x['_source']['blob'] for x in resp['docs']]
return ids
else:
return []
def _map_embedding(self, embedding: 'ArrayType') -> List[float]:
from ....math.helper import EPSILON
if embedding is None:
embedding = np.zeros(self.n_dim) + EPSILON
else:
from ....math.ndarray import to_numpy_array
embedding = to_numpy_array(embedding)
if embedding.ndim > 1:
embedding = np.asarray(embedding).squeeze()
if np.all(embedding == 0):
embedding = embedding + EPSILON
return embedding # .tolist()
def __getstate__(self):
d = dict(self.__dict__)
del d['_client']
return d
def __setstate__(self, state):
self.__dict__ = state
self._client = self._build_client()
# def clear(self):
# self._client.indices.delete(index=self._config.index_name)
```
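The mixin above is normally reached through ``DocumentArray`` construction; a hedged sketch, assuming a running Elasticsearch instance at the default ``http://localhost:9200`` and that the storage key is ``'elasticsearch'`` as in docarray's documented API.
```python
# Sketch only: requires a running Elasticsearch at http://localhost:9200.
import numpy as np
from docarray import Document, DocumentArray

da = DocumentArray(
    storage='elasticsearch',
    config={'n_dim': 128, 'distance': 'cosine'},
)
da.extend(Document(embedding=np.random.rand(128)) for _ in range(10))
print(len(da))  # 10
```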
#### File: storage/elastic/seqlike.py
```python
from typing import Union, Iterable, Dict
from ..base.seqlike import BaseSequenceLikeMixin
from .... import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods for DocumentArray with Elastic as storage"""
def __eq__(self, other):
"""Compare this object to the other, returns True if and only if other
as the same type as self and other has the same meta information
:param other: the other object to check for equality
:return: ``True`` if other is equal to self
"""
        # two storage-backed DocumentArrays are considered the same if they have the same client meta data and config
return (
type(self) is type(other)
and self._client.get_meta() == other._client.get_meta()
and self._config == other._config
)
def __len__(self):
"""Return the length of :class:`DocumentArray` that uses Elastic as storage
:return: the length of this :class:`DocumentArrayElastic` object
"""
try:
return self._client.count(index=self._config.index_name)["count"]
except:
return 0
def __contains__(self, x: Union[str, 'Document']):
"""Check if ``x`` is contained in this :class:`DocumentArray` with Elastic storage
:param x: the id of the document to check or the document object itself
:return: True if ``x`` is contained in self
"""
if isinstance(x, str):
return self._doc_id_exists(x)
elif isinstance(x, Document):
return self._doc_id_exists(x.id)
else:
return False
def __del__(self):
"""Delete this :class:`DocumentArrayElastic` object"""
self._save_offset2ids()
# if not self._persist:
# self._offset2ids.clear()
def __repr__(self):
"""Return the string representation of :class:`DocumentArrayElastic` object
:return: string representation of this object
"""
return f'<{self.__class__.__name__} (length={len(self)}) at {id(self)}>'
def _upload_batch(self, docs: Iterable['Document']):
batch = []
for doc in docs:
batch.append(self._document_to_elastic(doc))
if len(batch) > self._config.batch_size:
self._send_requests(batch)
self._refresh(self._config.index_name)
batch = []
if len(batch) > 0:
self._send_requests(batch)
self._refresh(self._config.index_name)
def extend(self, docs: Iterable['Document']):
docs = list(docs)
self._upload_batch(docs)
self._offset2ids.extend([doc.id for doc in docs])
```
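A short sketch of the sequence-like behaviour implemented above, under the same local-Elasticsearch assumption as the previous example:
```python
# Sketch only: same local-Elasticsearch assumption as above.
import numpy as np
from docarray import Document, DocumentArray

da = DocumentArray(storage='elasticsearch', config={'n_dim': 3})
d = Document(embedding=np.array([1.0, 0.0, 0.0]))
da.extend([d])
print(len(da))     # 1, via __len__
print(d.id in da)  # True, via __contains__
print(da)          # uses the __repr__ defined above
```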
#### File: storage/qdrant/getsetdel.py
```python
from abc import abstractmethod
from typing import Iterable, Iterator
from qdrant_client import QdrantClient
from qdrant_client.http.exceptions import UnexpectedResponse
from qdrant_client.http.models.models import (
PointIdsList,
PointsList,
ScrollRequest,
PointStruct,
)
from docarray import Document
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
class GetSetDelMixin(BaseGetSetDelMixin):
@property
@abstractmethod
def client(self) -> QdrantClient:
raise NotImplementedError()
@property
@abstractmethod
def serialization_config(self) -> dict:
raise NotImplementedError()
@property
@abstractmethod
def n_dim(self) -> int:
raise NotImplementedError()
@property
@abstractmethod
def collection_name(self) -> str:
raise NotImplementedError()
@property
@abstractmethod
def scroll_batch_size(self) -> int:
raise NotImplementedError()
def _upload_batch(self, docs: Iterable['Document']):
batch = []
for doc in docs:
batch.append(self._document_to_qdrant(doc))
if len(batch) > self.scroll_batch_size:
self.client.http.points_api.upsert_points(
collection_name=self.collection_name,
wait=True,
point_insert_operations=PointsList(points=batch),
)
batch = []
if len(batch) > 0:
self.client.http.points_api.upsert_points(
collection_name=self.collection_name,
wait=True,
point_insert_operations=PointsList(points=batch),
)
def _qdrant_to_document(self, qdrant_record: dict) -> 'Document':
return Document.from_base64(
qdrant_record['_serialized'], **self.serialization_config
)
def _document_to_qdrant(self, doc: 'Document') -> 'PointStruct':
return PointStruct(
id=self._map_id(doc.id),
payload=dict(_serialized=doc.to_base64(**self.serialization_config)),
vector=self._map_embedding(doc.embedding),
)
def _get_doc_by_id(self, _id: str) -> 'Document':
try:
resp = self.client.http.points_api.get_point(
collection_name=self.collection_name, id=self._map_id(_id)
)
return self._qdrant_to_document(resp.result.payload)
except UnexpectedResponse as response_error:
if response_error.status_code in [404, 400]:
raise KeyError(_id)
def _del_doc_by_id(self, _id: str):
self.client.http.points_api.delete_points(
collection_name=self.collection_name,
wait=True,
points_selector=PointIdsList(points=[self._map_id(_id)]),
)
def _set_doc_by_id(self, _id: str, value: 'Document'):
if _id != value.id:
self._del_doc_by_id(_id)
self.client.http.points_api.upsert_points(
collection_name=self.collection_name,
wait=True,
point_insert_operations=PointsList(
points=[self._document_to_qdrant(value)]
),
)
def scan(self) -> Iterator['Document']:
offset = None
while True:
response = self.client.http.points_api.scroll_points(
collection_name=self.collection_name,
scroll_request=ScrollRequest(
offset=offset,
limit=self.scroll_batch_size,
with_payload=['_serialized'],
with_vector=False,
),
)
for point in response.result.points:
yield self._qdrant_to_document(point.payload)
if response.result.next_page_offset:
offset = response.result.next_page_offset
else:
break
def _load_offset2ids(self):
ids = self._get_offset2ids_meta()
self._offset2ids = Offset2ID(ids)
def _save_offset2ids(self):
self._update_offset2ids_meta()
def _clear_storage(self):
self._client.recreate_collection(
self.collection_name,
vector_size=self.n_dim,
distance=self.distance,
)
```
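The Qdrant mixin above is likewise reached through ``DocumentArray``; a hedged sketch, assuming a Qdrant server running on its default local port and that ``'qdrant'`` is the storage key.
```python
# Sketch only: requires a running Qdrant server on its default port.
import numpy as np
from docarray import Document, DocumentArray

da = DocumentArray(storage='qdrant', config={'n_dim': 64})
d = Document(embedding=np.random.rand(64))
da.append(d)
print(len(da))                    # 1
print(da[d.id].embedding.shape)   # (64,), round-tripped through _get_doc_by_id
```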
#### File: document/mixins/plot.py
```python
import copy
from typing import Optional
import numpy as np
from ...helper import deprecate_by
class PlotMixin:
"""Provide helper functions for :class:`Document` to plot and visualize itself."""
def _ipython_display_(self):
"""Displays the object in IPython as a side effect"""
self.summary()
def __rich_console__(self, console, options):
yield f":page_facing_up: [b]Document[/b]: [cyan]{self.id}[cyan]"
from rich.table import Table
from rich import box
my_table = Table(
'Attribute', 'Value', width=80, box=box.ROUNDED, highlight=True
)
for f in self.non_empty_fields:
if f.startswith('_'):
continue
elif f in ('text', 'blob') and len(getattr(self, f)) > 100:
v = getattr(self, f)
my_table.add_row(f, str(v)[:100] + f'... [dim](length: {len(v)})')
elif f in ('embedding', 'tensor'):
from ...math.ndarray import to_numpy_array
v = to_numpy_array(getattr(self, f))
if v.squeeze().ndim == 1 and len(v) < 1000:
from .rich_embedding import ColorBoxEmbedding
v = ColorBoxEmbedding(v.squeeze())
else:
v = f'{type(getattr(self, f))} in shape {v.shape}, dtype: {v.dtype}'
my_table.add_row(f, v)
elif f not in ('id', 'chunks', 'matches'):
my_table.add_row(f, str(getattr(self, f)))
if my_table.rows:
yield my_table
def summary(self) -> None:
"""Print non-empty fields and nested structure of this Document object."""
from rich import print
print(self._plot_recursion())
def _plot_recursion(self, tree=None):
if tree is None:
from rich.tree import Tree
tree = Tree(self)
else:
tree = tree.add(self)
for a in ('matches', 'chunks'):
if getattr(self, a):
if a == 'chunks':
_icon = ':diamond_with_a_dot:'
else:
_icon = ':large_orange_diamond:'
_match_tree = tree.add(f'{_icon} [b]{a.capitalize()}[/b]')
for d in getattr(self, a):
d._plot_recursion(_match_tree)
return tree
def display(self):
"""Plot image data from :attr:`.tensor` or :attr:`.uri`."""
from IPython.display import Image, display
if self.uri:
if self.mime_type.startswith('audio') or self.uri.startswith('data:audio/'):
uri = _convert_display_uri(self.uri, self.mime_type)
_html5_audio_player(uri)
elif self.mime_type.startswith('video') or self.uri.startswith(
'data:video/'
):
uri = _convert_display_uri(self.uri, self.mime_type)
_html5_video_player(uri)
elif self.uri.startswith('data:image/'):
_html5_image(self.uri)
else:
display(Image(self.uri))
elif self.tensor is not None:
try:
import PIL.Image
p = PIL.Image.fromarray(self.tensor)
if p.mode != 'RGB':
raise
display(p)
except:
import matplotlib.pyplot as plt
plt.matshow(self.tensor)
else:
self.summary()
def plot_matches_sprites(
self,
top_k: int = 10,
channel_axis: int = -1,
inv_normalize: bool = False,
skip_empty: bool = False,
canvas_size: int = 1920,
min_size: int = 100,
output: Optional[str] = None,
):
"""Generate a sprite image for the query and its matching images in this Document object.
An image sprite is a collection of images put into a single image. Query image is on the left
followed by matching images. The Document object should contain matches.
:param top_k: the number of top matching documents to show in the sprite.
:param channel_axis: the axis id of the color channel, ``-1`` indicates the color channel info at the last axis
:param inv_normalize: If set to True, inverse the normalization of a float32 image :attr:`.tensor` into a uint8
image :attr:`.tensor` inplace.
        :param skip_empty: skip matches which have no .uri or .tensor.
:param canvas_size: the width of the canvas
:param min_size: the minimum size of the image
:param output: Optional path to store the visualization. If not given, show in UI
"""
if not self or not self.matches:
raise ValueError(f'{self!r} is empty or has no matches')
if not self.uri and self.tensor is None:
raise ValueError(
f'Document has neither `uri` nor `tensor`, cannot be plotted'
)
if top_k <= 0:
            raise ValueError(f'`top_k` must be larger than 0, receiving {top_k}')
import matplotlib.pyplot as plt
img_per_row = top_k + 2
if top_k > len(self.matches):
img_per_row = len(self.matches) + 2
img_size = int((canvas_size - 50) / img_per_row)
if img_size < min_size:
# image is too small, recompute the image size and canvas size
img_size = min_size
canvas_size = img_per_row * img_size + 50
try:
_d = copy.deepcopy(self)
if _d.content_type != 'tensor':
_d.load_uri_to_image_tensor() # the channel axis is -1
if inv_normalize:
# inverse normalise to uint8 and set the channel axis to -1
_d.set_image_tensor_inv_normalization(channel_axis)
_d.set_image_tensor_channel_axis(channel_axis, -1)
# Maintain the aspect ratio keeping the width fixed
h, w, _ = _d.tensor.shape
img_h, img_w = int(h * (img_size / float(w))), img_size
sprite_img = np.ones([img_h + 20, canvas_size, 3], dtype='uint8')
_d.set_image_tensor_shape(shape=(img_h, img_w))
sprite_img[10 : img_h + 10, 10 : 10 + img_w] = _d.tensor
pos = canvas_size // img_per_row
for col_id, d in enumerate(self.matches, start=2):
if not d.uri and d.tensor is None:
if skip_empty:
continue
else:
raise ValueError(
f'Document match has neither `uri` nor `tensor`, cannot be plotted'
)
_d = copy.deepcopy(d)
if _d.content_type != 'tensor':
_d.load_uri_to_image_tensor()
if inv_normalize:
_d.set_image_tensor_inv_normalization(channel_axis=channel_axis)
_d.set_image_tensor_channel_axis(
channel_axis, -1
).set_image_tensor_shape(shape=(img_h, img_w))
# paste it on the main canvas
sprite_img[
10 : img_h + 10,
(col_id * pos) : ((col_id * pos) + img_w),
] = _d.tensor
col_id += 1
if col_id >= img_per_row:
break
except Exception as ex:
raise ValueError('Bad image tensor. Try different `channel_axis`') from ex
from PIL import Image
im = Image.fromarray(sprite_img)
if output:
with open(output, 'wb') as fp:
im.save(fp)
else:
plt.figure(figsize=(img_per_row, 2))
plt.gca().set_axis_off()
plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)
plt.margins(0, 0)
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.imshow(im, interpolation="none")
plt.show()
def _convert_display_uri(uri, mime_type):
import urllib
from .helper import _to_datauri, _uri_to_blob
scheme = urllib.parse.urlparse(uri).scheme
if scheme not in ['data', 'http', 'https']:
blob = _uri_to_blob(uri)
return _to_datauri(mime_type, blob)
return uri
def _html5_image(uri):
from IPython.display import display
from IPython.core.display import HTML # noqa
src = f'''
<body>
<image src="{uri}" height="200px">
</body>
'''
display(HTML(src)) # noqa
def _html5_video_player(uri):
from IPython.display import display
from IPython.core.display import HTML # noqa
src = f'''
<body>
<video width="320" height="240" autoplay muted controls>
<source src="{uri}">
Your browser does not support the video tag.
</video>
</body>
'''
display(HTML(src)) # noqa
def _html5_audio_player(uri):
from IPython.display import display
from IPython.core.display import HTML # noqa
src = f'''
<body>
<audio controls="controls" style="width:320px" >
<source src="{uri}"/>
Your browser does not support the audio element.
</audio>
</body>
'''
display(HTML(src)) # noqa
``` |
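A small sketch of the plotting helpers above; the image URI is taken from the tests elsewhere in this repo and needs network access, and ``display()`` is intended for an IPython/Jupyter session.
```python
# Sketch only: summary() works anywhere with rich; display() expects a notebook.
from docarray import Document

d = Document(uri='https://docarray.jina.ai/_static/favicon.png')
d.chunks.append(Document(text='a nested chunk'))
d.summary()   # prints the rich table/tree built by __rich_console__ / _plot_recursion
d.display()   # renders the image from .uri inside IPython/Jupyter
```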
{
"source": "jina-ai/example-3D-model",
"score": 2
} |
#### File: executors/pn_encoder/pn_encoder.py
```python
import numpy as np
from jina import Document, DocumentArray, Executor, requests
from jina.logging.logger import JinaLogger
from executors.pn_encoder.pn import get_bottleneck_model
class PNEncoder(Executor):
def __init__(self, ckpt_path: str, **kwargs):
super().__init__(**kwargs)
self.embedding_model = get_bottleneck_model(ckpt_path=ckpt_path)
self.logger = JinaLogger('pn-encoder')
@requests(on=['/index', '/search'])
def encode(self, docs: DocumentArray, **kwargs):
embeds = self.embedding_model.predict(np.stack(docs.get_attributes('blob')))
for layer_embeds in embeds:
for d, b in zip(docs, layer_embeds):
d.chunks.append(Document(embedding=b))
# set content to uri to reduce document size
for d in docs:
d.uri = d.tags['glb_path']
``` |
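A hedged sketch of wiring the encoder above into a Flow; the checkpoint path is a placeholder assumption and must point to a trained PointNet checkpoint.
```python
# Sketch only: the checkpoint path below is a placeholder, not a real file.
from jina import Flow

f = Flow().add(uses=PNEncoder, uses_with={'ckpt_path': 'checkpoints/pn.ckpt'})
with f:
    f.block()
```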
{
"source": "jina-ai/example-multimodal-fashion-search",
"score": 3
} |
#### File: example-multimodal-fashion-search/searcher/helper.py
```python
from docarray import DocumentArray, Document
from config import DATA_DIR, CSV_FILE
import random
import os
def generate_price(minimum=10, maximum=200):
price = random.randrange(minimum, maximum)
return price
def remove_tensor(doc):
doc.tensor = None
return doc
def process_doc(doc):
if hasattr(doc, "id"):
filename = f"{DATA_DIR}/{doc.id}.jpg"
if os.path.isfile(filename):
doc.uri = filename
# Generate fake price
doc.tags["price"] = generate_price()
# Generate fake rating based on id
    random.seed(int(doc.id)) # Ensure reproducibility
doc.tags["rating"] = random.randrange(0, 5)
doc = doc.load_uri_to_image_tensor()
return doc
def csv_to_docarray(file_path=CSV_FILE, max_docs=100):
docs = DocumentArray.from_csv(file_path, size=max_docs)
docs.apply(process_doc)
return docs
# Deprecated
def input_docs_from_csv(file_path=CSV_FILE, max_docs=100, data_dir=DATA_DIR):
docs = DocumentArray()
import csv
from itertools import islice
with open(file_path, "r") as file:
reader = csv.DictReader(file)
for row in islice(reader, max_docs):
filename = f"{data_dir}/{row['id']}.jpg"
doc = Document(uri=filename, tags=row)
random.seed(int(doc.tags["id"])) # Ensure reproducability
# Generate useful data that's missing
doc.tags["price"] = generate_price() # Generate fake price
doc.tags["rating"] = random.randrange(0, 5)
doc.load_uri_to_image_tensor()
docs.append(doc)
return docs
def get_columns(document):
"""
Return a list of tuples, each tuple containing column name and type
"""
# tags = document.tags.to_dict()
tags = document.tags
names = list(tags.keys())
types = list(tags.values())
columns = []
for field, value in zip(names, types):
try:
value = int(value) # Handle year better
except:
pass
if isinstance(value, str):
value = "str"
elif isinstance(value, int):
value = "int"
col = (field, value)
columns.append(col)
return columns
``` |
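A short sketch of composing the helpers above; it assumes CSV_FILE and DATA_DIR in config.py point at the downloaded fashion dataset.
```python
# Sketch only: requires the fashion dataset configured in config.py.
docs = csv_to_docarray(max_docs=10)
print(len(docs))
print(get_columns(docs[0]))   # e.g. [('id', 'int'), ('price', 'int'), ...]
```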
{
"source": "jina-ai/example-video-search",
"score": 2
} |
#### File: jina-ai/example-video-search/executors.py
```python
from typing import Optional, Iterable
from collections import defaultdict
import numpy as np
from jina import Executor, requests
from docarray import Document, DocumentArray
_ALLOWED_METRICS = ['min', 'max', 'mean_min', 'mean_max']
DEFAULT_FPS = 1
class FilterModality(Executor):
def __init__(self,
modality: str = None,
*args,
**kwargs):
super().__init__(*args, **kwargs)
self.modality = modality
@requests
def filter(self, docs: DocumentArray, **kwargs):
for doc in docs:
chunks = filter(lambda d: d.modality == self.modality, doc.chunks)
doc.chunks = chunks
return docs
class AudioSegmenter(Executor):
def __init__(self, chunk_duration: int = 10, chunk_strip: int = 1,
traversal_paths: str = None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.chunk_duration = chunk_duration # seconds
self.strip = chunk_strip
self.traversal_paths = traversal_paths
@requests(on=['/search', '/index'])
def segment(self, docs: DocumentArray,
parameters: dict = None, **kwargs):
traversal_paths = parameters.get('traversal_paths', self.traversal_paths)
for idx, doc in enumerate(docs[traversal_paths]):
sample_rate = doc.tags['sample_rate']
chunk_size = int(self.chunk_duration * sample_rate)
strip = parameters.get('chunk_strip', self.strip)
strip_size = int(strip * sample_rate)
num_chunks = max(1, int((doc.tensor.shape[0] - chunk_size) / strip_size))
chunk_array = DocumentArray()
for chunk_id in range(num_chunks):
beg = chunk_id * strip_size
end = beg + chunk_size
if beg > doc.tensor.shape[0]:
break
chunk_array.append(
Document(
tensor=doc.tensor[beg:end],
offset=idx,
location=[beg, end],
tags=doc.tags,
modality='audio'
)
)
ts = (beg / sample_rate) if sample_rate != 0 else 0
chunk_array[chunk_id].tags['timestamp'] = ts
chunk_array[chunk_id].tags['video'] = doc.id
docs[idx].chunks = chunk_array
class MixRanker(Executor):
"""
Aggregate the matches and overwrite document.matches with the aggregated results.
"""
def __init__(
self,
metric: str = 'cosine',
ranking: str = 'min',
top_k: int = 10,
modality_list: Iterable[str] = ('image', 'audio'),
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
if ranking not in _ALLOWED_METRICS:
raise ValueError(
f'ranking should be one of {_ALLOWED_METRICS}, got "{ranking}"',
)
self.metric = metric
self.ranking = ranking
self.top_k = top_k
self.modality_list = modality_list
@requests(on='/search')
def merge_matches(self, docs: DocumentArray, parameters=None, **kwargs):
if not docs:
return
top_k = int(parameters.get('top_k', self.top_k))
for doc in docs:
parents_matches = defaultdict(list)
for m in doc.matches:
if m.modality in self.modality_list:
parents_matches[m.parent_id].append(m)
new_matches = []
for match_parent_id, matches in parents_matches.items():
best_id = 0
if self.ranking == 'min':
best_id = np.argmin([m.scores[self.metric].value for m in matches])
elif self.ranking == 'max':
best_id = np.argmax([m.scores[self.metric].value for m in matches])
new_match = matches[best_id]
new_match.id = matches[best_id].parent_id
new_match.scores = {self.metric: matches[best_id].scores[self.metric]}
timestamp = matches[best_id].tags['timestamp']
if new_match.modality == 'image':
new_match.tags['timestamp'] = float(timestamp) / DEFAULT_FPS
vid = new_match.id.split('.')[0]
# reconstruct the YouTube URL based on the vid
new_match.uri = f'https://www.youtube.com/watch?v={vid}#t={int(timestamp)}s'
new_matches.append(new_match)
# Sort the matches
doc.matches = new_matches
if self.ranking == 'min':
doc.matches.sort(key=lambda d: d.scores[self.metric].value)
elif self.ranking == 'max':
doc.matches.sort(key=lambda d: -d.scores[self.metric].value)
doc.matches = doc.matches[:top_k]
doc.pop('embedding')
``` |
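A minimal sketch of calling the ``AudioSegmenter`` above directly on synthetic audio; the sample rate, duration, and traversal path are assumptions for illustration.
```python
# Sketch only: fake mono audio; chunking parameters are illustrative.
import numpy as np
from docarray import Document, DocumentArray

doc = Document(
    tensor=np.random.rand(16000 * 30),   # ~30 s of fake audio
    tags={'sample_rate': 16000},
)
seg = AudioSegmenter(chunk_duration=10, chunk_strip=5, traversal_paths='@r')
seg.segment(DocumentArray([doc]), parameters={})
print(len(doc.chunks))   # number of 10-second chunks
```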
{
"source": "jina-ai/executor-simpleindexer",
"score": 3
} |
#### File: jina-ai/executor-simpleindexer/executor.py
```python
import inspect
import os
from typing import Dict, Optional
from jina import DocumentArray, Executor, requests
from jina.logging.logger import JinaLogger
class SimpleIndexer(Executor):
"""
A simple indexer that stores all the Document data together in a DocumentArray,
and can dump to and load from disk.
To be used as a unified indexer, combining both indexing and searching
"""
FILE_NAME = 'index.db'
def __init__(
self,
match_args: Optional[Dict] = None,
table_name: str = 'simple_indexer_table',
traversal_right: str = '@r',
traversal_left: str = '@r',
**kwargs,
):
"""
Initializer function for the simple indexer
To specify storage path, use `workspace` attribute in executor `metas`
:param match_args: the arguments to `DocumentArray`'s match function
:param table_name: name of the table to work with for the sqlite backend
:param traversal_right: the default traversal path for the indexer's
DocumentArray
:param traversal_left: the default traversal path for the query
DocumentArray
"""
super().__init__(**kwargs)
self._match_args = match_args or {}
self._index = DocumentArray(
storage='sqlite',
config={
'connection': os.path.join(self.workspace, SimpleIndexer.FILE_NAME),
'table_name': table_name,
},
) # with customize config
self.logger = JinaLogger(self.metas.name)
self.default_traversal_right = traversal_right
self.default_traversal_left = traversal_left
@property
def table_name(self) -> str:
return self._index._table_name
@requests(on='/index')
def index(
self,
docs: 'DocumentArray',
**kwargs,
):
"""All Documents to the DocumentArray
:param docs: the docs to add
"""
if docs:
self._index.extend(docs)
@requests(on='/search')
def search(
self,
docs: 'DocumentArray',
parameters: Optional[Dict] = None,
**kwargs,
):
"""Perform a vector similarity search and retrieve the full Document match
:param docs: the Documents to search with
:param parameters: the runtime arguments to `DocumentArray`'s match
function. They overwrite the original match_args arguments.
"""
match_args = (
{**self._match_args, **parameters}
if parameters is not None
else self._match_args
)
traversal_right = parameters.get(
'traversal_right', self.default_traversal_right
)
traversal_left = parameters.get('traversal_left', self.default_traversal_left)
match_args = SimpleIndexer._filter_match_params(docs, match_args)
docs[traversal_left].match(self._index[traversal_right], **match_args)
@staticmethod
def _filter_match_params(docs, match_args):
# get only those arguments that exist in .match
args = set(inspect.getfullargspec(docs.match).args)
args.discard('self')
match_args = {k: v for k, v in match_args.items() if k in args}
return match_args
@requests(on='/delete')
def delete(self, parameters: Dict, **kwargs):
"""Delete entries from the index by id
:param parameters: parameters to the request
"""
deleted_ids = parameters.get('ids', [])
if len(deleted_ids) == 0:
return
del self._index[deleted_ids]
@requests(on='/update')
def update(self, docs: DocumentArray, **kwargs):
"""Update doc with the same id, if not present, append into storage
:param docs: the documents to update
"""
for doc in docs:
try:
self._index[doc.id] = doc
except IndexError:
self.logger.warning(
f'cannot update doc {doc.id} as it does not exist in storage'
)
@requests(on='/fill_embedding')
def fill_embedding(self, docs: DocumentArray, **kwargs):
"""retrieve embedding of Documents by id
:param docs: DocumentArray to search with
"""
for doc in docs:
doc.embedding = self._index[doc.id].embedding
@requests(on='/clear')
def clear(self, **kwargs):
"""clear the database"""
self._index.clear()
```
#### File: executor-simpleindexer/tests/test_simple_indexer.py
```python
import shutil
from copy import deepcopy
from pathlib import Path
import numpy as np
import pytest
from executor import SimpleIndexer
from jina import Document, DocumentArray, Executor, Flow
def assert_document_arrays_equal(arr1, arr2):
assert len(arr1) == len(arr2)
for d1, d2 in zip(arr1, arr2):
assert d1.id == d2.id
assert d1.content == d2.content
assert d1.chunks == d2.chunks
assert d1.matches == d2.matches
@pytest.fixture
def docs():
return DocumentArray(
[
Document(id='doc1', embedding=np.array([1, 0, 0, 0])),
Document(id='doc2', embedding=np.array([0, 1, 0, 0])),
Document(id='doc3', embedding=np.array([0, 0, 1, 0])),
Document(id='doc4', embedding=np.array([0, 0, 0, 1])),
Document(id='doc5', embedding=np.array([1, 0, 1, 0])),
Document(id='doc6', embedding=np.array([0, 1, 0, 1])),
]
)
@pytest.fixture
def docs_with_chunks():
docs = DocumentArray(
[
Document(id='doc1', embedding=np.array([1, 0, 0, 0])),
Document(id='doc2', embedding=np.array([0, 1, 0, 0])),
Document(id='doc3', embedding=np.array([0, 0, 1, 0])),
Document(id='doc4', embedding=np.array([0, 0, 0, 1])),
Document(id='doc5', embedding=np.array([1, 0, 1, 0])),
Document(id='doc6', embedding=np.array([0, 1, 0, 1])),
]
)
for d in docs:
d.chunks = DocumentArray(
[
Document(id=f'{d.id}_chunk1', embedding=np.array([1, 0, 0, 0])),
Document(id=f'{d.id}_chunk2', embedding=np.array([0, 1, 0, 0])),
Document(id=f'{d.id}_chunk3', embedding=np.array([0, 1, 0, 0])),
]
)
return docs
@pytest.fixture
def update_docs():
return DocumentArray(
[
Document(id='doc1', embedding=np.array([0, 0, 0, 1])),
]
)
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[1] / 'config.yml'))
assert ex._match_args == {}
def test_flow(tmpdir):
f = Flow().add(
uses=SimpleIndexer,
uses_metas={'workspace': str(tmpdir)},
)
with f:
f.post(
on='/index',
inputs=[Document(id='a', embedding=np.array([1]))],
)
docs = f.post(
on='/search',
inputs=[Document(embedding=np.array([1]))],
)
assert docs[0].matches[0].id == 'a'
def test_fill_embeddings(tmpdir):
metas = {'workspace': str(tmpdir)}
indexer = SimpleIndexer(metas=metas)
indexer.index(DocumentArray([Document(id='a', embedding=np.array([1]))]))
search_docs = DocumentArray([Document(id='a')])
indexer.fill_embedding(search_docs)
assert search_docs['a'].embedding is not None
with pytest.raises(KeyError, match="`b`"):
indexer.fill_embedding(DocumentArray([Document(id='b')]))
def test_load(tmpdir, docs):
metas = {'workspace': str(tmpdir)}
indexer1 = SimpleIndexer(metas=metas)
indexer1.index(docs)
indexer2 = SimpleIndexer(metas=metas)
assert_document_arrays_equal(indexer2._index, docs)
def test_index(tmpdir, docs):
metas = {'workspace': str(tmpdir)}
# test general/normal case
indexer = SimpleIndexer(metas=metas)
indexer.index(docs)
assert_document_arrays_equal(indexer._index, docs)
# test index empty docs
shutil.rmtree(tmpdir)
indexer = SimpleIndexer(metas=metas)
indexer.index(DocumentArray())
assert not indexer._index
def test_delete(tmpdir, docs):
metas = {'workspace': str(tmpdir)}
# index docs first
indexer = SimpleIndexer(metas=metas)
indexer.index(docs)
assert_document_arrays_equal(indexer._index, docs)
# delete empty docs
indexer.delete({})
assert_document_arrays_equal(indexer._index, docs)
# delete first 3 docs
parameters = {'ids': [f'doc{i}' for i in range(1, 4)]}
indexer.delete(parameters)
assert_document_arrays_equal(indexer._index, docs[3:])
# delete the rest of the docs stored
parameters = {'ids': [f'doc{i}' for i in range(4, 7)]}
indexer.delete(parameters)
assert not indexer._index
def test_update(tmpdir, docs, update_docs):
metas = {'workspace': str(tmpdir)}
# index docs first
indexer = SimpleIndexer(metas=metas)
indexer.index(docs)
assert_document_arrays_equal(indexer._index, docs)
# update first doc
indexer.update(update_docs)
assert indexer._index[0].id == 'doc1'
assert (indexer._index[0].embedding == [0, 0, 0, 1]).all()
@pytest.mark.parametrize('metric', ['euclidean', 'cosine'])
def test_search(tmpdir, metric, docs):
metas = {'workspace': str(tmpdir)}
match_args = {'metric': metric}
# test general/normal case
indexer = SimpleIndexer(match_args=match_args, metas=metas)
indexer.index(docs)
search_docs = deepcopy(docs)
indexer.search(search_docs, {})
for i in range(len(docs)):
assert search_docs[i].matches[0].id == f'doc{i + 1}'
assert sorted(
[m.scores['euclidean'].value for m in search_docs[0].matches]
) == [m.scores['euclidean'].value for m in search_docs[0].matches]
assert len(search_docs[i].matches) == len(docs)
# test search with top_k/limit = 1
indexer.search(search_docs, parameters={'limit': 1})
for i in range(len(docs)):
assert len(search_docs[i].matches) == 1
# test search with default limit/top_k again
# indexer._match_args should not change as a result of the previous operation
# so expected length of matches should be the same as the first case
indexer.search(search_docs, {})
for i in range(len(docs)):
assert len(search_docs[i].matches) == len(docs)
# test search from empty indexed docs
shutil.rmtree(tmpdir)
indexer = SimpleIndexer(metas=metas)
indexer.index(DocumentArray())
indexer.search(docs, {})
for doc in docs:
assert not doc.matches
# test search empty docs
indexer.search(DocumentArray(), {})
def test_empty_docs(tmp_path):
metas = {'workspace': str(tmp_path / 'workspace')}
indexer = SimpleIndexer(metas=metas)
indexer.index(docs=None)
def test_unexpected_kwargs(tmp_path, docs):
metas = {'workspace': str(tmp_path / 'workspace')}
indexer = SimpleIndexer(metas=metas)
indexer.index(docs=docs)
indexer.search(docs, parameters={'unknown': 1, 'limit': 1, 'self': 2})
assert len(docs[0].matches) == 1
def test_invalid_embedding_indices(tmp_path, docs):
metas = {'workspace': str(tmp_path / 'workspace')}
indexer = SimpleIndexer(metas=metas)
indexer.index(docs)
indexer.index(DocumentArray([Document(), Document(embedding=np.array([1]))]))
query = DocumentArray([Document(embedding=np.array([1, 0, 0, 0]))])
with pytest.raises(ValueError):
indexer.search(query, parameters={'match_args': {'limit': len(indexer._index)}})
def test_invalid_embedding_query(tmp_path, docs):
metas = {'workspace': str(tmp_path / 'workspace')}
indexer = SimpleIndexer(metas=metas)
indexer.index(docs)
indexer.index(DocumentArray([Document(), Document(embedding=np.array([1]))]))
with pytest.raises(ValueError):
indexer.search(DocumentArray([Document(embedding=np.array([1, 0]))]), {})
def test_clear(tmp_path, docs):
metas = {'workspace': str(tmp_path / 'workspace')}
indexer = SimpleIndexer(metas=metas)
indexer.index(docs)
assert len(indexer._index) > 0
indexer.clear()
assert len(indexer._index) == 0
def test_chunks_retrieval_root(tmp_path, docs_with_chunks):
metas = {'workspace': str(tmp_path / 'workspace')}
indexer = SimpleIndexer(metas=metas)
indexer.index(docs_with_chunks)
# we search on chunk level and retrieve on root
search_docs = DocumentArray(
[
Document(id='search_doc1', embedding=np.array([1, 0, 0, 0])),
]
)
indexer.search(
search_docs, parameters={'traversal_left': '@r', 'traversal_right': '@c'}
)
assert 'chunk' in search_docs[0].matches[0].id
def test_chunks_retrieval_chunk(tmp_path, docs_with_chunks):
metas = {'workspace': str(tmp_path / 'workspace')}
indexer = SimpleIndexer(metas=metas)
indexer.index(docs_with_chunks)
# we search on chunk level and retrieve on root
search_docs = DocumentArray(
[
Document(id='search_doc1', embedding=np.array([1, 0, 0, 0])),
]
)
indexer.search(
search_docs, parameters={'traversal_left': '@r', 'traversal_right': '@r'}
)
assert 'chunk' not in search_docs[0].matches[0].id
``` |
{
"source": "jina-ai/executor-text-CLIP",
"score": 3
} |
#### File: jina-ai/executor-text-CLIP/clip_text.py
```python
from jina import DocumentArray, Executor, requests
import torch
import clip
from typing import Iterable, Optional, List
from jina_commons.batching import get_docs_batch_generator
class CLIPTextEncoder(Executor):
"""Encode text into embeddings using a CLIP model.
:param model_name: The name of one of the pre-trained CLIP models.
Can also be a path to a local checkpoint (a ``.pt`` file).
:param default_batch_size: Default batch size for encoding, used if the
batch size is not passed as a parameter with the request.
:param default_traversal_paths: Default traversal paths for encoding, used if the
traversal path is not passed as a parameter with the request.
:param default_device: The device (cpu or gpu) that the model should be on.
:param jit: Whether a JIT version of the model should be loaded.
"""
def __init__(
self,
model_name: str = 'ViT-B/32',
default_batch_size: int = 32,
default_traversal_paths: List[str] = ['r'],
default_device: str = 'cpu',
jit: bool = True,
*args,
**kwargs
):
super().__init__(*args, **kwargs)
self.device = default_device
self.model, _ = clip.load(model_name, self.device, jit)
self.default_traversal_paths = default_traversal_paths
self.default_batch_size = default_batch_size
@requests
def encode(self, docs: Optional[DocumentArray], parameters: dict, **kwargs):
"""
Encode all docs with text and store the encodings in the embedding
attribute of the docs.
:param docs: documents sent to the encoder. The docs must have text.
:param parameters: dictionary to define the ``traversal_path`` and the
``batch_size``. For example,
``parameters={'traversal_paths': ['r'], 'batch_size': 10}``
"""
if docs:
document_batches_generator = get_docs_batch_generator(
docs,
traversal_path=parameters.get(
'traversal_paths', self.default_traversal_paths
),
batch_size=parameters.get('batch_size', self.default_batch_size),
needs_attr='text',
)
self._create_embeddings(document_batches_generator)
def _create_embeddings(self, document_batches_generator: Iterable):
with torch.no_grad():
for document_batch in document_batches_generator:
text_batch = [d.text for d in document_batch]
tensor = clip.tokenize(text_batch).to(self.device)
embedding_batch = self.model.encode_text(tensor)
numpy_embedding_batch = embedding_batch.cpu().numpy()
for document, numpy_embedding in zip(
document_batch, numpy_embedding_batch
):
document.embedding = numpy_embedding
``` |
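Not part of the original file: a minimal usage sketch for the encoder above. It assumes the `clip` package is installed and the `ViT-B/32` weights can be loaded on the current machine; the executor is instantiated directly rather than inside a Flow.

```python
# Hedged usage sketch for CLIPTextEncoder -- assumes CLIP weights are available locally.
from jina import Document, DocumentArray

encoder = CLIPTextEncoder(default_device='cpu', jit=False)
docs = DocumentArray(
    [Document(text='a photo of a cat'), Document(text='a photo of a dog')]
)
# encode() writes the embeddings in place
encoder.encode(docs, parameters={})
print(docs[0].embedding.shape)  # (512,) for the ViT-B/32 text encoder
```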
{
"source": "jina-ai/finetuner",
"score": 3
} |
#### File: finetuner/tailor/base.py
```python
import abc
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
if TYPE_CHECKING:
from ..helper import AnyDNN, LayerInfoType
class BaseTailor(abc.ABC):
def __init__(
self,
model: 'AnyDNN',
input_size: Optional[Tuple[int, ...]] = None,
input_dtype: str = 'float32',
device: Optional[str] = 'cpu',
):
"""Tailor converts a general DNN model into an embedding model.
:param model: a general DNN model
:param input_size: a sequence of integers defining the shape of the input tensor. Note, batch size is *not* part
of ``input_size``. It is required for :py:class:`PytorchTailor` and :py:class:`PaddleTailor`, but not :py:class:`C`
:param input_dtype: the data type of the input tensor.
:param device: The device to which to move the model. Supported options are
``"cpu"`` and ``"cuda"`` (for GPU).
"""
self._model = model
self._set_device(device)
# multiple inputs to the network
if isinstance(input_size, tuple):
input_size = [input_size]
self._input_size = input_size
self._input_dtype = input_dtype
@abc.abstractmethod
def _set_device(self, device: str) -> None:
...
@abc.abstractmethod
def to_embedding_model(
self,
layer_name: Optional[str] = None,
freeze: Union[bool, List[str]] = False,
projection_head: Optional['AnyDNN'] = None,
) -> 'AnyDNN':
"""Convert a general model from :py:attr:`.model` to an embedding model.
:param layer_name: the name of the layer that is used for output embeddings. All layers *after* that layer
will be removed. When set to ``None``, then the last layer listed in :py:attr:`.embedding_layers` will be used.
To see all available names you can check ``name`` field of :py:attr:`.embedding_layers`.
        :param freeze: if set to ``True``, all layers before :py:attr:`layer_name` will be frozen. If set to a list of
            layer names, only those layers will be frozen.
        :param projection_head: attach a module at the end of the model; this module is always trainable.
:return: Converted embedding model.
"""
...
@property
def embedding_layers(self) -> 'LayerInfoType':
"""Get all dense layers that can be used as embedding layer from the :py:attr:`.model`.
:return: layers info as Dict.
"""
_layers = self.summary()
return [_l for _l in _layers if _l['is_embedding_layer']]
@abc.abstractmethod
def summary(self, include_identity_layer: bool = False) -> 'LayerInfoType':
"""The summary of the model architecture. To list all potential embedding layers, use :py:attr:`.embedding_layers`.
:param include_identity_layer: if set, then identity layers are included and returned.
:return: all layers info as Dict.
"""
...
def display(self, *args, **kwargs) -> None:
"""Display the model architecture from :py:attr:`.summary` in a table.
:param args: args pass to :py:attr:`.summary`
:param kwargs: kwargs pass to :py:attr:`.summary`
"""
from rich import box, print
from rich.table import Table
_summary = self.summary(*args, **kwargs)
table = Table(box=box.SIMPLE)
cols = ['name', 'output_shape_display', 'nb_params', 'trainable']
for k in cols:
table.add_column(k)
for s in _summary:
style = None
if s['trainable']:
style = 'bright_green'
elif not s['trainable']:
style = 'cyan'
if 'identity' in s['name']:
style = 'bright_black'
table.add_row(*map(str, (s[v] for v in cols)), style=style)
print(
table,
'[green]Green[/green] layers are trainable layers, '
'[cyan]Cyan[/cyan] layers are non-trainable layers or frozen layers.\n'
'[bright_black]Gray[/bright_black] layers indicates this layer has been replaced by an Identity layer.\n'
'Use to_embedding_model(...) to create embedding model.',
)
```
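A brief sketch of how a concrete tailor is typically driven (not from the original file). It assumes the PyTorch tailor lives at `finetuner.tailor.pytorch.PytorchTailor`, mirroring the Paddle import path used in the tests further below, and that `torchvision` is installed.

```python
# Hedged sketch: inspect a torchvision model and cut it into an embedding model.
import torchvision
from finetuner.tailor.pytorch import PytorchTailor  # assumed import path

tailor = PytorchTailor(
    model=torchvision.models.resnet50(pretrained=False),
    input_size=(3, 224, 224),  # required for the PyTorch/Paddle tailors
)
tailor.display()  # rich table of layers, embedding candidates highlighted
embed_model = tailor.to_embedding_model(freeze=True)  # cut at the last embedding layer
```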
#### File: tailor/keras/projection_head.py
```python
import tensorflow as tf
class ProjectionHead(tf.keras.layers.Layer):
"""Projection head used internally for self-supervised training.
    It is (by default) a simple 3-layer MLP that is attached on top of the embedding model for training purposes only.
    After training, it should be cut out from the embedding model.
"""
EPSILON = 1e-5
def __init__(self, in_features: int, output_dim: int = 128, num_layers: int = 2):
super().__init__()
self.layers = []
for idx in range(num_layers - 1):
self.layers.append(
tf.keras.layers.Dense(
units=in_features,
bias_initializer='zeros',
)
)
self.layers.append(tf.keras.layers.BatchNormalization(epsilon=self.EPSILON))
self.layers.append(tf.keras.layers.ReLU())
self.layers.append(
tf.keras.layers.Dense(
units=output_dim,
bias_initializer='zeros',
)
)
self.layers.append(tf.keras.layers.BatchNormalization(epsilon=self.EPSILON))
def call(self, x):
for layer in self.layers:
x = layer(x)
return x
```
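A quick shape check for the head above (a sketch, not part of the original file):

```python
# Hedged sketch: the head maps (batch, in_features) -> (batch, output_dim).
import tensorflow as tf

head = ProjectionHead(in_features=2048, output_dim=128, num_layers=3)
out = head(tf.random.uniform((2, 2048)))
print(out.shape)  # (2, 128)
```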
#### File: finetuner/tuner/augmentation.py
```python
import numpy as np
from docarray import Document
def vision_preprocessor(
height: int = 224,
width: int = 224,
default_channel_axis: int = -1,
target_channel_axis: int = 0,
normalize: bool = False,
phase: str = 'train',
):
"""Randomly augments a Document with `tensor` field.
The method applies flipping, color jitter, cropping, gaussian blur and random rectangle erase
to the given image.
:param height: image height.
:param width: image width.
:param default_channel_axis: The color channel of the input image, by default -1, the expected input is H, W, C.
:param target_channel_axis: The color channel of the output image, by default 0, the expected output is C, H, W.
:param normalize: Normalize uint8 image :attr:`.tensor` into a float32 image :attr:`.tensor` inplace.
:param phase: phase of experiment, either `train` or `validation`. At `validation` phase, will not apply
random transformation.
"""
def preprocess_fn(doc):
return _vision_preprocessor(
doc,
height,
width,
default_channel_axis,
target_channel_axis,
normalize,
phase,
)
return preprocess_fn
def _vision_preprocessor(
doc: Document,
height: int = 224,
width: int = 224,
default_channel_axis: int = -1,
target_channel_axis: int = 0,
normalize: bool = False,
phase: str = 'train',
):
"""
Randomly augments a Document with `tensor` field.
The method applies flipping, color jitter, cropping, gaussian blur and random rectangle erase
to the given image.
:param doc: The document to preprocess.
:param height: image height.
:param width: image width.
:param default_channel_axis: The color channel of the input image, by default -1, the expected input is H, W, C.
:param target_channel_axis: The color channel of the output image, by default 0, the expected output is C, H, W.
:param normalize: Normalize uint8 image :attr:`.tensor` into a float32 image :attr:`.tensor` inplace.
:param phase: stage of experiment, either `train` or `validation`. At `validation` phase, will not apply
random transformation.
"""
import albumentations as A
tensor = doc.tensor
if tensor is None:
if doc.uri:
doc.load_uri_to_image_tensor(
width=width, height=height, channel_axis=default_channel_axis
)
tensor = doc.tensor
else:
            raise AttributeError(
                'Document `tensor` is None and no `uri` is set to load it from.'
            )
if normalize:
doc.set_image_tensor_normalization(channel_axis=default_channel_axis)
tensor = doc.tensor
if tensor.dtype == np.float64:
tensor = np.float32(tensor)
if default_channel_axis not in [-1, 2]:
tensor = np.moveaxis(tensor, default_channel_axis, -1)
if phase == 'train':
transform = A.Compose(
[
A.HorizontalFlip(p=0.5),
A.ColorJitter(p=1),
A.RandomResizedCrop(width=width, height=height, p=1),
A.GaussianBlur(p=1),
A.GridDropout(
ratio=0.2, p=0.5
                ),  # randomly erase a grid covering ~20% of the image, with probability 0.5
]
)
tensor = transform(image=tensor)['image']
if target_channel_axis != -1:
tensor = np.moveaxis(tensor, -1, target_channel_axis)
return tensor
```
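A small usage sketch (not part of the original file, and it assumes `albumentations` is installed): the factory returns a closure that can be handed to the tuner as `preprocess_fn`.

```python
# Hedged sketch: build the augmentation closure and apply it to one Document.
import numpy as np
from docarray import Document

preprocess_fn = vision_preprocessor(height=224, width=224, phase='train')
doc = Document(tensor=np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8))
augmented = preprocess_fn(doc)
print(augmented.shape)  # (3, 224, 224) -- channel axis moved to the front
```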
#### File: tuner/callback/base.py
```python
from abc import ABC
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from ..base import BaseTuner
class BaseCallback(ABC):
"""
The base callback class.
    This class defines the different callback methods that can be overridden; however,
    no method is required to be overridden by a subclass.
The callback instance should be passed to the tuner in the ``fit`` method, in a list
that contains all callbacks and is passed to the ``callbacks`` argument.
All methods receive the tuner instance to which the callback has been added as an
argument. The most relevant property of the tuner instance is the ``state``, which
is an instance of ``TunerState`` and contains relevant training statistics, such as
current loss, epoch number, number of batches and batch number.
"""
def on_fit_begin(self, tuner: 'BaseTuner'):
"""
Called at the start of the ``fit`` method call, after all setup has been done,
but before the training has started.
"""
def on_epoch_begin(self, tuner: 'BaseTuner'):
"""
Called at the start of an epoch.
"""
def on_train_epoch_begin(self, tuner: 'BaseTuner'):
"""
        Called at the beginning of the training part of the epoch.
"""
def on_train_batch_begin(self, tuner: 'BaseTuner'):
"""
Called at the start of a training batch, after the data for the batch has
already been loaded.
"""
def on_train_batch_end(self, tuner: 'BaseTuner'):
"""
Called at the end of a training batch, after the backward pass.
"""
def on_train_epoch_end(self, tuner: 'BaseTuner'):
"""
        Called at the end of the training part of the epoch.
"""
def on_val_begin(self, tuner: 'BaseTuner'):
"""
Called at the start of the evaluation.
"""
def on_val_batch_begin(self, tuner: 'BaseTuner'):
"""
        Called at the start of an evaluation batch.
"""
def on_val_batch_end(self, tuner: 'BaseTuner'):
"""
Called at the end of an evaluation batch.
"""
def on_val_end(self, tuner: 'BaseTuner'):
"""
Called at the end of the evaluation.
"""
def on_epoch_end(self, tuner: 'BaseTuner'):
"""
Called at the end of an epoch, after both training and validation (or just
        training if no validation is provided).
"""
def on_fit_end(self, tuner: 'BaseTuner'):
"""
Called at the end of the ``fit`` method call, after finishing all the epochs.
"""
def on_keyboard_interrupt(self, tuner: 'BaseTuner'):
"""
Called when the tuner is interrupted by the user
"""
def on_exception(self, tuner: 'BaseTuner', exception: BaseException):
"""
Called when the tuner encounters an exception during execution.
"""
```
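As a sketch of the intended extension point (not part of the original file), a minimal callback only needs to override the hooks it cares about and read the documented `tuner.state` fields:

```python
# Hedged sketch: log the running loss after every training batch.
class LossLogger(BaseCallback):
    def on_train_batch_end(self, tuner: 'BaseTuner'):
        print(
            f'epoch {tuner.state.epoch} '
            f'batch {tuner.state.batch_index}: '
            f'loss={tuner.state.current_loss:.4f}'
        )
```

It would then be passed to the tuner via `callbacks=[LossLogger()]`, as the tuner tests later in this document do.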
#### File: tuner/callback/evaluation.py
```python
import math
from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Tuple
from ... import embed
from ..evaluation import Evaluator
from .base import BaseCallback
if TYPE_CHECKING:
from docarray import DocumentArray
from ..base import BaseTuner
class EvaluationCallback(BaseCallback):
"""
A callback that uses the Evaluator to calculate IR metrics at the end of each epoch.
When used with other callbacks that rely on metrics, like checkpoints and logging,
    this callback should be listed first, so that it runs before them.
"""
def __init__(
self,
query_data: 'DocumentArray',
index_data: Optional['DocumentArray'] = None,
metrics: Optional[
Dict[str, Tuple[Callable[..., float], Dict[str, Any]]]
] = None,
exclude_self: bool = True,
limit: int = 20,
distance: str = 'cosine',
num_workers: int = 1,
):
"""
:param query_data: Search data used by the evaluator at the end of each epoch,
to evaluate the model.
:param index_data: Index data or catalog used by the evaluator at the end of
each epoch, to evaluate the model.
:param metrics: A dictionary that specifies the metrics to calculate. It maps
metric names to tuples of metric functions and their keyword arguments. If
set to None, default metrics are computed.
:param exclude_self: Whether to exclude self when matching.
:param limit: The number of top search results to consider when computing the
evaluation metrics.
:param distance: The type of distance metric to use when matching query and
index docs, available options are ``'cosine'``, ``'euclidean'`` and
``'sqeuclidean'``.
:param num_workers: The number of workers to use when matching query and
index data.
"""
self._query_data = query_data
self._index_data = index_data
self._metrics = metrics
self._exclude_self = exclude_self
self._limit = limit
self._distance = distance
self._num_workers = num_workers
self._query_pbar_id = None
self._index_pbar_id = None
self._match_pbar_id = None
def on_fit_begin(self, tuner: 'BaseTuner'):
self._query_pbar_id = tuner._progress_bar.add_task(
'Embedding queries', visible=False, start=False
)
self._index_pbar_id = tuner._progress_bar.add_task(
'Embedding index', visible=False, start=False
)
self._match_pbar_id = tuner._progress_bar.add_task(
'Matching', visible=False, start=False
)
def on_epoch_end(self, tuner: 'BaseTuner'):
# start query data progress bar
tuner._progress_bar.reset(
self._query_pbar_id,
visible=True,
description='Embedding queries',
total=math.ceil(len(self._query_data) / tuner._batch_size),
completed=0,
metrics='',
)
# embed queries
for batch in self._query_data.batch(tuner._batch_size):
embed(
batch,
tuner._embed_model,
device=tuner._device_name,
batch_size=tuner._batch_size,
preprocess_fn=tuner._preprocess_fn,
collate_fn=tuner._collate_fn,
)
tuner._progress_bar.update(task_id=self._query_pbar_id, advance=1)
tuner._progress_bar.update(task_id=self._query_pbar_id, visible=False)
if self._index_data:
# start index data progress bar
tuner._progress_bar.reset(
self._index_pbar_id,
visible=True,
description='Embedding index',
total=math.ceil(len(self._index_data) / tuner._batch_size),
completed=0,
metrics='',
)
# embed index
for batch in self._index_data.batch(tuner._batch_size):
embed(
batch,
tuner._embed_model,
device=tuner._device_name,
batch_size=tuner._batch_size,
preprocess_fn=tuner._preprocess_fn,
collate_fn=tuner._collate_fn,
)
tuner._progress_bar.update(task_id=self._index_pbar_id, advance=1)
index_data = self._index_data
tuner._progress_bar.update(task_id=self._index_pbar_id, visible=False)
else:
index_data = self._query_data
# start matching progress bar
tuner._progress_bar.reset(
self._match_pbar_id,
visible=True,
description='Matching',
metrics='',
)
# compute metrics
evaluator = Evaluator(self._query_data, index_data, metrics=self._metrics)
tuner.state.eval_metrics = evaluator.evaluate(
exclude_self=self._exclude_self,
limit=self._limit,
distance=self._distance,
num_workers=self._num_workers,
)
tuner._progress_bar.update(task_id=self._match_pbar_id, visible=False)
```
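A wiring sketch (not part of the original file); `embed_model`, `train_data`, `query_data` and `index_data` are assumed to have been prepared elsewhere.

```python
# Hedged sketch: compute IR metrics on query/index data at the end of every epoch.
from finetuner.tuner.callback.evaluation import EvaluationCallback
from finetuner.tuner.pytorch import PytorchTuner

eval_cb = EvaluationCallback(query_data=query_data, index_data=index_data, limit=10)
tuner = PytorchTuner(embed_model, callbacks=[eval_cb])
tuner.fit(train_data=train_data, epochs=3, batch_size=64)
```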
#### File: tuner/callback/training_checkpoint.py
```python
import logging
import os
import pickle
import shutil
from typing import TYPE_CHECKING
from ...helper import get_framework
from .base import BaseCallback
if TYPE_CHECKING:
from ..base import BaseTuner
class TrainingCheckpoint(BaseCallback):
"""
Callback to save model at every epoch or the last K epochs
"""
def __init__(self, save_dir: str, last_k_epochs: int = 1, verbose: bool = False):
"""
:param save_dir: string, path to save the model file.
        :param last_k_epochs: only the ``last_k_epochs`` most recent checkpoints
            are kept; older checkpoints are deleted.
:param verbose: Whether to log notifications when a checkpoint is saved/deleted.
"""
self._logger = logging.getLogger('finetuner.' + self.__class__.__name__)
self._logger.setLevel(logging.INFO if verbose else logging.WARNING)
self._save_dir = save_dir
self._last_k_epochs = last_k_epochs
self._saved_checkpoints = []
def on_epoch_end(self, tuner: 'BaseTuner'):
self._save_model(tuner)
self._logger.info(f'Model trained for {tuner.state.epoch+1} epochs is saved!')
if self._last_k_epochs:
if len(self._saved_checkpoints) > self._last_k_epochs:
if os.path.isfile(self._saved_checkpoints[0]):
os.remove(self._saved_checkpoints[0])
else:
shutil.rmtree(self._saved_checkpoints[0])
self._saved_checkpoints.pop(0)
self._logger.info(
f'Model trained for {tuner.state.epoch+1-self._last_k_epochs}'
' epochs is deleted!'
)
def _save_model(self, tuner: 'BaseTuner'):
"""
Saves the model weights, optimizer, scheduler and epoch
depending on the framework.
"""
framework = get_framework(tuner.embed_model)
if framework == 'keras':
tuner.save(filepath=self._get_file_path(tuner))
state = {'epoch': tuner.state.epoch + 1}
with open(
os.path.join(self._get_file_path(tuner), 'saved_state.pkl'), 'wb'
) as f:
pickle.dump(state, f)
elif framework == 'torch':
import torch
state = {
'epoch': tuner.state.epoch + 1,
'state_dict': tuner.embed_model.state_dict(),
'optimizer': tuner._optimizer.state_dict(),
}
if tuner._scheduler and hasattr(tuner._scheduler, 'state_dict'):
state['scheduler'] = tuner._scheduler.state_dict()
torch.save(state, f=self._get_file_path(tuner))
elif framework == 'paddle':
import paddle
state = {
'epoch': tuner.state.epoch + 1,
'state_dict': tuner.embed_model.state_dict(),
'optimizer': tuner._optimizer.state_dict(),
}
if tuner._scheduler and hasattr(tuner._scheduler, 'state_dict'):
state['scheduler'] = tuner._scheduler.state_dict()
paddle.save(state, path=self._get_file_path(tuner))
self._saved_checkpoints.append(self._get_file_path(tuner))
def _get_file_path(self, tuner):
"""
Returns the file path for checkpoint.
"""
return os.path.join(
self._save_dir, f'saved_model_epoch_{tuner.state.epoch + 1:02d}'
)
@staticmethod
def load(tuner: 'BaseTuner', fp: str):
"""
Loads the model and tuner state
"""
framework = get_framework(tuner.embed_model)
if framework == 'keras':
import keras
tuner._embed_model = keras.models.load_model(fp)
with open(os.path.join(fp, 'saved_state.pkl'), 'rb') as f:
loaded_state = pickle.load(f)
tuner.state.epoch = loaded_state['epoch']
elif framework == 'torch':
import torch
checkpoint = torch.load(fp)
tuner._embed_model.load_state_dict(checkpoint['state_dict'])
tuner._optimizer.load_state_dict(checkpoint['optimizer'])
if tuner._scheduler and hasattr(tuner._scheduler, 'state_dict'):
tuner._scheduler.load_state_dict(checkpoint['scheduler'])
tuner.state.epoch = checkpoint['epoch']
elif framework == 'paddle':
import paddle
checkpoint = paddle.load(fp)
tuner._embed_model.set_state_dict(checkpoint['state_dict'])
tuner._optimizer.set_state_dict(checkpoint['optimizer'])
if tuner._scheduler and hasattr(tuner._scheduler, 'state_dict'):
tuner._scheduler.set_state_dict(checkpoint['scheduler'])
tuner.state.epoch = checkpoint['epoch']
```
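A usage sketch (not part of the original file): keep only the two most recent checkpoints during training, then resume from the last one. The checkpoint directory layout follows `_get_file_path` above.

```python
# Hedged sketch: checkpoint every epoch, keep the last two, then resume.
from finetuner.tuner.callback import TrainingCheckpoint
from finetuner.tuner.pytorch import PytorchTuner

ckpt = TrainingCheckpoint(save_dir='checkpoints', last_k_epochs=2, verbose=True)
tuner = PytorchTuner(embed_model, callbacks=[ckpt])
tuner.fit(train_data=train_data, epochs=5, batch_size=64)

# restore weights, optimizer state and epoch counter into a (new) tuner
TrainingCheckpoint.load(tuner, 'checkpoints/saved_model_epoch_05')
```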
#### File: tuner/paddle/__init__.py
```python
from typing import TYPE_CHECKING, Optional, Union
import paddle
from paddle import nn
from paddle.fluid.dataloader.dataloader_iter import default_collate_fn
from paddle.io import DataLoader
from paddle.optimizer import Adam, Optimizer
from paddle.optimizer.lr import LRScheduler
from ... import __default_tag_key__
from ...device import get_device_paddle, to_device_paddle
from ..base import BaseTuner
from ..state import TunerState
from . import losses
from .datasets import PaddleClassDataset, PaddleInstanceDataset, PaddleSessionDataset
if TYPE_CHECKING:
from docarray import DocumentArray
from ...helper import CollateFnType, PreprocFnType
class PaddleTuner(BaseTuner[nn.Layer, DataLoader, Optimizer, LRScheduler]):
def _get_loss(self, loss: Union[nn.Layer, str]) -> nn.Layer:
"""Get the loss layer."""
if isinstance(loss, str):
return getattr(losses, loss)()
elif isinstance(loss, nn.Layer):
return loss
def _get_data_loader(
self,
data: 'DocumentArray',
batch_size: int,
shuffle: bool,
preprocess_fn: Optional['PreprocFnType'] = None,
collate_fn: Optional['CollateFnType'] = None,
num_items_per_class: Optional[int] = None,
num_workers: int = 0,
) -> DataLoader:
"""Get the dataloader for the dataset."""
if collate_fn:
def collate_fn_all(inputs):
batch_content = collate_fn([x[0] for x in inputs])
batch_labels = default_collate_fn([x[1] for x in inputs])
return batch_content, batch_labels
else:
collate_fn_all = None
if __default_tag_key__ in data[0].tags:
dataset = PaddleClassDataset(data, preprocess_fn=preprocess_fn)
else:
if len(data[0].matches) > 0:
dataset = PaddleSessionDataset(data, preprocess_fn=preprocess_fn)
else:
dataset = PaddleInstanceDataset(data, preprocess_fn=preprocess_fn)
batch_sampler = self._get_batch_sampler(
dataset,
batch_size,
shuffle=shuffle,
num_items_per_class=num_items_per_class,
)
data_loader = DataLoader(
dataset=dataset,
batch_sampler=batch_sampler,
collate_fn=collate_fn_all,
num_workers=num_workers,
)
return data_loader
def _move_model_to_device(self):
"""Move the model to device and set device."""
self.device = get_device_paddle(self._device_name)
self._embed_model.to(device=self.device)
def _default_configure_optimizer(self, model: nn.Layer) -> Optimizer:
"""Get the default optimizer (Adam), if none was provided by user."""
return Adam(parameters=model.parameters(), learning_rate=self._learning_rate)
def _train(self, data: DataLoader):
"""Train the model on the given labeled data."""
self._embed_model.train()
for idx, (inputs, labels) in enumerate(data):
# Set state variables
self.state.learning_rates['learning_rate'] = self._optimizer.get_lr()
self.state.batch_index = idx
self._trigger_callbacks('on_train_batch_begin')
inputs = to_device_paddle(inputs, self.device)
labels = to_device_paddle(labels, self.device)
embeddings = self.embed_model(inputs)
loss = self._loss(embeddings, labels)
self._optimizer.clear_grad()
loss.backward()
self._optimizer.step()
if self._scheduler_step == 'batch' and self._scheduler is not None:
self._scheduler.step()
self.state.current_loss = loss.item()
self._trigger_callbacks('on_train_batch_end')
def _eval(self, data: DataLoader):
"""Compute the validation loss on the given labeled data."""
self._embed_model.eval()
for idx, (inputs, labels) in enumerate(data):
self.state.batch_index = idx
self._trigger_callbacks('on_val_batch_begin')
inputs = to_device_paddle(inputs, self.device)
labels = to_device_paddle(labels, self.device)
embeddings = self.embed_model(inputs)
loss = self._loss(embeddings, labels)
self.state.current_loss = loss.item()
self._trigger_callbacks('on_val_batch_end')
def _fit(
self,
train_data: 'DocumentArray',
eval_data: Optional['DocumentArray'] = None,
preprocess_fn: Optional['PreprocFnType'] = None,
collate_fn: Optional['CollateFnType'] = None,
epochs: int = 10,
batch_size: int = 256,
num_items_per_class: Optional[int] = None,
num_workers: int = 0,
limit: int = 20,
distance: str = 'cosine',
):
"""Fit the model - training and evaluation."""
# Get dataloaders
train_dl = self._get_data_loader(
train_data,
batch_size=batch_size,
num_items_per_class=num_items_per_class,
shuffle=True,
preprocess_fn=preprocess_fn,
collate_fn=collate_fn,
num_workers=num_workers,
)
if eval_data:
eval_dl = self._get_data_loader(
eval_data,
batch_size=batch_size,
num_items_per_class=num_items_per_class,
shuffle=False,
preprocess_fn=preprocess_fn,
collate_fn=collate_fn,
num_workers=num_workers,
)
# Set state
self.state = TunerState(num_epochs=epochs)
self._trigger_callbacks('on_fit_begin')
for epoch in range(epochs):
# Setting here as re-shuffling can change number of batches
self.state.epoch = epoch
self.state.num_batches_train = len(train_dl)
self.state.batch_index = 0
self._trigger_callbacks('on_epoch_begin')
self._trigger_callbacks('on_train_epoch_begin')
self._train(train_dl)
if self._scheduler_step == 'epoch' and self._scheduler is not None:
self._scheduler.step()
self._trigger_callbacks('on_train_epoch_end')
if eval_data:
self.state.num_batches_val = len(eval_dl)
self.state.batch_index = 0
self._trigger_callbacks('on_val_begin')
self._eval(eval_dl)
self._trigger_callbacks('on_val_end')
self._trigger_callbacks('on_epoch_end')
if self.stop_training:
break
self._trigger_callbacks('on_fit_end')
def save(self, *args, **kwargs):
"""Save the embedding model.
        You need to pass the path where the model should be saved, either positionally
        in ``args`` or in ``kwargs`` under the ``path`` key.
:param args: Arguments to pass to ``paddle.save`` function.
:param kwargs: Keyword arguments to pass to ``paddle.save`` function.
"""
paddle.save(self.embed_model.state_dict(), *args, **kwargs)
```
#### File: tuner/pytorch/__init__.py
```python
from typing import TYPE_CHECKING, Optional, Union
import torch
from torch import nn
from torch.optim.lr_scheduler import _LRScheduler
from torch.optim.optimizer import Optimizer
from torch.utils.data._utils.collate import default_collate
from torch.utils.data.dataloader import DataLoader
from ... import __default_tag_key__
from ...device import get_device_pytorch, to_device_pytorch
from ..base import BaseTuner
from ..state import TunerState
from . import losses
from .datasets import PytorchClassDataset, PytorchInstanceDataset, PytorchSessionDataset
if TYPE_CHECKING:
from docarray import DocumentArray
from ...helper import CollateFnType, PreprocFnType
class CollateAll:
def __init__(self, content_collate_fn: 'CollateFnType'):
self._content_collate_fn = content_collate_fn
def __call__(self, inputs):
batch_content = self._content_collate_fn([x[0] for x in inputs])
batch_labels = default_collate([x[1] for x in inputs])
return batch_content, batch_labels
class PytorchTuner(BaseTuner[nn.Module, DataLoader, Optimizer, _LRScheduler]):
def _get_loss(self, loss: Union[nn.Module, str]) -> nn.Module:
"""Get the loss layer."""
if isinstance(loss, str):
return getattr(losses, loss)()
elif isinstance(loss, nn.Module):
return loss
def _get_data_loader(
self,
data: 'DocumentArray',
batch_size: int,
shuffle: bool,
preprocess_fn: Optional['PreprocFnType'] = None,
collate_fn: Optional['CollateFnType'] = None,
num_items_per_class: Optional[int] = None,
num_workers: int = 0,
) -> DataLoader:
"""Get the dataloader for the dataset."""
if collate_fn:
collate_fn_all = CollateAll(collate_fn)
else:
collate_fn_all = None
if __default_tag_key__ in data[0].tags:
dataset = PytorchClassDataset(data, preprocess_fn=preprocess_fn)
else:
if len(data[0].matches) > 0:
dataset = PytorchSessionDataset(data, preprocess_fn=preprocess_fn)
else:
dataset = PytorchInstanceDataset(data, preprocess_fn=preprocess_fn)
batch_sampler = self._get_batch_sampler(
dataset,
batch_size,
shuffle=shuffle,
num_items_per_class=num_items_per_class,
)
data_loader = DataLoader(
dataset=dataset,
batch_sampler=batch_sampler,
collate_fn=collate_fn_all,
num_workers=num_workers,
)
return data_loader
def _move_model_to_device(self):
"""Move the model to device and set device."""
self.device = get_device_pytorch(self._device_name)
self._embed_model = self._embed_model.to(self.device)
def _default_configure_optimizer(self, model: nn.Module) -> Optimizer:
"""Get the default Adam optimizer."""
optimizer = torch.optim.Adam(model.parameters(), lr=self._learning_rate)
return optimizer
def _train(self, data: DataLoader):
"""Train the model on the given labeled data."""
self._embed_model.train()
for idx, (inputs, labels) in enumerate(data):
# Set state variables
self.state.batch_index = idx
for param_idx, param_group in enumerate(self._optimizer.param_groups):
self.state.learning_rates[f'group_{param_idx}'] = param_group['lr']
self._trigger_callbacks('on_train_batch_begin')
inputs = to_device_pytorch(inputs, self.device)
labels = to_device_pytorch(labels, self.device)
embeddings = self.embed_model(inputs)
loss = self._loss(embeddings, labels)
self._optimizer.zero_grad()
loss.backward()
self._optimizer.step()
if self._scheduler_step == 'batch' and self._scheduler is not None:
self._scheduler.step()
self.state.current_loss = loss.item()
self._trigger_callbacks('on_train_batch_end')
def _eval(self, data: DataLoader):
"""Compute the validation loss on the given labeled data."""
self._embed_model.eval()
for idx, (inputs, labels) in enumerate(data):
self.state.batch_index = idx
self._trigger_callbacks('on_val_batch_begin')
inputs = to_device_pytorch(inputs, self.device)
labels = to_device_pytorch(labels, self.device)
with torch.no_grad():
embeddings = self.embed_model(inputs)
loss = self._loss(embeddings, labels)
self.state.current_loss = loss.item()
self._trigger_callbacks('on_val_batch_end')
def _fit(
self,
train_data: 'DocumentArray',
eval_data: Optional['DocumentArray'] = None,
preprocess_fn: Optional['PreprocFnType'] = None,
collate_fn: Optional['CollateFnType'] = None,
epochs: int = 10,
batch_size: int = 256,
num_items_per_class: Optional[int] = None,
num_workers: int = 0,
):
"""Fit the model - training and evaluation."""
# Get dataloaders
train_dl = self._get_data_loader(
train_data,
batch_size=batch_size,
num_items_per_class=num_items_per_class,
shuffle=True,
preprocess_fn=preprocess_fn,
collate_fn=collate_fn,
num_workers=num_workers,
)
if eval_data:
eval_dl = self._get_data_loader(
eval_data,
batch_size=batch_size,
num_items_per_class=num_items_per_class,
shuffle=False,
preprocess_fn=preprocess_fn,
collate_fn=collate_fn,
num_workers=num_workers,
)
# Set state
self.state = TunerState(num_epochs=epochs)
self._trigger_callbacks('on_fit_begin')
for epoch in range(epochs):
# Setting here as re-shuffling can change number of batches
self.state.epoch = epoch
self.state.num_batches_train = len(train_dl)
self.state.batch_index = 0
self._trigger_callbacks('on_epoch_begin')
self._trigger_callbacks('on_train_epoch_begin')
self._train(train_dl)
if self._scheduler_step == 'epoch' and self._scheduler is not None:
self._scheduler.step()
self._trigger_callbacks('on_train_epoch_end')
if eval_data:
self.state.num_batches_val = len(eval_dl)
self.state.batch_index = 0
self._trigger_callbacks('on_val_begin')
self._eval(eval_dl)
self._trigger_callbacks('on_val_end')
self._trigger_callbacks('on_epoch_end')
if self.stop_training:
break
self._trigger_callbacks('on_fit_end')
def save(self, *args, **kwargs):
"""Save the embedding model.
        You need to pass the path where the model should be saved, either positionally
        in ``args`` or in ``kwargs`` under the ``f`` key.
:param args: Arguments to pass to ``torch.save`` function.
:param kwargs: Keyword arguments to pass to ``torch.save`` function.
"""
torch.save(self.embed_model.state_dict(), *args, **kwargs)
```
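An end-to-end sketch (not part of the original file); `train_data` is assumed to be a labeled `DocumentArray` in the class/session format the datasets above expect.

```python
# Hedged sketch: train a tiny embedding model and persist its weights.
import torch
from finetuner.tuner.pytorch import PytorchTuner

embed_model = torch.nn.Sequential(
    torch.nn.Flatten(),
    torch.nn.Linear(in_features=28 * 28, out_features=32),
)
tuner = PytorchTuner(embed_model, loss='TripletLoss')
tuner.fit(train_data=train_data, epochs=2, batch_size=64)
tuner.save(f='embed_model.pt')  # forwarded to ``torch.save``
```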
#### File: integration/paddle/test_self_supervised_learner.py
```python
import pytest
import finetuner as ft
from finetuner.tailor.paddle.projection_head import ProjectionHead
from finetuner.tuner.augmentation import vision_preprocessor
from finetuner.tuner.paddle.losses import NTXentLoss
@pytest.fixture
def default_model():
import paddle
return paddle.vision.models.resnet50(pretrained=False)
@pytest.mark.parametrize(
"n_cls,n_epochs,loss_cls,temperature",
[
(5, 2, NTXentLoss, 0.1),
(10, 2, NTXentLoss, 0.2),
(10, 5, NTXentLoss, 1.0),
],
)
def test_self_supervised_learning(
default_model, create_easy_data_instance, n_cls, n_epochs, loss_cls, temperature
):
# Prepare model and data
data, vecs = create_easy_data_instance(n_cls)
projection_head = ProjectionHead(in_features=2048)
model = ft.fit(
model=default_model,
train_data=data,
epochs=n_epochs,
batch_size=len(data),
loss=loss_cls(temperature),
num_items_per_class=2,
learning_rate=1e-2,
preprocess_fn=vision_preprocessor(),
to_embedding_model=True,
layer_name='adaptiveavgpool2d_173',
projection_head=projection_head,
input_size=(3, 224, 224),
)
assert model
```
#### File: integration/paddle/test_tail_and_tune.py
```python
import paddle.nn as nn
import pytest
from finetuner import fit
@pytest.fixture
def embed_model():
return nn.Sequential(
nn.Flatten(),
nn.Linear(in_features=128, out_features=256),
nn.ReLU(),
nn.Linear(in_features=256, out_features=128),
nn.ReLU(),
nn.Linear(in_features=128, out_features=64),
nn.ReLU(),
nn.Linear(in_features=64, out_features=32),
)
def test_tail_and_tune(embed_model, create_easy_data_session):
data, _ = create_easy_data_session(10, 128, 1000)
model = fit(
model=embed_model,
train_data=data,
epochs=5,
to_embedding_model=True,
input_size=(128,),
output_dim=16,
layer_name='linear_4',
)
assert model
assert model != embed_model
```
#### File: integration/torch/test_callback.py
```python
import pytest
import torch
from finetuner.tuner.pytorch import PytorchTuner
def test_basic_callback(generate_random_data, expected_results, record_callback):
train_data = generate_random_data(8, 2, 2)
eval_data = generate_random_data(4, 2, 2)
model = torch.nn.Sequential(
torch.nn.Flatten(), torch.nn.Linear(in_features=2, out_features=2)
)
# Train
tuner = PytorchTuner(model, callbacks=[record_callback])
tuner.fit(
train_data=train_data,
eval_data=eval_data,
epochs=2,
batch_size=4,
num_items_per_class=2,
)
expected_calls = [x[0] for x in expected_results]
expected_epochs = [x[1] for x in expected_results]
expected_batch_idx = [x[2] for x in expected_results]
expected_num_epochs = [x[3] for x in expected_results]
expected_num_batches_train = [x[4] for x in expected_results]
expected_num_batches_val = [x[5] for x in expected_results]
assert record_callback.calls == expected_calls
assert record_callback.epochs == expected_epochs
assert record_callback.batch_idx == expected_batch_idx
assert record_callback.num_epochs == expected_num_epochs
assert record_callback.num_batches_train == expected_num_batches_train
assert record_callback.num_batches_val == expected_num_batches_val
def test_on_exception(exception_callback, generate_random_data):
train_data = generate_random_data(8, 2, 2)
model = torch.nn.Sequential(
torch.nn.Flatten(), torch.nn.Linear(in_features=2, out_features=2)
)
ec = exception_callback(ValueError('Test'))
# Train
tuner = PytorchTuner(model, callbacks=[ec])
with pytest.raises(ValueError, match='Test'):
tuner.fit(train_data=train_data, epochs=1, batch_size=4, num_items_per_class=2)
assert ec.calls == ['on_exception']
def test_on_keyboard_interrupt(exception_callback, generate_random_data):
train_data = generate_random_data(8, 2, 2)
model = torch.nn.Sequential(
torch.nn.Flatten(), torch.nn.Linear(in_features=2, out_features=2)
)
ec = exception_callback(KeyboardInterrupt)
# Train
tuner = PytorchTuner(model, callbacks=[ec])
tuner.fit(train_data=train_data, epochs=1, batch_size=4, num_items_per_class=2)
assert ec.calls == ['on_keyboard_interrupt']
```
#### File: tailor/paddle/test_projection_head.py
```python
import paddle
import paddle.nn as nn
import pytest
from finetuner.tailor.paddle import PaddleTailor
from finetuner.tailor.paddle.projection_head import ProjectionHead
@pytest.mark.parametrize(
'in_features, output_dim, num_layers',
[(2048, 128, 3), (2048, 256, 3), (1024, 512, 5)],
)
def test_projection_head(in_features, output_dim, num_layers):
head = ProjectionHead(
in_features=in_features, output_dim=output_dim, num_layers=num_layers
)
out = head(paddle.rand([2, in_features]))
assert list(out.shape) == [2, output_dim]
def test_attach_custom_projection_head(paddle_vgg16_cnn_model):
class _BottleneckModel(nn.Layer):
def __init__(self):
super().__init__()
self._linear1 = nn.Linear(in_features=4096, out_features=1024)
self._relu1 = nn.ReLU()
self._linear2 = nn.Linear(in_features=1024, out_features=512)
self._softmax = nn.Softmax()
def forward(self, input_):
return self._softmax(self._linear2(self._relu1(self._linear1(input_))))
paddle_tailor = PaddleTailor(
model=paddle_vgg16_cnn_model,
input_size=(3, 224, 224),
input_dtype='float32',
)
tailed_model = paddle_tailor.to_embedding_model(
layer_name='linear_36', freeze=False, projection_head=_BottleneckModel()
)
out = tailed_model(paddle.rand((1, 3, 224, 224)))
assert out.shape == [1, 512]
@pytest.mark.parametrize(
'paddle_model, input_size, input_, dim_projection_head, out_dim_embed_model, input_dtype',
[
('paddle_dense_model', (128,), (2, 128), 128, 10, 'float32'),
('paddle_simple_cnn_model', (1, 28, 28), (2, 1, 28, 28), 128, 10, 'float32'),
(
'paddle_vgg16_cnn_model',
(3, 224, 224),
(2, 3, 224, 224),
128,
1000,
'float32',
),
('paddle_stacked_lstm', (128,), (2, 128), 128, 5, 'int64'),
],
indirect=['paddle_model'],
)
def test_attach_default_projection_head(
paddle_model,
input_size,
input_,
dim_projection_head,
out_dim_embed_model,
input_dtype,
):
torch_tailor = PaddleTailor(
model=paddle_model, input_size=input_size, input_dtype=input_dtype
)
tailed_model = torch_tailor.to_embedding_model(
freeze=False, projection_head=ProjectionHead(in_features=out_dim_embed_model)
)
assert tailed_model.projection_head
rand_input = paddle.cast(paddle.rand(input_), input_dtype)
out = tailed_model(rand_input)
assert list(out.shape) == [2, dim_projection_head]
```
#### File: tests/unit/test_embedding.py
```python
import numpy as np
import paddle
import pytest
import tensorflow as tf
import torch
from docarray import Document, DocumentArray
from finetuner.embedding import embed
from finetuner.toydata import generate_fashion
embed_models = {
'keras': lambda: tf.keras.Sequential(
[
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(32),
]
),
'pytorch': lambda: torch.nn.Sequential(
torch.nn.Flatten(),
torch.nn.Linear(
in_features=28 * 28,
out_features=128,
),
torch.nn.ReLU(),
torch.nn.Linear(in_features=128, out_features=32),
),
'paddle': lambda: paddle.nn.Sequential(
paddle.nn.Flatten(),
paddle.nn.Linear(
in_features=28 * 28,
out_features=128,
),
paddle.nn.ReLU(),
paddle.nn.Linear(in_features=128, out_features=32),
),
}
random_embed_models = {
'keras': lambda: tf.keras.Sequential(
[tf.keras.layers.Dropout(0.5), tf.keras.layers.BatchNormalization()]
),
'pytorch': lambda: torch.nn.Sequential(
torch.nn.Dropout(0.5), torch.nn.BatchNorm1d(128)
),
'paddle': lambda: paddle.nn.Sequential(
paddle.nn.Dropout(0.5), paddle.nn.BatchNorm1D(128)
),
}
@pytest.mark.parametrize('framework', ['keras', 'pytorch', 'paddle'])
def test_embedding_on_random_network(framework):
docs = DocumentArray([Document() for _ in range(2)])
docs.tensors = np.random.random([2, 128]).astype(np.float32)
embed_model = random_embed_models[framework]()
embed(docs, embed_model)
embed1 = docs.embeddings.copy()
# reset
docs.embeddings = np.random.random([2, 128]).astype(np.float32)
# try it again, it should yield the same result
embed(docs, embed_model)
np.testing.assert_array_almost_equal(docs.embeddings, embed1)
# reset
docs.embeddings = np.random.random([2, 128]).astype(np.float32)
# now do this one by one
embed(docs[:1], embed_model)
embed(docs[-1:], embed_model)
np.testing.assert_array_almost_equal(docs.embeddings, embed1)
@pytest.mark.parametrize('framework', ['keras', 'pytorch', 'paddle'])
def test_set_embeddings(framework, tmpdir):
# works for DA
embed_model = embed_models[framework]()
docs = DocumentArray(generate_fashion(num_total=100))
embed(docs, embed_model)
assert docs.embeddings.shape == (100, 32)
```
#### File: tuner/dataset/test_class_sampler.py
```python
from collections import Counter
import pytest
from finetuner.tuner.dataset.samplers import ClassSampler
@pytest.mark.parametrize("batch_size", [-1, 0])
def test_wrong_batch_size(batch_size: int):
with pytest.raises(ValueError, match="batch_size"):
ClassSampler([0, 1], batch_size, 1)
@pytest.mark.parametrize("num_items_per_class", [-1, 0])
def test_wrong_num_items_per_class(num_items_per_class: int):
with pytest.raises(ValueError, match="num_items_per_class"):
ClassSampler([0, 1], 1, num_items_per_class)
def test_normal_case():
labels = [1, 1, 2, 2, 3, 3, 4, 4]
sampler = ClassSampler(labels, 4, 2)
assert len(sampler) == 2
all_inds = []
for i, batch in enumerate(sampler):
all_inds += batch
assert len(batch) == 4
assert i + 1 == 2
assert set(all_inds) == set(range(8))
def test_classes_in_batch():
labels = []
for i in range(50):
labels += [i] * 20
for i in range(50, 100):
labels += [i] * 19 # Mini repeating test as well
class_to_label = {}
for idx, label in enumerate(labels):
class_to_label[idx] = label
sampler = ClassSampler(labels, 20, 5)
assert len(sampler) >= 98
for i, batch in enumerate(sampler):
c = Counter([class_to_label[element] for element in batch])
assert len(c) == 4
for val in c.values():
assert val == 5
assert i + 1 >= 98 # Best we can hope for
def test_almost_full_coverage():
"""Check that almost all items get covered in one epoch"""
labels = []
for i in range(100):
labels += [i] * 20
sampler = ClassSampler(labels, 20, 5)
assert len(sampler) >= 98
c = Counter()
for i, batch in enumerate(sampler):
c.update(batch)
assert i + 1 >= 98 # Best we can hope for
assert set(c).issubset(range(100 * 20))
assert c.most_common(1)[0][1] == 1
def test_label_repetition1():
"""Test that elements from class get repeated to fill the batch"""
labels = [1, 1, 1, 2, 2]
sampler = ClassSampler(labels, 6, 3)
assert len(sampler) == 1
all_inds = []
for batch in sampler:
all_inds += batch
assert len(batch) == 6
c = Counter(all_inds)
assert c[3] >= 1
assert c[4] >= 1
assert c[3] + c[4] == 3
@pytest.mark.parametrize('num_items_per_class', [4, 2])
def test_label_repetition2(num_items_per_class):
labels = [1, 1, 1, 1, 2, 2, 2]
sampler = ClassSampler(labels, 4, num_items_per_class)
assert len(sampler) == 2
all_inds = []
for i, batch in enumerate(sampler):
all_inds += batch
assert len(batch) == 4
assert i + 1 == 2
c = Counter(all_inds)
assert c[4] >= 1
assert c[5] >= 1
assert c[6] >= 1
assert c[6] + c[5] + c[4] == 4
def test_cutoff1():
"""Cutoff due to last batch being < batch_size"""
labels = [1, 1, 1, 1, 2, 2]
sampler = ClassSampler(labels, 4, 2)
assert len(sampler) == 1
all_inds = []
for i, batch in enumerate(sampler):
all_inds += batch
assert i + 1 == 1
# Make sure the first class got cut off
c = Counter(all_inds)
assert c[0] + c[1] + c[2] + c[3] == 2
def test_cutoff2():
"""Cutoff due to last batch only containing one class"""
labels = [1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2]
class_to_label = {}
for idx, label in enumerate(labels):
class_to_label[idx] = label
sampler = ClassSampler(labels, 4, 2)
assert len(sampler) == 2
all_inds = []
for i, batch in enumerate(sampler):
all_inds += batch
assert i + 1 == 2
# Make sure that most common items are cut off
c = Counter([class_to_label[label] for label in all_inds])
assert c[1] == 4
assert c[2] == 4
```
#### File: tuner/keras/test_model_checkpoint.py
```python
import copy
import os
import time
import keras
import pytest
import tensorflow as tf
from tensorflow.keras.optimizers.schedules import CosineDecay
from finetuner.tuner.base import BaseTuner
from finetuner.tuner.callback import BestModelCheckpoint, TrainingCheckpoint
from finetuner.tuner.keras import KerasTuner
from finetuner.tuner.state import TunerState
@pytest.fixture(scope='module')
def keras_model() -> BaseTuner:
embed_model = tf.keras.Sequential(
[
tf.keras.layers.Flatten(input_shape=()),
tf.keras.layers.Dense(10, activation='relu'),
]
)
return embed_model
def test_save_on_every_epoch_end(keras_model: BaseTuner, tmpdir):
checkpoint = TrainingCheckpoint(save_dir=tmpdir)
tuner = KerasTuner(embed_model=keras_model)
tuner.state = TunerState(epoch=0, batch_index=2, current_loss=1.1)
checkpoint.on_epoch_end(tuner)
assert os.listdir(tmpdir) == ['saved_model_epoch_01']
tuner.state = TunerState(epoch=1, batch_index=2, current_loss=0.5)
checkpoint.on_epoch_end(tuner)
assert os.listdir(tmpdir) == ['saved_model_epoch_02']
def test_same_model(keras_model: BaseTuner, tmpdir):
tuner = KerasTuner(keras_model)
checkpoint = TrainingCheckpoint(save_dir=tmpdir)
tuner.state = TunerState(epoch=1, batch_index=2, current_loss=1.1)
checkpoint.on_epoch_end(tuner)
new_model = keras.models.load_model(os.path.join(tmpdir, 'saved_model_epoch_02'))
for l1, l2 in zip(new_model.layers, keras_model.layers):
assert l1.get_config() == l2.get_config()
assert len(l1.weights) == len(l2.weights)
for idx in range(len(l1.weights)):
assert (l1.get_weights()[idx] == l2.get_weights()[idx]).all()
def test_load_model(keras_model: BaseTuner, tmpdir):
    def get_optimizer_and_scheduler(embedding_model):
opt = tf.keras.optimizers.Adam(learning_rate=0.1)
scheduler = CosineDecay(initial_learning_rate=0.1, decay_steps=2)
return (opt, scheduler)
    def get_optimizer_and_scheduler_different_parameters(embedding_model):
opt = tf.keras.optimizers.Adam(learning_rate=0.01)
scheduler = CosineDecay(initial_learning_rate=0.01, decay_steps=3)
return (opt, scheduler)
new_model = copy.deepcopy(keras_model)
before_stop_tuner = KerasTuner(
keras_model, configure_optimizer=get_optimizer_and_scheduler
)
before_stop_tuner.state = TunerState(epoch=10, batch_index=2, current_loss=1.1)
after_stop_tuner = KerasTuner(
new_model, configure_optimizer=get_optimizer_and_scheduler_different_parameters
)
after_stop_tuner.state = TunerState(epoch=0, batch_index=2, current_loss=1.1)
checkpoint = TrainingCheckpoint(save_dir=tmpdir)
checkpoint.on_epoch_end(before_stop_tuner)
checkpoint.load(after_stop_tuner, os.path.join(tmpdir, 'saved_model_epoch_11'))
assert after_stop_tuner.state.epoch == 11
for l1, l2 in zip(
before_stop_tuner.embed_model.layers, after_stop_tuner.embed_model.layers
):
assert l1.get_config() == l2.get_config()
assert len(l1.weights) == len(l2.weights)
for idx in range(len(l1.weights)):
assert (l1.get_weights()[idx] == l2.get_weights()[idx]).all()
def test_save_best_only(keras_model: BaseTuner, tmpdir):
checkpoint = BestModelCheckpoint(save_dir=tmpdir, monitor='train_loss')
tuner = KerasTuner(embed_model=keras_model)
tuner.state = TunerState(epoch=0, batch_index=2, current_loss=1.1)
checkpoint.on_train_batch_end(tuner)
checkpoint.on_epoch_end(tuner)
assert os.listdir(tmpdir) == ['best_model_train_loss']
creation_time = os.path.getmtime(os.path.join(tmpdir, 'best_model_train_loss'))
tuner.state = TunerState(epoch=1, batch_index=2, current_loss=1.5)
checkpoint.on_train_batch_end(tuner)
checkpoint.on_epoch_end(tuner)
assert creation_time == os.path.getmtime(
os.path.join(tmpdir, 'best_model_train_loss')
)
tuner.state = TunerState(epoch=2, batch_index=2, current_loss=0.5)
time.sleep(2)
checkpoint.on_train_batch_end(tuner)
checkpoint.on_epoch_end(tuner)
assert creation_time < os.path.getmtime(
os.path.join(tmpdir, 'best_model_train_loss')
)
def test_load_best_model(keras_model: BaseTuner, tmpdir):
new_model = copy.deepcopy(keras_model)
checkpoint = BestModelCheckpoint(tmpdir)
before_tuner = KerasTuner(embed_model=keras_model)
before_tuner.state = TunerState(epoch=0, batch_index=2, current_loss=1.1)
checkpoint.on_val_batch_end(before_tuner)
checkpoint.on_epoch_end(before_tuner)
after_tuner = KerasTuner(embed_model=new_model)
after_tuner.state = TunerState(epoch=1, batch_index=2, current_loss=0)
assert os.listdir(tmpdir) == ['best_model_val_loss']
checkpoint.load(after_tuner, fp=os.path.join(tmpdir, 'best_model_val_loss'))
for l1, l2 in zip(after_tuner.embed_model.layers, before_tuner.embed_model.layers):
assert l1.get_config() == l2.get_config()
assert len(l1.weights) == len(l2.weights)
for idx in range(len(l1.weights)):
assert (l1.get_weights()[idx] == l2.get_weights()[idx]).all()
```
#### File: tuner/pytorch/test_gpu.py
```python
import pytest
import torch
import torch.nn as nn
from docarray import DocumentArray
from finetuner.embedding import embed
from finetuner.toydata import generate_fashion
from finetuner.tuner.pytorch import PytorchTuner
all_test_losses = ['SiameseLoss', 'TripletLoss']
@pytest.mark.gpu
@pytest.mark.parametrize('loss', all_test_losses)
def test_gpu(generate_random_data, loss):
data = generate_random_data(40, 4)
embed_model = torch.nn.Sequential(torch.nn.Linear(in_features=4, out_features=4))
tuner = PytorchTuner(embed_model, loss, device='cuda')
# Run quick training - mainly makes sure no errors appear, and that the model is
# moved to GPU
tuner.fit(data, data, epochs=2, batch_size=8)
# Test the model was moved (by checking one of its parameters)
assert next(embed_model.parameters()).device.type == 'cuda'
@pytest.mark.gpu
@pytest.mark.parametrize('loss', all_test_losses)
def test_gpu_session(generate_random_session_data, loss):
data = generate_random_session_data(40, 4)
embed_model = torch.nn.Sequential(torch.nn.Linear(in_features=4, out_features=4))
tuner = PytorchTuner(embed_model, loss, device='cuda')
# Run quick training - mainly makes sure no errors appear, and that the model is
# moved to GPU
tuner.fit(data, data, epochs=2, batch_size=9)
# Test the model was moved (by checking one of its parameters)
assert next(embed_model.parameters()).device.type == 'cuda'
@pytest.mark.gpu
def test_set_embeddings_gpu(tmpdir):
# works for DA
embed_model = nn.Sequential(
nn.Flatten(),
nn.Linear(
in_features=28 * 28,
out_features=128,
),
nn.ReLU(),
nn.Linear(in_features=128, out_features=32),
)
docs = DocumentArray(generate_fashion(num_total=100))
embed(docs, embed_model, 'cuda')
assert docs.embeddings.shape == (100, 32)
``` |
{
"source": "jina-ai/hub-builder",
"score": 2
} |
#### File: jina-ai/hub-builder/app.py
```python
import json
import os
import pathlib
from jina.docker.hubio import HubIO
from jina.helper import get_now_timestamp, get_full_version
from jina.logging import default_logger
from jina.parsers.hub import set_hub_build_parser
def get_parser():
parser = set_hub_build_parser()
parser.add_argument('--fail-fast', action='store_true', default=False,
help='when set to true, cancels all build jobs if any one fails.')
parser.add_argument('--summary', type=str, default=f'build-{get_now_timestamp()}.json',
help='path of the build summary')
return parser
def main(args):
all_targets = list(
set(os.path.abspath(p.parent) for p in pathlib.Path(args.path).absolute().glob('**/manifest.yml')))
all_targets.sort()
default_logger.info(f'{len(all_targets)} targets to build')
info, env_info = get_full_version()
import docker
client = docker.APIClient(base_url='unix://var/run/docker.sock')
summary = {
'builder_args': vars(args),
'num_tasks': len(all_targets),
'start_time': get_now_timestamp(),
'host_info': {
'jina': info,
'jina_envs': env_info,
'docker': client.info(),
},
'tasks': []
}
for t in all_targets:
args.path = t
args.pull = True
args.test_uses = True
s = HubIO(args).build()
s['path'] = t
summary['tasks'].append(s)
if not s['is_build_success']:
default_logger.error(f'❌ {t} fails to build')
if args.fail_fast:
break
else:
default_logger.success(f'✅ {t} is successfully built!')
with open(args.summary, 'w') as fp:
json.dump(summary, fp)
failed = [t for t in summary['tasks'] if not t['is_build_success']]
if failed:
default_logger.warning(f'{len(failed)}/{len(all_targets)} failed to build')
for t in failed:
default_logger.error(f'{t["path"]}\t{t["exception"]}')
if __name__ == '__main__':
a = get_parser().parse_args()
main(a)
``` |
{
"source": "jina-ai/hub-updater-action",
"score": 2
} |
#### File: jina-ai/hub-updater-action/hub_updater.py
```python
import glob
import os
import sys
import time
import traceback
from typing import List, Optional, Tuple, Dict
import git
import github
import pkg_resources
import requests
import semver
from github import Github, Repository
from github.Issue import Issue
from github.PullRequest import PullRequest
from ruamel.yaml import YAML
WAIT_BETWEEN_PR_CHECKS = 5 * 60
TIME_WAIT_PR_CREATE = 30
FIX_MODULE_TEMPLATE = 'fix module '
COMPARISON_TYPES = ["major", "minor", "patch"]
TAG_IN_ISSUES = os.environ.get('TAG_IN_ISSUES', '')
MODULES_REPO = os.environ.get('MODULES_REPO')
GITHUB_TOKEN = os.environ.get('GITHUB_TOKEN')
COMPARISON_LEVEL = os.environ.get('COMPARISON_LEVEL')
TEST_AGAIN = os.getenv('TEST_AGAIN', '').lower() == 'true'
FORCE_RECHECK_PR = os.getenv('FORCE_RECHECK_PR', '').lower() == 'true'
print(f'TEST_AGAIN = {TEST_AGAIN}')
print(f'FORCE_RECHECK_PR = {FORCE_RECHECK_PR}')
if MODULES_REPO is None:
print(f'Error: MODULES_REPO needs to be set. Exiting...')
sys.exit(1)
if GITHUB_TOKEN is None:
print(f'Error: GITHUB_TOKEN needs to be set. Exiting...')
sys.exit(1)
if COMPARISON_LEVEL is None:
print(f'Error: COMPARISON_LEVEL needs to be set. Exiting...')
sys.exit(1)
if COMPARISON_LEVEL not in COMPARISON_TYPES:
print(f'Error: COMPARISON_LEVEL needs to be one of {COMPARISON_TYPES}')
sys.exit(1)
GITHUB_API_HEADERS = {
f'Authorization': f'token {GITHUB_TOKEN}',
'Accept': 'application/vnd.github.v3+json'
}
# this one has PR push access
g = Github(GITHUB_TOKEN)
print(f'rate limit status: {g.get_rate_limit()}')
yaml = YAML()
def get_pr_from_gh(pr_name, all_prs):
"""try to obtain existing open PR with name in GH"""
prs = [
pr for pr in all_prs
if pr_name in pr.title
]
if len(prs) > 1:
print(f'Warning: Too many PRs matched query "{pr_name}": {[p.html_url for p in prs]}. Returning first.')
return prs[0]
elif len(prs) == 1:
pr = prs[0]
print(f'Found existing PR for {pr_name}: {pr.html_url}')
return prs[0]
else:
print(f'Couldn\'t retrieve PR for module version and jina version. Will create...')
return None
def check_pr_is_valid(pr, module, module_version, jina_core_version) -> Optional[PullRequest]:
"""Check whether PR is open and valid to be re-checked.
Otherwise, open a new one on the new version to be tested.
Could also be that we force the re-check of a closed PR that matches the version
:param pr: the PR object
:param module: the directory name
:param module_version: version of the module
:param jina_core_version: the new Jina core version to check
:return: PR, if to be re-checked
"""
print(f'PR found for {module} on Jina core v{jina_core_version} ({pr.html_url}) ...', )
if FORCE_RECHECK_PR:
print('Will rename as [old] and try again...')
pr.edit(
title=f'[old] {pr.title}'
)
else:
if pr.state == 'open':
# something must've stopped us from closing it
# make sure we close it
print('PR was open. Will handle now...')
return pr
else:
print('Warning: Module has already been tested. Skipping...')
def create_pr(manifest_path, requirements_path, module, jina_core_version, hub_repo, hub_origin, gh_hub_repo,
all_prs) -> Optional[PullRequest]:
"""for each module with manifest.yml attempts to open a PR for testing specific jina version returns None (if no
need to open new PR), old PR (if found and 'open'), new PR (if versions haven't been tested before)
"""
# noinspection PyTypeChecker
pr = None
timestamp = time.time()
print(f'handling {module}...')
module_version = None
with open(manifest_path) as fp:
info = yaml.load(fp)
module_version = info['version']
# in order to trigger a PR we make a 'dummy' change to the timestamp
info['timestamp'] = timestamp
# means this module version + jina version has been tested before
# NOTE: DO NOT MODIFY THIS AS IT'S NEEDED FOR SEARCHING ON GITHUB
pr_name = build_pr_name(jina_core_version, module, module_version)
pr: PullRequest = get_pr_from_gh(pr_name, all_prs)
if pr and check_pr_is_valid(pr, module, module_version, jina_core_version):
return pr
with open(manifest_path, 'w') as fp:
yaml.dump(info, fp)
if os.path.exists(requirements_path):
new_requirements = []
update = False
with open(requirements_path, 'r') as fp:
requirements = pkg_resources.parse_requirements(fp)
for req in requirements:
if 'jina' in str(req):
update = True
new_requirements.append(f'jina=={jina_core_version}')
else:
new_requirements.append(str(req))
if update:
with open(requirements_path, 'w') as fp:
fp.write('\n'.join(new_requirements))
br_name = ''
try:
print('preparing the branch ...')
br_name = f'chore-{module.lower()}-{module_version}-core-{jina_core_version.replace(".", "-")}'
# delete any existing branch if exists
# if we don't find the PR (above), then it must've been
# renamed (for a good reason)
delete_remote_branch(br_name, expect_exists=False)
new_branch = hub_repo.create_head(br_name)
new_branch.checkout()
print(f'bumping version for timestamp {timestamp} and committing to {new_branch}...')
hub_repo.git.add(update=True)
# NOTE limited to 72 characters by commit lint
hub_repo.index.commit(f'chore: bump {module}')
hub_repo.git.push('--set-upstream', hub_origin, hub_repo.head.ref)
print('making a PR ...')
body_string = f'Due to the release of jina core v{jina_core_version}, this draft PR is created in order to ' \
f'trigger an automatic build & push of the module '
pr = gh_hub_repo.create_pull(
title=pr_name,
body=body_string,
head=br_name,
base='master',
draft=True
)
except github.GithubException as e:
print('caught GH exception')
print(f'Error: {repr(e), type(e), e.data.get("message")}')
print(f'Retry limit reached? {g.get_rate_limit()}')
except Exception as e:
        print(f'Error: {repr(e), type(e)}')
print(f'Retry limit reached? {g.get_rate_limit()}')
raise e
finally:
hub_repo.git.checkout('master')
if br_name:
hub_repo.delete_head(br_name, force=True)
return pr
def build_pr_name(jina_core_version, module, module_version):
pr_name = f'chore: testing/building {module} ({module_version}) on new jina core: {jina_core_version}'
if COMPARISON_LEVEL == 'major':
version = semver.parse(jina_core_version)
pr_name = f'chore: testing/building {module} ({module_version}) on new jina core: {version["major"]}.'
elif COMPARISON_LEVEL == 'minor':
version = semver.parse(jina_core_version)
pr_name = f'chore: testing/building {module} ({module_version}) on new jina core: {version["major"]}.{version["minor"]}.'
return pr_name
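# Illustrative note (not part of the original action): with COMPARISON_LEVEL='minor',
# build_pr_name('2.1.3', 'DeepSegmenter', '0.0.5') yields
# 'chore: testing/building DeepSegmenter (0.0.5) on new jina core: 2.1.', a prefix that
# get_pr_from_gh() matches against existing PR titles for the same minor release line.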
def all_checks_passed(runs: Optional[List[Dict]]) -> Optional[bool]:
"""
check whether all checks from a PR head ref have completed and passed
"""
if runs is None:
return None
for c in runs:
if c['status'] == 'completed':
if c['conclusion'] == 'failure':
return False
else:
return None
return True
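# Return value summary: False as soon as any completed run concluded with 'failure',
# None if some run has not completed yet (the caller retries later), and True only when
# every run completed without a failure.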
def get_checks_for_pr(sha) -> Optional[List[Dict]]:
result = requests.get(
f'https://api.github.com/repos/{MODULES_REPO}/commits/{sha}/check-runs',
headers=GITHUB_API_HEADERS
)
checks = result.json()
if 'check_runs' not in checks.keys():
print(f'Warning: "check_runs" not in PR JSON from Github API. Will retry...')
return None
pr_checks = checks['check_runs']
print(
f'Got {len(pr_checks)} runs to check for PR: \n{[(r["name"], r["status"], r["conclusion"]) for r in pr_checks]}')
return pr_checks
def open_issue(pr, pr_checks, hub_repo: Repository, module, jina_core_version):
"""opens an issue for the PR with the failed checks (if not already open)"""
issue_name = f'{FIX_MODULE_TEMPLATE}{module}'
existing_issue_for_pr = [
i for i in list(hub_repo.get_issues(state='open'))
if i.title == issue_name
]
if len(existing_issue_for_pr) > 0:
print(f'Found existing issue: {existing_issue_for_pr}')
return existing_issue_for_pr[0]
else:
# open the issue
body = f"""
**[This is an automated issue opened as part of the hub modules update GH action. DO NOT EDIT THIS DESCRIPTION]**
Could not build module {module} for Jina core version {jina_core_version} because some of the checks failed:
```
{[(c['name'], c['status'], c['conclusion']) for c in pr_checks]}
```
See {pr.html_url} for more info. {TAG_IN_ISSUES}
"""
issue = hub_repo.create_issue(
title=issue_name,
body=body,
)
print(f'opened issue at {issue.html_url}')
return issue
def delete_remote_branch(br_name, expect_exists=True):
req = requests.delete(f'https://api.github.com/repos/{MODULES_REPO}/git/refs/heads/{br_name}',
headers=GITHUB_API_HEADERS)
if req.status_code == 204:
if expect_exists:
print(f'branch {br_name} deleted')
else:
print(f'Warning: {br_name} existed and was deleted')
elif expect_exists:
print(f'WARNING: Output from attempting to delete branch. code {req.status_code}, json: {req.json()}')
return
def close_related_issue(pr, hub_repo, module):
issue_name = f'{FIX_MODULE_TEMPLATE}{module}'
existing_issues_for_pr: List[Issue] = [
i for i in list(hub_repo.get_issues(state='open'))
if i.title == issue_name
]
if len(existing_issues_for_pr) > 0:
print(f'Found existing issue: {existing_issues_for_pr}')
if len(existing_issues_for_pr) > 1:
print(f'Warning: Found too many matching issues. Will close them all.')
for i in existing_issues_for_pr:
i.create_comment(f'Closing issue as build succeeded on PR {pr.html_url}')
i.edit(state='closed')
return
def handle_prs(prs_modules: List[Tuple[PullRequest, str]], hub_repo, jina_core_version):
"""
traverses list of open PRs. Confirms whether checks have passed or not. If not, opens issues
"""
# noinspection PyBroadException
if len(prs_modules) == 0:
return
# noinspection PyBroadException
try:
# allow for checks to be initiated. It's not instantaneous
print(f'waiting for 2 mins. before continuing...')
time.sleep(120)
new_prs = []
while len(prs_modules) > 0:
for i, pr_module in enumerate(prs_modules):
print(f'rate limit status: {g.get_rate_limit()}')
pr = None
try:
pr = pr_module[0]
module = pr_module[1]
print(f'Checking PR {pr} ( {pr.html_url} )...')
br_name = pr.head.ref
last_commit = sorted(list(pr.get_commits()), key=lambda t: t.commit.author.date)[-1]
sha = last_commit.sha
pr_checks: Optional[List[Dict]] = get_checks_for_pr(sha)
checks_passed = all_checks_passed(pr_checks)
if checks_passed is None:
print(f'Not all checks have completed for {br_name}. Skipping and will attempt later...')
new_prs.append((pr, module))
else:
if checks_passed:
print(f'All checks completed and passed for {br_name}. Commenting and closing...')
pr.create_issue_comment(
f'Automatic build successful. Image has been built and deployed.'
)
# close any open issues related to this module using the template
close_related_issue(pr, hub_repo, module)
else:
print(
f'warning: not all checks have passed for {br_name}. '
f'Will open issue and abandon trying.')
issue = open_issue(pr, pr_checks, hub_repo, module, jina_core_version)
pr.create_issue_comment(
f'Automatic build failed in this PR and we '
f'have opened an issue here: {issue.html_url}. '
f'Closing this PR. '
)
pr.edit(state='closed')
delete_remote_branch(br_name, expect_exists=True)
except Exception as e:
print(f'Error handling pr {pr}. Error: {repr(e)}')
# starting the checking process again on the subset
# of PRs that had not yet completed
prs_modules = new_prs
print(f'Have {len(prs_modules)} PRs left to check')
if len(prs_modules) > 0:
print(f'waiting for {WAIT_BETWEEN_PR_CHECKS // 60} mins. before continuing...')
time.sleep(WAIT_BETWEEN_PR_CHECKS)
new_prs = []
print('Done!')
except Exception:
print(f'Error occurred: {traceback.format_exc()}')
def get_issues_for_modules(hub_repo: Repository):
issues: List[Issue] = list(hub_repo.get_issues(
state='open'
))
issues = [
i for i in issues if FIX_MODULE_TEMPLATE in i.title
]
names = [
i.title.split(FIX_MODULE_TEMPLATE)[-1]
for i in issues
]
print(f'found {len(names)} modules to be fixed: {names}')
return names
def get_jina_version() -> str:
"""gets stable version of jina"""
response = requests.get('https://api.jina.ai/latest').json()
return response['version']
def main():
modules_dir = 'hub_repo'
hub_repo = git.Repo(modules_dir)
hub_origin = hub_repo.remote(name='origin')
gh_hub_repo: Repository = g.get_repo(MODULES_REPO)
print(f'initiated modules repo: {hub_repo} with GitHub repo {gh_hub_repo} (origin: {hub_origin})')
jina_core_version = get_jina_version()
print(f'got jina core v: "{jina_core_version}"')
print(f'cur. dir. is "{os.getcwd()}"')
modules = glob.glob(f'{modules_dir}/**/manifest.yml', recursive=True)
print(f'got {len(modules)} modules to update')
prs: List[Tuple[PullRequest, str]] = []
# filter those for which there is an open issue
# template 'fix module modulename' (ex. 'fix module DeepSegmenter')
to_be_fixed = get_issues_for_modules(gh_hub_repo)
# traverse list of modules in jina-hub
all_prs = list(gh_hub_repo.get_pulls(state='all'))
for manifest_path in modules:
print(f'rate limit status: {g.get_rate_limit()}')
module = manifest_path.split('/')[-2]
requirements_path = os.path.join(os.path.dirname(manifest_path), 'requirements.txt')
if not TEST_AGAIN and module in to_be_fixed:
print(f'skipping {module} as there is an open issue for it...')
else:
pr = create_pr(manifest_path, requirements_path, module, jina_core_version, hub_repo, hub_origin,
gh_hub_repo, all_prs)
print(f'Waiting {TIME_WAIT_PR_CREATE} secs. to avoid triggering abuse flagging system...')
time.sleep(TIME_WAIT_PR_CREATE)
if pr:
prs.append((pr, module))
handle_prs(prs, gh_hub_repo, jina_core_version)
if __name__ == '__main__':
main()
``` |
{
"source": "jina-ai/jinad",
"score": 2
} |
#### File: api/endpoints/pod.py
```python
import uuid
from typing import Dict, List, Union
from fastapi import status, APIRouter, File, UploadFile
from jina.logging import JinaLogger
from jinad.store import pod_store
from jinad.models import SinglePodModel, ParallelPodModel
from jinad.excepts import HTTPException, PodStartException
from jinad.helper import pod_to_namespace, create_meta_files_from_upload
logger = JinaLogger(context='👻 PODAPI')
router = APIRouter()
@router.put(
path='/upload',
summary='Upload pod context yamls & pymodules',
)
async def _upload(
uses_files: List[UploadFile] = File(()),
pymodules_files: List[UploadFile] = File(())
):
"""
"""
upload_status = 'nothing to upload'
if uses_files:
[create_meta_files_from_upload(current_file) for current_file in uses_files]
upload_status = 'uploaded'
if pymodules_files:
[create_meta_files_from_upload(current_file) for current_file in pymodules_files]
upload_status = 'uploaded'
return {
'status_code': status.HTTP_200_OK,
'status': upload_status
}
@router.put(
path='/pod',
summary='Create a Pod via Flow or CLI',
)
async def _create(
pod_arguments: Union[SinglePodModel, ParallelPodModel]
):
"""This is used to create a remote Pod which gets triggered either in a Flow context or via CLI
    Args: pod_arguments (SinglePodModel or ParallelPodModel)
"""
pod_arguments = pod_to_namespace(args=pod_arguments)
with pod_store._session():
try:
pod_id = pod_store._create(pod_arguments=pod_arguments)
except PodStartException as e:
raise HTTPException(status_code=404,
detail=f'Pod couldn\'t get started: {repr(e)}')
except Exception as e:
logger.error(f'Got an error while creating a pod {repr(e)}')
raise HTTPException(status_code=404,
detail=f'Something went wrong')
return {
'status_code': status.HTTP_200_OK,
'pod_id': pod_id,
'status': 'started'
}
@router.delete(
path='/pod',
summary='Delete pod',
)
async def _delete(
pod_id: uuid.UUID
):
"""Close Pod context
"""
with pod_store._session():
try:
pod_store._delete(pod_id=pod_id)
return {
'status_code': status.HTTP_200_OK
}
except KeyError:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
detail=f'Pod ID {pod_id} not found! Please create a new Pod')
@router.on_event('shutdown')
def _shutdown():
with pod_store._session():
pod_store._delete_all()
```
#### File: jinad/jinad/config.py
```python
from . import __version__, __prefix__
from pydantic import BaseSettings, validator
class BaseConfig(BaseSettings):
class Config:
env_prefix = 'JINAD_'
class FastAPIConfig(BaseConfig):
NAME: str = 'Jina Remote Manager'
DESCRIPTION: str = 'REST API for managing Jina on Remote'
VERSION: str = __version__
PREFIX: str = '/' + __prefix__
class OpenAPITags(BaseConfig):
API_TAGS: list = [{
"name": "Jina Remote Management",
"description": "API to invoke remote Flows/Pods/Peas",
"externalDocs": {
"description": "Jina Remote Context Manager",
"url": "https://docs.jina.ai/",
},
}]
FLOW_API_TAGS: list = [{
"name": "Remote Flow Manager",
"description": "API to invoke local/remote Flows",
"externalDocs": {
"description": "Jina Flow Context Manager",
"url": "https://docs.jina.ai/chapters/flow/index.html",
},
}]
POD_API_TAGS: list = [{
"name": "Remote Pod Manager",
"description": "API to invoke remote Pods (__should be used by Flow APIs only__)",
"externalDocs": {
"description": "Jina 101",
"url": "https://docs.jina.ai/chapters/101/.sphinx.html",
},
}]
PEA_API_TAGS: list = [{
"name": "Remote Pea Manager",
"description": "API to invoke remote Peas",
"externalDocs": {
"description": "Jina 101",
"url": "https://docs.jina.ai/chapters/101/.sphinx.html",
},
}]
LOG_API_TAGS: list = [{
"name": "logs",
"description": "Endpoint to get streaming logs from flows/pods",
}]
class ServerConfig(BaseConfig):
# TODO: check if HOST can be a ipaddress.IPv4Address
HOST: str = '0.0.0.0'
PORT: int = 8000
class JinaDConfig(BaseConfig):
CONTEXT: str = 'all'
@validator('CONTEXT')
def validate_name(cls, value):
if value.lower() not in ['all', 'flow', 'pod', 'pea']:
raise ValueError('CONTEXT must be either all, flow or pod or pea')
return value.lower()
class LogConfig(BaseConfig):
# TODO: Read config from some file
PATH: str = '/tmp/jina-log/%s/log.log'
jinad_config = JinaDConfig()
log_config = LogConfig()
fastapi_config = FastAPIConfig()
server_config = ServerConfig()
openapitags_config = OpenAPITags()
```
#### File: jinad/models/custom.py
```python
import requests
import argparse
from typing import Union
from pydantic import create_model, validator, Field
JINA_API_URL = 'https://api.jina.ai/latest'
def get_latest_api():
"""Fetches the latest jina cli args"""
response = requests.get(JINA_API_URL)
all_cli_args = response.json()
return all_cli_args
def get_module_args(all_args: dict, module: str):
    """Fetches the cli args for modules like `flow`, `pod`"""
    module_args = None
    for current_module in all_args['methods']:
        if current_module['name'] == module:
            module_args = current_module
    return module_args
def generate_validator(field: str, choices: list):
""" Pydantic validator classmethod generator to validate fields exist in choices """
def validate_arg_choices(v, values):
if v not in choices:
raise ValueError(f'Invalid value {v} for field {field}'
f'Valid choices are {choices}')
return v
validate_arg_choices.__qualname__ = 'validate_' + field
return validator(field, allow_reuse=True)(validate_arg_choices)
def get_pydantic_fields(config: Union[dict, argparse.ArgumentParser]):
all_options = {}
choices_validators = {}
if isinstance(config, dict):
for arg in config['options']:
arg_key = arg['name']
arg_type = arg['type']
if arg['choices']:
choices_validators[f'validator_for_{arg_key}'] = generate_validator(field=arg_key,
choices=arg['choices'])
if arg_type == 'method':
arg_type = type(arg['default']) if arg['default'] else int
arg_type = 'str' if arg_type == 'FileType' else arg_type
current_field = Field(default=arg['default'],
example=arg['default'],
description=arg['help'])
all_options[arg_key] = (arg_type, current_field)
# TODO(Deepankar): possible refactoring to `jina.api_to_dict()`
if isinstance(config, argparse.ArgumentParser):
# Ignoring first 3 as they're generic args
from jina.parsers.helper import KVAppendAction
for arg in config._actions[3:]:
arg_key = arg.dest
arg_type = arg.type
if arg.choices:
choices_validators[f'validator_for_{arg_key}'] = generate_validator(field=arg_key,
choices=arg.choices)
# This is to handle the Enum args (to check if it is a bound method)
if hasattr(arg_type, '__self__'):
arg_type = type(arg.default) if arg.default else int
arg_type = str if isinstance(arg_type, argparse.FileType) else arg_type
arg_type = dict if type(arg) == KVAppendAction else arg_type
current_field = Field(default=arg.default,
example=arg.default,
description=arg.help)
all_options[arg_key] = (arg_type, current_field)
return all_options, choices_validators
class PydanticConfig:
arbitrary_types_allowed = True
def build_pydantic_model(kind: str = 'local',
model_name: str = 'CustomModel',
module: str = 'pod'):
if kind == 'api':
all_cli_args = get_latest_api()
module_args = get_module_args(all_args=all_cli_args,
module=module)
all_fields, field_validators = get_pydantic_fields(config=module_args)
elif kind == 'local':
from jina.parsers import set_pea_parser, set_pod_parser
from jina.parsers.flow import set_flow_parser
if module == 'pod':
parser = set_pod_parser()
elif module == 'pea':
parser = set_pea_parser()
elif module == 'flow':
parser = set_flow_parser()
all_fields, field_validators = get_pydantic_fields(config=parser)
return create_model(model_name,
**all_fields,
__config__=PydanticConfig,
__validators__=field_validators)
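# Hedged usage sketch (not part of the original module): build a model from the locally
# installed jina parsers and validate a payload with it. The field names used below
# ('name', 'parallel') are examples only and depend on the installed jina version.
#   SinglePod = build_pydantic_model(kind='local', model_name='SinglePodModel', module='pod')
#   pod = SinglePod(name='my-pod', parallel=2)  # raises a validation error on bad choices
#   print(pod.dict())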
```
#### File: integration/distributed/conftest.py
```python
import os
import time
import pytest
@pytest.fixture()
def docker_compose(request):
os.system(
f"docker-compose -f {request.param} --project-directory . up --build -d --remove-orphans"
)
time.sleep(10)
yield
os.system(
f"docker-compose -f {request.param} --project-directory . down --remove-orphans"
)
```
#### File: api/endpoints/test_flow.py
```python
import uuid
import pytest
from fastapi import UploadFile
from jinad.api.endpoints import flow
_temp_id = uuid.uuid1()
def mock_create_success(**kwargs):
return _temp_id, '0.0.0.0', 12345
def mock_flow_creation_exception(**kwargs):
raise flow.FlowCreationException
def mock_flow_parse_exception(**kwargs):
raise flow.FlowYamlParseException
def mock_flow_start_exception(**kwargs):
raise flow.FlowStartException
def mock_fetch_success(**kwargs):
return '0.0.0.0', 12345, '!Flow\npods:\n pod1:\n uses:_pass'
def mock_fetch_exception(**kwargs):
raise KeyError
@pytest.mark.asyncio
async def test_create_from_pods_success(monkeypatch):
monkeypatch.setattr(flow.flow_store, '_create', mock_create_success)
response = await flow._create_from_pods()
assert response['status_code'] == 200
assert response['flow_id'] == _temp_id
assert response['host'] == '0.0.0.0'
assert response['port'] == 12345
assert response['status'] == 'started'
@pytest.mark.asyncio
async def test_create_from_pods_flow_create_exception(monkeypatch):
monkeypatch.setattr(flow.flow_store, '_create', mock_flow_creation_exception)
with pytest.raises(flow.HTTPException) as response:
await flow._create_from_pods()
assert response.value.status_code == 404
assert response.value.detail == 'Bad pods args'
@pytest.mark.asyncio
async def test_create_from_pods_flow_start_exception(monkeypatch):
monkeypatch.setattr(flow.flow_store, '_create', mock_flow_start_exception)
with pytest.raises(flow.HTTPException) as response:
await flow._create_from_pods()
assert response.value.status_code == 404
assert response.value.detail == 'Flow couldn\'t get started'
@pytest.mark.asyncio
async def test_create_from_yaml_success(monkeypatch):
monkeypatch.setattr(flow.flow_store, '_create', mock_create_success)
response = await flow._create_from_yaml(yamlspec=UploadFile(filename='abc.yaml'),
uses_files=[UploadFile(filename='abcd.yaml')],
pymodules_files=[UploadFile(filename='abc.py')])
assert response['status_code'] == 200
assert response['flow_id'] == _temp_id
assert response['host'] == '0.0.0.0'
assert response['port'] == 12345
assert response['status'] == 'started'
@pytest.mark.asyncio
async def test_create_from_yaml_parse_exception(monkeypatch):
monkeypatch.setattr(flow.flow_store, '_create', mock_flow_parse_exception)
with pytest.raises(flow.HTTPException) as response:
await flow._create_from_yaml(yamlspec=UploadFile(filename='abc.yaml'),
uses_files=[UploadFile(filename='abcd.yaml')],
pymodules_files=[UploadFile(filename='abc.py')])
assert response.value.status_code == 404
assert response.value.detail == 'Invalid yaml file.'
@pytest.mark.asyncio
async def test_create_from_yaml_flow_start_exception(monkeypatch):
monkeypatch.setattr(flow.flow_store, '_create', mock_flow_start_exception)
with pytest.raises(flow.HTTPException) as response:
await flow._create_from_yaml(yamlspec=UploadFile(filename='abc.yaml'),
uses_files=[UploadFile(filename='abcd.yaml')],
pymodules_files=[UploadFile(filename='abc.py')])
assert response.value.status_code == 404
assert 'Flow couldn\'t get started' in response.value.detail
@pytest.mark.asyncio
async def test_fetch_flow_success(monkeypatch):
monkeypatch.setattr(flow.flow_store, '_get', mock_fetch_success)
response = await flow._fetch(_temp_id)
assert response['status_code'] == 200
assert response['host'] == '0.0.0.0'
assert response['port'] == 12345
assert response['yaml'] == '!Flow\npods:\n pod1:\n uses:_pass'
@pytest.mark.asyncio
async def test_fetch_flow_success_yaml_only(monkeypatch):
monkeypatch.setattr(flow.flow_store, '_get', mock_fetch_success)
response = await flow._fetch(_temp_id, yaml_only=True)
assert response.status_code == 200
assert response.body == b'!Flow\npods:\n pod1:\n uses:_pass'
assert response.media_type == 'application/yaml'
@pytest.mark.asyncio
async def test_fetch_flow_keyerror(monkeypatch):
monkeypatch.setattr(flow.flow_store, '_get', mock_fetch_exception)
with pytest.raises(flow.HTTPException) as response:
await flow._fetch(_temp_id)
assert response.value.status_code == 404
assert response.value.detail == f'Flow ID {_temp_id} not found! Please create a new Flow'
def mock_ping_exception(**kwargs):
raise flow.GRPCServerError
@pytest.mark.asyncio
@pytest.mark.skip('unblocking jinad tests. will fix in next PR')
async def test_ping_success(monkeypatch, mocker):
response = await flow._ping(host='0.0.0.0', port=12345)
assert response['status_code'] == 200
assert response['detail'] == 'connected'
@pytest.mark.asyncio
@pytest.mark.skip('unblocking jinad tests. will fix in next PR')
async def test_ping_exception(monkeypatch):
monkeypatch.setattr(flow, 'py_client', mock_ping_exception)
with pytest.raises(flow.HTTPException) as response:
await flow._ping(host='0.0.0.0', port=12345)
assert response.value.status_code == 404
assert response.value.detail == 'Cannot connect to GRPC Server on 0.0.0.0:12345'
@pytest.mark.asyncio
async def test_delete_success(monkeypatch):
monkeypatch.setattr(flow.flow_store, '_delete', lambda **kwargs: None)
response = await flow._delete(_temp_id)
assert response['status_code'] == 200
@pytest.mark.asyncio
async def test_delete_exception(monkeypatch):
monkeypatch.setattr(flow.flow_store, '_delete', mock_fetch_exception)
with pytest.raises(flow.HTTPException) as response:
await flow._delete(_temp_id)
assert response.value.status_code == 404
assert response.value.detail == f'Flow ID {_temp_id} not found! Please create a new Flow'
```
#### File: unit/models/test_pod.py
```python
from jinad.models import SinglePodModel, ParallelPodModel
def test_single_no_exceptions():
SinglePodModel()
# this gets executed while verifying inputs
SinglePodModel().dict()
# this gets executed while creating docs
SinglePodModel().schema()
def test_parallel_no_exceptions():
ParallelPodModel()
# this gets executed while verifying inputs
ParallelPodModel().dict()
# this gets executed while creating docs
ParallelPodModel().schema()
``` |
{
"source": "jina-ai/jina-meme-search",
"score": 3
} |
#### File: jina-meme-search/frontend/helper.py
```python
from jina import Client, Document
from config import TEXT_PORT, TEXT_SERVER, IMAGE_PORT, IMAGE_SERVER, TOP_K
class UI:
about_block = """
### About
This is a meme search engine using [Jina's neural search framework](https://github.com/jina-ai/jina/).
- [Live demo](https://examples.jina.ai/memes)
- [Play with it in a notebook](https://colab.research.google.com/github/jina-ai/workshops/blob/main/memes/meme_search.ipynb) (text-only)
- [Repo](https://github.com/alexcg1/jina-meme-search)
- [Dataset](https://www.kaggle.com/abhishtagatya/imgflipscraped-memes-caption-dataset)
"""
css = f"""
<style>
.reportview-container .main .block-container{{
max-width: 1200px;
padding-top: 2rem;
padding-right: 2rem;
padding-left: 2rem;
padding-bottom: 2rem;
}}
.reportview-container .main {{
color: "#111";
background-color: "#eee";
}}
</style>
"""
headers = {"Content-Type": "application/json"}
def search_by_text(input, server=TEXT_SERVER, port=TEXT_PORT, limit=TOP_K):
client = Client(host=server, protocol="http", port=port)
response = client.search(
Document(text=input),
parameters={"limit": limit},
)
matches = response[0].matches
return matches
def search_by_file(document, server=IMAGE_SERVER, port=IMAGE_PORT, limit=TOP_K):
"""
Wrap file in Jina Document for searching, and do all necessary conversion to make similar to indexed Docs
"""
client = Client(host=server, protocol="http", port=port)
query_doc = document
query_doc.convert_blob_to_image_tensor()
query_doc.set_image_tensor_shape((64,64))
query_doc.set_image_tensor_normalization()
response = client.search(
query_doc,
parameters={"limit": limit},
return_results=True,
show_progress=True,
)
matches = response[0].matches
return matches
def convert_file_to_document(query):
data = query.read()
doc = Document(blob=data)
# print(doc)
return doc
def get_image_url(file_path, domain="http://i.imgflip.com/"):
filename = file_path.split("/")[-1]
url = domain + filename
return url
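# Hedged usage sketch (assumes the text search Flow configured in config.py is reachable):
#   matches = search_by_text('surprised pikachu')
#   for m in matches:
#       print(m.scores, get_image_url(m.uri))
# Whether a match carries the original image path in `uri` depends on how the memes were
# indexed; adjust the field access to your indexer's output.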
``` |
{
"source": "jina-ai/now",
"score": 2
} |
#### File: v1/routers/text.py
```python
from typing import List
from docarray import Document, DocumentArray
from fastapi import APIRouter
from jina import Client
from now.bff.v1.models.text import (
NowTextIndexRequestModel,
NowTextResponseModel,
NowTextSearchRequestModel,
)
from now.bff.v1.routers.helper import process_query
router = APIRouter()
# Index
@router.post(
"/index",
summary='Add more data to the indexer',
)
def index(data: NowTextIndexRequestModel):
"""
Append the list of texts to the indexer.
"""
index_docs = DocumentArray()
for text in data.texts:
index_docs.append(Document(text=text))
c = Client(host=data.host, port=data.port)
c.post('/index', index_docs)
# Search
@router.post(
"/search",
response_model=List[NowTextResponseModel],
summary='Search text data via text or image as query',
)
def search(data: NowTextSearchRequestModel):
"""
Retrieve matching texts for a given text as query. Query should be `base64` encoded
using human-readable characters - `utf-8`.
"""
query_doc = process_query(data.text, data.image)
c = Client(host=data.host, port=data.port)
docs = c.post('/search', query_doc, parameters={"limit": data.limit})
return docs[0].matches.to_dict()
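# Hedged usage sketch (the route prefix, host and ports are assumptions about the deployment):
#   import base64, requests
#   payload = {
#       'host': 'localhost', 'port': 31080, 'limit': 10,
#       'text': base64.b64encode('cute cats'.encode('utf-8')).decode('utf-8'),
#   }
#   requests.post('http://localhost:8080/api/v1/text/search', json=payload).json()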
```
#### File: now/now/constants.py
```python
from typing import List
class Modalities:
IMAGE = 'image'
MUSIC = 'music'
TEXT = 'text'
@classmethod
def as_list(cls) -> List[str]:
return [cls.IMAGE, cls.MUSIC, cls.TEXT]
class DatasetTypes:
DEMO = 'demo'
PATH = 'path'
URL = 'url'
DOCARRAY = 'docarray'
@classmethod
def as_list(cls) -> List[str]:
return [cls.DEMO, cls.PATH, cls.URL, cls.DOCARRAY]
class Qualities:
MEDIUM = 'medium'
GOOD = 'good'
EXCELLENT = 'excellent'
@classmethod
def as_list(cls) -> List[str]:
return [cls.MEDIUM, cls.GOOD, cls.EXCELLENT]
BASE_STORAGE_URL = (
'https://storage.googleapis.com/jina-fashion-data/data/one-line/datasets'
)
IMAGE_MODEL_QUALITY_MAP = {
Qualities.MEDIUM: ('ViT-B32', 'openai/clip-vit-base-patch32'),
Qualities.GOOD: ('ViT-B16', 'openai/clip-vit-base-patch16'),
Qualities.EXCELLENT: ('ViT-L14', 'openai/clip-vit-large-patch14'),
}
AVAILABLE_DATASET = {
Modalities.IMAGE: [
'best-artworks',
'nft-monkey',
'tll',
'bird-species',
'stanford-cars',
'deepfashion',
'nih-chest-xrays',
'geolocation-geoguessr',
],
Modalities.MUSIC: [
'music-genres-small',
'music-genres-large',
],
Modalities.TEXT: [
'rock-lyrics',
'pop-lyrics',
'rap-lyrics',
'indie-lyrics',
'metal-lyrics',
],
}
```
#### File: now/data_loading/build_datasets.py
```python
import csv
import json
import multiprocessing as mp
import os
import re
from dataclasses import dataclass, field
from random import shuffle
from typing import Any, Dict, Optional
import pandas as pd
from jina import Document, DocumentArray
from tqdm import tqdm
from now.data_loading.utils import upload_to_gcloud_bucket
IMAGE_SHAPE = (224, 224)
@dataclass
class _DataPoint:
    id: Optional[str] = None  # restored: the dataset builders below pass id=... when creating _DataPoint
text: Optional[str] = None
image_path: Optional[str] = None
content_type: str = 'image'
label: str = ''
split: str = 'none'
tags: Dict[str, Any] = field(default_factory=lambda: {})
def _build_doc(datapoint: _DataPoint) -> Document:
# doc = Document(id=datapoint.id)
doc = Document()
if datapoint.content_type == 'image':
doc.uri = datapoint.image_path
doc.load_uri_to_image_tensor()
doc.set_image_tensor_shape(IMAGE_SHAPE)
else:
doc.text = datapoint.text
doc.tags = {'finetuner_label': datapoint.label, 'split': datapoint.split}
doc.tags.update(datapoint.tags)
doc.tags.update({'content_type': datapoint.content_type})
return doc
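# Note: _build_doc maps every _DataPoint onto a Document; image points load and resize
# their file into `tensor`, text points only set `text`, and the finetuner label, split
# and content_type always end up in `doc.tags`.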
def _build_deepfashion(root: str, num_workers: int = 8) -> DocumentArray:
"""
Build the deepfashion dataset.
Download the raw dataset from
https://drive.google.com/drive/folders/0B7EVK8r0v71pVDZFQXRsMDZCX1E?resourcekey=0-4R4v6zl4CWhHTsUGOsTstw
:param root: the dataset root folder.
:param num_workers: the number of parallel workers to use.
:return: DocumentArray
"""
extension = '.jpg'
imagedir = os.path.join(root, 'Img')
fsplit = os.path.join(root, 'Eval', 'list_eval_partition.txt')
fcolors = os.path.join(root, 'Anno', 'attributes', 'list_color_cloth.txt')
# read list_eval_partition.txt
img2split = {}
with open(fsplit, 'r') as f:
for line in f.read().splitlines()[2:]:
img, _, split, _ = re.split(r' +', line)
img2split[img] = split
# read list_color_cloth.txt
img2color = {}
with open(fcolors, 'r') as f:
for line in f.read().splitlines()[2:]:
img, color, *_ = re.split(r' +', line)
img2color[img] = color
# add image docs
data = []
for rootdir, _, fnames in os.walk(imagedir):
labels = []
productid = os.path.relpath(rootdir, imagedir)
for fname in fnames:
if fname.endswith(extension):
path = os.path.join(rootdir, fname)
imgid = os.path.relpath(path, imagedir)
split = img2split[imgid]
color = img2color[imgid]
label = productid + '/' + color
labels.append(label)
data.append(
_DataPoint(
id=imgid,
image_path=path,
label=label,
split=split,
tags={'color': color},
)
)
# add text doc
if len(labels) > 0:
for label in set(labels):
_, gender, category, _, color = label.split('/')
text_elements = [category, gender, color]
shuffle(text_elements)
text = (
f'{" ".join(text_elements)}'.lower()
.replace('-', ' ')
.replace('_', ' ')
)
data.append(
_DataPoint(
id=rootdir,
text=text,
content_type='text',
label=label,
tags={'color': color},
)
)
# build docs
with mp.Pool(processes=num_workers) as pool:
docs = list(tqdm(pool.imap(_build_doc, data)))
return DocumentArray(docs)
def _build_nih_chest_xrays(root: str, num_workers: int = 8) -> DocumentArray:
"""
Build the NIH chest xrays dataset.
Download the raw dataset from
https://www.kaggle.com/nih-chest-xrays/data
:param root: the dataset root folder.
:param num_workers: the number of parallel workers to use.
:return: DocumentArray
"""
extension = '.png'
flabels = 'Data_Entry_2017.csv'
ftrain = 'train_val_list.txt'
ftest = 'test_list.txt'
# read Data_Entry_2017.csv
# labels - fname: (finding, patient id)
with open(os.path.join(root, flabels), 'r') as f:
reader = csv.reader(f)
next(reader)
labels = {row[0]: (row[1], row[3]) for row in reader}
# read train_val_list.txt
with open(os.path.join(root, ftrain), 'r') as f:
train_list = f.read().splitlines()
# read test_list.txt
with open(os.path.join(root, ftest), 'r') as f:
test_list = f.read().splitlines()
# add image docs
data = []
for rootdir, _, fnames in os.walk(root):
for fname in fnames:
if fname.endswith(extension):
path = os.path.join(rootdir, fname)
label = labels.get(fname)[0] # or labels[1]
if fname in train_list:
split = 'train'
elif fname in test_list:
split = 'test'
else:
raise ValueError(
f'Doc with fname: {fname} not in train or test splits'
)
data.append(
_DataPoint(id=fname, image_path=path, label=label, split=split)
)
# add text docs
labelnames = {label for _, (label, __) in labels.items()}
for label in labelnames:
data.append(
_DataPoint(
id=label,
text=label.lower()
.replace('|', ' ')
.replace('_', ' ')
.replace('-', ' '),
content_type='text',
label=label,
)
)
# build docs
with mp.Pool(processes=num_workers) as pool:
docs = list(tqdm(pool.imap(_build_doc, data)))
return DocumentArray(docs)
def _build_geolocation_geoguessr(root: str, num_workers: int = 8) -> DocumentArray:
"""
Build the geolocation-geoguessr dataset.
Download the raw dataset from
https://www.kaggle.com/ubitquitin/geolocation-geoguessr-images-50k
:param root: the dataset root folder.
:param num_workers: the number of parallel workers to use.
:return: DocumentArray
"""
extension = '.jpg'
# add image docs
data = []
for rootdir, _, fnames in os.walk(root):
label = os.path.relpath(rootdir, root)
for fname in fnames:
if fname.endswith(extension):
path = os.path.join(rootdir, fname)
data.append(_DataPoint(id=fname, image_path=path, label=label))
# add text doc
if len(fnames) > 0:
data.append(
_DataPoint(
id=label, text=label.lower(), content_type='text', label=label
)
)
# build docs
with mp.Pool(processes=num_workers) as pool:
docs = list(tqdm(pool.imap(_build_doc, data)))
return DocumentArray(docs)
def _build_stanford_cars(root: str, num_workers: int = 8) -> DocumentArray:
"""
Build the stanford cars dataset.
Download the raw dataset from
https://www.kaggle.com/jessicali9530/stanford-cars-dataset
:param root: the dataset root folder.
:param num_workers: the number of parallel workers to use.
:return: DocumentArray
"""
extension = '.jpg'
train_data = os.path.join(root, 'car_data', 'train')
test_data = os.path.join(root, 'car_data', 'test')
# add image docs
data = []
labels = []
for split, root in [('train', train_data), ('test', test_data)]:
for rootdir, _, fnames in os.walk(root):
if len(fnames) > 0:
label = os.path.relpath(rootdir, root)
labels.append(label)
for fname in fnames:
if fname.endswith(extension) and 'cropped' not in fname:
path = os.path.join(rootdir, fname)
data.append(
_DataPoint(
id=fname, image_path=path, label=label, split=split
)
)
# add text docs
labels = set(labels)
for label in labels:
data.append(
_DataPoint(id=label, text=label.lower(), content_type='text', label=label)
)
# build docs
with mp.Pool(processes=num_workers) as pool:
docs = list(tqdm(pool.imap(_build_doc, data)))
return DocumentArray(docs)
def _build_bird_species(root: str, num_workers: int = 8) -> DocumentArray:
"""
Build the bird species dataset.
Download the raw dataset from
https://www.kaggle.com/veeralakrishna/200-bird-species-with-11788-images
:param root: the dataset root folder.
:param num_workers: the number of parallel workers to use.
:return: DocumentArray
"""
extension = '.jpg'
root = os.path.join(root, 'CUB_200_2011', 'CUB_200_2011')
fimages = os.path.join(root, 'images.txt')
fclasses = os.path.join(root, 'classes.txt')
flabels = os.path.join(root, 'image_class_labels.txt')
fsplit = os.path.join(root, 'train_test_split.txt')
contentdir = os.path.join(root, 'images')
# read images.txt
image2id = {}
with open(fimages, 'r') as f:
for line in f.read().splitlines():
iid, fname = line.split()
iid = int(iid)
image2id[fname] = iid
# read classes.txt
id2class = {}
with open(fclasses, 'r') as f:
for line in f.read().splitlines():
iid, classname = line.split()
iid = int(iid)
id2class[iid] = classname
# read image_class_labels.txt
imageid2classid = {}
with open(flabels, 'r') as f:
for line in f.read().splitlines():
iid, cid = line.split()
iid, cid = int(iid), int(cid)
imageid2classid[iid] = cid
# read train_test_split.txt
imageid2split = {}
with open(fsplit, 'r') as f:
for line in f.read().splitlines():
iid, split = line.split()
iid, split = int(iid), int(split)
imageid2split[iid] = split
# add image docs
data = []
for rootdir, _, fnames in os.walk(contentdir):
for fname in fnames:
if fname.endswith(extension):
path = os.path.join(rootdir, fname)
image = os.path.relpath(path, contentdir)
iid = image2id[image]
cid = imageid2classid[iid]
label = id2class[cid]
split = imageid2split[iid]
split = 'train' if split else 'test'
data.append(
_DataPoint(id=fname, image_path=path, label=label, split=split)
)
# add text docs
labels = {label for _, label in id2class.items()}
for label in labels:
data.append(
_DataPoint(
id=label,
text=label[4:].lower().replace('_', ' '),
content_type='text',
label=label,
)
)
# build docs
with mp.Pool(processes=num_workers) as pool:
docs = list(tqdm(pool.imap(_build_doc, data)))
return DocumentArray(docs)
def _build_best_artworks(root: str, num_workers: int = 8) -> DocumentArray:
"""
Build the best artworks dataset.
Download the raw dataset from
https://www.kaggle.com/ikarus777/best-artworks-of-all-time
:param root: the dataset root folder.
:param num_workers: the number of parallel workers to use.
:return: DocumentArray
"""
extension = '.jpg'
fartists = os.path.join(root, 'artists.csv')
contentdir = os.path.join(root, 'images', 'images')
# read artists.csv
with open(fartists, 'r') as f:
reader = csv.reader(f)
next(reader)
label2genre = {row[1]: row[3] for row in reader}
# add image docs
data = []
for rootdir, _, fnames in os.walk(contentdir):
label = os.path.relpath(rootdir, contentdir).replace('_', ' ')
for fname in fnames:
if fname.endswith(extension):
path = os.path.join(rootdir, fname)
data.append(_DataPoint(id=fname, image_path=path, label=label))
if len(fnames) > 0:
if label == 'Albrecht Dürer':
genre = 'Northern Renaissance'
else:
genre = label2genre[label]
text = genre.lower().replace(',', ' ').replace('"', '')
data.append(
_DataPoint(id=genre, text=text, label=label, content_type='text')
)
# build docs
with mp.Pool(processes=num_workers) as pool:
docs = list(tqdm(pool.imap(_build_doc, data)))
return DocumentArray(docs)
def create_file_to_text_map(dict_list):
file_to_text = {}
for d in dict_list:
meta = d['metadata']
file = meta['image'].split('//')[-1]
attributes = meta['attributes']
values = [d['value'] for d in attributes]
shuffle(values)
text = ' '.join(values)
file_to_text[file] = text.lower()
return file_to_text
def _build_nft(root: str, num_workers: int = 8) -> DocumentArray:
"""
Build the nft dataset.
Download the raw dataset from
https://github.com/skogard/apebase
:param root: the dataset root folder.
:param num_workers: the number of parallel workers to use.
:return: DocumentArray
"""
f_labels = os.path.join(root, 'db')
contentdir = os.path.join(root, 'ipfs')
# read artists.csv
with open(f_labels, 'r') as f:
lines = f.readlines()
dict_list = [json.loads(line) for line in lines]
file_to_text = create_file_to_text_map(dict_list)
data = []
for file, text in file_to_text.items():
data.append(_DataPoint(id=file, image_path=f'{contentdir}/{file}', label=file))
data.append(
_DataPoint(
id=file + '_text',
text=file_to_text[file],
label=file,
content_type='text',
)
)
# build docs
with mp.Pool(processes=num_workers) as pool:
docs = list(tqdm(pool.imap(_build_doc, data)))
return DocumentArray(docs)
def _build_tll(root: str, num_workers: int = 8) -> DocumentArray:
"""
Build the tll dataset.
Download the raw dataset from
https://sites.google.com/view/totally-looks-like-dataset
:param root: the dataset root folder.
:param num_workers: the number of parallel workers to use.
:return: DocumentArray
"""
def transform(d: Document):
d.load_uri_to_blob()
d.tags['content_type'] = 'image'
return d
da = DocumentArray.from_files(root + '/**')
da.apply(lambda d: transform(d))
return da
def _build_lyrics(
root: str, num_workers: int = 8, genre: str = '', max_size: int = 0
) -> DocumentArray:
"""
Builds lyrics dataset of given size and genre if specified, else the entire dataset. Download the CSV files from:
https://www.kaggle.com/datasets/neisse/scrapped-lyrics-from-6-genres
:param root: the dataset root folder.
:param num_workers: the number of parallel workers to use.
:param genre: if genre isn't empty string this will only select subset of artist with this genre
:param max_size: used to randomly subsample from dataset if greater than 0
:return: DocumentArray
"""
artists_path = os.path.join(root, 'artists-data.csv')
lyrics_path = os.path.join(root, 'lyrics-data.csv')
artists_df = pd.read_csv(artists_path).dropna()
lyrics = pd.read_csv(lyrics_path).dropna()
# select English lyrics with <= 100 sentences
lyrics = lyrics.query("language == 'en'")
lyrics['num_sentences'] = lyrics.apply(
lambda x: len(x['Lyric'].split('\n')), axis=1
)
lyrics = lyrics.query('num_sentences <= 100')
lyrics = pd.merge(lyrics, artists_df, left_on='ALink', right_on='Link')
lyrics = lyrics[lyrics['Genres'].str.contains(genre)]
if 0 < max_size:
lyrics = lyrics.sample(frac=1)
# create sentences from lyrics
data, all_sentences = [], []
for idx, row in tqdm(lyrics.iterrows()):
if 0 < max_size <= len(data):
break
row = row.to_dict()
_sentences = row.pop('Lyric').split('\n')
# filter empty, duplicate and one-word sentences and the ones containing special characters in beginning and end
_sentences = set(
filter(
lambda s: len(s) > 0
and not re.fullmatch(r"\W+[\s\w]*\W+", s)
and not re.fullmatch(r"\W", s)
and not re.fullmatch(r"\w+", s)
and not re.fullmatch(r"\w+[.]+", s)
and s not in all_sentences,
_sentences,
)
)
for _sentence in _sentences:
if 0 < max_size <= len(data):
break
all_sentences.append(_sentence)
if re.fullmatch(r".*\w", _sentence):
_sentence += "."
data.append(
_DataPoint(
text=_sentence,
content_type='text',
tags={
# 'artist': row['Artist'],
# 'artist_genres': row['Genres'],
# 'song': row['SName'],
'additional_info': [row['SName'], row['Artist']],
},
)
)
# build docs
with mp.Pool(processes=num_workers) as pool:
docs = list(tqdm(pool.imap(_build_doc, data)))
return DocumentArray(docs)
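# Hedged usage sketch (the root path is an assumption): build a small rock-only subset
# directly instead of going through the *-lyrics wrappers below:
#   docs = _build_lyrics(root='data/lyrics', genre='Rock', max_size=1000)
#   print(len(docs), docs[0].text, docs[0].tags['additional_info'])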
def _build_rock_lyrics(
root: str, num_workers: int = 8, max_size: int = 200000
) -> DocumentArray:
"""
Builds the rock lyrics dataset. Download the CSV files from:
https://www.kaggle.com/datasets/neisse/scrapped-lyrics-from-6-genres
:param root: the dataset root folder.
:param num_workers: the number of parallel workers to use.
:param max_size: used to randomly subsample from dataset if greater than 0
:return: DocumentArray
"""
return _build_lyrics(
genre='Rock',
root=root.replace('rock-lyrics', 'lyrics'),
num_workers=num_workers,
max_size=max_size,
)
def _build_pop_lyrics(
root: str, num_workers: int = 8, max_size: int = 200000
) -> DocumentArray:
"""
Builds the pop lyrics dataset. Download the CSV files from:
https://www.kaggle.com/datasets/neisse/scrapped-lyrics-from-6-genres
:param root: the dataset root folder.
:param num_workers: the number of parallel workers to use.
:param max_size: used to randomly subsample from dataset if greater than 0
:return: DocumentArray
"""
return _build_lyrics(
genre='Pop',
root=root.replace('pop-lyrics', 'lyrics'),
num_workers=num_workers,
max_size=max_size,
)
def _build_rap_lyrics(
root: str, num_workers: int = 8, max_size: int = 200000
) -> DocumentArray:
"""
Builds the rap lyrics dataset. Download the CSV files from:
https://www.kaggle.com/datasets/neisse/scrapped-lyrics-from-6-genres
:param root: the dataset root folder.
:param num_workers: the number of parallel workers to use.
:param max_size: used to randomly subsample from dataset if greater than 0
:return: DocumentArray
"""
return _build_lyrics(
genre='Rap',
root=root.replace('rap-lyrics', 'lyrics'),
num_workers=num_workers,
max_size=max_size,
)
def _build_indie_lyrics(
root: str, num_workers: int = 8, max_size: int = 200000
) -> DocumentArray:
"""
Builds the indie lyrics dataset. Download the CSV files from:
https://www.kaggle.com/datasets/neisse/scrapped-lyrics-from-6-genres
:param root: the dataset root folder.
:param num_workers: the number of parallel workers to use.
:param max_size: used to randomly subsample from dataset if greater than 0
:return: DocumentArray
"""
return _build_lyrics(
genre='Indie',
root=root.replace('indie-lyrics', 'lyrics'),
num_workers=num_workers,
max_size=max_size,
)
def _build_metal_lyrics(
root: str, num_workers: int = 8, max_size: int = 200000
) -> DocumentArray:
"""
    Builds the metal lyrics dataset. Download the CSV files from:
https://www.kaggle.com/datasets/neisse/scrapped-lyrics-from-6-genres
:param root: the dataset root folder.
:param num_workers: the number of parallel workers to use.
:param max_size: used to randomly subsample from dataset if greater than 0
:return: DocumentArray
"""
return _build_lyrics(
genre='Metal',
root=root.replace('metal-lyrics', 'lyrics'),
num_workers=num_workers,
max_size=max_size,
)
def process_dataset(
datadir: str,
name: str,
project: str,
bucket: str,
location: str,
sample_k: bool = True,
k: int = 10,
) -> None:
"""
Build, save and upload a dataset.
"""
root = f'{datadir}/{name}'
out = f'{name}.bin'
out_img10 = f'{name}.img{k}.bin'
out_txt10 = f'{name}.txt{k}.bin'
print(f'===> {name}')
print(f' Building {name} from {root} ...')
docs = globals()[f'_build_{name.replace("-", "_")}'](root)
docs = docs.shuffle(42)
image_docs = DocumentArray(
[doc for doc in docs if doc.tags['content_type'] == 'image']
)
text_docs = DocumentArray(
[doc for doc in docs if doc.tags['content_type'] == 'text']
)
print(f' Dataset size: {len(docs)}')
print(f' Num image docs: {len(image_docs)}')
print(f' Num text docs: {len(text_docs)}')
if sample_k:
print(f' Sampling {k} image and {k} text docs ...')
image_docs = image_docs[:k]
text_docs = text_docs[:k]
print(' Saving datasets ...')
docs.save_binary(out)
print(f' Saved dataset to {out}')
if sample_k:
if len(image_docs) > 0:
image_docs.save_binary(out_img10)
print(f' Saved dataset to {out_img10}')
if len(text_docs) > 0:
text_docs.save_binary(out_txt10)
print(f' Saved dataset to {out_txt10}')
print(' Uploading datasets ...')
upload_to_gcloud_bucket(project, bucket, location, out)
print(f' Uploaded dataset to gs://{bucket}/{location}/{out}')
if sample_k:
if len(image_docs) > 0:
upload_to_gcloud_bucket(project, bucket, location, out_img10)
print(f' Uploaded dataset to gs://{bucket}/{location}/{out_img10}')
if len(text_docs) > 0:
upload_to_gcloud_bucket(project, bucket, location, out_txt10)
print(f' Uploaded dataset to gs://{bucket}/{location}/{out_txt10}')
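# Illustrative call (values mirror those used in main() below):
#   process_dataset('data', 'bird-species', 'jina-simpsons-florian',
#                   'jina-fashion-data', 'data/one-line/datasets/jpeg')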
def main():
"""
Main method.
"""
localdir = 'data'
project = 'jina-simpsons-florian'
bucket = 'jina-fashion-data'
location = 'data/one-line/datasets/jpeg'
datasets = [
'tll',
'nft-monkey',
'deepfashion',
'nih-chest-xrays',
'geolocation-geoguessr',
'stanford-cars',
'bird-species',
'best-artworks',
]
for name in datasets:
process_dataset(localdir, name, project, bucket, location)
location = 'data/one-line/datasets/text'
datasets = [
'rock-lyrics',
'pop-lyrics',
'rap-lyrics',
'indie-lyrics',
'metal-lyrics',
'lyrics',
]
for name in datasets:
process_dataset(localdir, name, project, bucket, location)
if __name__ == '__main__':
main()
```
#### File: now/data_loading/migrate_datasets.py
```python
import multiprocessing as mp
from docarray import Document, DocumentArray
from tqdm import tqdm
# $ pip uninstall jina
# $ pip uninstall docarray
# $ pip install jina==2.6.4
# $ mv .venv/lib/python3.8/site-packages/docarray .venv/lib/python3.8/site-packages/old
# $ pip install docarray
from old import Document as OldDocument
from old import DocumentArray as OldDocumentArray
def _convert_doc(old: OldDocument) -> Document:
new = Document(id=old.id, mime_type=old.mime_type)
if old.tags['content_type'] == 'image':
new.uri = old.uri
new.tensor = old.blob
else:
new.text = old.text
for k, v in old.tags.items():
new.tags[k] = v
new.embedding = old.embedding
return new
def convert_dataset(dataset: str, num_workers: int = 8):
path = f'{dataset}.bin'
print(f'===> {dataset}')
print(f' Loading {dataset} dataset from {path} ...')
old_docs = OldDocumentArray.load_binary(path)
print(f' Old dataset size: {len(old_docs)}')
print(' Converting docs ...')
with mp.Pool(processes=num_workers) as pool:
new_docs = list(tqdm(pool.imap(_convert_doc, old_docs)))
new_docs = DocumentArray(new_docs)
print(f' New dataset size: {len(new_docs)}')
print(' Saving new docs ...')
out = f'new.{dataset}.bin'
new_docs.save_binary(out, compress='gzip')
print(f' Saved new docs to {out} ...')
def main():
"""
Main method.
"""
datasets = [
'deepfashion',
'deepfashion.img10',
'deepfashion.txt10',
'deepfashion.ViT-B32',
'deepfashion.ViT-B16',
'deepfashion.ViT-L14',
'nih-chest-xrays',
'nih-chest-xrays.img10',
'nih-chest-xrays.txt10',
'nih-chest-xrays.ViT-B32',
'nih-chest-xrays.ViT-B16',
'nih-chest-xrays.ViT-L14',
'geolocation-geoguessr',
'geolocation-geoguessr.img10',
'geolocation-geoguessr.txt10',
'geolocation-geoguessr.ViT-B32',
'geolocation-geoguessr.ViT-B16',
'geolocation-geoguessr.ViT-L14',
'stanford-cars',
'stanford-cars.img10',
'stanford-cars.txt10',
'stanford-cars.ViT-B32',
'stanford-cars.ViT-B16',
'stanford-cars.ViT-L14',
'bird-species',
'bird-species.img10',
'bird-species.txt10',
'bird-species.ViT-B32',
'bird-species.ViT-B16',
'bird-species.ViT-L14',
'best-artworks',
'best-artworks.img10',
'best-artworks.txt10',
'best-artworks.ViT-B32',
'best-artworks.ViT-B16',
'best-artworks.ViT-L14',
]
num_workers = 8
for dataset in datasets:
convert_dataset(dataset, num_workers)
if __name__ == '__main__':
main()
```
#### File: now/now/gke_deploy.py
```python
import json
import pathlib
from os.path import expanduser as user
import cowsay
from now.deployment.deployment import cmd
from now.dialog import maybe_prompt_user
from now.log.log import yaspin_extended
from now.utils import custom_spinner, sigmap
cur_dir = pathlib.Path(__file__).parent.resolve()
def ask_projects(options):
options = sorted(options, key=lambda x: x.lower())
questions = [
{
'type': 'list',
'name': 'project',
            'message': 'Which project do you want to use for the Kubernetes deployment?',
'choices': options,
}
]
return maybe_prompt_user(questions, 'project')
def ask_regions(options):
questions = [
{
'type': 'list',
'name': 'region',
            'message': 'Which region to choose?',
'choices': options,
'filter': lambda val: val.lower(),
}
]
return maybe_prompt_user(questions, 'region')
def ask_zones(options):
questions = [
{
'type': 'list',
'name': 'zone',
            'message': 'Which zone would you like to select?',
'choices': options,
}
]
return maybe_prompt_user(questions, 'zone')
# Google cloud authentication ->
def init_gcloud(gcloud_path):
out, _ = cmd(f'{gcloud_path} auth list')
if not out:
print('Please perform gcloud authentication to deploy Flow on GKE')
cmd(f'{gcloud_path} auth login', std_output=True)
# List the projects and present it as options to user
def get_project(gcloud_path):
project_list = []
output, _ = cmd(f'{gcloud_path} projects list --format=json')
projects = output.decode('utf-8')
projects = json.loads(projects)
for proj in projects:
project_list.append(proj['projectId'])
return ask_projects(project_list)
def get_region(gcloud_path):
regions_list = []
output, _ = cmd(f'{gcloud_path} compute regions list --format=json')
regions = output.decode('utf-8')
regions = json.loads(regions)
for region in regions:
regions_list.append(region['name'])
return ask_regions(regions_list)
def get_zone(region, gcloud_path):
zones_list = []
output, _ = cmd(f'{gcloud_path} compute zones list --format=json')
zones = output.decode('utf-8')
zones = json.loads(zones)
for zone in zones:
if region in zone['name']:
zones_list.append(zone['name'])
return ask_zones(zones_list)
def final_confirmation():
questions = [
{
'type': 'list',
'name': 'proceed',
            'message': 'Creating a cluster will incur some costs. '
'Are you sure you want to continue? '
'Prices can be checked here: '
'https://cloud.google.com/kubernetes-engine/pricing',
'choices': [
{'name': '💸🔥 yes', 'value': True},
                {'name': '⛔ no', 'value': False},
],
}
]
proceed = maybe_prompt_user(questions, 'proceed')
if not proceed:
cowsay.cow('see you soon 👋')
exit(0)
def create_gke_cluster():
gcloud_path, _ = cmd('which gcloud')
gcloud_path = gcloud_path.strip()
if not gcloud_path:
gcloud_path = user('~/.cache/jina-now/google-cloud-sdk/bin/gcloud')
else:
gcloud_path = gcloud_path.decode('utf-8')
application_name = 'jina-now'
init_gcloud(gcloud_path)
proj = get_project(gcloud_path)
cmd(f'{gcloud_path} config set project {proj}')
region = get_region(gcloud_path)
cmd(f'{gcloud_path} config set compute/region {region}')
zone = get_zone(region, gcloud_path)
cmd(f'{gcloud_path} config set compute/zone {zone}')
final_confirmation()
out, _ = cmd(f'{gcloud_path} container clusters list')
out = out.decode('utf-8')
if application_name in out and zone in out:
with yaspin_extended(
sigmap=sigmap, text='Cluster exists already', color='green'
) as spinner:
spinner.ok('✅')
else:
with yaspin_extended(
custom_spinner().weather, sigmap=sigmap, text="Create cluster"
) as spinner:
cmd(
f'/bin/bash {cur_dir}/scripts/gke_deploy.sh {application_name} {gcloud_path}'
)
spinner.ok('🌥')
if __name__ == '__main__':
create_gke_cluster()
```
#### File: now/log/log.py
```python
from yaspin.core import Yaspin
TEST = False
def yaspin_extended(*args, **kwargs):
return YaspinExtended(*args, **kwargs)
class YaspinExtended(Yaspin):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __enter__(self):
if not TEST:
return super().__enter__()
else:
return self
def __exit__(self, exc_type, exc_val, traceback):
if not TEST:
return super().__exit__(exc_type, exc_val, traceback)
```
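A minimal usage sketch for the `yaspin_extended` wrapper above (the import path assumes the `now/log/log.py` layout shown in the file header, and the spinner text is illustrative):

```python
from now.log.log import yaspin_extended  # assumed import path

# Wrap a long-running step in a spinner; when log.TEST is True the
# spinner is never started or stopped, so test output stays clean.
with yaspin_extended(text='Deploying flow', color='green') as spinner:
    # ... long-running work would happen here ...
    spinner.ok('✅')
```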
#### File: unit/bff/conftest.py
```python
import pytest
from fastapi.testclient import TestClient
from now.bff.app import build_app
data_url = 'https://storage.googleapis.com/jina-fashion-data/data/one-line/datasets/jpeg/best-artworks.img10.bin'
@pytest.fixture
def test_client():
app = build_app()
return TestClient(app)
```
#### File: unit/bff/test_app.py
```python
def test_check_liveness(test_client):
response = test_client.get('/ping')
assert response.status_code == 200
assert response.json() == 'pong!'
def test_read_root(test_client):
response = test_client.get('/')
assert response.status_code == 200
def test_get_docs(test_client):
response = test_client.get('/docs')
assert response.status_code == 200
def test_get_redoc(test_client):
response = test_client.get('/redoc')
assert response.status_code == 200
```
#### File: unit/bff/test_decorators.py
```python
import asyncio
import time
import pytest
from fastapi import HTTPException
from now.bff.decorators import api_method, async_timed, timed
def test_timed():
@timed
def monty():
"""Monty Python!"""
time.sleep(0.1)
monty()
def test_async_timed():
@async_timed
async def monty():
"""Monty Python!"""
await asyncio.sleep(0.1)
asyncio.run(monty())
def test_api_method():
@api_method
def monty():
"""Monty Python!"""
time.sleep(0.1)
monty()
def test_api_method_error():
@api_method
def monty():
"""Monty Python!"""
time.sleep(0.1)
raise HTTPException(status_code=500, detail='Unknown error')
with pytest.raises(HTTPException):
monty()
```
#### File: unit/data_loading/test_embed_dataset.py
```python
import numpy as np
from docarray import Document, DocumentArray
from now.data_loading.embed_datasets import to_jpg
def test_to_jpg():
da = DocumentArray([Document(tensor=np.zeros((200, 200, 3), dtype=np.uint8))])
to_jpg(da)
assert da
assert da[0].blob
``` |
{
"source": "jina-ai/pqlite",
"score": 2
} |
#### File: core/index/flat_index.py
```python
from typing import List, Optional
import numpy as np
from loguru import logger
from ...math import cdist, top_k
from .base import BaseIndex
class FlatIndex(BaseIndex):
def __init__(self, *args, **kwargs):
super(FlatIndex, self).__init__(*args, **kwargs)
self._data = np.zeros((self.initial_size, self.dim), dtype=self.dtype)
def search(
self, x: np.ndarray, limit: int = 10, indices: Optional[np.ndarray] = None
):
_dim = x.shape[-1]
assert (
_dim == self.dim
), f'the query embedding dimension does not match with index dimension: {_dim} vs {self.dim}'
x = x.reshape((-1, self.dim))
data = self._data[: self.size]
data_ids = np.arange(self.size)
if indices is not None:
data = self._data[indices]
data_ids = data_ids[indices]
dists = cdist(x, data, metric=self.metric.name.lower())
dists, idx = top_k(dists, limit, descending=False)
# TODO: change the shape of return
dists = dists[0]
data_ids = data_ids[idx[0]]
return dists, data_ids
def add_with_ids(self, x: np.ndarray, ids: List[int]):
for idx in ids:
if idx >= self._capacity:
self._expand_capacity()
start = self._size
end = start + len(x)
self._data[ids, :] = x
self._size = end
def _expand_capacity(self):
new_block = np.zeros((self.expand_step_size, self.dim), dtype=self.dtype)
self._data = np.concatenate((self._data, new_block), axis=0)
self._capacity += self.expand_step_size
logger.debug(
f'total storage capacity is expanded by {self.expand_step_size}',
)
def reset(self, capacity: Optional[int] = None):
super().reset(capacity=capacity)
self._data = np.zeros((self.capacity, self.dim), dtype=self.dtype)
def delete(self, ids: List[int]):
raise RuntimeError(
f'the deletion operation is not allowed for {self.__class__.__name__}!'
)
def update_with_ids(self, x: np.ndarray, ids: List[int], **kwargs):
self._data[ids, :] = x
```
#### File: pqlite/annlite/helper.py
```python
import sys
import lmdb
import numpy as np
from loguru import logger
def setup_logging(debug: bool):
"""
Setup the log formatter for AnnLite.
"""
log_level = 'INFO'
if debug:
log_level = 'DEBUG'
logger.remove()
logger.add(
sys.stdout,
colorize=True,
level=log_level,
)
def str2dtype(dtype_str: str):
if dtype_str in ['double', 'float64']:
dtype = np.float64
elif dtype_str in ['half', 'float16']:
dtype = np.float16
elif dtype_str in ['float', 'float32']:
dtype = np.float32
elif dtype_str in ['bfloat16']:
dtype = np.bfloat16
elif dtype_str in ['long', 'int64']:
dtype = np.int64
elif dtype_str in ['int', 'int32']:
dtype = np.int32
elif dtype_str in ['int16']:
dtype = np.int16
elif dtype_str in ['int8']:
dtype = np.int8
elif dtype_str in ['uint8']:
dtype = np.uint8
elif dtype_str in ['bool']:
dtype = np.bool_
else:
raise TypeError(f'Unrecognized dtype string: {dtype_str}')
return dtype
def open_lmdb(db_path: str):
return lmdb.Environment(
db_path,
map_size=int(3.436e10), # in bytes, 32G,
subdir=False,
readonly=False,
metasync=True,
sync=True,
map_async=False,
mode=493,
create=True,
readahead=True,
writemap=False,
meminit=True,
max_readers=126,
max_dbs=0, # means only one db
max_spare_txns=1,
lock=True,
)
```
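A short sketch of the helpers above in use (assuming the `annlite.helper` import path implied by the file header; the dtype strings are examples):

```python
import numpy as np

from annlite.helper import setup_logging, str2dtype  # assumed import path

setup_logging(debug=False)  # INFO-level loguru output to stdout

assert str2dtype('float32') is np.float32
assert str2dtype('int64') is np.int64

try:
    str2dtype('complex128')  # not in the mapping above
except TypeError as err:
    print(err)  # Unrecognized dtype string: complex128
```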
#### File: pqlite/annlite/math.py
```python
from typing import Tuple
import numpy as np
def cosine(x_mat: 'np.ndarray', y_mat: 'np.ndarray', eps: float = 1e-7) -> 'np.ndarray':
"""Cosine distance between each row in x_mat and each row in y_mat.
:param x_mat: np.ndarray with ndim=2
:param y_mat: np.ndarray with ndim=2
:param eps: a small jitter to avoid divide by zero
:return: np.ndarray with ndim=2
"""
return 1 - np.clip(
(np.dot(x_mat, y_mat.T) + eps)
/ (
np.outer(np.linalg.norm(x_mat, axis=1), np.linalg.norm(y_mat, axis=1)) + eps
),
-1,
1,
)
def sqeuclidean(x_mat: 'np.ndarray', y_mat: 'np.ndarray') -> 'np.ndarray':
"""Squared Euclidean distance between each row in x_mat and each row in y_mat.
:param x_mat: np.ndarray with ndim=2
:param y_mat: np.ndarray with ndim=2
:return: np.ndarray with ndim=2
"""
return (
np.sum(y_mat**2, axis=1)
+ np.sum(x_mat**2, axis=1)[:, np.newaxis]
- 2 * np.dot(x_mat, y_mat.T)
)
def euclidean(x_mat: 'np.ndarray', y_mat: 'np.ndarray') -> 'np.ndarray':
"""Euclidean distance between each row in x_mat and each row in y_mat.
:param x_mat: scipy.sparse like array with ndim=2
:param y_mat: scipy.sparse like array with ndim=2
:return: np.ndarray with ndim=2
"""
return np.sqrt(sqeuclidean(x_mat, y_mat))
def pdist(
x_mat: 'np.ndarray',
metric: str,
) -> 'np.ndarray':
"""Computes Pairwise distances between observations in n-dimensional space.
:param x_mat: Union['np.ndarray','scipy.sparse.csr_matrix', 'scipy.sparse.coo_matrix'] of ndim 2
:param metric: string describing the metric type
:return: np.ndarray of ndim 2
"""
return cdist(x_mat, x_mat, metric)
def cdist(x_mat: 'np.ndarray', y_mat: 'np.ndarray', metric: str) -> 'np.ndarray':
"""Computes the pairwise distance between each row of X and each row on Y according to `metric`.
- Let `n_x = x_mat.shape[0]`
- Let `n_y = y_mat.shape[0]`
- Returns a matrix `dist` of shape `(n_x, n_y)` with `dist[i,j] = metric(x_mat[i], y_mat[j])`.
:param x_mat: numpy or scipy array of ndim 2
:param y_mat: numpy or scipy array of ndim 2
:param metric: string describing the metric type
:return: np.ndarray of ndim 2
"""
dists = {'cosine': cosine, 'sqeuclidean': sqeuclidean, 'euclidean': euclidean}[
metric
](x_mat, y_mat)
return dists
def top_k(
values: 'np.ndarray', k: int, descending: bool = False
) -> Tuple['np.ndarray', 'np.ndarray']:
"""Finds values and indices of the k largest entries for the last dimension.
:param values: array of distances
:param k: number of values to retrieve
:param descending: find top k biggest values
:return: indices and distances
"""
if descending:
values = -values
if k >= values.shape[1]:
idx = values.argsort(axis=1)[:, :k]
values = np.take_along_axis(values, idx, axis=1)
else:
idx_ps = values.argpartition(kth=k, axis=1)[:, :k]
values = np.take_along_axis(values, idx_ps, axis=1)
idx_fs = values.argsort(axis=1)
idx = np.take_along_axis(idx_ps, idx_fs, axis=1)
values = np.take_along_axis(values, idx_fs, axis=1)
if descending:
values = -values
return values, idx
```
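A self-contained sketch showing how `cdist` and `top_k` above combine into a brute-force nearest-neighbour lookup (array sizes are arbitrary):

```python
import numpy as np

from annlite.math import cdist, top_k  # assumed import path

rng = np.random.default_rng(0)
queries = rng.random((2, 8)).astype(np.float32)     # 2 query vectors
database = rng.random((100, 8)).astype(np.float32)  # 100 stored vectors

# Pairwise distances, shape (n_queries, n_database)
dists = cdist(queries, database, metric='euclidean')

# Keep the 5 closest entries per query; descending=False selects the smallest
values, idx = top_k(dists, k=5, descending=False)
print(values.shape, idx.shape)  # (2, 5) (2, 5)
```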
#### File: annlite/storage/base.py
```python
import abc
from typing import TYPE_CHECKING, List, Optional
if TYPE_CHECKING:
import numpy as np
from ..enums import ExpandMode
class Storage(abc.ABC):
def __init__(
self,
initial_size: Optional[int] = None,
expand_step_size: int = 10240,
expand_mode: ExpandMode = ExpandMode.ADAPTIVE,
):
if initial_size is None:
initial_size = expand_step_size
assert initial_size >= 0
assert expand_step_size > 0
self.initial_size = initial_size
self.expand_step_size = expand_step_size
self.expand_mode = expand_mode
@property
@abc.abstractmethod
def capacity(self) -> int:
...
@property
@abc.abstractmethod
def size(self):
...
@abc.abstractmethod
def clean(self):
...
@abc.abstractmethod
def add(
self,
data: 'np.ndarray',
cells: 'np.ndarray',
ids: List[str],
doc_tags: Optional[List[dict]] = None,
):
...
@abc.abstractmethod
def delete(self, ids: List[str]):
...
@abc.abstractmethod
def update(
self,
data: 'np.ndarray',
cells: 'np.ndarray',
ids: List[str],
doc_tags: Optional[List[dict]] = None,
):
...
```
#### File: annlite/storage/kv.py
```python
import shutil
from pathlib import Path
from typing import Dict, List, Union
import lmdb
from docarray import Document, DocumentArray
LMDB_MAP_SIZE = 100 * 1024 * 1024 * 1024
class DocStorage:
"""The backend storage engine of Documents"""
def __init__(
self, path: Union[str, Path], serialize_config: Dict = {}, lock: bool = True
):
self._path = path
self._env = self._open(path, lock=lock)
self._serialize_config = serialize_config
def _open(self, db_path: Union[str, Path], lock: bool = True):
return lmdb.Environment(
str(self._path),
map_size=LMDB_MAP_SIZE,
subdir=True,
readonly=False,
metasync=True,
sync=True,
map_async=False,
mode=493,
create=True,
readahead=True,
writemap=False,
meminit=True,
max_readers=126,
max_dbs=0, # means only one db
max_spare_txns=1,
lock=lock,
)
def insert(self, docs: 'DocumentArray'):
with self._env.begin(write=True) as txn:
for doc in docs:
success = txn.put(
doc.id.encode(),
doc.to_bytes(**self._serialize_config),
overwrite=True,
)
if not success:
txn.abort()
raise ValueError(
f'The Doc ({doc.id}) has already been added into database!'
)
def update(self, docs: 'DocumentArray'):
with self._env.begin(write=True) as txn:
for doc in docs:
old_value = txn.replace(
doc.id.encode(), doc.to_bytes(**self._serialize_config)
)
if not old_value:
txn.abort()
raise ValueError(f'The Doc ({doc.id}) does not exist in database!')
def delete(self, doc_ids: List[str]):
with self._env.begin(write=True) as txn:
for doc_id in doc_ids:
txn.delete(doc_id.encode())
def get(self, doc_ids: Union[str, list]) -> DocumentArray:
docs = DocumentArray()
if isinstance(doc_ids, str):
doc_ids = [doc_ids]
with self._env.begin(write=False) as txn:
for doc_id in doc_ids:
buffer = txn.get(doc_id.encode())
if buffer:
doc = Document.from_bytes(buffer, **self._serialize_config)
docs.append(doc)
return docs
def clear(self):
self._env.close()
shutil.rmtree(self._path)
self._env = self._open(self._path)
def close(self):
self._env.close()
@property
def stat(self):
with self._env.begin(write=False) as txn:
return txn.stat()
@property
def size(self):
return self.stat['entries']
def batched_iterator(self, batch_size: int = 1, **kwargs):
with self._env.begin(write=False) as txn:
count = 0
docs = DocumentArray()
cursor = txn.cursor()
cursor.iternext()
iterator = cursor.iternext(keys=False, values=True)
for value in iterator:
doc = Document.from_bytes(value, **self._serialize_config)
docs.append(doc)
count += 1
if count == batch_size:
yield docs
count = 0
docs = DocumentArray()
if count > 0:
yield docs
```
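A hedged end-to-end sketch of the `DocStorage` API above; the path and document contents are made up, and `docarray` plus `lmdb` must be installed:

```python
import numpy as np
from docarray import Document, DocumentArray

from annlite.storage.kv import DocStorage  # assumed import path

storage = DocStorage('/tmp/example_doc_store')

# Insert two documents keyed by their ids
storage.insert(
    DocumentArray(
        [Document(id=f'doc{i}', embedding=np.ones(4) * i) for i in range(2)]
    )
)

print(storage.get('doc1')[0].id)    # doc1
print(len(storage.get('missing')))  # 0 -- unknown ids are skipped

# Overwrite an existing document, then stream everything back in batches
storage.update(DocumentArray([Document(id='doc0', embedding=np.zeros(4))]))
for batch in storage.batched_iterator(batch_size=1):
    print(batch[0].id)

storage.delete(['doc0'])
print(storage.size)  # 1
storage.close()
```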
#### File: annlite/storage/table.py
```python
import datetime
import sqlite3
import threading
from pathlib import Path
from typing import TYPE_CHECKING, Any, List, Optional, Tuple, Union
import numpy as np
if TYPE_CHECKING:
from docarray import DocumentArray
sqlite3.register_adapter(np.int64, lambda x: int(x))
sqlite3.register_adapter(np.int32, lambda x: int(x))
COLUMN_TYPE_MAPPING = {
float: 'FLOAT',
int: 'INTEGER',
bool: 'INTEGER',
str: 'TEXT',
bytes.__class__: 'BLOB',
bytes: 'BLOB',
memoryview: 'BLOB',
datetime.datetime: 'TEXT',
datetime.date: 'TEXT',
datetime.time: 'TEXT',
None.__class__: 'TEXT',
# SQLite explicit types
'TEXT': 'TEXT',
'INTEGER': 'INTEGER',
'FLOAT': 'FLOAT',
'BLOB': 'BLOB',
'text': 'TEXT',
'integer': 'INTEGER',
'float': 'FLOAT',
'blob': 'BLOB',
}
# If numpy is available, add more types
if np:
COLUMN_TYPE_MAPPING.update(
{
np.int8: 'INTEGER',
np.int16: 'INTEGER',
np.int32: 'INTEGER',
np.int64: 'INTEGER',
np.uint8: 'INTEGER',
np.uint16: 'INTEGER',
np.uint32: 'INTEGER',
np.uint64: 'INTEGER',
np.float16: 'FLOAT',
np.float32: 'FLOAT',
np.float64: 'FLOAT',
}
)
def _converting(value: Any) -> str:
if isinstance(value, bool):
if value:
return 1
else:
return 0
return str(value)
def _get_table_names(
conn: 'sqlite3.Connection', fts4: bool = False, fts5: bool = False
) -> List[str]:
"""A list of string table names in this database."""
where = ["type = 'table'"]
if fts4:
where.append("sql like '%USING FTS4%'")
if fts5:
where.append("sql like '%USING FTS5%'")
sql = 'select name from sqlite_master where {}'.format(' AND '.join(where))
return [r[0] for r in conn.execute(sql).fetchall()]
class Table:
def __init__(
self,
name: str,
data_path: Optional[Union[Path, str]] = None,
in_memory: bool = True,
):
if in_memory:
self._conn_name = ':memory:'
else:
if isinstance(data_path, str):
data_path = Path(data_path)
self._conn_name = data_path / f'{name}.db'
self._name = name
self._conn = sqlite3.connect(self._conn_name, check_same_thread=False)
self._conn_lock = threading.Lock()
def execute(self, sql: str, commit: bool = True):
self._conn.execute(sql)
if commit:
self.commit()
def execute_many(self, sql: str, parameters: List[Tuple], commit: bool = True):
self._conn.executemany(sql, parameters)
if commit:
self.commit()
def commit(self):
self._conn.commit()
def create_table(self):
...
def drop_table(self):
self._conn.execute(f'DROP table {self.name}')
self._conn.commit()
def clear(self):
"""Drop the table and create a new one"""
self.drop_table()
self.create_table()
@property
def name(self):
return self._name
@property
def schema(self):
"""SQL schema for this database"""
result = []
for row in self._conn.execute(
f'''PRAGMA table_info("{self.name}")'''
).fetchall():
result.append(', '.join([str(_) for _ in row]))
return '\n'.join(result)
class CellTable(Table):
def __init__(
self,
name: str,
columns: Optional[List[tuple]] = None,
in_memory: bool = True,
data_path: Optional[Path] = None,
lazy_create: bool = False,
):
super().__init__(name, data_path=data_path, in_memory=in_memory)
self._columns = []
self._indexed_keys = set()
if columns is not None:
for name, dtype in columns:
self.add_column(name, dtype, True)
if not lazy_create:
self.create_table()
@property
def columns(self) -> List[str]:
return ['_id', '_doc_id'] + [c.split()[0] for c in self._columns]
def existed(self):
return self.name in _get_table_names(self._conn)
def add_column(self, name: str, dtype: str, create_index: bool = True):
self._columns.append(f'{name} {COLUMN_TYPE_MAPPING[dtype]}')
if create_index:
self._indexed_keys.add(name)
def create_index(self, column: str, commit: bool = True):
sql_statement = f'''CREATE INDEX idx_{column}_
ON {self.name}(_deleted, {column})'''
self._conn.execute(sql_statement)
if commit:
self._conn.commit()
def create_table(self):
sql = f'''CREATE TABLE {self.name}
(_id INTEGER PRIMARY KEY AUTOINCREMENT,
_doc_id TEXT NOT NULL UNIQUE,
_deleted NUMERIC DEFAULT 0'''
if len(self._columns) > 0:
sql += ', ' + ', '.join(self._columns)
sql += ')'
self._conn.execute(sql)
sql_statement = f'''CREATE INDEX idx__delete_
ON {self.name}(_deleted)'''
self._conn.execute(sql_statement)
for name in self._indexed_keys:
self.create_index(name, commit=False)
self._conn.commit()
def insert(
self,
docs: 'DocumentArray',
commit: bool = True,
) -> List[int]:
"""Add a single record into the table.
:param docs: The list of dict docs
:param commit: If set, commit is applied
"""
sql_template = 'INSERT INTO {table}({columns}) VALUES ({placeholders});'
column_names = self.columns[1:]
columns = ', '.join(column_names)
placeholders = ', '.join('?' for c in column_names)
sql = sql_template.format(
table=self.name, columns=columns, placeholders=placeholders
)
values = []
docs_size = 0
for doc in docs:
doc_value = tuple(
[doc.id]
+ [
_converting(doc.tags[c]) if c in doc.tags else None
for c in self.columns[2:]
]
)
values.append(doc_value)
docs_size += 1
with self._conn_lock:
cursor = self._conn.cursor()
if docs_size > 1:
cursor.executemany(sql, values[:-1])
cursor.execute(sql, values[-1])
last_row_id = cursor.lastrowid
row_ids = list(range(last_row_id - len(docs), last_row_id))
if commit:
self._conn.commit()
return row_ids
def query(
self,
where_clause: str = '',
where_params: Tuple = (),
) -> List[int]:
"""Query the records which matches the given conditions
:param where_clause: where clause for query
:param where_params: where parameters for query
:return: offsets list of matched docs
"""
sql = 'SELECT _id from {table} WHERE {where} ORDER BY _id ASC;'
where_conds = ['_deleted = ?']
if where_clause:
where_conds.append(where_clause)
where = ' and '.join(where_conds)
sql = sql.format(table=self.name, where=where)
params = (0,) + tuple([_converting(p) for p in where_params])
# # EXPLAIN SQL query
# for row in self._conn.execute('EXPLAIN QUERY PLAN ' + sql, params):
# print(row)
# Use `row_factor`
# https://docs.python.org/3.6/library/sqlite3.html#sqlite3.Connection.row_factory
def _offset_factory(_, record):
return record[0] - 1
self._conn.row_factory = _offset_factory
cursor = self._conn.cursor()
offsets = cursor.execute(sql, params).fetchall()
self._conn.row_factory = None
return offsets if offsets else []
def delete(self, doc_ids: List[str]):
"""Delete the docs
:param doc_ids: The IDs of docs
"""
sql = f'UPDATE {self.name} SET _deleted = 1 WHERE _doc_id = ?'
self._conn.executemany(sql, doc_ids)
self._conn.commit()
def get_docid_by_offset(self, offset: int):
sql = f'SELECT _doc_id from {self.name} WHERE _id = ? and _deleted = 0 LIMIT 1;'
result = self._conn.execute(sql, (offset + 1,)).fetchone()
if result:
return result[0]
return None
def delete_by_offset(self, offset: int):
"""Delete the doc with specific offset
:param offset: The offset of the doc
"""
sql = f'UPDATE {self.name} SET _deleted = 1 WHERE _id = ?'
self._conn.execute(sql, (offset + 1,))
self._conn.commit()
def undo_delete_by_offset(self, offset: int):
sql = f'UPDATE {self.name} SET _deleted = 0 WHERE _id = ?'
self._conn.execute(sql, (offset + 1,))
self._conn.commit()
def exist(self, doc_id: str):
sql = f'SELECT count(*) from {self.name} WHERE _deleted = 0 and _doc_id = ?;'
return self._conn.execute(sql, (doc_id,)).fetchone()[0] > 0
def count(self, where_clause: str = '', where_params: Tuple = ()):
"""Return the total number of records which match with the given conditions.
:param where_clause: where clause for query
:param where_params: where parameters for query
:return: the total number of matched records
"""
if where_clause:
sql = 'SELECT count(_id) from {table} WHERE {where} LIMIT 1;'
where = f'_deleted = ? and {where_clause}'
sql = sql.format(table=self.name, where=where)
params = (0,) + tuple([_converting(p) for p in where_params])
# # EXPLAIN SQL query
# for row in self._conn.execute('EXPLAIN QUERY PLAN ' + sql, params):
# print(row)
return self._conn.execute(sql, params).fetchone()[0]
else:
sql = f'SELECT MAX(_id) from {self.name} LIMIT 1;'
result = self._conn.execute(sql).fetchone()
if result[0]:
return result[0] - self.deleted_count()
return 0
def deleted_count(self):
"""Return the total number of record what is marked as soft-deleted."""
sql = f'SELECT count(_id) from {self.name} WHERE _deleted = 1 LIMIT 1'
return self._conn.execute(sql).fetchone()[0]
@property
def size(self):
return self.count()
class MetaTable(Table):
def __init__(
self,
name: str = 'meta',
data_path: Optional[Path] = None,
in_memory: bool = False,
):
super().__init__(name, data_path=data_path, in_memory=in_memory)
self.create_table()
def create_table(self):
sql = f'''CREATE TABLE {self.name}
(_doc_id TEXT NOT NULL PRIMARY KEY,
cell_id INTEGER NOT NULL,
offset INTEGER NOT NULL)'''
self._conn.execute(sql)
self._conn.commit()
def get_address(self, doc_id: str):
sql = f'SELECT cell_id, offset from {self.name} WHERE _doc_id = ?;'
cursor = self._conn.execute(sql, (doc_id,))
row = cursor.fetchone()
return (row[0], row[1]) if row else (None, None)
def add_address(self, doc_id: str, cell_id: int, offset: int, commit: bool = True):
sql = f'INSERT OR REPLACE INTO {self.name}(_doc_id, cell_id, offset) VALUES (?, ?, ?);'
self._conn.execute(
sql,
(
doc_id,
cell_id,
offset,
),
)
if commit:
self._conn.commit()
def bulk_add_address(
self,
doc_ids: List[str],
cell_ids: Union[List[int], np.ndarray],
offsets: Union[List[int], np.ndarray],
commit: bool = True,
):
sql = f'INSERT OR REPLACE INTO {self.name}(_doc_id, cell_id, offset) VALUES (?, ?, ?);'
self._conn.executemany(
sql,
[
(doc_id, cell_id, offset)
for doc_id, cell_id, offset in zip(doc_ids, cell_ids, offsets)
],
)
if commit:
self._conn.commit()
```
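A rough sketch of the `CellTable` and `MetaTable` classes above used in isolation; the column name, tag values and ids are invented for illustration:

```python
from docarray import Document, DocumentArray

from annlite.storage.table import CellTable, MetaTable  # assumed import path

# In-memory cell table with one filterable (and indexed) column
table = CellTable('cell_0', columns=[('price', float)])

docs = DocumentArray(
    [Document(id=f'doc{i}', tags={'price': float(i)}) for i in range(5)]
)
print(table.insert(docs))  # [0, 1, 2, 3, 4] -- offsets of the new rows

# Filter on the indexed column; '?' placeholders come from where_params
print(table.query(where_clause='price < ?', where_params=(3,)))  # [0, 1, 2]

table.delete_by_offset(0)
print(table.count())  # 4

# Meta table mapping doc ids to (cell_id, offset) addresses
meta = MetaTable(in_memory=True)
meta.add_address('doc1', cell_id=0, offset=1)
print(meta.get_address('doc1'))  # (0, 1)
```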
#### File: pqlite/examples/pq_benchmark.py
```python
import time
from datetime import date
import numpy as np
import pandas as pd
from docarray import Document, DocumentArray
from sklearn.datasets import make_blobs
from sklearn.model_selection import train_test_split
from utils import evaluate
from annlite import AnnLite
from annlite.math import cdist
from annlite.math import top_k as _top_k
# N = 100_000 # number of data points
Nt = 100_020
Nq = 1
D = 128  # dimensionality / number of features
top_k = 10
n_cells = 64
n_subvectors = 64
n_queries = 1000
# ~100,000 128-dim vectors for training, 20 held out as queries
np.random.seed(123)
Xtr, Xte = train_test_split(
make_blobs(n_samples=Nt, n_features=D)[0].astype(np.float32), test_size=20
)
print(f'Xtr: {Xtr.shape} vs Xte: {Xte.shape}')
def get_documents(nr=10, index_start=0, embeddings=None):
for i in range(index_start, nr + index_start):
d = Document()
d.id = f'{i}' # to test it supports non-int ids
d.embedding = embeddings[i - index_start]
yield d
precision_per_query = []
recall_per_query = []
results = []
for n_cells in [1, 4, 8]:
for n_subvectors in [64, 128]:
pq = AnnLite(
dim=D, metric='euclidean', n_cells=n_cells, n_subvectors=n_subvectors
)
t0 = time.time()
pq.train(Xtr[:20480])
train_time = abs(time.time() - t0)
t0 = time.time()
pq.index(DocumentArray(get_documents(len(Xtr), embeddings=Xtr)))
index_time = abs(t0 - time.time())
dists = cdist(Xte, Xtr, metric='euclidean')
true_dists, true_ids = _top_k(dists, top_k, descending=False)
docs = DocumentArray(get_documents(len(Xte), embeddings=Xte))
t0 = time.time()
pq.search(docs, limit=top_k)
query_time = abs(t0 - time.time())
pq_ids = []
for doc in docs:
pq_ids.append([m.id for m in doc.matches])
recall, precision = evaluate(pq_ids, true_ids, top_k)
results_dict = {
'precision': precision,
'recall': recall,
'train_time': train_time,
'index_time': index_time,
'query_time': query_time,
'query_qps': len(Xte) / query_time,
'index_qps': len(Xtr) / index_time,
'indexer_hyperparams': {'n_cells': n_cells, 'n_subvectors': n_subvectors},
}
print(results_dict)
results.append(results_dict)
pq.clear()
pq.close()
today = date.today()
results_df = pd.DataFrame(results)
results_df.sort_values('recall', ascending=False)
results_df.to_csv(f'bench-results-{today.strftime("%b-%d-%Y")}.csv')
```
#### File: pqlite/tests/test_pq_bind.py
```python
import numpy as np
import pytest
from annlite.core.codec.pq import PQCodec
from annlite.pq_bind import dist_pqcodes_to_codebooks, precompute_adc_table
n_examples = 2000
n_features = 128
n_queries = 5
n_cells = 10
n_clusters = 256
n_subvectors = 32
d_subvector = int(n_features / n_subvectors)
top_k = 100
@pytest.fixture
def build_data():
Xt = np.random.random((n_examples, n_features)).astype(np.float32)
return Xt
@pytest.fixture
def build_pq_codec(build_data):
Xt = build_data
pq_codec = PQCodec(dim=n_features, n_subvectors=n_subvectors, n_clusters=n_clusters)
pq_codec.fit(Xt)
return pq_codec
def test_pq_adc_table_shape(build_pq_codec):
pq_codec = build_pq_codec
assert pq_codec.codebooks.shape == (n_subvectors, n_clusters, d_subvector)
def test_pq_adc_table_computation(build_data):
def numpy_adc_table(query, n_subvectors, n_clusters, d_subvector, codebooks):
dtable = np.empty((n_subvectors, n_clusters), dtype=np.float32)
for m in range(n_subvectors):
query_sub = query[m * d_subvector : (m + 1) * d_subvector]
dtable[m, :] = np.linalg.norm(codebooks[m] - query_sub, axis=1) ** 2
return dtable
query = build_data[0]
codebooks = np.random.random((n_subvectors, n_clusters, d_subvector)).astype(
np.float32
)
np_distance_table = numpy_adc_table(
query, n_subvectors, n_clusters, d_subvector, codebooks
)
distance_table_cy = precompute_adc_table(query, d_subvector, n_clusters, codebooks)
np_distance_table_cy = np.asarray(distance_table_cy)
np.testing.assert_array_almost_equal(
np_distance_table, np_distance_table_cy, decimal=5
)
def test_pq_adc_table_computation_interface(build_pq_codec, build_data):
pq_codec = build_pq_codec
query = build_data[0]
np_distance_table = pq_codec.precompute_adc(query).dtable
distance_table_cy = precompute_adc_table(
query, pq_codec.d_subvector, pq_codec.n_clusters, pq_codec.codebooks
)
np_distance_table_cy = np.asarray(distance_table_cy)
np.testing.assert_array_almost_equal(
np_distance_table, np_distance_table_cy, decimal=5
)
```
#### File: pqlite/tests/test_store.py
```python
import pytest
from annlite.storage.kv import DocStorage
def test_get(tmpdir, docs):
storage = DocStorage(tmpdir + 'test_doc_store')
storage.insert(docs)
doc = storage.get('doc1')[0]
assert doc.id == 'doc1'
assert (doc.embedding == [1, 0, 0, 0]).all()
docs = storage.get('doc7')
assert len(docs) == 0
def test_update(tmpdir, docs, update_docs):
storage = DocStorage(tmpdir + 'test_doc_store')
storage.insert(docs)
storage.update(update_docs)
doc = storage.get('doc1')[0]
assert (doc.embedding == [0, 0, 0, 1]).all()
def test_delete(tmpdir, docs):
storage = DocStorage(tmpdir + 'test_doc_store')
storage.insert(docs)
storage.delete(['doc1'])
docs = storage.get('doc1')
assert len(docs) == 0
def test_clear(tmpdir, docs):
storage = DocStorage(tmpdir + 'test_doc_store')
storage.insert(docs)
assert storage.size == 6
storage.clear()
assert storage.size == 0
def test_batched_iterator(tmpdir, docs):
storage = DocStorage(tmpdir + 'test_doc_store')
storage.insert(docs)
for docs in storage.batched_iterator(batch_size=3):
assert len(docs) == 3
@pytest.mark.parametrize('protocol', ['pickle', 'protobuf'])
def test_searalize(protocol, tmpdir, docs):
storage = DocStorage(
tmpdir + 'test_doc_store', serialize_config={'protocol': protocol}
)
storage.insert(docs)
doc = storage.get('doc1')[0]
assert doc.id == 'doc1'
assert (doc.embedding == [1, 0, 0, 0]).all()
docs = storage.get('doc7')
assert len(docs) == 0
``` |
{
"source": "jina-ai/stress-test",
"score": 2
} |
#### File: benchmark/aws/ssm.py
```python
import sys
import boto3
import botocore
from logger import get_logger
from client import AWSClientWrapper
from helper import TimeContext, waiter
from excepts import SSMDocumentCreationFailed, SSMDocumentDeletionFailed
from enums import SSMCreationStatus, SSMAssociationStatus, SSMDeletionStatus, \
SSMCreationTime, SSMAssociationTime, SSMDeletionTime
class SSMDocument:
def __init__(self, name, template, plugin='runStressTest'):
self.logger = get_logger(self.__class__.__name__)
self._client_wrapper = AWSClientWrapper(service='ssm')
self._client = self._client_wrapper.client
self._name = name
self._template = template
self._plugin = plugin
def __enter__(self):
self.logger.info(f'Entering SSMDocument context. Creating the document with name `{self._name}`')
self.create()
return self
def create(self):
try:
response = self._client.create_document(Content=self._template,
Name=self._name,
DocumentType='Command',
DocumentFormat='YAML')
self._status = response['DocumentDescription']['Status']
self.logger.info(f'Calling waiter for `Document creation`!')
self._is_created = waiter(func=self._describe_document,
logger=self.logger,
success_status=SSMCreationStatus.SUCCESS.value,
wait_status=SSMCreationStatus.WAIT.value,
failure_status=SSMCreationStatus.FAILURE.value,
time_to_wait=SSMCreationTime.TIMEOUT.value,
time_to_sleep=SSMCreationTime.SLEEP.value)
except botocore.exceptions.ClientError as exp:
raise SSMDocumentCreationFailed(f'Document creation failed with folliwng exception. Exiting! \n{exp}')
except (self._client.exceptions.InvalidDocument,
self._client.exceptions.InvalidDocumentSchemaVersion) as exp:
raise SSMDocumentCreationFailed(f'Document schema is not correct! Please check AWS Docs. Exiting! \n{exp}')
except Exception as exp:
raise SSMDocumentCreationFailed(f'Document creation failed with following exception. Exiting! \n{exp}')
def _describe_document(self):
try:
response = self._client.describe_document(Name=self._name)
self._status = response['Document']['Status']
except (self._client.exceptions.InvalidDocument,
self._client.exceptions.InvalidDocumentSchemaVersion) as exp:
self._status = 'Deleted'
except Exception as exp:
self._status = 'Deleted'
self.logger.error(f'Got the following exception {exp}')
return self._status
@property
def name(self):
return self._name
@property
def status(self):
return self._status
@property
def is_created(self):
if hasattr(self, '_is_created'):
return self._is_created
@property
def association_status(self):
if hasattr(self, '_association_status'):
return self._association_status
@property
def association_id(self):
if hasattr(self, '_association_id'):
return self._association_id
@property
def is_associated(self):
if hasattr(self, '_is_associated'):
return self._is_associated
def associate(self, instance_id):
try:
self.logger.info(f'Associating document with instance `{instance_id}`')
response = self._client.create_association(
Name=self._name,
Targets=[{'Key': 'InstanceIds',
'Values': [instance_id]}]
)
self._association_id = response['AssociationDescription']['AssociationId']
self._association_status = response['AssociationDescription']['Overview']['Status']
self.logger.info(f'Calling waiter for `Document association`!')
self._is_associated = waiter(func=self._describe_association,
logger=self.logger,
success_status=SSMAssociationStatus.SUCCESS.value,
wait_status=SSMAssociationStatus.WAIT.value,
failure_status=SSMAssociationStatus.FAILURE.value,
time_to_wait=SSMAssociationTime.TIMEOUT.value,
time_to_sleep=SSMAssociationTime.SLEEP.value,
instance_id=instance_id)
except botocore.exceptions.ParamValidationError:
self.logger.error('Invalid parameters. Please check AWS Documents')
except Exception as exp:
self.logger.exception(f'Got the following error while associating doc with ec2 instance {exp}')
def _describe_association(self, instance_id):
try:
response = self._client.describe_association(AssociationId=self._association_id)
self._association_status = response['AssociationDescription']['Overview']['Status']
return self._association_status
except self._client.exceptions.InvalidDocument:
self.logger.exception(f'Document we are invoking is invalid!')
except Exception as exp:
self.logger.exception(f'Got the following error while triggering describe_association {exp}')
def _delete_association(self, instance_id):
try:
self.logger.info(f'Deleting document association')
self._client.delete_association(AssociationId=self._association_id)
except self._client.exceptions.InvalidDocument:
self.logger.error(f'Got delete association reqest for an invalid doc')
except Exception as exp:
self.logger.error(f'Got the following error while triggering describe_association {exp}')
def run(self, instance_id, s3_bucket_name, s3_key_prefix='blah'):
try:
self.logger.info(f'Triggering send_command!')
response = self._client.send_command(
DocumentName=self._name,
Targets=[{'Key': 'InstanceIds',
'Values': [instance_id]}],
TimeoutSeconds=14400,
OutputS3BucketName=s3_bucket_name,
OutputS3KeyPrefix=s3_key_prefix
)
self._command_id = response['Command']['CommandId']
self.logger.info(f'Got command id `{self._command_id}`')
self._command_status = {}
self.wait(waiter_name='command_executed',
instance_id=instance_id)
self._command_response = self._client.get_command_invocation(
CommandId=self._command_id,
InstanceId=instance_id,
PluginName=self._plugin
)
except self._client.exceptions.InvalidInstanceId as exp:
self.logger.exception(f'Got InvalidInstanceId error\n{exp}')
except Exception as exp:
self.logger.exception(f'Got the following error while triggering send_command {exp}')
def _list_command_invocations(self, instance_id):
try:
self.logger.info(f'Getting overall command invocations to fetch all plugins!')
response = self._client.list_command_invocations(
CommandId=self._command_id,
InstanceId=instance_id,
Details=True
)
self._command_invocation = response['CommandInvocations'][0]
self._command_plugins = self._command_invocation['CommandPlugins']
except Exception as exp:
self.logger.exception(f'Got the following error while triggering send_command {exp}')
@property
def command_status(self):
if hasattr(self, '_command_response'):
return self._command_response['Status']
@property
def command_s3_stdout(self):
if hasattr(self, '_command_response'):
return self._command_response['StandardOutputUrl']
@property
def command_s3_stderr(self):
if hasattr(self, '_command_response'):
return self._command_response['StandardErrorUrl']
@property
def command_start_time(self):
if hasattr(self, '_command_response'):
return self._command_response['ExecutionStartDateTime']
@property
def command_end_time(self):
if hasattr(self, '_command_response'):
return self._command_response['ExecutionEndDateTime']
def wait(self, waiter_name, instance_id):
self._client_wrapper.waiter = waiter_name
with TimeContext(f'Waiting for `{waiter_name}`'):
try:
self._client_wrapper.waiter.wait(CommandId=self._command_id,
InstanceId=instance_id,
PluginName=self._plugin)
except botocore.exceptions.WaiterError as exp:
self.logger.error(f'Operation failed after waiting! \n{exp}')
@property
def is_deleted(self):
if hasattr(self, '_is_deleted'):
return self._is_deleted
def delete(self):
try:
self._client.delete_document(Name=self._name)
self.logger.info(f'Calling waiter for `Document deletion`!')
self._is_deleted = waiter(func=self._describe_document,
logger=self.logger,
success_status=SSMDeletionStatus.SUCCESS.value,
wait_status=SSMDeletionStatus.WAIT.value,
failure_status=SSMDeletionStatus.FAILURE.value,
time_to_wait=SSMDeletionTime.TIMEOUT.value,
time_to_sleep=SSMDeletionTime.SLEEP.value)
except botocore.exceptions.ClientError as exp:
self.logger.error(f'Got the following error while triggering `delete_stack` {exp}')
except self._client.exceptions.InvalidDocument:
self.logger.error(f'Got an invalid document to delete. Please recheck')
except Exception as exp:
self.logger.exception(f'Got the following error while triggering `delete_stack` {exp}')
def __exit__(self, exc_type, exc_val, exc_tb):
try:
self.logger.info(f'Exiting SSMDocument context. Deleting the document with name `{self._name}`')
self.delete()
except Exception as exp:
self.logger.error('Please make sure document gets deleted by checking the AWS console..')
raise SSMDocumentDeletionFailed(f'Document deletion failed with exception {exp}')
```
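A hedged sketch of how the `SSMDocument` context manager above might be driven; the document name, YAML body, instance id and bucket are placeholders, and a real SSM-managed EC2 instance plus AWS credentials would be required:

```python
from ssm import SSMDocument  # import path depends on the benchmark package layout

# Placeholder SSM command document whose step name matches the default
# plugin name ('runStressTest') expected by SSMDocument.run().
TEMPLATE = """
schemaVersion: '2.2'
description: Run the stress test
mainSteps:
  - action: aws:runShellScript
    name: runStressTest
    inputs:
      runCommand:
        - echo "running stress test"
"""

with SSMDocument(name='stress-test-doc', template=TEMPLATE) as document:
    document.associate(instance_id='i-0123456789abcdef0')  # placeholder id
    if document.is_associated:
        document.run(
            instance_id='i-0123456789abcdef0',
            s3_bucket_name='my-stress-test-logs',           # placeholder bucket
            s3_key_prefix='run-001',
        )
        print(document.command_status, document.command_s3_stdout)
# Leaving the context deletes the SSM document again.
```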
#### File: stress-test/benchmark/post_process.py
```python
import os
import uuid
from typing import Dict
import pandas as pd
import matplotlib.pyplot as plt
COLUMN_OF_INTEREST = 'g:send' # to be changed to 'roundtrip' once issue is fixed
def clean_dataframe(file_path: str = 'routes.parquet') -> pd.DataFrame:
routes_df = pd.read_parquet(file_path)
# Dropping the last batch as end_time is defaulting
routes_df.drop(routes_df.index[-1], inplace=True)
columns_to_be_combined = {}
for c in routes_df.columns:
if c.endswith('-head') or c.endswith('-tail'):
new_c_name = c.replace('-head', '').replace('-tail', '')
c_combined = [_ for _ in routes_df.columns
if new_c_name in _ and not _.endswith('-head') and not _.endswith('-tail')]
try:
columns_to_be_combined[new_c_name].extend(c_combined)
except KeyError:
columns_to_be_combined[new_c_name] = []
for k, v in columns_to_be_combined.items():
routes_df[k] = routes_df[v[0]]
del routes_df[v[0]]
for i in range(1, len(v)):
routes_df[k] = routes_df[k].fillna(routes_df[v[i]])
del routes_df[v[i]]
for c in routes_df.columns:
routes_df[[f'{c}_start_time', f'{c}_end_time']] = \
pd.DataFrame(routes_df[c].tolist(), index= routes_df.index)
routes_df.drop(columns=c, inplace=True)
return routes_df
def evaluate_times(routes_df, num_docs, pod_names):
""" Evaluates different timestamps from the dataframe """
if 'gateway' in pod_names: pod_names.remove('gateway')
existing_cols = routes_df.columns
for i in range(len(pod_names) + 1):
if i == 0:
# print(f'gateway->{pod_names[i]}:send = {pod_names[i]}_start_time - gateway_start_time')
routes_df[f'gateway->{pod_names[i]}:send'] = routes_df[f'{pod_names[i]}_start_time'] - routes_df['gateway_start_time']
elif i == len(pod_names):
## This needs fix as routes_df['gateway_end_time'] are None (hence defaulting)
# print(f'{pod_names[i-1]}->gateway:send = gateway_end_time - {pod_names[i-1]}_start_time')
# routes_df[f'{pod_names[i-1]}->gateway:send'] = routes_df['gateway_end_time'] - routes_df[f'{pod_names[i-1]}_start_time']
continue
else:
# print(f'{pod_names[i-1]}->{pod_names[i]}:send = {pod_names[i]}_start_time - {pod_names[i-1]}_start_time')
routes_df[f'{pod_names[i-1]}->{pod_names[i]}:send'] = routes_df[f'{pod_names[i]}_start_time'] - \
routes_df[f'{pod_names[i-1]}_start_time']
## This needs fix as routes_df['gateway_end_time'] & routes_df['pod1_end_time'] are None (hence defaulting)
# routes_df['roundtrip'] = routes_df['gateway_end_time'] - routes_df['gateway_start_time']
columns_for_send = [c + '_start_time' for c in reversed(pod_names)] + ['gateway_start_time']
for i in range(len(columns_for_send)-1):
current_send = routes_df[columns_for_send[i]] - routes_df[columns_for_send[i+1]]
if i == 0:
total_send = current_send
else:
total_send += current_send
routes_df['g:send'] = total_send
columns_for_recv = [c + '_end_time' for c in reversed(pod_names)] # + ['gateway_end_time']
for i in range(len(columns_for_recv)-1):
current_recv = routes_df[columns_for_recv[i]] - routes_df[columns_for_recv[i+1]]
if i == 0:
total_recv = current_recv
else:
total_recv += current_recv
## This needs fix as routes_df['gateway_end_time'] is None (hence defaulting)
routes_df['g:recv'] = total_recv
## This needs fix as routes_df['gateway_end_time'] is None (hence defaulting)
# routes_df['docs/s'] = num_docs / (routes_df['roundtrip'].seconds)
columns_of_interest = list(set(routes_df.columns) - set(existing_cols))
return routes_df, columns_of_interest
def get_summary(routes_df, columns_of_interest) -> Dict:
""" Returns Stats summary of the timestamps """
summary = {}
for _ in columns_of_interest:
summary[_] = {}
summary[_]['mean'] = routes_df[_].mean().total_seconds()
summary[_]['median'] = routes_df[_].median().total_seconds()
summary[_]['std'] = routes_df[_].std().total_seconds()
summary[_]['max'] = routes_df[_].max().total_seconds()
summary[_]['min'] = routes_df[_].min().total_seconds()
summary[_]['sum'] = routes_df[_].sum().total_seconds()
return summary
def write_benchmark_to_markdown(overall_summary, click_help_msg):
with open('README.template.MD') as template_md:
template_text = template_md.readlines()
print(overall_summary)
html_table_text = html_table(overall_summary_dict=overall_summary)
uuid_gen = uuid.uuid4().hex.lower()[0:10]
image_filename = plot_num_docs_vs_time(overall_summary_dict=overall_summary,
column_of_interest=COLUMN_OF_INTEREST,
uid=uuid_gen,
file_dir=os.environ['FILE_DIR']) # to be changed to gh hash
with open('README.MD', 'w') as readme_f:
readme_f.writelines(template_text)
readme_f.write('<h3> Usage </h3>\n\n')
readme_f.write(f'```\n{click_help_msg}\n```')
readme_f.write('\n\n<h3>Results</h3>\n')
readme_f.writelines(f'\n\n{html_table_text}')
readme_f.write('\n\n\n<h3>Num docs vs Time<h3>\n\n')
readme_f.write(f'')
def html_table(overall_summary_dict) -> str:
table_html = ''
for num_docs, summary in overall_summary_dict.items():
table_html += pd.DataFrame(summary).loc['mean'].to_frame().rename(columns={'mean': num_docs}).T.round(3).to_html()
return table_html
def plot_num_docs_vs_time(overall_summary_dict, column_of_interest, uid, file_dir) -> str:
""" Plots num_docs (log scale) vs total time"""
x, y = [], []
for num_docs, summary in overall_summary_dict.items():
x.append(num_docs)
y.append(summary[column_of_interest]['sum'])
plt.figure(figsize=(16, 4))
plt.plot(x, y, linestyle='--', marker='o')
plt.xlabel('Number of docs indexed')
plt.ylabel(f'{column_of_interest} total time (secs)')
plt.xscale('log', base=2)
plt.tight_layout()
image_filename = f'{file_dir}/num-docs-vs-time-{uid}.svg'
plt.savefig(image_filename)
return image_filename
```
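A minimal sketch of how the helpers above chain together, assuming a `routes.parquet` file from an earlier benchmark run and a known pod topology:

```python
from post_process import clean_dataframe, evaluate_times, get_summary  # assumed module name

pod_names = ['pod0', 'pod1']  # assumed pods in the Flow (gateway is handled separately)
num_docs = 10_000             # number of docs indexed in the run

routes_df = clean_dataframe('routes.parquet')
routes_df, columns_of_interest = evaluate_times(routes_df, num_docs, pod_names)
summary = get_summary(routes_df, columns_of_interest)

print(summary['g:send']['mean'])  # mean send time between pods, in seconds
```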
#### File: distributed/aws/client.py
```python
import os
import boto3
from logger import logger
class AWSClient:
"""Wrapper around boto3 to create aws clients
"""
def __init__(self, service: str, region: str = 'us-east-2'):
self._service = service
self._region = region
if all(len(os.getenv(k, '')) > 0 for k in ('AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY')):
self._client = boto3.client(service_name=self._service,
aws_access_key_id=os.getenv('AWS_ACCESS_KEY_ID'),
aws_secret_access_key=os.getenv('AWS_SECRET_ACCESS_KEY'),
region_name=self._region)
else:
raise EnvironmentError(f'Please set AWS related env vars')
@property
def client(self):
return self._client
@property
def all_waiters(self):
return self._client.waiter_names
@property
def waiter(self):
return self._waiter
@waiter.setter
def waiter(self, waiter_name):
try:
if waiter_name not in self.all_waiters:
logger.error(f'Invalid waiter `{waiter_name}` for service `{self._service}`')
self._waiter = self.client.get_waiter(waiter_name=waiter_name)
except Exception as exp:
logger.exception(f'Got the following exception while getting the waiter {exp}')
raise
```
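A brief sketch of the `AWSClient` wrapper above; the service and waiter names are examples, and the AWS credential environment variables must already be set or the constructor raises `EnvironmentError`:

```python
from aws.client import AWSClient  # assumed import path for distributed/aws/client.py

wrapper = AWSClient(service='s3', region='us-east-2')
print(wrapper.all_waiters)        # waiter names exposed by the boto3 s3 client

wrapper.waiter = 'bucket_exists'  # validated against all_waiters by the setter
wrapper.waiter.wait(Bucket='my-example-bucket')  # blocks until the bucket exists
```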
#### File: distributed/aws/s3.py
```python
import glob
from pathlib import Path
from logger import logger
from .client import AWSClient
class S3:
"""Wrapper around boto3 to upload to/download from S3 bucket
"""
def __init__(self, bucket: str):
self._client = AWSClient(service='s3').client
self._bucket = bucket
def add(self, path: str, key: str):
if not Path(path).exists():
logger.error(f'Invalid path: {path}! Nothing to upload!')
return
try:
logger.info(f'Uploading object from `{path}` to S3 bucket `{self._bucket}` key `{key}`')
for filename in glob.iglob(str(path) + '**/**', recursive=True):
if Path(filename).is_file():
self._client.upload_file(filename, self._bucket, f'{key}/{filename}')
except Exception as exp:
logger.error(f'Got following exception while uploading object to S3 \n{exp}')
raise
def get(self, key: str, local_path):
try:
logger.info(f'Downloading object from `{self._bucket}:{key}` to file: {local_path}')
self._client.download_file(self._bucket, key, local_path)
except Exception as exp:
logger.error(f'Got following exception while downloading object from S3 \n{exp}')
raise
```
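A hedged usage sketch for the `S3` wrapper above; the bucket, key and local paths are placeholders, and valid AWS credentials are required by the underlying `AWSClient`:

```python
from aws.s3 import S3  # assumed import path for distributed/aws/s3.py

bucket = S3(bucket='e2e-distributed-stress-tests')

# Recursively upload every file under ./results beneath the 'run-001' key
bucket.add(path='./results', key='run-001')

# Download a single object back to disk (the key mirrors the uploaded path)
bucket.get(key='run-001/results/stats.json', local_path='./stats.json')
```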
#### File: stress-test/distributed/data.py
```python
import os
import time
from functools import partial
from multiprocessing import Pool
from typing import Dict, Callable
from jina.parsers import set_client_cli_parser
from jina.clients import Client, WebSocketClient
from pydantic import validate_arguments
from logger import logger
from helper import GatewayClients, Tasks
def _fetch_client(client: GatewayClients):
gateway_data_host = f'{os.getenv("JINA_GATEWAY_HOST") if os.getenv("JINA_GATEWAY_HOST") else "localhost"}'
gateway_data_port = f'{os.getenv("JINA_GATEWAY_PORT_EXPOSE") if os.getenv("JINA_GATEWAY_PORT_EXPOSE") else "23456"}'
args = set_client_cli_parser().parse_args(['--host', gateway_data_host, '--port-expose', str(gateway_data_port)])
return Client(args) if client == GatewayClients.GRPC else WebSocketClient(args)
def _trigger(task: Tasks, client: GatewayClients, execution_time: float, inputs: Callable,
inputs_args: Dict, request_size: int, on_always: Callable, on_always_args: Dict, top_k: int = 10):
run_until = time.time() + execution_time
client = _fetch_client(client=client)
while time.time() < run_until:
try:
if task == Tasks.INDEX:
client.index(
inputs(**inputs_args),
request_size=request_size,
on_always=partial(on_always, **on_always_args)
)
elif task == Tasks.SEARCH:
client.search(
inputs(**inputs_args),
request_size=request_size,
top_k=top_k,
on_always=partial(on_always, **on_always_args)
)
except ZeroDivisionError:
logger.error(f'ZeroDivisionError: seems to be an issue with profile logger, ignoring for now.')
continue
def _handle_clients(num_clients, *args):
with Pool(num_clients) as pool:
results = [pool.apply_async(_trigger, args=args) for _ in range(num_clients)]
[r.get() for r in results]
@validate_arguments
def index(*,
inputs: Callable,
inputs_args: Dict,
on_always: Callable,
on_always_args: Dict = {},
client: GatewayClients = 'grpc',
num_clients: int = 1,
request_size: int = 100,
execution_time: int = 10):
logger.info(f'👍 Starting indexing for {execution_time} secs')
start = time.time()
on_always_args.update({'client': client.value})
_handle_clients(num_clients, 'index', client, execution_time, inputs,
inputs_args, request_size, on_always, on_always_args)
logger.info(f'👍 Completed indexing. It took {time.time() - start} secs')
@validate_arguments
def query(*,
inputs: Callable,
inputs_args: Dict,
on_always: Callable,
on_always_args: Dict = {},
client: GatewayClients = 'grpc',
num_clients: int = 1,
request_size: int = 100,
execution_time: int = 10,
top_k: int = 10):
logger.info(f'👍 Starting querying for {execution_time} secs')
start = time.time()
on_always_args.update({'top_k': top_k, 'client': client.value})
_handle_clients(num_clients, 'search', client, execution_time, inputs,
inputs_args, request_size, on_always, on_always_args, top_k)
logger.info(f'👍 Completed querying. It took {time.time()-start} secs')
```
#### File: stress-test/distributed/steps.py
```python
import os
import uuid
from typing import List, Dict, Callable, ClassVar
import requests
from pydantic import validate_arguments, FilePath
from pydantic.types import DirectoryPath
import data
import control
from aws import S3
from stats import collect_and_push
from helper import update_environment_vars, GatewayClients
class StepItems:
workspace: ClassVar[uuid.uuid4] = None
flow: ClassVar[uuid.uuid4] = None
state: ClassVar[Dict] = {}
@classmethod
@validate_arguments
def create_workspace(cls,
*,
files: List[FilePath],
environment: Dict[str, str]):
update_environment_vars(files=files, environment=environment)
StepItems.workspace = control.create_or_update_workspace(files=files)
@classmethod
@validate_arguments
def update_workspace(cls,
*,
files: List[FilePath],
environment: Dict[str, str]):
update_environment_vars(files=files, environment=environment)
StepItems.workspace = control.create_or_update_workspace(files=files,
workspace_id=StepItems.workspace)
@classmethod
@validate_arguments
def delete_workspace(cls):
control.delete_workspace(workspace_id=StepItems.workspace)
StepItems.workspace = None
@classmethod
@validate_arguments
def start_flow(cls,
*,
file: FilePath):
StepItems.flow = control.start_flow(file=file,
workspace_id=StepItems.workspace)
@classmethod
@validate_arguments
def terminate_flow(cls):
control.terminate_flow(flow_id=StepItems.flow,
delete_workspace=False)
StepItems.flow = None
@classmethod
@validate_arguments
def index(cls,
*,
inputs: Callable,
inputs_args: Dict,
on_always: Callable,
on_always_args: Dict = {},
client: GatewayClients = GatewayClients.GRPC,
num_clients: int = 1,
request_size: int = 100,
execution_time: int = 10):
data.index(inputs=inputs,
inputs_args=inputs_args,
on_always=on_always,
on_always_args=on_always_args,
client=client,
execution_time=execution_time,
num_clients=num_clients,
request_size=request_size)
@classmethod
@validate_arguments
def query(cls,
*,
inputs: Callable,
inputs_args: Dict,
on_always: Callable,
on_always_args: Dict = {},
client: GatewayClients = GatewayClients.GRPC,
num_clients: int = 1,
request_size: int = 100,
execution_time: int = 10,
top_k: int = 10):
data.query(inputs=inputs,
inputs_args=inputs_args,
on_always=on_always,
on_always_args=on_always_args,
client=client,
execution_time=execution_time,
num_clients=num_clients,
request_size=request_size,
top_k=top_k)
@classmethod
@validate_arguments
def upload_to_s3(cls,
*,
directory: DirectoryPath,
bucket: str = 'e2e-distributed-stress-tests'):
s3_key = os.getenv('TFID') if 'TFID' in os.environ else str(uuid.uuid4())
S3(bucket=bucket).add(path=directory, key=s3_key)
@classmethod
@validate_arguments
def download_from_s3(cls,
*,
key: str,
local_directory: str = '.',
bucket: str = 'e2e-distributed-stress-tests'):
S3(bucket=bucket).get(key=key, local_path=f'{local_directory}/{key}')
@classmethod
@validate_arguments
def collect_stats(cls,
*,
slack: bool = False):
collect_and_push(slack=slack)
@classmethod
@validate_arguments
def download_and_extract_from_web(cls,
*,
uri: str,
format: str = None):
import shutil
file_name = f'./{os.path.basename(uri)}'
if not os.path.exists(file_name):
resp = requests.get(uri)
if resp.status_code != 200:
raise FileNotFoundError(f'Could not download data from {uri}')
else:
with open(file_name, 'wb') as f:
f.write(resp.content)
shutil.unpack_archive(file_name, format=format)
```
#### File: stress-test/vector_indexers/annoy_indexer.py
```python
import os
from jina.executors.indexers.vector.annoy import AnnoyIndexer
WORKSPACE_DIR = os.path.join(os.path.dirname(__file__), "workspace")
def get_annoy_indexer(params):
n_trees = params['n_trees']
search_k = params['search_k']
name = params['name']
return AnnoyIndexer(metric='euclidean', ntrees=n_trees, search_k=search_k,
index_filename=f'{name}.tgz', compress_level=1,
metas={'workspace': WORKSPACE_DIR,
'warn_unnamed': False,
'separated_workspace': False,
'is_trained': False,
'max_snapshot': 0
})
``` |
{
"source": "Jinadelee/spotpy",
"score": 3
} |
#### File: spotpy/examples/tutorial_padds.py
```python
import numpy as np
import sys
try:
import spotpy
except ModuleNotFoundError:
sys.path.append(".")
import spotpy
def ZDT1(x):
"""
Zitzler–Deb–Thiele's function number 1. It is used to benchmark or test optimization algorithms; see also
https://en.wikipedia.org/wiki/Test_functions_for_optimization and Deb et al. 2002 IEEE.
## The schematic tradeoff looks like this
# /\
# |
#1 .
# |
# |
# | .
# |
# | .
# |
# | .
# |
# | .
# |
# | .
# | .
# | .
# |------------------------------------------.------>
# 1
ZDT1 needs 30 parameters, which are in [0,1].
:param x:
:return: Two Value Tuple
"""
a = x[0] # objective 1 value
g = 0
for i in range(1,30):
g = g + x[i]
g = 1 + 9 * g / 29
b = g * (1 - (x[0] / g) ** 0.5) # objective 2 value
return np.array([a,b])
class padds_spot_setup(object):
def __init__(self):
self.params = []
for i in range(30):
self.params.append(spotpy.parameter.Uniform(str(i+1), 0, 1, 0, 0, 0, 1,doc="param no " + str(i+1)))
def parameters(self):
return spotpy.parameter.generate(self.params)
def simulation(self, vector):
firstSum = 0.0
secondSum = 0.0
for c in range(len(vector)):
firstSum += c**2.0
secondSum += np.cos(2.0*np.pi*vector[c])
n = float(len(vector))
return [-20.0*np.exp(-0.2*np.sqrt(firstSum/n)) - np.exp(secondSum/n) + 20 + np.e]
def evaluation(self):
observations = [0]
return observations
def objectivefunction(self, simulation, evaluation, params):
para, names = params
if len(para) != 30:
raise Exception("params must have length 30")
return ZDT1(para)
spot_setup = padds_spot_setup()
sampler = spotpy.algorithms.padds(spot_setup, dbname='padds_hymod', dbformat='csv')
res = sampler.sample(10000,trials=1)
``` |
{
"source": "JinaKim77/Project-2020_Emerging-Technologies",
"score": 3
} |
#### File: JinaKim77/Project-2020_Emerging-Technologies/power.py
```python
import flask as fl
from flask import jsonify
# handle a POST request
# import request object
from flask import request
# numpy for numerical work.
import numpy as np
# Import the json module
import json
from json import dumps
# keras model
import keras.models
from keras.models import load_model
model = load_model("my_h5_model.h5")
# tensorflow
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.models import load_model
from tensorflow.keras.layers import Dense
# neural networks
import tensorflow.keras as krs
# Create a new web app.
app = fl.Flask(__name__)
#Add root route
@app.route('/')
def home():
return app.send_static_file('index.html')
#Add test route
@app.route("/test")
def test():
return "yay it worked"
#Page not found
@app.errorhandler(404)
def page_not_found(e):
return "<h1>404</h1><p>The resource could not be found.</p>", 404
#Add power route
@app.route('/api/power', methods=["GET","POST"]) # allow both GET and POST requests
def power():
result = {"success": False}
speed = float(request.form["value"]) # get speed value
inToArray = np.array([speed]) # into array
print('data from user - (speed):', speed)  # debug: echo the submitted speed to the console
prediction = model.predict([inToArray])
predictedData = prediction.tolist() # to the list
print('predicted value - (power):', predictedData[0])  # debug: echo the predicted power to the console
jsons = json.dumps(predictedData)
power_value = jsons.strip('[[]]')
result["response"] = power_value
# return jsonify({'value': predictedData[0]})
result["success"] = True
return jsonify(result)
if __name__ == "__main__":
app.run(debug=True, port=5000) #run app in debug mode on port 5000
``` |
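A quick sketch of how the `/api/power` endpoint above can be exercised once the Flask app is running on port 5000; the speed value is arbitrary:

```python
import requests

# POST the wind speed as form data, matching request.form["value"] in power()
resp = requests.post('http://127.0.0.1:5000/api/power', data={'value': 12.5})
print(resp.json())  # e.g. {'response': '<predicted power>', 'success': True}
```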
{
"source": "jinala/RLbaselines",
"score": 3
} |
#### File: baselines/ppo2/defaults.py
```python
import random
import numpy as np
def unif_range(a, b):
return random.random() * (b - a) + a
def rand_elem(xs):
return xs[random.randrange(len(xs))]
def rand_int_linspace(start, stop, num = 50):
return rand_elem([int(x) for x in np.linspace(start, stop, num)])
def mujoco():
return dict(
nsteps=2048,
nminibatches=32,
lam=0.95,
gamma=0.99,
noptepochs=10,
log_interval=1,
ent_coef=0.0,
lr=lambda f: 3e-4 * f,
cliprange=0.2,
value_network='copy'
)
def atari():
return dict(
nsteps=128, nminibatches=4,
lam=0.95, gamma=0.99, noptepochs=4, log_interval=1,
ent_coef=.01,
lr=lambda f : f * 2.5e-4,
cliprange=0.1,
)
def retro():
return atari()
def car_retrieval_train():
lr = unif_range(0.003, 5e-6)
print("lr: ", lr)
return dict(
# horizon = rand_int_linspace(32, 500),
#nminibatches = rand_elem([1, 2, 4, 8, 16, 32, 64, 128, 256]),
nminibatches = 1, # for lstm
ent_coef = rand_elem([0.0, 0.01, 0.05, 0.1]),
noptepochs = rand_int_linspace(3, 36),
cliprange = rand_elem([0.1, 0.2, 0.3]),
gamma = 0.99,
lr = lambda f : f * lr
)
'''
# best params for car retrieval bench
def car_retrieval_train():
lr = 0.002
print("lr: ", lr)
return dict(
# horizon = rand_int_linspace(32, 500),
nminibatches = 128,
ent_coef = 0.01,
noptepochs = 33,
cliprange = 0.1,
gamma = 0.99,
lr = lambda f : f * lr
)
def car_retrieval_train1():
lr = 0.002
print("lr: ", lr)
return dict(
# horizon = rand_int_linspace(32, 500),
nminibatches = 128,
ent_coef = 0.01,
noptepochs = 33,
cliprange = 0.1,
gamma = 0.99,
lr = lambda f : f * lr
)
def car_retrieval_train2():
lr = 0.002
print("lr: ", lr)
return dict(
# horizon = rand_int_linspace(32, 500),
nminibatches = 128,
ent_coef = 0.01,
noptepochs = 33,
cliprange = 0.1,
gamma = 0.99,
lr = lambda f : f * lr
)
def car_retrieval_train3():
lr = 0.002
print("lr: ", lr)
return dict(
# horizon = rand_int_linspace(32, 500),
nminibatches = 128,
ent_coef = 0.01,
noptepochs = 33,
cliprange = 0.1,
gamma = 0.99,
lr = lambda f : f * lr
)
def car_retrieval_train4():
lr = 0.002
print("lr: ", lr)
return dict(
# horizon = rand_int_linspace(32, 500),
nminibatches = 128,
ent_coef = 0.01,
noptepochs = 33,
cliprange = 0.1,
gamma = 0.99,
lr = lambda f : f * lr
)
def car_retrieval_train5():
lr = 0.002
print("lr: ", lr)
return dict(
# horizon = rand_int_linspace(32, 500),
nminibatches = 128,
ent_coef = 0.01,
noptepochs = 33,
cliprange = 0.1,
gamma = 0.99,
lr = lambda f : f * lr
)'''
def pendulum_train():
lr = unif_range(0.003, 5e-6)
print("lr: ", lr)
return dict(
# horizon = rand_int_linspace(32, 500),
#nminibatches = rand_elem([1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048]),
nminibatches = 1, #for lstm
ent_coef = rand_elem([0.0, 0.01, 0.05, 0.1]),
noptepochs = rand_int_linspace(3, 36),
cliprange = rand_elem([0.1, 0.2, 0.3]),
gamma = 0.99,
lr = lambda f : f * lr
)
'''
# best version for pendulum
def pendulum_train():
lr = 0.0003
return dict(
# horizon = rand_int_linspace(32, 500),
nminibatches = 1,
ent_coef = 0.01,
noptepochs = 28,
cliprange = 0.1,
gamma = 0.99,
lr = lambda f : f * lr
)'''
def mountain_car_train():
lr = unif_range(0.003, 5e-6)
print("lr: ", lr)
return dict(
# horizon = rand_int_linspace(32, 500),
nminibatches = 1, #rand_elem([1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048]),
ent_coef = rand_elem([0.0, 0.01, 0.05, 0.1]),
noptepochs = rand_int_linspace(3, 36),
cliprange = rand_elem([0.1, 0.2, 0.3]),
gamma = 0.99,
lr = lambda f : f * lr
)
def quad_train():
lr = unif_range(0.003, 5e-6)
print("lr: ", lr)
return dict(
# horizon = rand_int_linspace(32, 500),
#nminibatches = rand_elem([1, 2, 4, 8, 16, 32, 64, 128, 256]),
nminibatches=1, # for lstm
ent_coef = rand_elem([0.0, 0.01, 0.05, 0.1]),
noptepochs = rand_int_linspace(3, 36),
cliprange = rand_elem([0.1, 0.2, 0.3]),
gamma = 0.99,
lr = lambda f : f * lr
)
def quad_r_train():
lr = unif_range(0.003, 5e-6)
print("lr: ", lr)
return dict(
# horizon = rand_int_linspace(32, 500),
nminibatches = 1, #rand_elem([1, 2, 4, 8, 16, 32, 64, 128, 256]),
ent_coef = rand_elem([0.0, 0.01, 0.05, 0.1]),
noptepochs = rand_int_linspace(3, 36),
cliprange = rand_elem([0.1, 0.2, 0.3]),
gamma = 0.99,
lr = lambda f : f * lr
)
def acrobot_train():
lr = unif_range(0.003, 5e-6)
print("lr: ", lr)
return dict(
# horizon = rand_int_linspace(32, 500),
nminibatches = 1, #rand_elem([1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048]),
ent_coef = rand_elem([0.0, 0.01, 0.05, 0.1]),
noptepochs = rand_int_linspace(3, 36),
cliprange = rand_elem([0.1, 0.2, 0.3]),
gamma = 0.99,
lr = lambda f : f * lr
)
def cartpole_train():
lr = unif_range(0.003, 5e-6)
print("lr: ", lr)
return dict(
# horizon = rand_int_linspace(32, 500),
nminibatches = 1, #rand_elem([1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048]),
ent_coef = rand_elem([0.0, 0.01, 0.05, 0.1]),
noptepochs = rand_int_linspace(3, 36),
cliprange = rand_elem([0.1, 0.2, 0.3]),
gamma = 0.99,
lr = lambda f : f * lr
)
``` |
{
"source": "jinalharia/flask-snippets",
"score": 2
} |
#### File: flask-snippets/apis/wordpress_xml_rpc.py
```python
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from flask import request, Response
from app import app
import string
from datetime import datetime
from flask import url_for
from flask.ext.xmlrpc import XMLRPCHandler, Fault
from labs import app, db
from labs.models import User, Post, Tag, Category
POST_ROOT = 'http://127.0.0.1/post/'
# MetaWeblogAPI XML-RPC
handler = XMLRPCHandler('api')
handler.connect(app, '/api')
metaweblog = handler.namespace('metaWeblog')
blogger = handler.namespace('blogger')
wordpress = handler.namespace('wp')
moveabletype = handler.namespace('mt')
@metaweblog.register
def newPost(blog_id, username, password, content, publish):
user = db.session.query(User).filter(User.username == username).first()
if user is None or not user.check_password(password):
raise Fault("invalid_user",
"Invalid username/password, please try again.")
post = Post(content['title'], content['description'])
post.author = user
post.teaser = content['mt_excerpt']
if 'wp_slug' in content:
post.slug = content['wp_slug']
if 'dateCreated' in content:
post.create_date = datetime.strptime(str(content['dateCreated']),
"%Y%m%dT%H:%M:%SZ")
if 'custom_fields' in content:
for custom_field in content['custom_fields']:
if custom_field['key'] == 'subtitle':
post.subtitle = custom_field['value']
elif custom_field['key'] == 'lead_img':
post.lead_img = custom_field['value']
tag_names = string.split(content['mt_tags'], ',')
for tag_name in tag_names:
tag = Tag.query.filter(Tag.name == tag_name).first()
if tag is None:
tag = Tag(tag_name)
db.session.add(tag)
db.session.commit()
post.tags.append(tag)
db.session.add(post)
db.session.commit()
return post.id
@metaweblog.register
def editPost(post_id, username, password, content, publish):
user = db.session.query(User).filter(User.username == username).first()
if user is None or not user.check_password(password):
raise Fault("invalid_user",
"Invalid username/password, please try again.")
post = Post.query.get(post_id)
post.title = content['title']
post.markdown = content['description']
post.set_html()
post.teaser = content['mt_excerpt']
if 'wp_slug' in content:
post.slug = content['wp_slug']
if 'dateCreated' in content:
post.create_date = datetime.strptime(str(content['dateCreated']),
"%Y%m%dT%H:%M:%SZ")
if 'custom_fields' in content:
for custom_field in content['custom_fields']:
if custom_field['key'] == 'subtitle':
post.subtitle = custom_field['value']
elif custom_field['key'] == 'lead_img':
post.lead_img = custom_field['value']
tag_names = string.split(content['mt_tags'], ',')
tags = []
for tag_name in tag_names:
tag = Tag.query.filter(Tag.name == tag_name).first()
if tag is None:
tag = Tag(tag_name)
db.session.add(tag)
db.session.commit()
tags.append(tag)
post.tags = tags
db.session.add(post)
db.session.commit()
return True
@metaweblog.register
def getPost(post_id, username, password):
user = db.session.query(User).filter(User.username == username).first()
if user is None or not user.check_password(password):
raise Fault("invalid_user",
"Invalid username/password, please try again.")
post = Post.query.filter(Post.id == post_id).first()
if not post:
raise Fault("not_found", "Post not found.")
item = {}
item['title'] = post.title
item['link'] = POST_ROOT + post.slug
item['description'] = post.markdown
item['postid'] = post.id
item['mt_excerpt'] = post.teaser
item['custom_fields'] = [
{
'key': 'subtitle',
'value': post.subtitle
},
{
'key': 'lead_img',
'value': post.lead_img
}
]
item['wp_slug'] = post.slug
if post.tags:
item['mt_tags'] = ','.join(map(lambda tag: tag.name, post.tags))
item['dateCreated'] = post.create_date
return item
@metaweblog.register
def getRecentPosts(blogid, username, password, numberOfPosts):
user = db.session.query(User).filter(User.username == username).first()
if user is None or not user.check_password(password):
raise Fault("invalid_user",
"Invalid username/password, please try again.")
posts = Post.query.order_by('create_date').all()
response = []
for post in posts:
item = {}
item['title'] = post.title
item['link'] = POST_ROOT + post.slug
item['description'] = post.markdown
item['postid'] = post.id
item['mt_excerpt'] = post.teaser
item['wp_slug'] = post.slug
item['custom_fields'] = [
{
'key': 'subtitle',
'value': post.subtitle
},
{
'key': 'lead_img',
'value': post.lead_img
}
]
tags = []
for tag in post.tags:
tags.append(tag.name)
item['mt_tags'] = ','.join(tags)
item['dateCreated'] = post.create_date
# if post['draft']:
# item['draft'] = 'Yes'
response.append(item)
return response
@wordpress.register
def getPages(blogid, username, password, numberOfPages):
return []
@wordpress.register
def newCategory(blogid, username, password, new_category):
user = db.session.query(User).filter(User.username == username).first()
if user is None or not user.check_password(password):
raise Fault("invalid_user",
"Invalid username/password, please try again.")
category = Category.query.filter(Category.name == new_category['name']).first()
if category is None:
category = Category(new_category['name'])
db.session.add(category)
db.session.commit()
return category.id
@wordpress.register
def getTags(blogid, username, password):
return map(lambda tag: {
'tag_id': tag.id,
'name': tag.name
}, Tag.query.all())
@wordpress.register
def getCategories(blogid, username, password):
return map(lambda category: {
'categoryId': category.id,
'categoryName': category.name,
'categoryDescription': category.description
}, Category.query.all())
@moveabletype.register
def setPostCategories(post_id, username, password, post_categories):
post = Post.query.get(post_id)
for post_category in post_categories:
category = Category.query.filter(
Category.name == post_category['categoryName']
).first()
# only single category per post supported
post.category = category
db.session.add(post)
db.session.commit()
return True
@moveabletype.register
def getPostCategories(post_id, username, password):
    # only a single category per post is supported
category = Post.query.get(post_id).category
if category is not None:
post_category = {
'categoryId': category.id,
'categoryName': category.name,
'categoryDescription': category.description
}
return [post_category]
return []
@moveabletype.register
def supportedTextFilters():
return []
@blogger.register
def deletePost(appkey, post_id, username, password, publish):
user = db.session.query(User).filter(User.username == username).first()
if user is None or not user.check_password(password):
raise Fault("invalid_user",
"Invalid username/password, please try again.")
post = Post.query.get(int(post_id))
db.session.delete(post)
db.session.commit()
pass
@app.route('/')
def index():
return 'index'
if __name__ == "__main__":
app.run()
```
#### File: admin_blueprint/admin/__init__.py
```python
from flask import Blueprint
from flask import redirect, request
from google.appengine.api import users
bp = Blueprint('admin', __name__)
@bp.before_request
def restrict_bp_to_admins():
if not users.is_current_user_admin():
return redirect(users.create_login_url(request.url))
```
#### File: appstructure/config_static/__init__.py
```python
import os
import flask
class MyFlask(flask.Flask):
@property
def static_folder(self):
if self.config.get('STATIC_FOLDER') is not None:
return os.path.join(self.root_path,
self.config.get('STATIC_FOLDER'))
@static_folder.setter
def static_folder(self, value):
        self.config['STATIC_FOLDER'] = value
# Now these are equivalent:
app = flask.Flask(__name__, static_folder='foo')
app = MyFlask(__name__)
app.config['STATIC_FOLDER'] = 'foo'
```
#### File: appstructure/create_with_func/__init__.py
```python
from flask import Flask
from sqlalchemy import create_engine
from myapp import config
from myapp.views import frontend
def create_app(database_uri, debug=False):
app = Flask(__name__)
app.debug = debug
# set up your database
app.engine = create_engine(database_uri)
# add your modules
app.register_module(frontend)
# other setup tasks
return app
if __name__ == "__main__":
app = create_app(config.DATABASE_URI, debug=True)
app.run()
"""
import unittest
from myapp import config
from myapp import create_app
class TestCase(unittest.TestCase):
def setUp(self):
self.app = create_app(config.TEST_DATABASE_URI)
self.client = self.app.test_client()
"""
```
#### File: appstructure/json_oriented/__init__.py
```python
from flask import Flask, jsonify
from werkzeug.exceptions import default_exceptions
from werkzeug.exceptions import HTTPException
__all__ = ['make_json_app']
def make_json_app(import_name, **kwargs):
"""
Creates a JSON-oriented Flask app.
All error responses that you don't specifically
manage yourself will have application/json content
type, and will contain JSON like this (just an example):
{ "message": "405: Method Not Allowed" }
"""
def make_json_error(ex):
response = jsonify(message=str(ex))
response.status_code = (ex.code
if isinstance(ex, HTTPException)
else 500)
return response
app = Flask(import_name, **kwargs)
for code in default_exceptions.iterkeys():
app.error_handler_spec[None][code] = make_json_error
return app
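# A minimal usage sketch (the route and client calls below are illustrative,
# not part of the snippet above): any unhandled HTTP error comes back as JSON.
# app = make_json_app(__name__)
# client = app.test_client()
# resp = client.get('/does-not-exist')
# resp.headers['Content-Type']   # 'application/json'
# resp.data                      # e.g. '{"message": "404: Not Found"}'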
```
#### File: flask-snippets/decorators/http_headers.py
```python
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from functools import wraps
from flask import make_response
from app import app
def add_response_headers(headers={}):
"""This decorator adds the headers passed in to the response"""
def decorator(f):
@wraps(f)
def decorated_function(*args, **kwargs):
resp = make_response(f(*args, **kwargs))
h = resp.headers
for header, value in headers.items():
h[header] = value
return resp
return decorated_function
return decorator
def noindex(f):
"""This decorator passes X-Robots-Tag: noindex"""
return add_response_headers({'X-Robots-Tag': 'noindex'})(f)
@app.route('/')
@noindex
def no_indexed():
"""
This page will be served with X-Robots-Tag: noindex
in the response headers
"""
return "Check my headers!"
if __name__ == "__main__":
app.run()
# check the headers with: curl -I http://0.0.0.0:5000/
```
#### File: flask-snippets/deployment/pass_remote_user.py
```python
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from flask import request, Response
from app import app
"""
RequestHeader set X-Proxy-REMOTE-USER %{REMOTE_USER}
"""
class RemoteUserMiddleware(object):
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
user = environ.pop('HTTP_X_PROXY_REMOTE_USER', None)
environ['REMOTE_USER'] = user
return self.app(environ, start_response)
@app.route('/')
def index():
return 'index'
if __name__ == "__main__":
app.wsgi_app = RemoteUserMiddleware(app.wsgi_app)
app.run()
```
#### File: flask-snippets/forms/form_from_model.py
```python
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from flask import render_template, url_for, redirect
from flaskext.wtf import Form
from wtforms.ext.appengine.db import model_form
from wtforms import validators
from app import app
from models import MyModel
MyForm = model_form(MyModel, Form, field_args = {
'name' : {
'validators' : [validators.Length(max=10)]
}
})
@app.route("/edit<id>")
def edit(id):
MyForm = model_form(MyModel, Form)
model = MyModel.get(id)
form = MyForm(request.form, model)
if form.validate_on_submit():
form.populate_obj(model)
model.put()
flash("MyModel updated")
return redirect(url_for("index"))
return render_template("edit.html", form=form)
if __name__ == "__main__":
app.run()
```
#### File: flask-snippets/internationalizatioin/babel_lazyproxy.py
```python
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from wtforms import Form, fields
from myapp.utils import ugettext_lazy as _
from flask import g
from babel.support import LazyProxy
from app import app
class MyForm(Form):
name = fields.TextField(_("Name"))
def ugettext(s):
# we assume a before_request function
# assigns the correct user-specific
# translations
return g.translations.ugettext(s)
ugettext_lazy = LazyProxy(ugettext)
@app.route('/')
def index():
return 'index'
if __name__ == "__main__":
app.run()
```
#### File: flask-snippets/security/salted_password.py
```python
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from werkzeug.security import generate_password_hash, check_password_hash
from app import app
class User(object):
def __init__(self, username, password):
self.username = username
self.set_password(password)
def set_password(self, password):
self.pw_hash = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.pw_hash, password)
@app.route('/')
def index():
me = User('<NAME>', 'default')
print me.pw_hash
print me.check_password('<PASSWORD>')
print me.check_password('<PASSWORD>')
return 'index'
if __name__ == "__main__":
app.run()
```
#### File: flask-snippets/sessions/file_session.py
```python
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
import cPickle as pickle
import base64
import hmac
import hashlib
import random
import string
import datetime
from uuid import uuid4
from collections import OrderedDict
from werkzeug.datastructures import CallbackDict
from flask.sessions import SessionInterface, SessionMixin
from app import app
def _generate_sid():
return str(uuid4())
def _calc_hmac(body, secret):
return base64.b64encode(hmac.new(secret, body, hashlib.sha1).digest())
class ManagedSession(CallbackDict, SessionMixin):
def __init__(self, initial=None, sid=None, new=False, randval=None, hmac_digest=None):
def on_update(self):
self.modified = True
CallbackDict.__init__(self, initial, on_update)
self.sid = sid
self.new = new
self.modified = False
self.randval = randval
self.hmac_digest = hmac_digest
def sign(self, secret):
if not self.hmac_digest:
self.randval = ''.join(random.sample(string.lowercase+string.digits, 20))
self.hmac_digest = _calc_hmac('%s:%s' % (self.sid, self.randval), secret)
class SessionManager(object):
def new_session(self):
'Create a new session'
raise NotImplementedError
def exists(self, sid):
'Does the given session-id exist?'
raise NotImplementedError
def remove(self, sid):
'Remove the session'
raise NotImplementedError
def get(self, sid, digest):
'Retrieve a managed session by session-id, checking the HMAC digest'
raise NotImplementedError
def put(self, session):
'Store a managed session'
raise NotImplementedError
class CachingSessionManager(SessionManager):
def __init__(self, parent, num_to_store):
self.parent = parent
self.num_to_store = num_to_store
self._cache = OrderedDict()
def _normalize(self):
print "Session cache size: %s" % len(self._cache)
if len(self._cache) > self.num_to_store:
while len(self._cache) > (self.num_to_store * 0.8): # flush 20% of the cache
self._cache.popitem(False)
def new_session(self):
session = self.parent.new_session()
self._cache[session.sid] = session
self._normalize()
return session
def remove(self, sid):
self.parent.remove(sid)
if sid in self._cache:
del self._cache[sid]
def exists(self, sid):
if sid in self._cache:
return True
return self.parent.exists(sid)
def get(self, sid, digest):
session = None
if sid in self._cache:
session = self._cache[sid]
if session.hmac_digest != digest:
session = None
# reset order in OrderedDict
del self._cache[sid]
if not session:
session = self.parent.get(sid, digest)
self._cache[sid] = session
self._normalize()
return session
def put(self, session):
self.parent.put(session)
if session.sid in self._cache:
del self._cache[session.sid]
self._cache[session.sid] = session
self._normalize()
class FileBackedSessionManager(SessionManager):
def __init__(self, path, secret):
self.path = path
self.secret = secret
if not os.path.exists(self.path):
os.makedirs(self.path)
def exists(self, sid):
fname = os.path.join(self.path, sid)
return os.path.exists(fname)
def remove(self, sid):
print 'Removing session: %s' % sid
fname = os.path.join(self.path, sid)
if os.path.exists(fname):
os.unlink(fname)
def new_session(self):
sid = _generate_sid()
fname = os.path.join(self.path, sid)
while os.path.exists(fname):
sid = _generate_sid()
fname = os.path.join(self.path, sid)
# touch the file
with open(fname, 'w'):
pass
print "Created new session: %s" % sid
return ManagedSession(sid=sid)
def get(self, sid, digest):
'Retrieve a managed session by session-id, checking the HMAC digest'
print "Looking for session: %s, %s" % (sid, digest)
fname = os.path.join(self.path, sid)
data = None
hmac_digest = None
randval = None
if os.path.exists(fname):
try:
with open(fname) as f:
randval, hmac_digest, data = pickle.load(f)
except:
print "Error loading session file"
if not data:
print "Missing data?"
return self.new_session()
# This assumes the file is correct, if you really want to
# make sure the session is good from the server side, you
# can re-calculate the hmac
if hmac_digest != digest:
print "Invalid HMAC for session"
return self.new_session()
return ManagedSession(data, sid=sid, randval=randval, hmac_digest=hmac_digest)
def put(self, session):
'Store a managed session'
print "Storing session: %s" % session.sid
if not session.hmac_digest:
session.sign(self.secret)
fname = os.path.join(self.path, session.sid)
with open(fname, 'w') as f:
pickle.dump((session.randval, session.hmac_digest, dict(session)), f)
class ManagedSessionInterface(SessionInterface):
def __init__(self, manager, skip_paths, cookie_timedelta):
self.manager = manager
self.skip_paths = skip_paths
self.cookie_timedelta = cookie_timedelta
def get_expiration_time(self, app, session):
if session.permanent:
return app.permanent_session_lifetime
return datetime.datetime.now() + self.cookie_timedelta
def open_session(self, app, request):
cookie_val = request.cookies.get(app.session_cookie_name)
if not cookie_val or not '!' in cookie_val:
# Don't bother creating a cookie for static resources
for sp in self.skip_paths:
if request.path.startswith(sp):
return None
print 'Missing cookie'
return self.manager.new_session()
sid, digest = cookie_val.split('!', 1)
if self.manager.exists(sid):
return self.manager.get(sid, digest)
return self.manager.new_session()
def save_session(self, app, session, response):
domain = self.get_cookie_domain(app)
if not session:
self.manager.remove(session.sid)
if session.modified:
response.delete_cookie(app.session_cookie_name, domain=domain)
return
if not session.modified:
# no need to save an unaltered session
# TODO: put logic here to test if the cookie is older than N days, if so, update the expiration date
return
self.manager.put(session)
session.modified = False
cookie_exp = self.get_expiration_time(app, session)
response.set_cookie(app.session_cookie_name,
'%s!%s' % (session.sid, session.hmac_digest),
expires=cookie_exp, httponly=True, domain=domain)
app.session_interface = ManagedSessionInterface(CachingSessionManager(FileBackedSessionManager(app.config['SESSION_PATH'], app.config['SECRET_KEY']), 1000), skip_paths, datetime.timedelta(days=1))
```
#### File: flask-snippets/sessions/old_new_session.py
```python
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
try:
from flask.sessions import SessionMixin, SessionInterface
except ImportError:
class SessionInterface(object):
pass
class SessionMixin(object):
def _get_permanent(self):
return self.get('_permanent', False)
def _set_permanent(self, value):
self['_permanent'] = bool(value)
permanent = property(_get_permanent, _set_permanent)
del _get_permanent, _set_permanent
# you can use a werkzeug.datastructure.CallbackDict
# to automatically update modified if you want, but
# it's not a requirement.
new = False
modified = True
class MySession(dict, SessionMixin):
pass
class MySessionInterface(object):
def open_session(self, app, request):
# load the session and return it.
return MySession()
def save_session(self, app, session, response):
# save the session
...
def init_my_extension(app):
if not hasattr(app, 'session_interface'):
app.open_session = lambda r: \
app.session_interface.open_session(app, r)
app.save_session = lambda s, r: \
app.session_interface.save_session(app, s, r)
app.session_interface = MySessionInterface()
```
#### File: flask-snippets/templatetricks/use_genshi.py
```python
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from flask import Flask
from flaskext.genshi import Genshi, render_response
app = Flask(__name__)
genshi = Genshi(app)
@app.route('/')
def index():
render_response('index.html')
```
#### File: flask-snippets/urls/list_routes.py
```python
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from flask import request, Response, url_for
from app import app
@app.route('/')
def index():
return 'index'
@app.route('/test/<en>/', methods=['GET', 'POST'])
def test(en):
return en
def list_routes():
import urllib
output = []
for rule in app.url_map.iter_rules():
options = {}
for arg in rule.arguments:
options[arg] = "[{0}]".format(arg)
methods = ','.join(rule.methods)
url = url_for(rule.endpoint, **options)
# url = rule.rule
line = urllib.unquote("{:50s} {:20s} {}".format(
rule.endpoint, methods, url))
output.append(line)
for line in sorted(output):
print line
if __name__ == "__main__":
ctx = app.test_request_context()
ctx.push()
list_routes()
```
#### File: flask-snippets/urls/permalink.py
```python
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from flask import url_for
from werkzeug.routing import BuildError
from app import app
def permalink(function):
def inner(*args, **kwargs):
endpoint, values = function(*args, **kwargs)
try:
return url_for(endpoint, **values)
except BuildError:
return
return inner
@permalink
def absolute_url():
return 'profiles', {'username': 'fsp'}
@app.route('/profiles/<username>/')
def profiles(username):
return username
if __name__ == "__main__":
ctx = app.test_request_context()
ctx.push()
print absolute_url()
```
#### File: flask-snippets/utilities/nice_errors.py
```python
from flask import Markup, render_template
from werkzeug.exceptions import default_exceptions
def show_errormessage(error):
desc = error.get_description(flask.request.environ)
return render_template('error.html',
code=error.code,
name=error.name,
description=Markup(desc)
), error.code
for _exc in default_exceptions:
app.error_handlers[_exc] = show_errormessage
del _exc
"""
{% extends "base.html" %}
{% block title %}Error {{ code }}: {{ name }}{% endblock %}
{% block body %}
{{ description}}
{% endblock %}
"""
```
#### File: flask-snippets/utilities/serve_transcompiling.py
```python
from subprocess import Popen, PIPE
import shlex
class NonZeroExitError(Exception):
def __init__(self, command, returncode, output, error):
self.command = command
self.returncode = returncode
self.output = output
self.error = error
def __str__(self):
return '''\
%s returned non-zero exit status %d with output
%s
and error
%s''' % (self.command, self.returncode,
self.output or "[NO OUTOUT]",
self.error or "[NO ERROR]")
def command_line_renderer_factory(command):
'''command should be a command reads input from stdin
and prints to stdout'''
args = shlex.split(command)
def renderer(script):
'''Accepts a file object or path and return the
rendered string'''
if isinstance(script, file):
pass
elif isinstance(script, str):
script = open(script)
else:
raise TypeError('script must be a file object of '
'or a string to the file')
process = Popen(args, stdin=script,
stdout=PIPE, stderr=PIPE)
returncode = process.wait()
stdoutdata, stderrdata = process.communicate()
if returncode != 0:
raise NonZeroExitError(command, returncode,
stdoutdata, stderrdata)
return stdoutdata
return renderer
"""
#!/usr/bin/env node
var less = require('less');
process.stdin.resume();
process.stdin.setEncoding('utf8');
process.stdin.on('data', function (less_css) {
less.render(less_css, function(e, css){
console.log(css)
});
});
"""
```
#### File: flask-snippets/utilities/trigger_debuggr.py
```python
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from flask import render_template_string
from app import app
@app.route('/')
def index():
do_something_wrong()
raise
return 'Ohnoes'
"""
assert app.debug == False
"""
``` |
{
"source": "jinankjain/PNS",
"score": 3
} |
#### File: PNS/code/block_pattern_probabilities.py
```python
from __future__ import print_function
import functools
import itertools
import math
import scipy.integrate
import sys
TOP_REPLICATION_FACTOR = 0.1
AVG_WEBPAGES_PER_DOMAIN = 150
USE_APPROX = True
def log_replication_function(k, N, m):
#return math.log(N - k + 1) + 1
max_replication = max(1.0, m * TOP_REPLICATION_FACTOR)
return max_replication / (k ** math.log(max_replication, N))
def memoize(obj):
cache = obj.cache = {}
@functools.wraps(obj)
def memoizer(*args, **kwargs):
if args not in cache:
#print("Unable to find cached element", args)
cache[args] = obj(*args, **kwargs)
return cache[args]
return memoizer
@memoize
def harmonic(n, s=1):
"""
Compute the n-th harmonic number. If s != 1, the generalized harmonic
number is computed:
H_{n, s} = Sum_{i=1}^{n} 1/i^s
"""
res = 0.0
for i in xrange(1, n+1):
res += 1 / (float(i) ** s)
return res
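# Worked example: harmonic(3) = 1 + 1/2 + 1/3 ≈ 1.8333, and the generalized
# harmonic(3, 2) = 1 + 1/4 + 1/9 ≈ 1.3611.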
@memoize
def harmonic_approx(n, s=1):
"""
Compute the n-th harmonic number. If s != 1, the generalized harmonic
number is computed:
H_{n, s} = Sum_{i=1}^{n} 1/i^s
"""
#if s == 1:
# return scipy.integrate.quad(lambda x: (1 - x**n) / (float(1) - x), 0, 1)
partial = harmonic(min(1000, n), s)
res = 0
if n > 1000:
res, _ = scipy.integrate.quad(lambda x: 1 / (float(x) ** s), 1001, n+1)
return partial + res
@memoize
def _sum_repl_func(repl_func, N, m):
res_main = 0.0
for i in xrange(1, N+1):
res_main += repl_func(i, N, m)
return res_main
@memoize
def _sum_repl_func_zipf(repl_func, N, m, s=1):
res_linked = 0.0
for i in xrange(1, N+1):
res_linked += repl_func(i, N, m) / (float(i) ** s)
return res_linked
def average_replication(q, N, m, replication=(lambda k, N, m: 1), s=1):
"""
    Compute the average replication of a domain, i.e., how many fingerprints
can be associated with that domain
"""
if USE_APPROX:
max_replication = max(1.0, m * TOP_REPLICATION_FACTOR)
main_domain_replication = (max_replication *
harmonic_approx(N, math.log(max_replication, N)) / N)
linked_domains_replication = (max_replication *
harmonic_approx(N, s + math.log(max_replication, N)) /
harmonic_approx(N, s)) ** (q-1)
else:
res_main = _sum_repl_func(replication, N, m)
res_linked = _sum_repl_func_zipf(replication, N, m, s)
main_domain_replication = res_main / N
linked_domains_replication = (res_linked / harmonic(N, s)) ** (q-1)
assert main_domain_replication >= 1
assert linked_domains_replication >= 1
return main_domain_replication * linked_domains_replication
def prob_zipf_distrib(q, t, m, alpha):
"""
Probability that a block pattern of q + t blocks contains another block
pattern of q blocks, assuming that all blocks are i.i.d. according to a
zipf distribution with decay parameter alpha. Parameter m represents the
total number of blocks.
"""
#prob_same_block = (harmonic_number(m, 2*alpha)
# / (harmonic_number(m, alpha) ** 2))
    # Here we use the probability for uniformly distributed blocks (1/m)
    # rather than the zipf-based expression commented out above.
#prob_same_block = float(1)/m
prob_same_block = float(1)/m
prob_single_block_no_match = (1 - prob_same_block) ** (q + t)
prob_block_pattern_match = (1 - prob_single_block_no_match) ** q
return prob_block_pattern_match
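# Quick numeric check of the formula above: with q=1, t=0, m=2 each block is
# drawn with probability 1/2, so the match probability is (1 - (1/2)**1)**1 = 0.5.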
def average_num_domain_pattern_matches(q, t, m, N, alpha=0.9):
"""
Compute the average number of domains (out of N) whose block pattern of
length q matches a reference pattern of q blocks (of a reference domain)
plus t cover blocks.
"""
# Compute the average of the binomial distribution, where prob_zipf_distrib
# gives the probability p of a match, and N is the number of "samples". The
# binomial distribution has an average of p * N.
repl_func = log_replication_function
return (prob_zipf_distrib(q, t, m, alpha) * N * AVG_WEBPAGES_PER_DOMAIN *
average_replication(q, N, m, repl_func, alpha))
if __name__ == "__main__":
N = 1000000000 # total number of domains
q_range = [1, 2, 5, 10] # q is the number of domains queried together (1 website)
#t_range = [0, 5, 10] # t is the number of cover blocks
n_range = [50000, 10000, 1000] # n is the number of domains per block
for q,n in itertools.product(q_range, n_range):
m = N/n # Total number of blocks
for t in [0, q]: # t is the number of cover blocks
res = average_num_domain_pattern_matches(q, t, m, N)
print("q={0}, t={1}, m={2}, n={3}: \t".format(q, t, m, n), res)
```
#### File: code/ns_scraper/alexa_fetcher.py
```python
import zipfile
import io
def _alexa_etl():
with zipfile.ZipFile('top-1m.csv.zip') as zfile:
buf = io.BytesIO(zfile.read('top-1m.csv'))
for line in buf:
line_str = line.decode('utf-8')
(rank, domain) = line_str.split(',')
yield domain.strip()
def get_top_domains(num=100):
domain_generator = _alexa_etl()
return [next(domain_generator) for _ in range(num)]
if __name__ == "__main__":
print(get_top_domains(10))
```
#### File: code/ns_scraper/get_alexa_30k.py
```python
import zipfile
import io
import random
# Random seed so that the sampled domain set is reproducible
random.seed(1119)
# Random sampling to get 30K domains from 1M domain set
domain_ids = random.sample(range(1, 1000001), 30000)
assert(len(domain_ids) == 30000)
domain_ids.sort()
ALEXA_DATA_URL = "alexa-top-1m-2017-05-01.csv.zip"
def get_30K_domains():
"""
    Randomly sample 30K domains from the
    Alexa top-1M domain set.
"""
zfile = zipfile.ZipFile(ALEXA_DATA_URL)
buf = io.BytesIO(zfile.read('top-1m.csv'))
result = []
i = 0
for line in buf:
line = line.decode('utf-8')
(rank, domain) = line.split(',')
if i<30000 and int(rank) == domain_ids[i]:
result.append((int(rank), domain.strip()))
i += 1
return result
if __name__ == '__main__':
print(get_30K_domains())
```
#### File: code/ns_scraper/new_scraper.py
```python
import dns.query
import dns.resolver
import dns.message
import dns.name
import dns.query
import dns.rcode
import dns.rdatatype
import dns.resolver
from dns.exception import DNSException
from get_alexa_30k import get_30K_domains
import argparse
import logging
import time
import datetime
import os
import yaml
_error_logger = logging.getLogger('ErrorLogger')
_console_logger = logging.getLogger('ConsoleLogger')
_RESULTS_DIR = "results/"
_ERROR_DIR = "errors/"
def _config_error_logger(_ERROR_LOG):
file_handler = logging.FileHandler(_ERROR_LOG, mode="w")
formatter = logging.Formatter("%(asctime)s %(levelname)s: %(message)s")
file_handler.setFormatter(formatter)
_error_logger.setLevel(logging.ERROR)
_error_logger.addHandler(file_handler)
def _config_console_logger(log_level):
stream_handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter("%(asctime)s %(levelname)s: %(message)s")
stream_handler.setFormatter(formatter)
_console_logger.setLevel(log_level)
_console_logger.addHandler(stream_handler)
def _log_error(error: str, domain: str):
_error_logger.error("An error occurred when looking up the authoritative NS"
" for %s:\n%s" % (domain, error))
def query_authoritative_ns (domain, outfile, log=lambda msg: None):
default = dns.resolver.get_default_resolver()
ns = default.nameservers[0]
n = domain.split('.')
ns_all = []
ns_name = []
ttl = []
for i in range(0, len(n)):
sub = '.'.join(n[i-1:])
# log('Looking up %s on %s' % (sub, ns))
query = dns.message.make_query(sub, dns.rdatatype.NS)
try:
response = dns.query.udp(query, ns, timeout=10)
rcode = response.rcode()
if rcode != dns.rcode.NOERROR:
_log_error("Received rcode %d" % response.rcode(),
domain)
return [],[]
if len(response.authority) > 0:
rrsets = response.authority
elif len(response.additional) > 0:
rrsets = [response.additional]
else:
rrsets = response.answer
# Handle all RRsets, not just the first one
i = 0
j = 0
for rrset in rrsets:
j = 0
for rr in rrset:
if rr.rdtype == dns.rdatatype.SOA:
pass
# log('Same server is authoritative for %s' % (sub))
elif rr.rdtype == dns.rdatatype.A:
ns = rr.items[0].address
# log('Glue record for %s: %s' % (rr.name, ns))
elif rr.rdtype == dns.rdatatype.NS:
authority = rr.target
ns = default.query(authority).rrset[0].to_text()
# log('%s [%s] is authoritative for %s; ttl %i' %
# (authority, ns, sub, rrset.ttl))
if j == 0 and ns != "":
ns_all = []
ns_name = []
ttl = []
ns_all.append(ns)
ns_name.append(authority)
ttl.append(rrset.ttl)
result = rrset
else:
# IPv6 glue records etc
#log('Ignoring %s' % (rr))
pass
j+=1
i+=1
except dns.exception.DNSException as error:
_log_error(str(error), domain)
return [], []
except Exception as error:
_console_logger.error("Captured an exception while handling %s:\n%s" %
(domain, str(error)))
return [], []
new = zip(ns_name,ns_all)
new = list(sorted(new))
return new, ttl
import sys
def log (msg):
sys.stderr.write(msg + u'\n')
def scrape(ndomains, outfile):
report_threshold = max(int(ndomains / 100), 1)
res = get_30K_domains()
for i in range(len(res)):
new1, ttl1 = query_authoritative_ns(res[i][1], outfile, log)
new2, ttl2 = query_authoritative_ns(res[i][1], outfile, log)
new3, ttl3 = query_authoritative_ns(res[i][1], outfile, log)
if(len(new1)>0 and len(new2)>0 and len(new3)>0):
new = new1
ttl = ttl1
            # Merge the three lookups, keeping each name server only once.
            k = 0
            temp = len(new2)
            while(k<temp):
                found_match = False
                for j in range(0, len(new)):
                    if(new[j][0] == new2[k][0]):
                        found_match = True
                if not found_match:
                    new.append(new2[k])
                    ttl.append(ttl2[k])
                k = k+1
            k = 0
            temp = len(new3)
            while(k<temp):
                found_match = False
                for j in range(0, len(new)):
                    if(new[j][0] == new3[k][0]):
                        found_match = True
                if not found_match:
                    new.append(new3[k])
                    ttl.append(ttl3[k])
                k = k+1
            print(res[i][1], res[i][0], file=outfile)
            for k in range(0, len(new)):
                print(new[k][0], new[k][1], ttl[k], file=outfile)
print(file=outfile)
else:
print("Failed %s" % res[i][1])
idx = i
if (idx + 1) % report_threshold == 0:
progress_percentage = int((idx + 1) / ndomains * 100)
_console_logger.info("Progress: %d of %d (%d%%)" %
(idx + 1, ndomains, progress_percentage))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--scrape-interval",
dest="scrape_interval",
help="Interval to scrape domains.",
type=int,
required=True)
parser.add_argument("-n", dest="n", help="Top N domains", type=int,
default=30000)
parser.add_argument("--debug", action="store_true")
args = parser.parse_args()
log_level = logging.DEBUG if args.debug else logging.INFO
_config_console_logger(log_level)
scrape_interval_counter = 1
while True:
_console_logger.info("Starting scrape interval %d" %
scrape_interval_counter)
start_unix_secs = time.time()
start = datetime.datetime.now()
fname = "ns_scrape-" + start.strftime("%Y-%m-%dT%H:%M:%S") + ".yml"
epath = os.path.join(_ERROR_DIR, fname)
_config_error_logger(epath)
path = os.path.join(_RESULTS_DIR, fname)
_console_logger.info("Writing results to: %s" % path)
# Open new file
with open(path, "w") as outfile:
scrape(args.n, outfile)
end = datetime.datetime.now()
_console_logger.info("Finished scrape interval %d in %s." %
(scrape_interval_counter, end - start))
scrape_interval_counter += 1
end_unix_secs = time.time()
duration = end_unix_secs - start_unix_secs
if duration > args.scrape_interval:
_console_logger.warning("Scraping took longer than the scrape "
"interval (%d s vs %d s)" %
(duration, args.scrape_interval))
else:
sleep_duration = args.scrape_interval - duration
_console_logger.info("Sleeping for %d s" % sleep_duration)
time.sleep(sleep_duration)
```
#### File: code/old_code/test_harmonic.py
```python
from __future__ import print_function
from fractions import Fraction as mpq
import functools
import math
import scipy.integrate
from sympy.functions.combinatorial.numbers import harmonic
import timeit
def memoize(obj):
cache = obj.cache = {}
@functools.wraps(obj)
def memoizer(*args, **kwargs):
if args not in cache:
#print("Unable to find cached element", args)
cache[args] = obj(*args, **kwargs)
return cache[args]
return memoizer
def harmonic_full(n, s=1):
res = 0.0
for i in xrange(1, n+1):
res += 1 / (float(i) ** s)
return res
def _harmonic4(a, b):
if b-a == 1:
return float(1) / a
m = (a+b)//2
return _harmonic4(a,m) + _harmonic4(m,b)
def harmonic4(n):
return _harmonic4(1,n+1)
def approx_harm(n, s=1):
partial = harmonic_full(min(1000, n), s)
res = 0
if n > 1000:
res, _ = scipy.integrate.quad(lambda x: 1 / (float(x) ** s), 1001, n+1)
return partial + res
setup_approx="""
import scipy.integrate
def approx_harm(n, s=1):
res, _ = scipy.integrate.quad(lambda x: 1 / (float(x) ** s), 1, n+1)
return res
"""
setup_split="""
def _harmonic4(a, b):
if b-a == 1:
return float(1) / a
m = (a+b)//2
return _harmonic4(a,m) + _harmonic4(m,b)
def harmonic4(n):
return _harmonic4(1,n+1)
"""
setup_full="""
def harmonic_full(n, s=1):
res = 0.0
for i in xrange(1, n+1):
res += 1 / (float(i) ** s)
return res
"""
setup_int="""
import scipy.integrate
def harmonic_int(n, s=1):
return scipy.integrate.quad(lambda x: (1 - x**n) / (float(1) - x), 0, 1)
"""
setup_lib="""
from sympy.functions.combinatorial.numbers import harmonic
"""
if __name__ == "__main__":
t_full = timeit.timeit('harmonic_full(10000000, 0.9)', setup=setup_full, number=1)
print("full: {}".format(t_full))
t_approx = timeit.timeit('approx_harm(10000000, 0.9)', setup=setup_approx, number=1)
print("approx: {}".format(t_approx))
#t_int = timeit.timeit('harmonic_int(1000000, 0.9)', setup=setup_int, number=10)
#print("int: {}".format(t_int))
#t_split = timeit.timeit('harmonic4(1000000)', setup=setup_split, number=10)
#print("split: {}".format(t_split))
max_replication = 1000
print(harmonic_full(1000000, 0.9 + math.log(max_replication, 1000000)))
print(approx_harm(1000000, 0.9 + math.log(max_replication, 1000000)))
```
#### File: code/top_1_m/get_majestic_1m.py
```python
import zipfile
import cStringIO
from urllib import urlopen
MAJ_DATA_URL = 'http://downloads.majestic.com/majestic_million.csv'
def majestic_etl():
"""
    Generator that:
    - extracts the data by downloading the majestic_million CSV,
    - transforms each line into a (rank, domain) pair,
    - loads the pairs to the caller one at a time.
"""
f = urlopen(MAJ_DATA_URL)
buf = cStringIO.StringIO(f.read())
next(buf) # Ignore the header
for line in buf:
splitted = line.split(',')
rank = splitted[0]
domain = splitted[2]
yield (int(rank), domain.strip())
def top_list(num=100):
a = majestic_etl()
return [a.next() for x in xrange(num)]
if __name__ == "__main__":
for _, domain in majestic_etl():
print domain
```
#### File: src/clients/client.py
```python
import requests
from src.utils import *
import timeit
API_URL = "http://localhost:5000/"
CACHE_DIR = ".pns"
USER_DIR = os.path.expanduser('~')
DIFF_SUFFIX = ".diff"
def save_page(page_data, page_id):
cache_path = os.path.join(USER_DIR, CACHE_DIR)
page_path = os.path.join(cache_path, page_id)
if os.path.exists(cache_path):
f = open(page_path, 'w+')
f.write(page_data)
else:
os.mkdir(cache_path)
f = open(page_path, 'w+')
f.write(page_data)
return page_path
def save_diff(diff_content, page_id, version):
    # TODO: check that the base page this patch applies to is present locally
file_name = "{}_{}{}".format(page_id, version, DIFF_SUFFIX)
diff_path = os.path.join(USER_DIR, CACHE_DIR, file_name)
f = open(diff_path, 'w+')
f.write(diff_content)
return diff_path
def get_page_without_version(page_id, thread_id=""):
path = os.path.join(USER_DIR, CACHE_DIR)
page_path = os.path.join(path, page_id)
if os.path.exists(page_path):
version = get_page_current_version(path, page_id)
get_page_diff_with_version(page_id, version)
else:
url = "{}get_page".format(API_URL)
req = "page_id: {} ".format(page_id)
start_time_net = timeit.default_timer()
r = requests.get(url, params={'page_id': page_id})
save_page(r.text, page_id+thread_id)
ans = timeit.default_timer() - start_time_net
req += "net_lat: {} ".format(ans)
start_time_ver = timeit.default_timer()
verify_signature(page_id, thread_id)
ans = timeit.default_timer() - start_time_ver
req += "ver_time: {}".format(ans)
print(req)
def get_page_diff_with_version(page_id, version):
url = "{}get_page".format(API_URL)
r = requests.get(url, params={'page_id': page_id, 'version': version})
if r.text == '"Updated"':
return True
else:
diff_path = save_diff(r.text, page_id, version)
old_version_path = os.path.join(USER_DIR, CACHE_DIR, page_id)
diff_content, signature = extract_signature(diff_path)
result = diff_verify_signature(diff_content, signature)
if result == "Failed":
print("Unsuccessful")
os.remove(diff_path)
apply_patch(diff_path, old_version_path)
def apply_patch(diff_path, old_version_path):
command = "patch {} {}".format(old_version_path, diff_path)
os.system(command)
def extract_signature(diff_path):
f = open(diff_path, 'r')
start = f.readline()
start += f.readline()
diff_content = []
for c in f:
diff_content.append(c)
signature = diff_content[-1]
diff_content = diff_content[0:-1]
diff_content = ''.join(diff_content)
# print(diff_content)
start += diff_content
f.close()
# print(signature)
f = open(diff_path, 'w+')
f.write(start)
return diff_content, signature
def verify_signature(page_id, thread_id=""):
url = "{}get_signature".format(API_URL)
r = requests.get(url, params={'page_id': page_id})
signature = r.text
# print(signature)
page_path = os.path.join(USER_DIR, CACHE_DIR)
result = verify_signature_page(signature, page_path, page_id+thread_id)
if result == "Failed":
# Don't save the file
print("Unsuccessful")
os.remove(os.path.join(page_path, page_id))
def diff_verify_signature(diff_content, signature):
result = verify_signature_diff(signature, diff_content)
return result
# get_page_without_version("1")
```
#### File: PNS/src/combine_diff.py
```python
import tempfile
import os
from src.utils import *  # assumed source of compute_signature_diff used below
PAGE_STORAGE = "pages"
DIFF_SUFFIX = ".diff"
TMP_DIR = tempfile.gettempdir()
class Diff:
    def __init__(self):
pass
def combine_diffs(self, start_version, end_version, page_id, page_path):
curr_file_name = os.path.join(page_path, page_id)
# print("Current File is: ", curr_file_name)
copy_file_url = os.path.join(TMP_DIR, page_id + "_diff")
# print("Copy File URL is: ", copy_file_url)
command = "cp " + curr_file_name + " " + copy_file_url
os.system(command)
end_version_copy = end_version
while end_version > start_version:
diff_file_path = os.path.join(page_path, page_id + "_" + str(end_version) + DIFF_SUFFIX)
# print("Diff file path is: ", diff_file_path)
# diff syntax diff -u new old > .diff
# create copy of original version
command = "patch " + copy_file_url + " " + diff_file_path
os.system(command)
end_version -= 1
new_diff_file = os.path.join(TMP_DIR, "comb_diff_" + str(page_id) + "_" + str(start_version) + "_" + str(
end_version_copy) + DIFF_SUFFIX)
# print("New diff file is: ", new_diff_file)
command = "diff -u " + copy_file_url + " " + curr_file_name + " > " + new_diff_file
os.system(command)
self.append_signature_diff(new_diff_file)
return TMP_DIR
def generate_diffs(self, new_version, page_id, page_path):
curr_file_name = os.path.join(page_path, page_id)
update_file_name = os.path.join(page_path, page_id + "_tmp")
new_diff_file = os.path.join(page_path, page_id + "_" + str(new_version) + DIFF_SUFFIX)
# Generate diffs
command = "diff -u " + update_file_name + " " + curr_file_name + " > " + new_diff_file
os.system(command)
# Replace old copy with new copy
command = "cp " + update_file_name + " " + curr_file_name
os.system(command)
# Remove _diff file
command = "rm " + update_file_name
os.system(command)
# append_signature_diff(new_diff_file)
def append_signature_diff(self, diff_path ):
f = open(diff_path , 'r+')
# Skip first two lines
f.readline()
f.readline()
diff_content = ""
for c in f:
diff_content += c
signature = compute_signature_diff(diff_content)
f.write(signature.decode("utf-8"))
```
#### File: PNS/src/generate_page.py
```python
import get_name_server
import hashlib
import os
import time
INPUT_FILE_NAME = "top_1m.txt"
PAGE_INDEX = 1
PAGE_STORAGE = "pages"
def generate_page( ):
f = open(INPUT_FILE_NAME, 'r')
f_iter = iter(f)
for line in f_iter:
temp = line.split()
temp = temp[2][1:-1]
fqdn_hash = hashlib.sha256(str(temp).encode('utf-8')).hexdigest()
ns_ipv4 = get_name_server.find_nameserver(temp)
fs = open(os.path.join(PAGE_STORAGE, fqdn_hash[0:PAGE_INDEX]), 'a')
print(fqdn_hash + " " + ns_ipv4 + " " + time.strftime("%c"))
fs.write(fqdn_hash + " " + ns_ipv4 + " " + time.strftime("%c") + "\n")
fs.close()
f.close()
def main( ):
generate_page()
if __name__ == "__main__":
main()
```
#### File: PNS/src/get_name_server.py
```python
import dns.resolver
def check_if_root_domain(url):
    ans = [False, []]
    try:
        # TODO: find a cleaner way to build this result (see dns.resolver docs)
        for rdata in dns.resolver.query(url, 'NS'):
            ans[0] = True
            ans[1].append(rdata)
    except (dns.resolver.NoAnswer, dns.resolver.NXDOMAIN,
            dns.resolver.NoNameservers, dns.resolver.NotAbsolute,
            dns.resolver.YXDOMAIN, dns.exception.Timeout,
            dns.exception.NoRootSOA, dns.exception.NotAbsolute):
        # on any lookup failure, return the default answer unchanged
        pass
    return ans
def get_A_record_for_ns(url):
    ans = []
    try:
        for rdata in dns.resolver.query(url, 'A'):
            ans.append(str(rdata))
            # print(str(rdata))
    except (dns.resolver.NoAnswer, dns.resolver.NXDOMAIN,
            dns.resolver.NoNameservers, dns.resolver.NotAbsolute,
            dns.resolver.YXDOMAIN, dns.exception.Timeout,
            dns.exception.NoRootSOA, dns.exception.NotAbsolute):
        # on any lookup failure, return whatever was collected so far
        pass
    return ans
def find_nameserver(url):
temp = url.split('.')
A_records = set()
for i in range(0, len(temp)):
check = check_if_root_domain(".".join(temp[i:]))
if check[0]:
for ns in check[1]:
A_records |= set(get_A_record_for_ns(url))
return " ".join(list(A_records)[0:4])
```
#### File: src/maintainer/maintainer.py
```python
from flask import Flask, send_from_directory, request, json
from src.update_page import *
from src.maintainer.config import huey
import os
app = Flask(__name__)
PAGE_STORAGE = "pages"
TEST_PAGE_STORAGE = "../../test/pages"
ABS_PATH = os.path.dirname(os.path.realpath('__file__'))
DIFF_SUFFIX = ".diff"
@huey.task()
@app.route('/get_page', methods=['GET'])
def get_page_api():
page_id = request.args.get('page_id')
version = request.args.get('version')
# TODO: Perform range check on page_id
# TODO: Perform range check on version number
if version is None:
# Return the current version of the page
path = os.path.join(ABS_PATH, "..", PAGE_STORAGE)
return send_from_directory(path, page_id)
else:
# Return the diff from the version passed
page_path = os.path.join(ABS_PATH, "..", PAGE_STORAGE)
curr_version = get_page_current_version(page_path, page_id)
if curr_version == version:
response = app.response_class(
response=json.dumps(str("Updated")),
status=200,
mimetype='application/json'
)
return response
else:
diff = Diff()
page_path = os.path.join(ABS_PATH, "..", PAGE_STORAGE)
diff_url = diff.combine_diffs(int(version), int(curr_version), page_id, page_path)
return send_from_directory(diff_url, "comb_diff_" + page_id + "_" + version + "_" + str(curr_version)
+ DIFF_SUFFIX)
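# Example requests (page_id/version values here are hypothetical, inferred from
# the handler above):
#   GET /get_page?page_id=a            -> the full current page "a"
#   GET /get_page?page_id=a&version=2  -> "Updated" if 2 is the current version,
#                                         otherwise comb_diff_a_2_<curr>.diff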
@app.route('/update_page', methods=['GET'])
def update_page_api():
# GET Parameters
page_id = request.args.get('page_id')
entry = request.args.get('SHA256')
a_record = request.args.get('ARecord')
# Sanitize parameters according to function params
a_record = a_record.split(",")
a_record = ' '.join(a_record)
# Find page path which needs to passed via function
page_path = os.path.join(ABS_PATH, "..", PAGE_STORAGE)
update_queue.put((page_id, entry, a_record, page_path))
response = app.response_class(
response=json.dumps(str("Will get Updated")),
status=200,
mimetype='application/json'
)
return response
@app.route('/get_signature', methods=['GET'])
def compute_signature_api():
# GET Parameters
page_id = request.args.get('page_id')
page_path = os.path.join(ABS_PATH, "..", PAGE_STORAGE)
return send_from_directory(page_path, page_id+".sig")
```
#### File: PNS/test/util_test.py
```python
from src.utils import *
import unittest
PAGE_STORAGE = "pages"
class UtilsTest(unittest.TestCase):
def test_current_version_1( self ):
version = get_page_current_version("0")
self.assertEqual(version, "1", "Current Version for page 0 is 1")
def test_current_version_2( self ):
version = get_page_current_version("a")
self.assertEqual(version, "1", "Current Version for page 0 is 1")
def test_current_version_invalid_page_1( self ):
version = get_page_current_version("10")
self.assertEqual(version, "-1", "Page does exists!")
def test_current_version_invalid_page_2( self ):
version = get_page_current_version("ab")
self.assertEqual(version, "-1", "Page does exists!")
def test_update_version( self ):
result = update_version("0")
version = get_page_current_version("0")
self.assertEqual(result, "Success", "Successfully updated Version")
self.assertEqual(version, "2", "Updated version is 2")
def test_update_version_invalid_page( self ):
result = update_version("11")
self.assertEqual(result, "Error", "Page does not exist")
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jinanloubani/aTEAM",
"score": 2
} |
#### File: nn/modules/FD.py
```python
import numpy as np
from numpy import *
from numpy.linalg import *
from functools import reduce
from scipy.special import factorial
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import MK
from ..functional import periodicpad
__all__ = ['FDMK','FD1d','FD2d','FD3d','FDProj']
def _inv_equal_order_m(d,m):
A = []
assert d >= 1 and m >= 0
if d == 1:
A = [[m,],]
return A
if m == 0:
for i in range(d):
A.append(0)
return [A,]
for k in range(m+1):
B = _inv_equal_order_m(d-1,m-k)
for b in B:
b.append(k)
A = A+B
return A
def _less_order_m(d,m):
A = []
for k in range(m+1):
B = _inv_equal_order_m(d,k)
for b in B:
b.reverse()
B.sort()
B.reverse()
A.append(B)
return A
class FDMK(nn.Module):
"""
Moment matrix and kernel for finite difference.
Arguments:
dim (int): dimension
kernel_size (tuple of int): size of differential kernels
order (tuple of int): order of differential kernels
dx (double): the MomentBank.kernel will automatically compute kernels
according to MomentBank.moment and MomentBank.dx
constraint (string): 'moment' or 'free', See FDMK.x_proj
and FDMK.grad_proj.
"""
def __init__(self, dim, kernel_size, order, dx=1.0, constraint='moment'):
super(FDMK, self).__init__()
self._dim = dim
if isinstance(kernel_size, int):
kernel_size = [kernel_size,]*self.dim
assert min(kernel_size) > max(order)
self.m2k = MK.M2K(kernel_size)
self.k2m = MK.K2M(kernel_size)
self._kernel_size = tuple(kernel_size)
self._order = order
self.constraint = constraint
scale = torch.DoubleTensor(1)[0]
self.register_buffer('scale',scale)
if not iterable(dx):
dx = [dx,]*dim
self.dx = dx.copy()
self._order_bank = _less_order_m(dim, max(kernel_size))
moment = torch.DoubleTensor(*kernel_size).zero_()
moment[tuple(self._order)] = 1
self.moment = nn.Parameter(moment)
@property
def dim(self):
return self._dim
@property
def dx(self):
return self._dx.copy()
@dx.setter
def dx(self,v):
"""
v (ndarray): dx for each axis
"""
if not iterable(v):
v = [v,]*self.dim
self._dx = v
l = lambda a,b:a*b
s = reduce(l, (self.dx[j]**oj for j,oj in enumerate(self._order)), 1)
self.scale.fill_(1/s)
return v
@property
def kernel(self):
kernel = self.m2k(self.moment)
return kernel
@kernel.setter
def kernel(self, v):
if isinstance(v, (list,tuple)):
v = np.array(v)
if isinstance(v, np.ndarray):
v = torch.from_numpy(v)
if isinstance(v, torch.Tensor):
v = v.to(self.moment)
moment = self.k2m(v)
self.moment.data.copy_(moment)
return self.moment
def _proj_(self,M,s,c):
for j in range(s):
for o in self._order_bank[j]:
M[tuple(o)] = c
def x_proj(self,*args,**kw):
if self.constraint == 'free':
return None
if isinstance(self.constraint,int):
acc = self.constraint
else:
acc = 1
self._proj_(self.moment.data,sum(self._order)+acc,0)
self.moment.data[tuple(self._order)] = 1
return None
def grad_proj(self,*args,**kw):
if self.constraint == 'free':
return None
if isinstance(self.constraint,int):
acc = self.constraint
else:
acc = 1
self._proj_(self.moment.grad.data,sum(self._order)+acc,0)
return None
def forward(self):
raise NotImplementedError
class _FDNd(FDMK):
"""
Finite difference automatically handle boundary conditions
Arguments for class:`_FDNd`:
dim (int): dimension
kernel_size (tuple of int): finite difference kernel size
boundary (string): 'Dirichlet' or 'Periodic'
Arguments for class:`FDMK`:
order, dx, constraint
"""
def __init__(self, dim, kernel_size, order,
dx=1.0, constraint='moment', boundary='Dirichlet'):
super(_FDNd, self).__init__(dim, kernel_size, order, dx, constraint)
padwidth = []
for k in reversed(self._kernel_size):
padwidth.append((k-1)//2)
padwidth.append(k-1-(k-1)//2)
self._padwidth = padwidth
self.boundary = boundary.upper()
@property
def padwidth(self):
return self._padwidth.copy()
@property
def boundary(self):
return self._boundary
@boundary.setter
def boundary(self,v):
self._boundary = v.upper()
def pad(self, inputs):
if self.boundary == 'DIRICHLET':
return F.pad(inputs, self.padwidth)
else:
return periodicpad(inputs, self.padwidth)
def conv(self, inputs, weight):
raise NotImplementedError
def forward(self, inputs, kernel=None, scale=None):
"""
Arguments:
inputs (Tensor): torch.size:
(batch_size, spatial_size[0], spatial_size[1], ...)
kernel (Tensor): torch.size:
(kernel_size[0], kernel_size[1], ...)
scale (scalar): depends on self.dx
Returns:
approximation of self.order partial derivative of inputs
"""
scale = (self.scale if scale is None else scale)
kernel = (self.kernel if kernel is None else kernel)
kernel = kernel*scale
assert inputs.dim() == kernel.dim()+1
inputs = self.pad(inputs)
inputs = inputs[:,newaxis]
return self.conv(inputs, kernel[newaxis,newaxis])[:,0]
class FD1d(_FDNd):
def __init__(self, kernel_size, order,
dx=1.0, constraint='moment', boundary='Dirichlet'):
if isinstance(order, int):
order = (order,)
super(FD1d, self).__init__(1, kernel_size, order,
dx=dx, constraint=constraint, boundary=boundary)
self.conv = F.conv1d
class FD2d(_FDNd):
def __init__(self, kernel_size, order,
dx=1.0, constraint='moment', boundary='Dirichlet'):
super(FD2d, self).__init__(2, kernel_size, order,
dx=dx, constraint=constraint, boundary=boundary)
self.conv = F.conv2d
class FD3d(_FDNd):
def __init__(self, kernel_size, order,
dx=1.0, constraint='moment', boundary='Dirichlet'):
super(FD3d, self).__init__(3, kernel_size, order,
dx=dx, constraint=constraint, boundary=boundary)
self.conv = F.conv3d
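# A minimal usage sketch (values are illustrative; shapes follow the docstring
# of _FDNd.forward above): a 5x5 stencil for the first derivative along the
# second spatial axis, applied to a batch of 2D fields.
# fd = FD2d(kernel_size=5, order=(0, 1), dx=0.1)
# u = torch.randn(4, 32, 32, dtype=torch.float64)   # (batch, H, W)
# du = fd(u)                                         # same spatial shape as u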
class FDProj(nn.Module):
"""
project convolution kernel to finite difference coefficient
"""
def __init__(self, kernel_size, order, acc=1):
super(FDProj, self).__init__()
assert sum(order)<min(kernel_size)
self.dim = len(kernel_size)
self.n = 1
for i in kernel_size:
self.n *= i
self.order = order
self.m = sum(order)
m = self.m+acc-1
self._order_bank = _less_order_m(self.dim, m)
s = [1,]*self.dim
base = []
for i in range(self.dim):
b = torch.arange(kernel_size[i], dtype=torch.float64)-(kernel_size[i]-1)//2
s[i] = -1
b = b.view(*s)
s[i] = 1
base.append(b)
subspaces = []
for j in range(m+1):
for o in self._order_bank[j]:
b = torch.ones(*kernel_size, dtype=torch.float64)
for i in range(self.dim):
if o[i]>0:
b *= base[i]**o[i]
b = b.view(-1)
if tuple(o) == tuple(order):
subspaces.insert(0,b)
continue
subspaces.append(b)
subspaces.reverse()
l = len(subspaces)
        # Schmidt orthogonalization
for i in range(l):
for j in range(i):
subspaces[i] -= torch.dot(subspaces[j], subspaces[i])*subspaces[j]
subspaces[i] = subspaces[i]/torch.sqrt(torch.dot(subspaces[i], subspaces[i]))
subspace = torch.stack(subspaces, dim=0)
self.register_buffer('subspace', subspace)
moment = torch.ones(*kernel_size, dtype=torch.float64)
for i in range(self.dim):
if order[i]>0:
moment *= base[i]**order[i]/factorial(order[i]).item()
moment = moment.view(-1)
self.register_buffer('_renorm', 1/torch.dot(moment,subspace[-1]))
def forward(self, kernel):
shape = kernel.shape
kernel = kernel.contiguous()
kernel = kernel.view(-1,self.n)
kernel = [email protected](0,1)@self.subspace
kernel = kernel+self._renorm*self.subspace[-1:]
kernel = kernel.view(shape)
return kernel
```
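The finite-difference classes above wrap a learnable convolution kernel whose moments are constrained so that it approximates a prescribed partial derivative. A minimal usage sketch of `FD2d` (assuming the definitions above are in scope; the tuple form of `kernel_size` and the default moment initialization follow the upstream aTEAM conventions and are assumptions here):
```python
# Sketch only: assumes FD2d from the definitions above is in scope.
import torch

# 7x7 kernel approximating the first derivative along the second spatial axis
# (order=(0, 1)), with moment constraints and Dirichlet boundary handling.
fd = FD2d(kernel_size=(7, 7), order=(0, 1), dx=0.05,
          constraint='moment', boundary='Dirichlet')
fd.double()

u = torch.randn(4, 50, 50, dtype=torch.float64)  # batch of 2D fields
du = fd(u)                                       # approximate derivative, same spatial shape
print(du.shape)                                  # torch.Size([4, 50, 50])
```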
#### File: aTEAM/pdetools/init.py
```python
import numpy as np
from numpy import *
import torch
__all__ = ['initgen']
def _initgen_periodic(mesh_size, freq=3):
dim = len(mesh_size)
x = random.randn(*mesh_size)
coe = fft.ifftn(x)
# set frequency of generated initial value
freqs = [freq,]*dim
for i in range(dim):
perm = arange(dim, dtype=int32)
perm[i] = 0
perm[0] = i
coe = coe.transpose(*perm)
coe[freqs[i]+1:-freqs[i]] = 0
coe = coe.transpose(*perm)
x = fft.fftn(coe)
assert linalg.norm(x.imag) < 1e-8
x = x.real
x = x/np.abs(x).max()
return x
def _initgen(mesh_size, freq=3, boundary='Periodic', dtype=None, device=None):
if iterable(freq):
return freq
x = _initgen_periodic(mesh_size, freq=freq)
if boundary.upper() == 'DIRICHLET':
dim = x.ndim
for i in range(dim):
y = arange(mesh_size[i])/mesh_size[i]
y = y*(1-y)
s = ones(dim, dtype=int32)
s[i] = mesh_size[i]
y = reshape(y, s)
x = x*y
x = x[[slice(1,None),]*dim]
x = x*16
return torch.from_numpy(x).to(dtype=dtype, device=device)
def initgen(mesh_size, freq=3, boundary='Periodic', dtype=None, device=None, batch_size=1):
xs = []
for k in range(batch_size):
xs.append(_initgen(mesh_size, freq=freq, boundary=boundary, dtype=dtype, device=device))
x = torch.stack(xs, dim=0)
if batch_size == 1:
return x[0]
else:
return x
```
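`initgen` draws a random field, keeps only its low-frequency Fourier modes, and (for Dirichlet problems) damps it to zero at the boundary, which makes it handy for generating smooth initial conditions for the PDE tools. A small usage sketch (assuming the functions above are in scope):
```python
# Sketch only: assumes initgen from the file above is in scope.
import torch

# A batch of 8 smooth random initial conditions on a 64x64 periodic grid,
# keeping Fourier modes up to frequency 4.
u0 = initgen(mesh_size=(64, 64), freq=4, boundary='Periodic',
             dtype=torch.float64, batch_size=8)
print(u0.shape)  # torch.Size([8, 64, 64])
```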
#### File: aTEAM/test/modules_test3d.py
```python
from numpy import *
import numpy as np
import torch
from torch.autograd import grad
import torch.nn as nn
from torch.nn import functional as F
from scipy.optimize.lbfgsb import fmin_l_bfgs_b as lbfgsb
from scipy.optimize.slsqp import fmin_slsqp as slsqp
import matplotlib.pyplot as plt
from aTEAM.optim import NumpyFunctionInterface,ParamGroupsManager
from aTEAM.nn.modules import LagrangeInterp,LagrangeInterpFixInputs
from aTEAM.utils import meshgen
#%%
def testfunc(inputs):
"""inputs (ndarray)"""
return sin(inputs[...,0]*8)+cos(sqrt(inputs[...,1]*4))*sin(inputs[...,2]*4)
def compare(I, inputs):
infe = I(inputs).data.cpu().numpy()
infe_true = testfunc(inputs.data.cpu().numpy())
return infe,infe_true
def forward(I, inputs):
outputs = I(inputs)
outputs_true = torch.from_numpy(testfunc(inputs.data.cpu().numpy()))
outputs_true = outputs.data.new(outputs_true.size()).copy_(outputs_true)
return ((outputs-outputs_true)**2).mean()
def forwardFixInputs(IFixInputs, outputs_true):
outputs = IFixInputs()
return ((outputs-outputs_true)**2).mean()
#%%
m = 3
d = 2
device = -1
mesh_bound = zeros((2,m))
# mesh_bound[0] = arange(m)-1
# mesh_bound[1] = arange(m)+1
mesh_bound[0] = 0
mesh_bound[1] = 1
mesh_size = array([40,]*m)
I = LagrangeInterp(m, d, mesh_bound, mesh_size)
I.double()
if device>=0:
I.cuda(device)
mesh_bound[1] += 1/200
dataset = meshgen(mesh_bound, [201,201,201])
dataset = torch.from_numpy(dataset).clone()
dataset = I.interp_coe.data.new(dataset.size()).copy_(dataset)
mesh_bound[1] -= 1/200
IFixInputs = LagrangeInterpFixInputs(dataset[:1,:1,:1],m,d,mesh_bound,mesh_size)
IFixInputs.double()
if device>=0:
IFixInputs.cuda(device)
#%%
inputs_shape = [50,50,50]
IN,JN,KN = int(200/inputs_shape[0]), int(200/inputs_shape[1]), int(200/inputs_shape[2])
indx = zeros((IN*JN*KN,3),dtype=int32)
idx = 0
for i in range(IN):
for j in range(JN):
for k in range(KN):
indx[idx] = array([i,j,k])*array(inputs_shape)
idx += 1
#%%
nfi = NumpyFunctionInterface([I.interp_coe,],forward=lambda :forward(I,dataset))
nfi.flat_param = random.randn(nfi.numel())
x0 = nfi.flat_param
for i in range(64):
inputs = dataset[
indx[i,0]:indx[i,0]+inputs_shape[0],
indx[i,1]:indx[i,1]+inputs_shape[1],
indx[i,2]:indx[i,2]+inputs_shape[2]
]
inputs = inputs.clone()
nfi.forward = lambda :forward(I,inputs)
x = nfi.flat_param
x,f,d = lbfgsb(nfi.f,x,nfi.fprime,m=1000,maxiter=20,factr=1,pgtol=1e-16,iprint=10)
#%%
outputs = IFixInputs()
outputs_true = torch.from_numpy(testfunc(IFixInputs.inputs.cpu().numpy()))
outputs_true = outputs_true.view(outputs.size())
outputs_true = outputs.data.new(outputs_true.size()).copy_(outputs_true)
nfi = NumpyFunctionInterface([IFixInputs.interp_coe,],forward=lambda :forwardFixInputs(IFixInputs,outputs_true))
nfi.flat_param = random.randn(nfi.numel())
for i in range(64):
inputs = dataset[
indx[i,0]:indx[i,0]+inputs_shape[0],
indx[i,1]:indx[i,1]+inputs_shape[1],
indx[i,2]:indx[i,2]+inputs_shape[2]
]
inputs = inputs.clone()
IFixInputs.inputs = inputs
outputs = IFixInputs()
outputs_true = torch.from_numpy(testfunc(IFixInputs.inputs.cpu().numpy()))
outputs_true = outputs_true.view(outputs.size())
outputs_true = outputs.data.new(outputs_true.size()).copy_(outputs_true)
nfi.forward = lambda :forwardFixInputs(IFixInputs,outputs_true)
x = nfi.flat_param
x,f,d = lbfgsb(nfi.f,nfi.flat_param,nfi.fprime,m=1000,maxiter=20,factr=1,pgtol=1e-14,iprint=10)
#%%
inputs = dataset[
random.randint(200/inputs_shape[0])+int(200/inputs_shape[0])*arange(0,inputs_shape[0],dtype=int32)[:,newaxis,newaxis],
random.randint(200/inputs_shape[1])+int(200/inputs_shape[1])*arange(0,inputs_shape[1],dtype=int32)[newaxis,:,newaxis],
random.randint(200/inputs_shape[2])+int(200/inputs_shape[2])*arange(0,inputs_shape[2],dtype=int32)[newaxis,newaxis,:]
]
inputs = inputs.clone()
nfi.forward = lambda :forward(I,inputs)
infe,infe_true = compare(I,inputs)
print(sqrt((infe-infe_true)**2).mean())
print(sqrt((infe-infe_true)**2).max())
h = plt.figure()
indx = random.randint(20)
a = h.add_subplot(4,2,1)
a.imshow(infe_true[indx])
a.set_title('true')
a = h.add_subplot(4,2,2)
a.imshow(infe[indx])
a.set_title('inferenced')
indx = random.randint(20)
a = h.add_subplot(4,2,3)
a.plot(infe_true[indx,indx])
a = h.add_subplot(4,2,4)
a.plot(infe[indx,indx])
indx = random.randint(20)
a = h.add_subplot(4,2,5)
a.plot(infe_true[indx,:,indx])
a = h.add_subplot(4,2,6)
a.plot(infe[indx,:,indx])
indx = random.randint(20)
a = h.add_subplot(4,2,7)
a.plot(infe_true[:,indx,indx])
a = h.add_subplot(4,2,8)
a.plot(infe[:,indx,indx])
#%%
inputs = dataset[
random.randint(200/inputs_shape[0])+int(200/inputs_shape[0])*arange(0,inputs_shape[0],dtype=int32)[:,newaxis,newaxis],
random.randint(200/inputs_shape[1])+int(200/inputs_shape[1])*arange(0,inputs_shape[1],dtype=int32)[newaxis,:,newaxis],
random.randint(200/inputs_shape[2])+int(200/inputs_shape[2])*arange(0,inputs_shape[2],dtype=int32)[newaxis,newaxis,:]
]
inputs = inputs.clone()
IFixInputs.inputs = inputs
outputs = IFixInputs()
outputs_true = torch.from_numpy(testfunc(IFixInputs.inputs.cpu().numpy()))
outputs_true = outputs_true.view(outputs.size())
infe = outputs.data.cpu().numpy()
infe_true = outputs_true.numpy()
print(sqrt((infe-infe_true)**2).mean())
print(sqrt((infe-infe_true)**2).max())
h = plt.figure()
indx = random.randint(20)
a = h.add_subplot(4,2,1)
a.imshow(infe_true[indx])
a.set_title('true')
a = h.add_subplot(4,2,2)
a.imshow(infe[indx])
a.set_title('inferenced')
indx = random.randint(20)
a = h.add_subplot(4,2,3)
a.plot(infe_true[indx,indx])
a = h.add_subplot(4,2,4)
a.plot(infe[indx,indx])
indx = random.randint(20)
a = h.add_subplot(4,2,5)
a.plot(infe_true[indx,:,indx])
a = h.add_subplot(4,2,6)
a.plot(infe[indx,:,indx])
indx = random.randint(20)
a = h.add_subplot(4,2,7)
a.plot(infe_true[:,indx,indx])
a = h.add_subplot(4,2,8)
a.plot(infe[:,indx,indx])
#%%
```
#### File: aTEAM/test/optim_quickstart.py
```python
from numpy import *
import numpy as np
import torch
import torch.nn as nn
from torch.nn import functional as F
from aTEAM.optim import NumpyFunctionInterface,ParamGroupsManager
"""
Example 1
Let us start with an example.
At first, we define a PyTorch tensor function "powell_bs"
"""
def powell_bs(x):
return (1e4*x[0]*x[1]-1)**2+((-x[0]).exp()+(-x[1]).exp()-1.0001)**2
"""
And then define the variable "nfix" to be optimized:
min_{nfix} powell_bs(nfix)
"""
nfix = torch.tensor([0,1], dtype=torch.float64, requires_grad=True)
"""
an interface "forward" for NumpyFunctionInterface is needed
"""
def forward():
return powell_bs(nfix)
"""
At last, construct your NumpyFunctionInterface of the PyTorch tensor function
"""
listofparameters = [nfix,]
nfi = NumpyFunctionInterface(listofparameters,forward=forward)
"""
Now it's ready to use interfaces given by "nfi": "nfi.flat_param,nfi.f,nfi.fprime".
What these interfaces do is something like
```
class NumpyFunctionInterface:
@property
def params(self):
# notice that nfi = NumpyFunctionInterface(listofparameters,forward)
for p in listofparameters:
yield p
@property
def flat_param(self):
views = []
for p in self.params:
views.append(p.view(-1))
return torch.cat(views,0).numpy()
    @flat_param.setter
def flat_param(self,x): # x is a numpy array
for p in self.params:
p[:] = x[pidx_start:pidx_end]
# For simplicity here we do not show details of
# type conversion and subscript matching between p and x.
def f(self,x):
self.flat_param = x
return forward()
def fprime(self,x):
loss = self.f(x)
loss.backward() # Here we utilize autograd feature of PyTorch
grad = np.zeros(x.size)
for p in self.params:
grad[pidx_start:pidx_end] = p.grad
return grad
```
Try these commands:
x = np.random.randn(nfi.numel())
assert(np.equal(nfi.f(x),powell_bs(x)))
x[0] = 1
nfi.flat_param = x
# nfi.flat_param[0] = 1 is not permitted since property is not a ndarray
assert(np.equal(nfi.f(nfi.flat_param),powell_bs(x)))
These interfaces enable us to use lbfgs,slsqp from scipy.optimize.
"""
from scipy.optimize.lbfgsb import fmin_l_bfgs_b as lbfgsb
from scipy.optimize.slsqp import fmin_slsqp as slsqp
x0 = array([0,1])
print(" ***************** powell_bs ***************** ")
x,f,d = lbfgsb(nfi.f,x0,nfi.fprime,m=100,factr=1,pgtol=1e-14,iprint=10)
out,fx,its,imode,smode = slsqp(nfi.f,x0,fprime=nfi.fprime,
acc=1e-16,iter=15000,iprint=1,full_output=True)
print('\noptimal solution\n',out)
"""
Furthermore, if we want to impose the constraint "nfix[0] = 1e-5" on the problem,
we can define the projection functions for "nfix" and its gradient: "x_proj","grad_proj",
and then add these hooks by calling "nfi.set_options".
"nfi.f" and "nfi.fprime" then become
```
class NumpyFunctionInterface:
def _all_x_proj(self):
...
def _all_grad_proj(self):
...
@property
def flat_param(self):
self._all_x_proj()
...
    @flat_param.setter
def flat_param(self,x):
...
self._all_x_proj()
def fprime(self,x):
...
self._all_grad_proj()
...
return grad
```
"""
def x_proj(params):
params[0].data[0] = 1e-5
def grad_proj(params):
params[0].grad.data[0] = 0
## one can also simply set since nfix is globally accessible
# def x_proj(*args,**kw):
# nfix.data[0] = 1e-5
# def grad_proj(*args,**kw):
# nfix.grad.data[0] = 0
# nfi.set_options(0,x_proj=x_proj,grad_proj=grad_proj)
paramidx = 0
nfi.set_options(paramidx,x_proj=x_proj,grad_proj=grad_proj)
"""
Now we can solve this constrained optimization problem in an unconstrained manner
"""
print("\n\n\n\n ***************** constraint powell_bs ***************** ")
x,f,d = lbfgsb(nfi.f,x0,nfi.fprime,m=100,factr=1,pgtol=1e-14,iprint=10)
out,fx,its,imode,smode = slsqp(nfi.f,x0,fprime=nfi.fprime,
acc=1e-16,iter=15000,iprint=1,full_output=True)
"""
The original output ('x' or 'out') of the optimizer may not satisfy the constraint.
Recall that nfi.flat_param will automatically apply the projection in its getter and setter,
```
class NumpyFunctionInterface:
@property
def flat_param(self):
self._all_x_proj()
views = []
for p in self.params:
views.append(p.view(-1))
return torch.cat(views,0).numpy()
    @flat_param.setter
def flat_param(self,x): # x is a numpy array
for p in self.params:
p[:] = x[pidx_start:pidx_end]
self._all_x_proj()
```
so we can obtain a solution that is guaranteed to satisfy the constraint by
out = nfi.flat_param
"""
out = nfi.flat_param
print('\noptimal solution\n',out)
"""
Example 2
To further understand "NumpyFunctionInterface", let us extend "powell_bs"
to a PyTorch custom module
(see https://pytorch.org/tutorials/beginner/examples_nn/two_layer_net_module.html?highlight=custom)
First, define a PyTorch module "penalty=Penalty(100,1e-5)"
"""
import torch.nn as nn
from torch.nn import functional as F
class Penalty(nn.Module):
def __init__(self,n,alpha=1e-5):
super(Penalty,self).__init__()
m = n//2
x1 = torch.arange(1,m+1).to(torch.float64)
x2 = torch.arange(m+1,n+1).to(torch.float64)
self.x1 = nn.Parameter(x1)
self.x2 = nn.Parameter(x2)
self.n = n
self.alpha = alpha
def forward(self):
x = torch.cat([self.x1.cpu(),self.x2.cpu()],0)
return self.alpha*((x-1)**2).sum()+((x**2).sum()-0.25)**2
penalty = Penalty(5,1e-5)
"""
Consider a constrained optimization problem
    min_{penalty.x1,penalty.x2, s.t. penalty.x2[0]=1e-5} penalty.forward()
Then, construct "NumpyFunctionInterface" for this problem (each of the following ways is OK)
# method 0 # penalty.x2 is globally accessible
def x_proj(*args,**kw):
penalty.x2.data[0] = 1e-5
def grad_proj(*args,**kw):
penalty.x2.grad.data[0] = 0
nfi = NumpyFunctionInterface(penalty.parameters(),forward=penalty.forward,
x_proj=x_proj,grad_proj=grad_proj)
# method 1
def x_proj(params_of_param_group):
params_of_param_group[0].data[0] = 1e-5
def grad_proj(params_of_param_group):
params_of_param_group[0].grad.data[0] = 0
nfi = NumpyFunctionInterface([
dict(params=[penalty.x1,]),
dict(params=[penalty.x2,],x_proj=x_proj,grad_proj=grad_proj)
], penalty.forward)
# method 2
def x_proj(params_of_param_group):
params_of_param_group[1].data[0] = 1e-5
def grad_proj(params_of_param_group):
params_of_param_group[1].grad.data[0] = 0
nfi = NumpyFunctionInterface([
dict(params=[penalty.x1,penalty.x2],x_proj=x_proj,grad_proj=grad_proj),
], penalty.forward)
# method 3
def x_proj(params_of_param_group):
params_of_param_group[1].data[0] = 1e-5
def grad_proj(params_of_param_group):
params_of_param_group[1].grad.data[0] = 0
nfi = NumpyFunctionInterface([penalty.x1,penalty.x2], penalty.forward)
nfi.set_options(0, x_proj=x_proj, grad_proj=grad_proj)
In "NumpyFunctionInterface", parameters are devided into different parameter groups,
any parameter groups is a dict of
"""
def x_proj(*args,**kw):
penalty.x2.data[0] = 1e-5
def grad_proj(*args,**kw):
penalty.x2.grad.data[0] = 0
nfi = NumpyFunctionInterface(penalty.parameters(),forward=penalty.forward,
x_proj=x_proj,grad_proj=grad_proj)
# x0 = torch.cat([penalty.x1.cpu(),penalty.x2.cpu()],0).data.clone().numpy()
x0 = np.random.randn(nfi.numel())
print("\n\n\n\n ***************** penalty *****************")
x,f,d = lbfgsb(nfi.f,x0,nfi.fprime,m=100,factr=1,pgtol=1e-14,iprint=10)
out,fx,its,imode,smode = slsqp(nfi.f,x0,fprime=nfi.fprime,acc=1e-16,iter=15000,iprint=1,full_output=True)
# the following two assignments enforce that 'out' satisfies the constraint
nfi.flat_param = out
out = nfi.flat_param
print('\noptimal solution\n',out)
```
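The "Try these commands" notes above are illustrative rather than runnable as-is (powell_bs expects a torch tensor). A runnable round-trip through the interface, reusing the `nfi` built for the Penalty example at the end of the script, could look like this sketch:
```python
# Sketch only: assumes the script above has been run, so `nfi` wraps the Penalty module.
import numpy as np

x = np.random.randn(nfi.numel())   # a candidate flat parameter vector
nfi.flat_param = x                 # scatter x into penalty.x1 / penalty.x2 and apply x_proj
print(nfi.f(x))                    # objective value at x (after projection)
print(nfi.fprime(x))               # gradient as a flat numpy array with x.size entries
```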
#### File: aTEAM/test/pgm_test.py
```python
import torch
import torch.nn as nn
from aTEAM.optim import ParamGroupsManager
class Penalty(nn.Module):
def __init__(self,n,alpha=1e-5):
super(Penalty,self).__init__()
m = n//2
x1 = torch.arange(1,m+1,dtype=torch.float)
x2 = torch.arange(m+1,n+1,dtype=torch.float)
self.x1 = nn.Parameter(x1); self.x2 = nn.Parameter(x2)
self.n = n; self.alpha = alpha
def forward(self):
x = torch.cat([self.x1,self.x2],0)
return self.alpha*((x-1)**2).sum()+((x**2).sum()-0.25)**2
penalty = Penalty(4,1e-5)
# Each of the following cases is OK, 'lr'='learning_rate'
# pgm = ParamGroupsManager(params=penalty.parameters(),
# defaults={'lr':0.1,'scale':10})
# pgm = ParamGroupsManager(params=penalty.named_parameters(),
# defaults={'lr':0.1,'scale':10})
pgm = ParamGroupsManager(params=[
{'params':[penalty.x1,]},{'params':{'x2':penalty.x2},'lr':0.2}
],defaults={'lr':0.1,'scale':10})
# show what ParamGroupsManager does:
print("pgm.param_groups")
print(pgm.param_groups)
print("\npgm.params")
print(list(pgm.params))
print("\npgm.named_params")
print(list(pgm.named_params))
print("\npgm.params_with_info 'scale' and 'lr' ")
print(list(pgm.params_with_info('scale','lr')))
#%%
``` |
{
"source": "jinay1991/motion_planning",
"score": 2
} |
#### File: third_party/base64/base64.bzl
```python
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
def base64():
if "base64" not in native.existing_rules():
http_archive(
name = "base64",
url = "https://github.com/ReneNyffenegger/cpp-base64/archive/refs/tags/V2.rc.08.tar.gz",
sha256 = "0a7ada789a99c2664437f1db8c38b60fe5815cf82b75bea0f0c08933c1317828",
build_file = "//third_party/base64:base64.BUILD",
strip_prefix = "cpp-base64-2.rc.08",
)
```
#### File: third_party/zlib/zlib.bzl
```python
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
def zlib():
if "zlib" not in native.existing_rules():
http_archive(
name = "zlib",
build_file = "//third_party/zlib:zlib.BUILD",
sha256 = "91844808532e5ce316b3c010929493c0244f3d37593afd6de04f71821d5136d9",
strip_prefix = "zlib-1.2.12",
url = "https://zlib.net/zlib-1.2.12.tar.gz",
)
``` |
{
"source": "jinay1991/spleeter",
"score": 3
} |
#### File: spleeter/scripts/unet.py
```python
import numpy as np
import tensorflow as tf
class UNet(tf.keras.Model):
def __init__(self, output_name='output', output_mask_logit=False):
super(UNet, self).__init__()
# First layer.
self.conv1 = tf.keras.layers.Conv2D(filters=16,
kernel_size=(5, 5),
strides=(2, 2),
padding="same",
kernel_initializer="he_uniform")
self.batch1 = tf.keras.layers.BatchNormalization(axis=-1)
self.act1 = tf.keras.layers.Activation(activation="relu")
# Second layer.
self.conv2 = tf.keras.layers.Conv2D(filters=32,
kernel_size=(5, 5),
strides=(2, 2),
padding="same",
kernel_initializer="he_uniform")
self.batch2 = tf.keras.layers.BatchNormalization(axis=-1)
self.act2 = tf.keras.layers.Activation(activation="relu")
# Third layer.
self.conv3 = tf.keras.layers.Conv2D(filters=64,
kernel_size=(5, 5),
strides=(2, 2),
padding="same",
kernel_initializer="he_uniform")
self.batch3 = tf.keras.layers.BatchNormalization(axis=-1)
self.act3 = tf.keras.layers.Activation(activation="relu")
# Fourth layer.
self.conv4 = tf.keras.layers.Conv2D(filters=128,
kernel_size=(5, 5),
strides=(2, 2),
padding="same",
kernel_initializer="he_uniform")
self.batch4 = tf.keras.layers.BatchNormalization(axis=-1)
self.act4 = tf.keras.layers.Activation(activation="relu")
# Fifth layer.
self.conv5 = tf.keras.layers.Conv2D(filters=256,
kernel_size=(5, 5),
strides=(2, 2),
padding="same",
kernel_initializer="he_uniform")
self.batch5 = tf.keras.layers.BatchNormalization(axis=-1)
self.act5 = tf.keras.layers.Activation(activation="relu")
# Sixth layer
self.conv6 = tf.keras.layers.Conv2D(filters=512,
kernel_size=(5, 5),
strides=(2, 2),
padding="same",
kernel_initializer="he_uniform")
self.batch6 = tf.keras.layers.BatchNormalization(axis=-1)
self.act6 = tf.keras.layers.Activation(activation="relu")
#
#
#
self.up1 = tf.keras.layers.Conv2DTranspose(filters=256,
kernel_size=(5, 5),
strides=(2, 2),
activation="relu",
padding='same',
kernel_initializer="he_uniform")
self.batch7 = tf.keras.layers.BatchNormalization(axis=-1)
self.drop1 = tf.keras.layers.Dropout(0.5)
#
self.up2 = tf.keras.layers.Conv2DTranspose(filters=128,
kernel_size=(5, 5),
strides=(2, 2),
activation="relu",
padding='same',
kernel_initializer="he_uniform")
self.batch8 = tf.keras.layers.BatchNormalization(axis=-1)
self.drop2 = tf.keras.layers.Dropout(0.5)
#
self.up3 = tf.keras.layers.Conv2DTranspose(filters=64,
kernel_size=(5, 5),
strides=(2, 2),
activation="relu",
padding='same',
kernel_initializer="he_uniform")
self.batch9 = tf.keras.layers.BatchNormalization(axis=-1)
self.drop3 = tf.keras.layers.Dropout(0.5)
#
self.up4 = tf.keras.layers.Conv2DTranspose(filters=32,
kernel_size=(5, 5),
strides=(2, 2),
activation="relu",
padding='same',
kernel_initializer="he_uniform")
self.batch10 = tf.keras.layers.BatchNormalization(axis=-1)
#
self.up5 = tf.keras.layers.Conv2DTranspose(filters=16,
kernel_size=(5, 5),
strides=(2, 2),
activation="relu",
padding='same',
kernel_initializer="he_uniform")
self.batch11 = tf.keras.layers.BatchNormalization(axis=-1)
#
self.up6 = tf.keras.layers.Conv2DTranspose(filters=1,
kernel_size=(5, 5),
strides=(2, 2),
activation="relu",
padding='same',
kernel_initializer="he_uniform")
self.batch12 = tf.keras.layers.BatchNormalization(axis=-1)
# Last layer to ensure initial shape reconstruction.
self.output_name = output_name
if not output_mask_logit:
self.output_mask_logit = False
self.up7 = tf.keras.layers.Conv2D(filters=2,
kernel_size=(4, 4),
dilation_rate=(2, 2),
activation='sigmoid',
padding='same',
kernel_initializer="he_uniform")
else:
self.output_mask_logit = True
self.logits = tf.keras.layers.Conv2D(filters=2,
kernel_size=(4, 4),
dilation_rate=(2, 2),
padding='same',
kernel_initializer="he_uniform")
def call(self, inputs, training=False):
conv1 = self.conv1(inputs)
batch1 = self.batch1(conv1)
act1 = self.act1(batch1)
# Second layer.
conv2 = self.conv2(act1)
batch2 = self.batch2(conv2)
act2 = self.act2(batch2)
# Third layer.
conv3 = self.conv3(act2)
batch3 = self.batch3(conv3)
act3 = self.act3(batch3)
# Fourth layer.
conv4 = self.conv4(act3)
batch4 = self.batch4(conv4)
act4 = self.act4(batch4)
# Fifth layer.
conv5 = self.conv5(act4)
batch5 = self.batch5(conv5)
act5 = self.act5(batch5)
# Sixth layer
conv6 = self.conv6(act5)
batch6 = self.batch6(conv6)
_ = self.act6(batch6)
#
#
#
up1 = self.up1(conv6)
batch7 = self.batch7(up1)
drop1 = self.drop1(batch7)
merge1 = tf.keras.layers.Concatenate(axis=-1)([conv5, drop1])
#
        up2 = self.up2(merge1)
batch8 = self.batch8(up2)
drop2 = self.drop2(batch8)
merge2 = tf.keras.layers.Concatenate(axis=-1)([conv4, drop2])
#
up3 = self.up3(merge2)
batch9 = self.batch9(up3)
drop3 = self.drop3(batch9)
merge3 = tf.keras.layers.Concatenate(axis=-1)([conv3, drop3])
#
up4 = self.up4(merge3)
batch10 = self.batch10(up4)
merge4 = tf.keras.layers.Concatenate(axis=-1)([conv2, batch10])
#
up5 = self.up5(merge4)
batch11 = self.batch11(up5)
merge5 = tf.keras.layers.Concatenate(axis=-1)([conv1, batch11])
#
up6 = self.up6(merge5)
batch12 = self.batch12(up6)
# Last layer to ensure initial shape reconstruction.
if not self.output_mask_logit:
up7 = self.up7(batch12)
            return tf.keras.layers.Multiply(name=self.output_name)([up7, inputs])
else:
return self.logits(batch12)
@tf.function
def apply_unet(
input_tensor,
output_name='output',
output_mask_logit=False):
""" Apply a convolutionnal U-net to model a single instrument (one U-net
is used for each instrument).
:param input_tensor:
:param output_name: (Optional) , default to 'output'
:param output_mask_logit: (Optional) , default to False.
"""
# First layer.
conv1 = tf.keras.layers.Conv2D(filters=16,
kernel_size=(5, 5),
strides=(2, 2),
padding="same",
kernel_initializer="he_uniform")(input_tensor)
batch1 = tf.keras.layers.BatchNormalization(axis=-1)(conv1)
act1 = tf.keras.layers.Activation(activation="relu")(batch1)
# Second layer.
conv2 = tf.keras.layers.Conv2D(filters=32,
kernel_size=(5, 5),
strides=(2, 2),
padding="same",
kernel_initializer="he_uniform")(act1)
batch2 = tf.keras.layers.BatchNormalization(axis=-1)(conv2)
act2 = tf.keras.layers.Activation(activation="relu")(batch2)
# Third layer.
conv3 = tf.keras.layers.Conv2D(filters=64,
kernel_size=(5, 5),
strides=(2, 2),
padding="same",
kernel_initializer="he_uniform")(act2)
batch3 = tf.keras.layers.BatchNormalization(axis=-1)(conv3)
act3 = tf.keras.layers.Activation(activation="relu")(batch3)
# Fourth layer.
conv4 = tf.keras.layers.Conv2D(filters=128,
kernel_size=(5, 5),
strides=(2, 2),
padding="same",
kernel_initializer="he_uniform")(act3)
batch4 = tf.keras.layers.BatchNormalization(axis=-1)(conv4)
act4 = tf.keras.layers.Activation(activation="relu")(batch4)
# Fifth layer.
conv5 = tf.keras.layers.Conv2D(filters=256,
kernel_size=(5, 5),
strides=(2, 2),
padding="same",
kernel_initializer="he_uniform")(act4)
batch5 = tf.keras.layers.BatchNormalization(axis=-1)(conv5)
act5 = tf.keras.layers.Activation(activation="relu")(batch5)
# Sixth layer
conv6 = tf.keras.layers.Conv2D(filters=512,
kernel_size=(5, 5),
strides=(2, 2),
padding="same",
kernel_initializer="he_uniform")(act5)
batch6 = tf.keras.layers.BatchNormalization(axis=-1)(conv6)
_ = tf.keras.layers.Activation(activation="relu")(batch6)
#
#
#
up1 = tf.keras.layers.Conv2DTranspose(filters=256,
kernel_size=(5, 5),
strides=(2, 2),
activation="relu",
padding='same',
kernel_initializer="he_uniform")(conv6)
batch7 = tf.keras.layers.BatchNormalization(axis=-1)(up1)
drop1 = tf.keras.layers.Dropout(0.5)(batch7)
merge1 = tf.keras.layers.Concatenate(axis=-1)([conv5, drop1])
#
up2 = tf.keras.layers.Conv2DTranspose(filters=128,
kernel_size=(5, 5),
strides=(2, 2),
activation="relu",
padding='same',
kernel_initializer="he_uniform")(merge1)
batch8 = tf.keras.layers.BatchNormalization(axis=-1)(up2)
drop2 = tf.keras.layers.Dropout(0.5)(batch8)
merge2 = tf.keras.layers.Concatenate(axis=-1)([conv4, drop2])
#
up3 = tf.keras.layers.Conv2DTranspose(filters=64,
kernel_size=(5, 5),
strides=(2, 2),
activation="relu",
padding='same',
kernel_initializer="he_uniform")(merge2)
batch9 = tf.keras.layers.BatchNormalization(axis=-1)(up3)
drop3 = tf.keras.layers.Dropout(0.5)(batch9)
merge3 = tf.keras.layers.Concatenate(axis=-1)([conv3, drop3])
#
up4 = tf.keras.layers.Conv2DTranspose(filters=32,
kernel_size=(5, 5),
strides=(2, 2),
activation="relu",
padding='same',
kernel_initializer="he_uniform")(merge3)
batch10 = tf.keras.layers.BatchNormalization(axis=-1)(up4)
merge4 = tf.keras.layers.Concatenate(axis=-1)([conv2, batch10])
#
up5 = tf.keras.layers.Conv2DTranspose(filters=16,
kernel_size=(5, 5),
strides=(2, 2),
activation="relu",
padding='same',
kernel_initializer="he_uniform")(merge4)
batch11 = tf.keras.layers.BatchNormalization(axis=-1)(up5)
merge5 = tf.keras.layers.Concatenate(axis=-1)([conv1, batch11])
#
up6 = tf.keras.layers.Conv2DTranspose(filters=1,
kernel_size=(5, 5),
strides=(2, 2),
activation="relu",
padding='same',
kernel_initializer="he_uniform")(merge5)
batch12 = tf.keras.layers.BatchNormalization(axis=-1)(up6)
# Last layer to ensure initial shape reconstruction.
if not output_mask_logit:
up7 = tf.keras.layers.Conv2D(filters=2,
kernel_size=(4, 4),
dilation_rate=(2, 2),
activation='sigmoid',
padding='same',
kernel_initializer="he_uniform")(batch12)
output = tf.keras.layers.Multiply(name=output_name)([up7, input_tensor])
return output
return tf.keras.layers.Conv2D(filters=2,
kernel_size=(4, 4),
dilation_rate=(2, 2),
padding='same',
kernel_initializer="he_uniform")(batch12)
if __name__ == "__main__":
input_tensor = np.zeros(shape=(1, 256, 256, 2), dtype=np.float32)
model = UNet()
model.trainable = False
model._set_inputs(input_tensor)
# model.compile(tf.keras.optimizers.Adam(learning_rate=1e-3), loss=tf.keras.losses.MeanSquaredError())
# model.build()
# model.summary()
tf.saved_model.save(model, export_dir="saved_model")
```
#### File: third_party/nlohmann/nlohmann.bzl
```python
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
def nlohmann():
if "nlohmann" not in native.existing_rules():
http_archive(
name = "nlohmann",
build_file = "//third_party/nlohmann:nlohmann.BUILD",
url = "https://github.com/nlohmann/json/releases/download/v3.7.3/include.zip",
sha256 = "87b5884741427220d3a33df1363ae0e8b898099fbc59f1c451113f6732891014",
)
``` |
{
"source": "Jinaz/Py_datascraping",
"score": 2
} |
#### File: key_bot/gta/GTA.py
```python
import win32_connectors.KeyboardConnectors as kc
import time
import imageFunctions.imagecomparison as ic
from enum import Enum
from win32_connectors.KeyBoardLogger import keyIsUp
class State(Enum):
VOTING = 1
MENU = 2
INGAME = 3
STARTED = 4
NULL = 5
def movement_WA():
kc.PressKey(kc.W)
kc.PressKey(kc.A)
time.sleep(1.2)
kc.ReleaseKey(kc.W)
kc.ReleaseKey(kc.A)
def movement_SD():
kc.PressKey(kc.S)
kc.PressKey(kc.D)
time.sleep(1)
kc.ReleaseKey(kc.S)
kc.ReleaseKey(kc.D)
menuCoords = [118, 155, 304, 405]
votemenuCoords = [108, 155, 308, 615]
PrepareInGame = [1033, 1064, 772, 1148]
MissionInGame = [1030, 1069, 797, 1121]
threshold = 0.94
def loadTemplates():
import cv2 as cv
enemyTemplate = cv.imread("DATA/enemyFiltered.png",cv.IMREAD_GRAYSCALE)
menuTemplate = cv.imread("DATA/menuFiltered.png",cv.IMREAD_GRAYSCALE)
postTemplate = cv.imread("DATA/postFiltered.png",cv.IMREAD_GRAYSCALE)
prepTemplate = cv.imread("DATA/prepFiltered.png",cv.IMREAD_GRAYSCALE)
#enemyTemplate = ic.prefilter(enemyTemplate, "thresh")
#menuTemplate = ic.prefilter(menuTemplate, "thresh")
#postTemplate = ic.prefilter(postTemplate, "thresh")
#prepTemplate = ic.prefilter(prepTemplate, "thresh")
return enemyTemplate, menuTemplate, postTemplate, prepTemplate
def get_data():
im = ic.screenshotToData()
menu = im[menuCoords[0]:menuCoords[1], menuCoords[2]:menuCoords[3], :]
vote = im[votemenuCoords[0]:votemenuCoords[1], votemenuCoords[2]:votemenuCoords[3], :]
prep = im[PrepareInGame[0]:PrepareInGame[1], PrepareInGame[2]:PrepareInGame[3], :]
miss = im[MissionInGame[0]:MissionInGame[1], MissionInGame[2]:MissionInGame[3], :]
menu = ic.prefilter(menu, "opening")
vote = ic.prefilter(vote, "opening")
prep = ic.prefilter(prep, "opening")
miss = ic.prefilter(miss, "opening")
return menu, vote, prep, miss
def check_in_menu(menu, menuTemplate):
x = ic.compare_CV_images(menuTemplate, menu)
if x[1] > threshold:
print("in menu")
#print(x)
return True
return False
def check_gameStatus(prep,enemy, prepTemplate, enemyTemplate):
z = ic.compare_CV_images(prep, prepTemplate)
if z[1] > threshold:
print("preparing")
#print(z)
return 1
w = ic.compare_CV_images(enemy, enemyTemplate)
if w[1] > threshold:
print("enemy")
#print(w)
return 2
return 0
def checkVoting(vote, voteTemplate):
y = ic.compare_CV_images(vote, voteTemplate)
if y[1] > threshold:
print("voting")
#print(y)
return True
return False
def doVote():
kc.PressKey(kc.S)
time.sleep(.1)
kc.ReleaseKey(kc.S)
time.sleep(.8)
kc.PressKey(kc.S)
time.sleep(.1)
kc.ReleaseKey(kc.S)
time.sleep(.8)
kc.PressKey(kc.ENTER)
time.sleep(.2)
kc.ReleaseKey(kc.ENTER)
time.sleep(.2)
def afkTime(vote, voteTemplate):
# check for voting
sleeptime = 2
voting = checkVoting(vote, voteTemplate)
if voting:
sleeptime = 0
time.sleep(sleeptime)
menu, vote, prep, miss = get_data()
if checkVoting(vote, voteTemplate):
doVote()
return State.VOTING, True
return State.INGAME, False
# check for voting
def startMission():
kc.PressKey(kc.W)
time.sleep(.1)
kc.ReleaseKey(kc.W)
time.sleep(.2)
kc.PressKey(kc.ENTER)
time.sleep(.1)
kc.ReleaseKey(kc.ENTER)
time.sleep(.2)
def alttab():
kc.PressKey(kc.ALT)
kc.PressKey(kc.TAB)
time.sleep(.2)
kc.ReleaseKey(kc.TAB)
kc.ReleaseKey(kc.ALT)
time.sleep(.2)
def ingameDone():
alttab()
voted = False
startMission()
currentState = State.INGAME
currentwave = 0
while keyIsUp(81):
enemyTemplate, menuTemplate, postTemplate, prepTemplate = loadTemplates()
menu, vote, prep, miss = get_data()
# check menu
if currentState == State.INGAME:
menu, vote, prep, miss = get_data()
state = check_gameStatus(prep, miss, prepTemplate, enemyTemplate)
if state == 1:
movement_WA()
menu, vote, prep, miss = get_data()
if checkVoting(vote, voteTemplate=postTemplate):
doVote()
voted = True
if state == 2:
movement_SD()
menu, vote, prep, miss = get_data()
if checkVoting(vote, voteTemplate=postTemplate):
doVote()
voted = True
if state == 0:
menu, vote, prep, miss = get_data()
if checkVoting(vote, voteTemplate=postTemplate):
doVote()
voted = True
if not voted:
menu, vote, prep, miss = get_data()
currentState, voted = afkTime(vote, postTemplate)
if voted:
menu, vote, prep, miss = get_data()
if check_in_menu(menu, menuTemplate):
voted = False
time.sleep(12)
startMission()
currentState = State.INGAME
# up enter
# status ingame
# if ingame and null next is ingame or vote
time.sleep(1)
def tempcode():
alttab()
enemyTemplate, menuTemplate, postTemplate, prepTemplate = loadTemplates()
menu, vote, prep, miss = get_data()
if check_in_menu(menu, menuTemplate):
startMission()
if checkVoting(vote, voteTemplate=postTemplate):
doVote()
def debugcode():
alttab()
while keyIsUp(81):
enemyTemplate, menuTemplate, postTemplate, prepTemplate = loadTemplates()
menu, vote, prep, miss = get_data()
if check_in_menu(menu, menuTemplate):
time.sleep(15)
startMission()
if checkVoting(vote, postTemplate):
doVote()
time.sleep(2)
if __name__ == "__main__":
ingameDone()
# prefilter the current screen
# compare to the templates
# if in menu do restart session
# else wa sd movement
```
#### File: key_bot/imageFunctions/AmbrosioTortorelliMinimizer.py
```python
import cv2, scipy
import numpy as np
import sys
import scipy
from scipy.sparse.linalg import LinearOperator
class AmbrosioTortorelliMinimizer():
def __init__(self, img, iterations=1, solver_maxiterations=10, tol=0.1, alpha=1000, beta=0.01, epsilon=0.01):
self.iterations = iterations
self.tol = tol
self.g = np.float64(img) / np.max(img)
self.f = self.g
self.edges = np.zeros(img.shape)
self.update_gradients()
self.alpha, self.beta, self.epsilon = alpha, beta, epsilon
self.add_const = self.beta / (4 * self.epsilon)
self.multiply_const = self.epsilon * self.beta
self.maxiter = solver_maxiterations
def update_gradients(self):
self.grad_x, self.grad_y = self.gradients(self.f)
self.gradient_mag = np.power(self.grad_x, 2) + np.power(self.grad_y, 2)
def edge_linear_operator(self, input):
v = input.reshape(*self.g.shape)
result = np.multiply(v, self.gradient_mag * self.alpha + self.add_const) \
- self.multiply_const * cv2.Laplacian(v, cv2.CV_64F)
return result.reshape(*input.shape)
def image_linear_operator(self, input):
f = input.reshape(*self.g.shape)
x, y = self.gradients(f)
result = f - 2 * self.alpha * (
self.calc_grad_x(np.multiply(self.edges, x)) + self.calc_grad_y(np.multiply(self.edges, y)))
return result.reshape(*input.shape)
def solve_edges(self):
size = self.g.shape[0] * self.g.shape[1]
A = LinearOperator((size, size), matvec=self.edge_linear_operator, dtype=np.float64)
b = np.ones(size) * self.beta / (4 * self.epsilon)
self.edges, _ = scipy.sparse.linalg.cg(A, b, tol=self.tol, maxiter=self.maxiter)
self.edges = np.power(self.edges.reshape(*self.g.shape), 2)
return self.edges
def solve_image(self):
size = self.g.shape[0] * self.g.shape[1]
A = LinearOperator((size, size), matvec=self.image_linear_operator, dtype=np.float64)
b = self.g.reshape(size)
self.f, _ = scipy.sparse.linalg.cg(A, b, tol=self.tol, maxiter=self.maxiter)
self.f = self.f.reshape(*self.g.shape)
self.update_gradients()
return self.f
def minimize(self):
for i in range(0, self.iterations):
self.solve_edges()
self.solve_image()
self.edges = np.power(self.edges, 0.5)
cv2.normalize(self.f, self.f, 0, 255, cv2.NORM_MINMAX)
cv2.normalize(self.edges, self.edges, 0, 255, cv2.NORM_MINMAX)
self.f = np.uint8(self.f)
self.edges = 255 - np.uint8(self.edges)
return self.f, self.edges
def calc_grad_x(self, img):
return cv2.filter2D(img, cv2.CV_64F, np.array([[-1, 0, 1]]))
def calc_grad_y(self, img):
return cv2.filter2D(img, cv2.CV_64F, np.array([[-1, 0, 1]]).T)
def gradients(self, img):
return self.calc_grad_x(img), self.calc_grad_y(img)
def show_image(image, name):
img = image * 1
cv2.normalize(img, img, 0, 255, cv2.NORM_MINMAX)
img = np.uint8(img)
cv2.imshow(name, img)
if __name__ == "__main__":
img = cv2.imread(sys.argv[1], 1)
result, edges = [], []
for channel in cv2.split(img):
solver = AmbrosioTortorelliMinimizer(channel, iterations=1, tol=0.1, solver_maxiterations=6)
f, v = solver.minimize()
result.append(f)
edges.append(v)
f = cv2.merge(result)
v = np.maximum(*edges)
show_image(v, "edges")
show_image(f, "image")
show_image(img, "original")
cv2.waitKey(-1)
```
#### File: key_bot/mhw/MHW.py
```python
from pynput.keyboard import Key, Controller
import time
from win32_connectors.KeyboardConnectors import PressKey, ReleaseKey, A,S,D,N,E
sleeptimer = 0.1
waittimer = 0.1
def goRight(times):
for i in range(times):
PressKey(D)
time.sleep(sleeptimer)
ReleaseKey(D)
time.sleep(waittimer)
def goLeft(times):
for i in range(times):
PressKey(A)
time.sleep(sleeptimer)
ReleaseKey(A)
time.sleep(waittimer)
def pressE():
PressKey(E)
time.sleep(sleeptimer)
ReleaseKey(E)
time.sleep(waittimer)
def goDown():
PressKey(S)
time.sleep(sleeptimer)
ReleaseKey(S)
time.sleep(waittimer)
def pressN():
PressKey(N)
time.sleep(sleeptimer)
ReleaseKey(N)
time.sleep(waittimer)
def outerfunction():
for x in range(10):
pressN()
goRight(x)
pressN()
goLeft(x)
def initfunction():
for out in range(1):
for c in range(5):
outerfunction()
goDown()
pressN()
pressE()
keyboard = Controller()
time.sleep(1)
keyboard.press(Key.alt)
time.sleep(0.1)
keyboard.press(Key.tab)
time.sleep(0.1)
keyboard.release(Key.tab)
time.sleep(0.1)
keyboard.release(Key.alt)
time.sleep(2)
initfunction()
time.sleep(3)
print("We are done")
```
#### File: key_bot/old_code_fragments/CVFunctions.py
```python
import json
import pyautogui
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
from skimage import data, img_as_float
from skimage.metrics import structural_similarity as ssim
# for 1920,1080 resolution:
# first
# 479,927
# 672,927
# 479,1071
# 672,1071
# second
# 680,927
# 873,927
# 680,1071
# 873,1071
# third
# 881,927
# 1074,927
# 881,1071
# 1074,1071
# fourth
# 1083,927
# 1276,927
# 1083,1071
# 1276,1071
# fifth
# 1284,927
# 1477,927
# 1284,1071
# 1477,1071
def showimg(img):
imageasarray = np.array(img)
first = imageasarray[927:1071, 479:672, :]
firstasimage = cv.cvtColor(first, cv.COLOR_RGB2BGR)
second = imageasarray[927:1071, 680:873, :]
secondasimage = cv.cvtColor(second, cv.COLOR_RGB2BGR)
third = imageasarray[927:1071, 881:1074, :]
thirdasimage = cv.cvtColor(third, cv.COLOR_RGB2BGR)
fourth = imageasarray[927:1071, 1083:1276, :]
fourthasimage = cv.cvtColor(fourth, cv.COLOR_RGB2BGR)
fifth = imageasarray[927:1071, 1284:1477, :]
fifthasimage = cv.cvtColor(fifth, cv.COLOR_RGB2BGR)
DATA = '../TFT/DATA2/'
# cv.imshow('sliced image', firstasimage)
cv.imwrite(DATA + 'firstchamp.png', firstasimage)
# cv.waitKey()
# cv.imshow('sliced image', secondasimage)
cv.imwrite(DATA + 'secondchamp.png', secondasimage)
# cv.waitKey()
# cv.imshow('sliced image', thirdasimage)
cv.imwrite(DATA + 'thirdchamp.png', thirdasimage)
# cv.waitKey()
# cv.imshow('sliced image', fourthasimage)
cv.imwrite(DATA + 'fourthchamp.png', fourthasimage)
# cv.waitKey()
# cv.imshow('sliced image', fifthasimage)
cv.imwrite(DATA + 'fifthchamp.png', fifthasimage)
# cv.waitKey()
#features1 = get_feature_points(firstasimage)
return [first, second, third, fourth, fifth]
def imagetoData():
# take a screenshot of the screen and store it in memory, then
# convert the PIL/Pillow image to an OpenCV compatible NumPy array
# and finally write the image to disk
image = pyautogui.screenshot()
print(np.array(image).shape)
return showimg(image)
# image = cv.cvtColor(np.array(image), cv.COLOR_RGB2BGR)
# print(image)
# return image
def mse(imageA, imageB):
# the 'Mean Squared Error' between the two images is the
# sum of the squared difference between the two images;
# NOTE: the two images must have the same dimension
err = np.sum((imageA.astype("float") - imageB.astype("float")) ** 2)
err /= float(imageA.shape[0] * imageA.shape[1])
# return the MSE, the lower the error, the more "similar"
# the two images are
return err
def compare_images(imageA, imageB, title):
# compute the mean squared error and structural similarity
# index for the images
m = mse(imageA, imageB)
s = ssim(imageA, imageB,multichannel=True)
# setup the figure
fig = plt.figure(title)
plt.suptitle("MSE: %.2f, SSIM: %.2f" % (m, s))
# show first image
ax = fig.add_subplot(1, 2, 1)
plt.imshow(imageA, cmap=plt.cm.gray)
plt.axis("off")
# show the second image
ax = fig.add_subplot(1, 2, 2)
plt.imshow(imageB, cmap=plt.cm.gray)
plt.axis("off")
# show the images
plt.show()
if __name__ == "__main__":
images = imagetoData()
image2 = cv.imread("../TFT/DATA/all/shen.png")
for img in images:
compare_images(cv.cvtColor(img, cv.COLOR_RGB2BGR), image2, "")
```
#### File: Py_datascraping/key_bot/tesetmath.py
```python
import win32_connectors.MouseConnectors as mc
from win32gui import GetWindowText, GetForegroundWindow
import time
from pynput.keyboard import Key, Controller, Listener
keyboard = Controller()
m = mc.Mouse()
hppoints = 6
stampoints = 0
meleepoints = 60
def on_press(key):
#print(key , "pressed")
pass
def on_re_lease(key):
if key == Key.ctrl_r:
for i in range(20):
keyboard.press(Key.tab)
time.sleep(0.1)
keyboard.release(Key.tab)
time.sleep(0.1)
keyboard.press(Key.up)
time.sleep(0.1)
keyboard.release(Key.up)
time.sleep(0.1)
keyboard.press(Key.up)
time.sleep(0.1)
keyboard.release(Key.up)
time.sleep(0.1)
keyboard.press(Key.enter)
time.sleep(0.1)
keyboard.release(Key.enter)
time.sleep(0.5)
keyboard.press(Key.tab)
time.sleep(0.1)
keyboard.release(Key.tab)
time.sleep(0.1)
keyboard.press(Key.up)
time.sleep(0.1)
keyboard.release(Key.up)
time.sleep(0.1)
keyboard.press(Key.up)
time.sleep(0.1)
keyboard.release(Key.up)
time.sleep(0.1)
keyboard.press(Key.enter)
time.sleep(0.1)
keyboard.release(Key.enter)
time.sleep(0.5)
m.press_button((-1, -1), "left")
time.sleep(0.1)
m.press_button((-1, -1), "left", True)
time.sleep(0.1)
def on_release(key):
if key == Key.alt_l:
#keyboard.press(Key.alt)
#time.sleep(0.1)
#keyboard.press(Key.tab)
#time.sleep(0.1)
#keyboard.release(Key.tab)
#time.sleep(0.1)
#keyboard.release(Key.alt)
#time.sleep(0.4)
# health
for i in range(hppoints):
m.press_button((1120, 500), "left")
time.sleep(0.1)
m.press_button((1120, 500), "left", True)
time.sleep(0.01)
# stamina
for i in range(stampoints):
m.press_button((1120, 536), "left")
time.sleep(0.1)
m.press_button((1120, 536), "left", True)
time.sleep(0.01)
# melee
for i in range(meleepoints):
m.press_button((1120, 679), "left")
time.sleep(0.1)
m.press_button((1120, 679), "left", True)
time.sleep(0.01)
keyboard.press(Key.esc)
time.sleep(0.1)
keyboard.release(Key.esc)
current_window = (GetWindowText(GetForegroundWindow()))
desired_window_name = "ARK: Survival Evolved" #Whatever the name of your window should be
#print(GetWindowText(GetForegroundWindow()))
try:
while True:
#print(GetWindowText(GetForegroundWindow()))
if current_window == desired_window_name:
with Listener(
on_press=on_press,
on_release=on_release) as listener:
listener.join()
current_window = (GetWindowText(GetForegroundWindow()))
except KeyboardInterrupt:
pass
```
#### File: key_bot/tft/screenshotting.py
```python
import pyautogui
import cv2 as cv
import numpy as np
def screenshotToData(filename):
    # convert the PIL RGB screenshot to OpenCV's BGR convention before saving
    image = pyautogui.screenshot()
    cv.imwrite(filename, cv.cvtColor(np.array(image), cv.COLOR_RGB2BGR))
screenshotToData('BG5.png')
```
#### File: key_bot/win32_connectors/KeyBoardLogger.py
```python
from win32api import GetKeyState
# Our Definitions
def keyIsUp(key):
keystate = GetKeyState(key)
if (keystate == 0) or (keystate == 1):
return True
else:
return False
def keyIsDown(key):
keystate = GetKeyState(key)
if (keystate != 0) and (keystate != 1):
return True
else:
return False
```
#### File: key_bot/win32_connectors/screenshotter.py
```python
import numpy as np
import pyautogui
import time
import cv2 as cv
from time import sleep
from win32api import GetCursorPos
from win32_connectors.VirtualKeycodeLookups import description2keycode as d2k
from win32_connectors.KeyBoardLogger import keyIsUp, keyIsDown
def demo2():
"""Demonstration 2: If the user presses the left mouse button, print the word clicked along with the screen coordinates that were clicked. Then sleep until the key is released."""
left_mouse = d2k['Left mouse button']
quit_key = d2k['Q key']
while keyIsUp(quit_key):
sleep(.01)
if keyIsDown(d2k['Left mouse button']):
x, y = GetCursorPos()
print(
'CLICKED {} {}'.format(x, y))
while keyIsDown(d2k['Left mouse button']):
sleep(.01)
def screenshot():
while keyIsUp(81):
if keyIsDown(189):
image = pyautogui.screenshot()
image = np.array(image)
cv.imwrite("../GTA/DATA/{}.png".format(time.time()), image)
print("screenshotted")
while keyIsDown(189):
sleep(.1)
def screenshot_to_numpyarray():
image = pyautogui.screenshot()
return image
```
#### File: rankcounter/generated/generatedcode.py
```python
import pandas as pd
import counterclass
def compare(cl1,cl2,tier):
if tier == "Challenger":
diffs = []
for el in cl1.Challenger:
if el not in cl2.Challenger:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Challenger:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "Grandmaster":
diffs = []
for el in cl1.Grandmaster:
if el not in cl2.Grandmaster:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "Master":
diffs = []
for el in cl1.Master:
if el not in cl2.Master:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "Dia1_50":
diffs = []
for el in cl1.Dia1_50:
if el not in cl2.Dia1_50:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "Dia1_0":
diffs = []
for el in cl1.Dia1_0:
if el not in cl2.Dia1_0:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "Dia2_50":
diffs = []
for el in cl1.Dia2_50:
if el not in cl2.Dia2_50:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "Dia2_0":
diffs = []
for el in cl1.Dia2_0:
if el not in cl2.Dia2_0:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "Dia3_50":
diffs = []
for el in cl1.Dia3_50:
if el not in cl2.Dia3_50:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "Dia3_0":
diffs = []
for el in cl1.Dia3_0:
if el not in cl2.Dia3_0:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "Dia4_50":
diffs = []
for el in cl1.Dia4_50:
if el not in cl2.Dia4_50:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "Dia4_0":
diffs = []
for el in cl1.Dia4_0:
if el not in cl2.Dia4_0:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "Plat1_50":
diffs = []
for el in cl1.Plat1_50:
if el not in cl2.Plat1_50:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "Plat1_0":
diffs = []
for el in cl1.Plat1_0:
if el not in cl2.Plat1_0:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "Plat2_50":
diffs = []
for el in cl1.Plat2_50:
if el not in cl2.Plat2_50:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "Plat2_0":
diffs = []
for el in cl1.Plat2_0:
if el not in cl2.Plat2_0:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "Plat3_50":
diffs = []
for el in cl1.Plat3_50:
if el not in cl2.Plat3_50:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "Plat3_0":
diffs = []
for el in cl1.Plat3_0:
if el not in cl2.Plat3_0:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "Plat4_50":
diffs = []
for el in cl1.Plat4_50:
if el not in cl2.Plat4_50:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "Plat4_0":
diffs = []
for el in cl1.Plat4_0:
if el not in cl2.Plat4_0:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "Gold1_50":
diffs = []
for el in cl1.Gold1_50:
if el not in cl2.Gold1_50:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "Gold1_0":
diffs = []
for el in cl1.Gold1_0:
if el not in cl2.Gold1_0:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "Gold2_50":
diffs = []
for el in cl1.Gold2_50:
if el not in cl2.Gold2_50:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "Gold2_0":
diffs = []
for el in cl1.Gold2_0:
if el not in cl2.Gold2_0:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "Gold3_50":
diffs = []
for el in cl1.Gold3_50:
if el not in cl2.Gold3_50:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "Gold3_0":
diffs = []
for el in cl1.Gold3_0:
if el not in cl2.Gold3_0:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "Gold4_50":
diffs = []
for el in cl1.Gold4_50:
if el not in cl2.Gold4_50:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "Gold4_0":
diffs = []
for el in cl1.Gold4_0:
if el not in cl2.Gold4_0:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "Sil1_50":
diffs = []
for el in cl1.Sil1_50:
if el not in cl2.Sil1_50:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "Sil1_0":
diffs = []
for el in cl1.Sil1_0:
if el not in cl2.Sil1_0:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "Sil2_50":
diffs = []
for el in cl1.Sil2_50:
if el not in cl2.Sil2_50:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "Sil2_0":
diffs = []
for el in cl1.Sil2_0:
if el not in cl2.Sil2_0:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "Sil3_50":
diffs = []
for el in cl1.Sil3_50:
if el not in cl2.Sil3_50:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "Sil3_0":
diffs = []
for el in cl1.Sil3_0:
if el not in cl2.Sil3_0:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "Sil4_50":
diffs = []
for el in cl1.Sil4_50:
if el not in cl2.Sil4_50:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "Sil4_0":
diffs = []
for el in cl1.Sil4_0:
if el not in cl2.Sil4_0:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "B1_50":
diffs = []
for el in cl1.B1_50:
if el not in cl2.B1_50:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "B1_0":
diffs = []
for el in cl1.B1_0:
if el not in cl2.B1_0:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "B2_50":
diffs = []
for el in cl1.B2_50:
if el not in cl2.B2_50:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "B2_0":
diffs = []
for el in cl1.B2_0:
if el not in cl2.B2_0:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "B3_50":
diffs = []
for el in cl1.B3_50:
if el not in cl2.B3_50:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "B3_0":
diffs = []
for el in cl1.B3_0:
if el not in cl2.B3_0:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "B4_50":
diffs = []
for el in cl1.B4_50:
if el not in cl2.B4_50:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "B4_0":
diffs = []
for el in cl1.B4_0:
if el not in cl2.B4_0:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "Iron1_50":
diffs = []
for el in cl1.Iron1_50:
if el not in cl2.Iron1_50:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "Iron1_0":
diffs = []
for el in cl1.Iron1_0:
if el not in cl2.Iron1_0:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "Iron2_50":
diffs = []
for el in cl1.Iron2_50:
if el not in cl2.Iron2_50:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "Iron2_0":
diffs = []
for el in cl1.Iron2_0:
if el not in cl2.Iron2_0:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "Iron3_50":
diffs = []
for el in cl1.Iron3_50:
if el not in cl2.Iron3_50:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "Iron3_0":
diffs = []
for el in cl1.Iron3_0:
if el not in cl2.Iron3_0:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "Iron4_50":
diffs = []
for el in cl1.Iron4_50:
if el not in cl2.Iron4_50:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
if tier == "Iron4_0":
diffs = []
for el in cl1.Iron4_0:
if el not in cl2.Iron4_0:
diffs.append(el)
negatives = []
positives = []
for diff in diffs:
if diff in cl2.Iron4_0:
positives.append(diff)
elif diff in cl2.Grandmaster or diff in cl2.Master:
negatives.append(diff)
return negatives, positives
def save(cl1):
    # Write every tier table to Data/<tier>.csv; the attribute names double as file names.
    tiers = [
        'Challenger', 'Grandmaster', 'Master',
        'Dia1_50', 'Dia1_0', 'Dia2_50', 'Dia2_0', 'Dia3_50', 'Dia3_0', 'Dia4_50', 'Dia4_0',
        'Plat1_50', 'Plat1_0', 'Plat2_50', 'Plat2_0', 'Plat3_50', 'Plat3_0', 'Plat4_50', 'Plat4_0',
        'Gold1_50', 'Gold1_0', 'Gold2_50', 'Gold2_0', 'Gold3_50', 'Gold3_0', 'Gold4_50', 'Gold4_0',
        'Sil1_50', 'Sil1_0', 'Sil2_50', 'Sil2_0', 'Sil3_50', 'Sil3_0', 'Sil4_50', 'Sil4_0',
        'B1_50', 'B1_0', 'B2_50', 'B2_0', 'B3_50', 'B3_0', 'B4_50', 'B4_0',
        'Iron1_50', 'Iron1_0', 'Iron2_50', 'Iron2_0', 'Iron3_50', 'Iron3_0', 'Iron4_50', 'Iron4_0',
    ]
    for tier in tiers:
        cl1.saveWithCSV(getattr(cl1, tier), tier)
def load(cl1):
    # Rebuild every tier table from Data/<tier>.csv. The frames are built directly from
    # the CSV columns because DataFrame.append returns a new frame instead of modifying
    # in place, so appending in a loop and discarding the result would lose the rows.
    tiers = [
        'Challenger', 'Grandmaster', 'Master',
        'Dia1_50', 'Dia1_0', 'Dia2_50', 'Dia2_0', 'Dia3_50', 'Dia3_0', 'Dia4_50', 'Dia4_0',
        'Plat1_50', 'Plat1_0', 'Plat2_50', 'Plat2_0', 'Plat3_50', 'Plat3_0', 'Plat4_50', 'Plat4_0',
        'Gold1_50', 'Gold1_0', 'Gold2_50', 'Gold2_0', 'Gold3_50', 'Gold3_0', 'Gold4_50', 'Gold4_0',
        'Sil1_50', 'Sil1_0', 'Sil2_50', 'Sil2_0', 'Sil3_50', 'Sil3_0', 'Sil4_50', 'Sil4_0',
        'B1_50', 'B1_0', 'B2_50', 'B2_0', 'B3_50', 'B3_0', 'B4_50', 'B4_0',
        'Iron1_50', 'Iron1_0', 'Iron2_50', 'Iron2_0', 'Iron3_50', 'Iron3_0', 'Iron4_50', 'Iron4_0',
    ]
    for tier in tiers:
        df = pd.read_csv('Data/{}.csv'.format(tier))
        setattr(cl1, tier, df[['Summonername', 'Rank', 'LP']].copy())
``` |
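A short aside on the loader above: `pandas.DataFrame.append` returned a new frame rather than extending the existing one (and was removed in pandas 2.0), so appending in a loop and discarding the result leaves the frame empty. A minimal sketch of the safe pattern, with a made-up row:

```python
import pandas as pd

frame = pd.DataFrame(columns=['Summonername', 'Rank', 'LP'])
new_rows = [{'Summonername': 'ExamplePlayer', 'Rank': 'I', 'LP': 42}]  # hypothetical data
frame = pd.concat([frame, pd.DataFrame(new_rows)], ignore_index=True)  # keep the returned frame
print(len(frame))  # 1
```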
{
"source": "Jinaz/scaiot-project",
"score": 3
} |
#### File: scaiot-project/ControllerRoom/OutsideController.py
```python
from pathlib import Path
import Actuators.WindowControl as wc
import Actuators.L_Control as lc
import Actuators.doorControl as dc
import sys
import RoomModel
ROOMJSON = "/home/pi/Desktop/Implementation/DATA/room.json"
TRIGGERFILE = "/home/pi/Desktop/Implementation/DATA/trigger.llz"
def loadRoom():
room = RoomModel.initRoom()
roomfile = Path(ROOMJSON)
if roomfile.is_file():
# file exists
import json
with open(ROOMJSON) as json_file:
data = json.load(json_file)
room.curtain.name = data["curtain"]["name"]
room.curtain.status = data["curtain"]["status"]
room.heater.name = data["heater"]["name"]
room.heater.status = data["heater"]["status"]
room.light.name = data["light"]["name"]
room.light.status = data["light"]["status"]
room.door.name = data["door"]["name"]
room.door.status = data["door"]["status"]
room.cooler.name = data["cooler"]["name"]
room.cooler.status = data["cooler"]["status"]
room.window.name = data["window"]["name"]
room.window.status = data["window"]["status"]
print("data loaded")
return room
if __name__ == "__main__":
if len(sys.argv) > 1:
command = sys.argv[1]
if command == "roomstatus":
room = loadRoom()
print("room name:",room.name)
print("window name:", room.window.name, "window status", "open"if room.window.status == 1 else "closed")
print("curtain name:", room.curtain.name, "curtain status", "down" if room.curtain.status == 1 else "up")
print("heater name:", room.heater.name, "heater status", "on" if room.heater.status == 1 else "off")
print("air conditioner name:", room.cooler.name, "air conditioner status", "on" if room.cooler.status == 1 else "off")
lightstatus = "off"
if room.light.status == 1:
lightstatus = "dimmed"
elif room.light.status == 2:
lightstatus = "on"
print("light name:", room.light.name, "light status", lightstatus)
if command == "heater_on":
room = loadRoom()
if room.heater.status == 1:
print("heater already on")
else:
room.heater.status = 1
room.saveConfig()
f = open(TRIGGERFILE, "w")
f.write("True")
f.close()
if command == "heater_off":
room = loadRoom()
if room.heater.status == 0:
print("heater already off")
else:
room.heater.status = 0
room.saveConfig()
f = open(TRIGGERFILE, "w")
f.write("True")
f.close()
if command == "cooler_on":
room = loadRoom()
if room.cooler.status == 1:
print("air conditioner already on")
else:
room.cooler.status = 1
room.saveConfig()
f = open(TRIGGERFILE, "w")
f.write("True")
f.close()
if command == "cooler_off":
room = loadRoom()
if room.cooler.status == 0:
print("air conditioner already off")
else:
room.cooler.status = 0
room.saveConfig()
f = open(TRIGGERFILE, "w")
f.write("True")
f.close()
if command == "window_open":
room = loadRoom()
if room.window.status == 1:
print("windows already open")
else:
wc.openWindow()
room.window.status = 1
room.saveConfig()
f = open(TRIGGERFILE, "w")
f.write("True")
f.close()
if command == "window_close":
room = loadRoom()
if room.window.status == 0:
print("windows already closed")
else:
wc.closeWindow()
room.window.status = 0
room.saveConfig()
f = open(TRIGGERFILE, "w")
f.write("True")
f.close()
if command == "light_off":
room = loadRoom()
if room.light.status == 0:
print("lights already off")
else:
lc.signal("off")
room.light.status = 0
room.saveConfig()
f = open(TRIGGERFILE, "w")
f.write("True")
f.close()
if command == "light_dim":
room = loadRoom()
if room.light.status == 1:
print("lights already dimmed")
else:
lc.signal("mid")
room.light.status = 1
room.saveConfig()
f = open(TRIGGERFILE, "w")
f.write("True")
f.close()
if command == "light_on":
room = loadRoom()
if room.light.status == 2:
print("lights already on")
else:
lc.signal("on")
room.light.status = 2
room.saveConfig()
f = open(TRIGGERFILE, "w")
f.write("True")
f.close()
if command == "lock_door":
room = loadRoom()
if room.door.status == 1:
print("door already locked")
else:
dc.closeDoor()
room.door.status = 1
room.saveConfig()
f = open(TRIGGERFILE, "w")
f.write("True")
f.close()
if command == "unlock_door":
room = loadRoom()
if room.door.status == 0:
print("door already unlocked")
else:
dc.openDoor()
room.door.status = 0
room.saveConfig()
f = open(TRIGGERFILE, "w")
f.write("True")
f.close()
if command == "curtain_up":
room = loadRoom()
if room.curtain.status == 0:
print("curtain already up")
else:
room.curtain.status = 0
room.saveConfig()
f = open(TRIGGERFILE, "w")
f.write("True")
f.close()
if command == "curtain_down":
room = loadRoom()
if room.curtain.status == 1:
print("curtain already down")
else:
room.curtain.status = 1
room.saveConfig()
f = open(TRIGGERFILE, "w")
f.write("True")
f.close()
else:
print("invalid command")
```
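The controller above never talks to the main loop directly; it rewrites the room JSON and drops a `True` flag into the trigger file, which the loop in `mainInitPoint.py` polls and resets. A minimal sketch of that handshake, using a local file in place of the Raspberry Pi paths hard-coded above:

```python
# Minimal sketch of the trigger-file handshake used by the two scripts;
# "trigger.llz" is a local stand-in for the absolute path on the Pi.
TRIGGER = "trigger.llz"

def signal_change():
    # controller side: request a reload after changing the room state
    with open(TRIGGER, "w") as f:
        f.write("True")

def poll_trigger():
    # main-loop side: reload once, then reset the flag
    with open(TRIGGER) as f:
        flagged = f.read(4) == "True"
    if flagged:
        with open(TRIGGER, "w") as f:
            f.write("False")
    return flagged

signal_change()
print(poll_trigger())  # True  -> the main loop would call loadRoom() here
print(poll_trigger())  # False
```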
#### File: scaiot-project/Implementation/mainInitPoint.py
```python
import PddlFiles.pddlLoop as pl
import IOTHubActuator.decider as decider
import time
import sys
import DataFiles.Constants as con
import DATA.dummyGenerator as dg
import WeatherForecast.weatherReaderPy as wrp
import SmartCitiesCalendar.calendarreader as crp
import RoomModel
import PddlFiles.pddlWriter as writer
from pathlib import Path
import InformActuator.sendEmail as se
from IOTHubSensors import SensorHub as sh
import numpy as np
import datetime
def loadRoom():
room = RoomModel.initRoom()
roomfile = Path(con.ROOMJSON)
if roomfile.is_file():
# file exists
import json
with open(con.ROOMJSON) as json_file:
data = json.load(json_file)
room.curtain.name = data["curtain"]["name"]
room.curtain.status = data["curtain"]["status"]
room.heater.name = data["heater"]["name"]
room.heater.status = data["heater"]["status"]
room.light.name = data["light"]["name"]
room.light.status = data["light"]["status"]
room.door.name = data["door"]["name"]
room.door.status = data["door"]["status"]
room.cooler.name = data["cooler"]["name"]
room.cooler.status = data["cooler"]["status"]
room.window.name = data["window"]["name"]
room.window.status = data["window"]["status"]
print("data loaded")
import LightControl.L_Control as lc
lc.initial()
room.light.status = 0
import DoorWindowControl.WindowControl as wc
wc.closeWindow()
room.window.status = 0
import DoorWindowControl.doorControl as dc
dc.closeDoor()
room.door.status = 1
return room
if __name__ == "__main__":
room = loadRoom()
loopcount = 0
datacontainer = np.zeros((4, 10))
current_time = datetime.datetime.fromisoformat('2020-07-13 08:05:00+02:00')
while True:
#hook to manipulate status from outside
f = open(con.TRIGGERPATH)
c = f.read(4)
if c == "True":
room = loadRoom()
f2 = open(con.TRIGGERPATH, "w")
f2.write("False")
f2.close()
# inLecture = True
# betweenLectures = False
# afterLastLecture = False
# shortBeforeFirstLecture = False
# print(room.presenting)
inLecture, betweenLectures, afterLastLecture, shortBeforeFirstLecture = crp.readurl(current_time)
outtemp, weather = wrp.readurl()
humidity, temperature, lightlevel, ir_value = sh.getData()
# humidity, temperature, lightlevel, ir_value = daga.readData()
datacontainer[0][loopcount] = humidity
datacontainer[1][loopcount] = temperature
datacontainer[2][loopcount] = lightlevel
datacontainer[3][loopcount] = ir_value
#def write_problem(room, filename=ss.TEST_PROBLEM, wanted_temp=24, wanted_lightlevel=400,
#outside_temp=30, heaterstatus=0, coolerstatus=0, lightstatus=0,
#windowstatus=0, doorstatus=0, curtainstatus=0,
#presentation=False, inlecture=True, betweenLectures=False,
#afterLecture=False, firstLecture=False, weather=0, tem=-1, hum=-1, lightl=-1, ir_val=-1):
writer.write_problem(room, con.TEST_PROBLEM, con.DESIRED_TEMP, con.DESIRED_LIGHTLEVEL, outtemp,
room.heater.status, room.cooler.status,
room.light.status,
room.window.status, room.door.status, room.curtain.status, room.presenting,
inLecture, betweenLectures, afterLastLecture, shortBeforeFirstLecture, weather, temperature, humidity,
lightlevel, ir_value)
# time.sleep(2)
actions = pl.pddlLoop()
if len(actions) > 0:
emailcontent = decider.generate_content(actions, room)
se.sendEmail("<EMAIL>", emailcontent)
room.saveConfig()
time.sleep(1)
current_time += datetime.timedelta(minutes=10, seconds=0)
loopcount += 1
if loopcount % 10 == 0:
from numpy import savetxt
loopcount = 0
data0 = datacontainer[0, :]
data1 = datacontainer[1, :]
data2 = datacontainer[2, :]
data3 = datacontainer[3, :]
# save to csv file
savetxt('DATA/hum.csv', data0, delimiter=',')
savetxt('DATA/temp.csv', data1, delimiter=',')
savetxt('DATA/light.csv', data2, delimiter=',')
savetxt('DATA/ir_data.csv', data3, delimiter=',')
``` |
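The loop above buffers ten readings per sensor channel before flushing them to CSV. A compact sketch of that buffering pattern, with dummy data in place of the IoT-hub sensors:

```python
import numpy as np
from numpy import savetxt

datacontainer = np.zeros((4, 10))        # humidity, temperature, light, IR
for loopcount in range(10):
    reading = np.random.rand(4)          # dummy stand-in for SensorHub.getData()
    datacontainer[:, loopcount] = reading
savetxt('hum.csv', datacontainer[0, :], delimiter=',')   # one file per channel
```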
{
"source": "jin/bazel_android_sdk_downloader",
"score": 2
} |
#### File: jin/bazel_android_sdk_downloader/rules.bzl
```python
_SDK_TOOLS_URL = 'https://dl.google.com/android/repository/sdk-tools-linux-3859397.zip'
# Used as both the install target name and package name for shortand inlining, i.e.
# `@androidsdk//install` for `@androidsdk//install:install`
_INSTALL_TARGET_NAME = "install"
# This is the name of the output "file" the install target will produce. This name is chosen to
# look nice when running. saying it is building `bazel-bin/external/androidsdk/install/sdk`.
_INSTALL_OUTPUT_NAME = "sdk"
# Relative path from the execution of `sdkmanager` to install the SDK. A path is required to force
# it to build into the `bazel-bin` directory rather than the transient `bazel-<workspace-name>`
# folder. Without it you must build the downloader repository and your target in the same build
# invocation.
_SDK_ROOT_PATH = '_'
def _android_sdk_repository_impl(repo_ctx):
# Download Android SDK tools
repo_ctx.download_and_extract(_SDK_TOOLS_URL)
# BUILD folder in the root of the generated repository
repo_ctx.file("BUILD", content = "exports_files(['tools/bin/sdkmanager'], visibility = ['//visibility:public'])", executable = False)
# Bazel rules file for the repository. All logic should live here
repo_ctx.file(_INSTALL_TARGET_NAME + "/internal.bzl", content = """
def _install_sdk_impl(ctx):
ctx.actions.write(ctx.outputs._output, '''
{{sdkmanager}} --sdk_root='{path}' 'platforms;android-{api_level}' 'build-tools;{build_tools_version}' 'extras;android;m2repository'
'''.format(sdkmanager = ctx.file._sdk_manager.path), is_executable = True)
runfiles = ctx.runfiles(files = [ctx.file._sdk_manager])
return [DefaultInfo(executable = ctx.outputs._output, runfiles = runfiles)]
install_sdk = rule(
attrs = {{
"_sdk_manager": attr.label(
default = Label("//:tools/bin/sdkmanager"),
allow_single_file = True,
executable = True,
cfg = "host",
),
}},
outputs = {{
"_output": "{output_name}",
}},
implementation = _install_sdk_impl,
executable = True,
)
""".format(
output_name = _INSTALL_OUTPUT_NAME,
api_level = repo_ctx.attr.api_level,
build_tools_version = repo_ctx.attr.build_tools_version,
path = _SDK_ROOT_PATH),
executable = False)
# BUILD file for the single target of the repository. If the target name is the same as the
# package, you can `bazel run` ` @repo//target` as a shorthand for `@repo//target:target`.
repo_ctx.file(_INSTALL_TARGET_NAME + "/BUILD", content = """
load(":internal.bzl", "install_sdk")
install_sdk(
name = "{name}",
)
""".format(name = _INSTALL_TARGET_NAME), executable = False)
_android_sdk_repository = repository_rule(
implementation = _android_sdk_repository_impl,
local = False,
attrs = {
"api_level": attr.int(mandatory = True),
"build_tools_version": attr.string(mandatory = True),
}
)
# This is the main export for the file
def android_sdk_repository(name = None, workspace_name = None, api_level = None, build_tools_version = None):
# Support downloading the SDK as a repository (inspired by `@yarn//yarn` )
_android_sdk_repository(
name = name,
api_level = api_level,
build_tools_version = build_tools_version,
)
# Create an android_sdk_repository targetting the downloaded repo
# The path is long and convoluted because the SDK is downloaded into the runfiles of a bin target
native.android_sdk_repository(
name = "android_sdk_repository_" + name,
path = "bazel-bin/external/{name}/{install_target_name}/{install_output_name}.runfiles/{workspace_name}/{sdk_root_path}".format(
name = name,
install_target_name = _INSTALL_TARGET_NAME,
install_output_name = _INSTALL_OUTPUT_NAME,
workspace_name = workspace_name,
sdk_root_path = _SDK_ROOT_PATH),
api_level = api_level,
build_tools_version = build_tools_version,
)
``` |
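A sketch of how the exported macro might be wired into a consuming WORKSPACE; only the parameter names and the `@androidsdk//install` run target come from the code above, while the repository label and version values are illustrative:

```python
# WORKSPACE (sketch) -- values are illustrative
load("@bazel_android_sdk_downloader//:rules.bzl", "android_sdk_repository")

android_sdk_repository(
    name = "androidsdk",
    workspace_name = "my_workspace",   # must match workspace(name = ...) of the consumer
    api_level = 27,
    build_tools_version = "27.0.3",
)
# Fetch the SDK once with: bazel run @androidsdk//install
```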
{
"source": "jin/bazel-integration-testing",
"score": 2
} |
#### File: bazel-integration-testing/bazel_integration_test/test_base_test.py
```python
import os
import unittest
from bazel_integration_test import test_base
class TestBaseTest(test_base.TestBase):
def testVersion(self):
self.ScratchFile('WORKSPACE')
exit_code, stdout, stderr = self.RunBazel(['info', 'release'])
self.AssertExitCode(exit_code, 0, stderr)
self.assertTrue(("release " + self.bazelVersion) in stdout[0])
if __name__ == '__main__':
unittest.main()
```
#### File: bazel-integration-testing/tools/repositories.bzl
```python
load("//tools:bazel_hash_dict.bzl", "BAZEL_HASH_DICT")
load(":common.bzl", "BAZEL_VERSIONS")
_BAZEL_BINARY_PACKAGES = [
"http://releases.bazel.build/{version}/release/bazel-{version}{installer}-{platform}.{extension}",
"https://github.com/bazelbuild/bazel/releases/download/{version}/bazel-{version}{installer}-{platform}.{extension}",
]
def _get_platform_name(rctx):
os_name = rctx.os.name.lower()
if os_name.startswith("mac os"):
return "darwin-x86_64"
if os_name.startswith("windows"):
return "windows-x86_64"
# We default on linux-x86_64 because we only support 3 platforms
return "linux-x86_64"
def _is_windows(rctx):
return _get_platform_name(rctx).startswith("windows")
def _get_installer(rctx):
platform = _get_platform_name(rctx)
version = rctx.attr.version
if _is_windows(rctx):
extension = "zip"
installer = ""
else:
extension = "sh"
installer = "-installer"
urls = [url.format(version=version, installer=installer, platform=platform, extension=extension) for url in _BAZEL_BINARY_PACKAGES]
args = {"url": urls, "type": "zip"}
if version in BAZEL_HASH_DICT and platform in BAZEL_HASH_DICT[version]:
args["sha256"] = BAZEL_HASH_DICT[version][platform]
rctx.download_and_extract(**args)
def _bazel_repository_impl(rctx):
_get_installer(rctx)
rctx.file("WORKSPACE", "workspace(name='%s')" % rctx.attr.name)
rctx.file("BUILD", """
filegroup(
name = "bazel_binary",
srcs = select({
"@bazel_tools//src/conditions:windows" : ["bazel.exe"],
"//conditions:default": ["bazel-real","bazel"],
}),
visibility = ["//visibility:public"])""")
bazel_binary = repository_rule(
attrs = {
"version": attr.string(default = "0.5.3"),
},
implementation = _bazel_repository_impl,
)
"""Download a bazel binary for integration test.
Args:
version: the version of Bazel to download.
Limitation: only support Linux and macOS for now.
"""
def bazel_binaries(versions = BAZEL_VERSIONS):
"""Download all bazel binaries specified in BAZEL_VERSIONS."""
for version in versions:
name = "build_bazel_bazel_" + version.replace(".", "_")
if not native.existing_rule(name):
bazel_binary(name = name, version = version)
``` |
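For context, a sketch of how `bazel_binaries` is typically invoked from a consuming WORKSPACE; the load label is an assumption, while the generated `@build_bazel_bazel_*` repository names follow directly from the code above:

```python
# WORKSPACE (sketch)
load("@bazel_integration_testing//tools:repositories.bzl", "bazel_binaries")

bazel_binaries(versions = ["0.5.3"])
# Defines e.g. @build_bazel_bazel_0_5_3//:bazel_binary for use in integration tests.
```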
{
"source": "jinbeibei/VarNet-Exploring-Variations-for-Unsupervised-Video-Prediction",
"score": 3
} |
#### File: VarNet-Exploring-Variations-for-Unsupervised-Video-Prediction/models/network.py
```python
import os
import tensorflow as tf
from BasicConvLSTMCell import BasicConvLSTMCell
from operations import *
from utils import *
class NETWORK(object):
def __init__(self, image_size, batch_size=32, c_dim=3,
K=10, T=10, checkpoint_dir=None, is_train=True):
self.batch_size = batch_size
self.image_size = image_size
self.is_train = is_train
self.gf_dim = 64
self.df_dim = 64
self.c_dim = c_dim
self.K = K
self.T = T
self.diff_shape = [batch_size, self.image_size[0],
self.image_size[1], K - 1, 1]
self.xt_shape = [batch_size, self.image_size[0], self.image_size[1], c_dim]
self.target_shape = [batch_size, self.image_size[0], self.image_size[1],
K + T, c_dim]
self.build_model()
def build_model(self):
self.diff_in = tf.placeholder(tf.float32, self.diff_shape, name='diff_in')
self.xt = tf.placeholder(tf.float32, self.xt_shape, name='xt')
self.target = tf.placeholder(tf.float32, self.target_shape, name='target')
cell = BasicConvLSTMCell([self.image_size[0] / 8, self.image_size[1] / 8],
[3, 3], 256)
pred = self.forward(self.diff_in, self.xt, cell)
self.G = tf.concat(axis=3, values=pred)
if self.is_train:
true_sim = inverse_transform(self.target[:, :, :, self.K:, :])
if self.c_dim == 1: true_sim = tf.tile(true_sim, [1, 1, 1, 1, 3])
true_sim = tf.reshape(tf.transpose(true_sim, [0, 3, 1, 2, 4]),
[-1, self.image_size[0],
self.image_size[1], 3])
gen_sim = inverse_transform(self.G)
if self.c_dim == 1: gen_sim = tf.tile(gen_sim, [1, 1, 1, 1, 3])
gen_sim = tf.reshape(tf.transpose(gen_sim, [0, 3, 1, 2, 4]),
[-1, self.image_size[0],
self.image_size[1], 3])
binput = tf.reshape(self.target[:, :, :, :self.K, :],
[self.batch_size, self.image_size[0],
self.image_size[1], -1])
btarget = tf.reshape(self.target[:, :, :, self.K:, :],
[self.batch_size, self.image_size[0],
self.image_size[1], -1])
bgen = tf.reshape(self.G, [self.batch_size,
self.image_size[0],
self.image_size[1], -1])
target_data = tf.concat(axis=3, values=[binput, btarget])
gen_data = tf.concat(axis=3, values=[binput, bgen])
with tf.variable_scope("DIS", reuse=False):
self.D, self.D_logits = self.discriminator(target_data)
with tf.variable_scope("DIS", reuse=True):
self.D_, self.D_logits_ = self.discriminator(gen_data)
self.L_p = tf.reduce_mean(
tf.square(self.G - self.target[:, :, :, self.K:, :])
)
self.L_gdl = gdl(gen_sim, true_sim, 1.)
self.L_img = self.L_p + self.L_gdl
self.d_loss_real = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
logits=self.D_logits, labels=tf.ones_like(self.D)
)
)
self.d_loss_fake = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
logits=self.D_logits_, labels=tf.zeros_like(self.D_)
)
)
self.d_loss = self.d_loss_real + self.d_loss_fake
self.L_GAN = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
logits=self.D_logits_, labels=tf.ones_like(self.D_)
)
)
self.loss_sum = tf.summary.scalar("L_img", self.L_img)
self.L_p_sum = tf.summary.scalar("L_p", self.L_p)
self.L_gdl_sum = tf.summary.scalar("L_gdl", self.L_gdl)
self.L_GAN_sum = tf.summary.scalar("L_GAN", self.L_GAN)
self.d_loss_sum = tf.summary.scalar("d_loss", self.d_loss)
self.d_loss_real_sum = tf.summary.scalar("d_loss_real", self.d_loss_real)
self.d_loss_fake_sum = tf.summary.scalar("d_loss_fake", self.d_loss_fake)
self.t_vars = tf.trainable_variables()
self.g_vars = [var for var in self.t_vars if 'DIS' not in var.name]
self.d_vars = [var for var in self.t_vars if 'DIS' in var.name]
num_param = 0.0
for var in self.g_vars:
num_param += int(np.prod(var.get_shape()));
print("Number of parameters: %d" % num_param)
self.saver = tf.train.Saver(max_to_keep=10)
def forward(self, diff_in, xt, cell):
# Initial state
state = tf.zeros([self.batch_size, self.image_size[0] / 8,
self.image_size[1] / 8, 512])
reuse = False
# Encoder
for t in xrange(self.K - 1):
enc_h, res_encoder = self.encoder_vgg(diff_in[:, :, :, t, :], reuse=reuse)
h_dyn, state = cell(enc_h, state, scope='lstm', reuse=reuse)
reuse = True
pred = []
# Decoder
for t in xrange(self.T):
if t == 0:
diff_hat = self.decoder_vgg(h_dyn, res_encoder, reuse=False)
else:
enc_h, res_encoder = self.encoder_vgg(diff_in, reuse=True)
h_dyn, state = cell(enc_h, state, scope='lstm', reuse=True)
diff_hat = self.decoder_vgg(h_dyn, res_encoder, reuse=True)
x_hat = diff_hat + xt
diff_in = inverse_transform(diff_hat)
xt = x_hat
pred.append(tf.reshape(x_hat, [self.batch_size, self.image_size[0],
self.image_size[1], 1, self.c_dim]))
return pred
def encoder_vgg(self, xt, reuse):
res_in = []
conv1_1 = relu(conv2d(xt, output_dim=self.gf_dim, k_h=3, k_w=3,
d_h=1, d_w=1, name='encoder_vgg_conv1_1', reuse=reuse))
conv1_2 = relu(conv2d(conv1_1, output_dim=self.gf_dim, k_h=3, k_w=3,
d_h=1, d_w=1, name='encoder_vgg_conv1_2', reuse=reuse))
res_in.append(conv1_2)
pool1 = MaxPooling(conv1_2, [2, 2])
conv2_1 = relu(conv2d(pool1, output_dim=self.gf_dim * 2, k_h=3, k_w=3,
d_h=1, d_w=1, name='encoder_vgg_conv2_1', reuse=reuse))
conv2_2 = relu(conv2d(conv2_1, output_dim=self.gf_dim * 2, k_h=3, k_w=3,
d_h=1, d_w=1, name='encoder_vgg_conv2_2', reuse=reuse))
res_in.append(conv2_2)
pool2 = MaxPooling(conv2_2, [2, 2])
conv3_1 = relu(conv2d(pool2, output_dim=self.gf_dim * 4, k_h=3, k_w=3,
d_h=1, d_w=1, name='encoder_vgg_conv3_1', reuse=reuse))
conv3_2 = relu(conv2d(conv3_1, output_dim=self.gf_dim * 4, k_h=3, k_w=3,
d_h=1, d_w=1, name='encoder_vgg_conv3_2', reuse=reuse))
conv3_3 = relu(conv2d(conv3_2, output_dim=self.gf_dim * 4, k_h=3, k_w=3,
d_h=1, d_w=1, name='encoder_vgg_conv3_3', reuse=reuse))
res_in.append(conv3_3)
conv4_1 = relu(conv2d(conv3_3, output_dim=self.gf_dim * 8, k_h=3, k_w=3,
d_h=1, d_w=1, name='encoder_vgg_conv4_1', reuse=reuse))
conv4_2 = relu(conv2d(conv4_1 , output_dim=self.gf_dim * 8, k_h=3, k_w=3,
d_h=1, d_w=1, name='encoder_vgg_conv4_2', reuse=reuse))
conv4_3 = relu(conv2d(conv4_2, output_dim=self.gf_dim * 8, k_h=3, k_w=3,
d_h=1, d_w=1, name='encoder_vgg_conv4_3', reuse=reuse))
res_in.append(conv4_3)
conv5_1 = relu(conv2d(conv4_3, output_dim=self.gf_dim * 8, k_h=3, k_w=3,
d_h=1, d_w=1, name='encoder_vgg_conv5_1', reuse=reuse))
conv5_2 = relu(conv2d(conv5_1, output_dim=self.gf_dim * 8, k_h=3, k_w=3,
d_h=1, d_w=1, name='encoder_vgg_conv5_2', reuse=reuse))
conv5_3 = relu(conv2d(conv5_2, output_dim=self.gf_dim * 8, k_h=3, k_w=3,
d_h=1, d_w=1, name='encoder_vgg_conv5_3', reuse=reuse))
res_in.append(conv5_3)
pool5 = MaxPooling(conv5_3, [2, 2])
return pool5, res_in
def decoder_vgg(self, h_comb, res_connect, reuse=False):
shapel5 = [self.batch_size, self.image_size[0] / 4,
self.image_size[1] / 4, self.gf_dim * 8]
shapeout5 = [self.batch_size, self.image_size[0] / 4,
self.image_size[1] / 4, self.gf_dim * 8]
depool5 = FixedUnPooling(h_comb, [2, 2])
deconv5_3 = relu(deconv2d(relu(tf.add(depool5, res_connect[4])),
output_shape=shapel5, k_h=3, k_w=3,
d_h=1, d_w=1, name='decoder_vgg_deconv5_3', reuse=reuse))
deconv5_2 = relu(deconv2d(deconv5_3, output_shape=shapel5, k_h=3, k_w=3,
d_h=1, d_w=1, name='decoder_vgg_deconv5_2', reuse=reuse))
deconv5_1 = relu(deconv2d(deconv5_2, output_shape=shapeout5, k_h=3, k_w=3,
d_h=1, d_w=1, name='decoder_vgg_deconv5_1', reuse=reuse))
shapel4 = [self.batch_size, self.image_size[0] / 4,
self.image_size[1] / 4, self.gf_dim * 8]
shapeout4 = [self.batch_size, self.image_size[0] / 4,
self.image_size[1] / 4, self.gf_dim * 4]
deconv4_3 = relu(deconv2d(relu(tf.add(deconv5_1, res_connect[3])),
output_shape=shapel4, k_h=3, k_w=3,
d_h=1, d_w=1, name='decoder_vgg_deconv4_3', reuse=reuse))
deconv4_2 = relu(deconv2d(deconv4_3, output_shape=shapel4, k_h=3, k_w=3,
d_h=1, d_w=1, name='decoder_vgg_deconv4_2', reuse=reuse))
deconv4_1 = relu(deconv2d(deconv4_2, output_shape=shapeout4, k_h=3, k_w=3,
d_h=1, d_w=1, name='decoder_vgg_deconv4_1', reuse=reuse))
shapel3 = [self.batch_size, self.image_size[0] / 4,
self.image_size[1] / 4, self.gf_dim * 4]
shapeout3 = [self.batch_size, self.image_size[0] / 4,
self.image_size[1] / 4, self.gf_dim * 2]
deconv3_3 = relu(deconv2d(relu(tf.add(deconv4_1, res_connect[2])),
output_shape=shapel3, k_h=3, k_w=3,
d_h=1, d_w=1, name='decoder_vgg_deconv3_3', reuse=reuse))
deconv3_2 = relu(deconv2d(deconv3_3, output_shape=shapel3, k_h=3, k_w=3,
d_h=1, d_w=1, name='decoder_vgg_deconv3_2', reuse=reuse))
deconv3_1 = relu(deconv2d(deconv3_2, output_shape=shapeout3, k_h=3, k_w=3,
d_h=1, d_w=1, name='decoder_vgg_deconv3_1', reuse=reuse))
shapel2 = [self.batch_size, self.image_size[0] / 2,
self.image_size[1] / 2, self.gf_dim * 2]
shapeout2 = [self.batch_size, self.image_size[0] / 2,
self.image_size[1] / 2, self.gf_dim]
depool2 = FixedUnPooling(deconv3_1, [2, 2])
deconv2_2 = relu(deconv2d(relu(tf.add(depool2, res_connect[1])),
output_shape=shapel2, k_h=3, k_w=3,
d_h=1, d_w=1, name='decoder_vgg_deconv2_2', reuse=reuse))
deconv2_1 = relu(deconv2d(deconv2_2, output_shape=shapeout2, k_h=3, k_w=3,
d_h=1, d_w=1, name='decoder_vgg_deconv2_1', reuse=reuse))
shapel1 = [self.batch_size, self.image_size[0],
self.image_size[1], self.gf_dim]
shapeout1 = [self.batch_size, self.image_size[0],
self.image_size[1], self.c_dim]
depool1 = FixedUnPooling(deconv2_1, [2, 2])
deconv1_2 = relu(deconv2d(relu(tf.add(depool1, res_connect[0])),
output_shape=shapel1, k_h=3, k_w=3, d_h=1, d_w=1,
name='decoder_vgg_deconv1_2', reuse=reuse))
xtp1 = tanh(deconv2d(deconv1_2, output_shape=shapeout1, k_h=3, k_w=3,
d_h=1, d_w=1, name='decoder_vgg_deconv1_1', reuse=reuse))
return xtp1
def discriminator(self, image):
h0 = lrelu(conv2d(image, self.df_dim, name='dis0_conv'))
h1_0 = lrelu(batch_norm(conv2d(h0, self.df_dim * 2, name='dis1_0_conv'),"bn1_0"))
h1_1 = lrelu(batch_norm(conv2d(h1_0, self.df_dim * 2, name='dis1_1_conv'),"bn1_1"))
h2_0 = lrelu(batch_norm(conv2d(h1_1, self.df_dim * 4, name='dis2_0_conv'),"bn2_0"))
h2_1 = lrelu(batch_norm(conv2d(h2_0, self.df_dim * 4, name='dis2_1_conv'), "bn2_1"))
h3 = lrelu(batch_norm(conv2d(h2_1, self.df_dim * 8, name='dis_h3_conv'), "bn3"))
h = linear(tf.reshape(h3, [self.batch_size, -1]), 1, 'dis_h3_linear')
return tf.nn.sigmoid(h), h
def save(self, sess, checkpoint_dir, step):
model_name = "NETWORK.model"
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
self.saver.save(sess,
os.path.join(checkpoint_dir, model_name),
global_step=step)
def load(self, sess, checkpoint_dir, model_name=None):
print(" [*] Reading checkpoints...")
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
if model_name is None: model_name = ckpt_name
self.saver.restore(sess, os.path.join(checkpoint_dir, model_name))
print(" Loaded model: " + str(model_name))
return True, model_name
else:
return False, None
``` |
{
"source": "jinbeizame007/planet_pybullet",
"score": 3
} |
#### File: planet/networks/conv_ha.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import keras
import tensorflow as tf
from tensorflow_probability import distributions as tfd
from planet import tools
def encoder(obs):
"""Extract deterministic features from an observation."""
obs = obs['image']
hidden = tf.layers.dense(obs, 1024, tf.nn.relu)#keras.layers.Dense(500, activation='relu')(obs)
hidden = tf.layers.dense(hidden, 1024, tf.nn.relu)#keras.layers.Dense(500, activation='relu')(hidden)
hidden = tf.layers.dense(hidden, 1024, None)#keras.layers.Dense(1024)(hidden)
return hidden
def decoder(state, data_shape):
"""Compute the data distribution of an observation from its state."""
#hidden = keras.layers.Dense(500, activation='relu')(state)
#hidden = keras.layers.Dense(500, activation='relu')(hidden)
#hidden = keras.layers.Dense(26)(hidden)
hidden = tf.layers.dense(state, 500, tf.nn.relu)
hidden = tf.layers.dense(hidden, 500, tf.nn.relu)
hidden = tf.layers.dense(hidden, 26, None)
mean = hidden
mean = tf.reshape(mean, tools.shape(state)[:-1] + data_shape)
dist = tools.MSEDistribution(mean)
dist = tfd.Independent(dist, len(data_shape))
return dist
``` |
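A rough sketch of wiring the two functions into a TF1 graph. The observation width (26, matching the decoder's output layer) and the latent width are illustrative, and `planet.tools.shape`/`MSEDistribution` are assumed to behave like thin wrappers over the usual shape and log-prob APIs:

```python
import tensorflow as tf
from planet.networks.conv_ha import encoder, decoder   # assumes the package layout above

obs = {'image': tf.placeholder(tf.float32, [8, 26])}   # flat, low-dimensional observations
features = encoder(obs)                                # [8, 1024] deterministic features
state = tf.placeholder(tf.float32, [8, 230])           # illustrative latent state
dist = decoder(state, data_shape=[26])                 # per-observation MSE distribution
reconstruction_loss = -tf.reduce_mean(dist.log_prob(obs['image']))
```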
{
"source": "Jinboasltw/FastSurfer",
"score": 2
} |
#### File: FastSurferCNN/models/losses.py
```python
import torch
import torch.nn as nn
from torch.nn.modules.loss import _Loss
import torch.nn.functional as F
class DiceLoss(_Loss):
"""
Dice Loss
"""
def forward(self, output, target, weights=None, ignore_index=None):
"""
:param output: N x C x H x W Variable
:param target: N x C x W LongTensor with starting class at 0
:param weights: C FloatTensor with class wise weights
:param int ignore_index: ignore label with index x in the loss calculation
:return:
"""
eps = 0.001
encoded_target = output.detach() * 0
if ignore_index is not None:
mask = target == ignore_index
target = target.clone()
target[mask] = 0
encoded_target.scatter_(1, target.unsqueeze(1), 1)
mask = mask.unsqueeze(1).expand_as(encoded_target)
encoded_target[mask] = 0
else:
encoded_target.scatter_(1, target.unsqueeze(1), 1)
if weights is None:
weights = 1
intersection = output * encoded_target
numerator = 2 * intersection.sum(0).sum(1).sum(1)
denominator = output + encoded_target
if ignore_index is not None:
denominator[mask] = 0
denominator = denominator.sum(0).sum(1).sum(1) + eps
loss_per_channel = weights * (1 - (numerator / denominator)) # Channel-wise weights
return loss_per_channel.sum() / output.size(1)
class CrossEntropy2D(nn.Module):
"""
2D Cross-entropy loss implemented as negative log likelihood
"""
def __init__(self, weight=None, reduction='none'):
super(CrossEntropy2D, self).__init__()
self.nll_loss = nn.CrossEntropyLoss(weight=weight, reduction=reduction)
def forward(self, inputs, targets):
return self.nll_loss(inputs, targets)
class CombinedLoss(nn.Module):
"""
For CrossEntropy the input has to be a long tensor
Args:
-- inputx N x C x H x W
-- target - N x H x W - int type
-- weight - N x H x W - float
"""
def __init__(self, weight_dice=1, weight_ce=1):
super(CombinedLoss, self).__init__()
self.cross_entropy_loss = CrossEntropy2D()
self.dice_loss = DiceLoss()
self.weight_dice = weight_dice
self.weight_ce = weight_ce
def forward(self, inputx, target, weight):
target = target.type(torch.LongTensor) # Typecast to long tensor
if inputx.is_cuda:
target = target.cuda()
input_soft = F.softmax(inputx, dim=1) # Along Class Dimension
dice_val = torch.mean(self.dice_loss(input_soft, target))
ce_val = torch.mean(torch.mul(self.cross_entropy_loss.forward(inputx, target), weight))
total_loss = torch.add(torch.mul(dice_val, self.weight_dice), torch.mul(ce_val, self.weight_ce))
return total_loss, dice_val, ce_val
```
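A quick sanity check of the combined loss on dummy tensors, assuming the classes above are in scope; shapes follow the class docstring (logits N x C x H x W, integer labels and per-pixel weights N x H x W):

```python
import torch

criterion = CombinedLoss(weight_dice=1, weight_ce=1)
logits = torch.randn(2, 4, 8, 8)            # N x C x H x W class scores
labels = torch.randint(0, 4, (2, 8, 8))     # N x H x W integer labels
weights = torch.ones(2, 8, 8)               # N x H x W per-pixel CE weights
total, dice, ce = criterion(logits, labels, weights)
print(total.item(), dice.item(), ce.item())
```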
#### File: FastSurfer/recon_surf/paint_cc_into_pred.py
```python
import nibabel as nib
import sys
import argparse
HELPTEXT = """
Script to add corpus callosum segmentation (CC, FreeSurfer IDs 251-255) to
deep-learning prediction (e.g. aparc.DKTatlas+aseg.deep.mgz).
USAGE:
paint_cc_into_pred -in_cc <input_seg_with_cc> -in_pred <input_seg_without_cc> -out <output_seg>
Dependencies:
Python 3.5
Nibabel to read and write FreeSurfer data
http://nipy.org/nibabel/
Original Author: <NAME>
Date: Jul-10-2020
"""
def argument_parse():
"""
Command line option parser for paint_cc_into_pred.py
"""
parser = argparse.ArgumentParser(usage=HELPTEXT)
parser.add_argument('--input_cc', '-in_cc', dest='input_cc',
help="path to input segmentation with Corpus Callosum (IDs 251-255 in FreeSurfer space)")
parser.add_argument('--input_pred', '-in_pred', dest='input_pred',
help="path to input segmentation the Corpus Callosum should be added to.")
parser.add_argument('--output', '-out', dest='output', help="path to output (input segmentation + added CC)")
args = parser.parse_args()
if args.input_cc is None or args.input_pred is None or args.output is None:
sys.exit('ERROR: Please specify input and output segmentations')
return args
def paint_in_cc(pred, aseg_cc):
"""
Function to paint corpus callosum segmentation into prediction. Note, that this function
modifies the original array and does not create a copy.
:param np.ndarray pred: deep-learning prediction
:param np.ndarray aseg_cc: aseg segmentation with CC
:return np.ndarray: prediction with added CC
"""
cc_mask = (aseg_cc >= 251) & (aseg_cc <= 255)
pred[cc_mask] = aseg_cc[cc_mask]
return pred
if __name__ == "__main__":
# Command Line options are error checking done here
options = argument_parse()
print("Reading inputs: {} {}...".format(options.input_cc, options.input_pred))
aseg_image = nib.load(options.input_cc).get_data()
prediction = nib.load(options.input_pred)
pred_with_cc = paint_in_cc(prediction.get_data(), aseg_image)
print ("Writing segmentation with corpus callosum to: {}".format(options.output))
pred_with_cc_fin = nib.MGHImage(pred_with_cc, prediction.affine, prediction.header)
pred_with_cc_fin.to_filename(options.output)
sys.exit(0)
```
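A small array-level check of `paint_in_cc` with synthetic label maps (no MGH files involved); note that the prediction array is modified in place, as the docstring states:

```python
import numpy as np

pred = np.full((4, 4, 4), 17, dtype=np.int32)   # dummy prediction volume
aseg = np.zeros((4, 4, 4), dtype=np.int32)
aseg[1, 1, 1] = 253                             # one corpus-callosum voxel (CC IDs are 251-255)
out = paint_in_cc(pred, aseg)
print(out[1, 1, 1])                             # 253
```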
#### File: FastSurfer/recon_surf/smooth_aparc.py
```python
import optparse
import sys
import numpy as np
import nibabel.freesurfer.io as fs
from read_geometry import read_geometry
from scipy import sparse
HELPTEXT = """
Script to fill holes and smooth aparc labels.
USAGE:
smooth_aparc --insurf <surf> --inaparc <in_aparc> --incort <cortex.label> --outaparc <out_aparc>
Dependencies:
Python 3.5
Numpy
http://www.numpy.org
Nibabel to read and write FreeSurfer surface meshes
http://nipy.org/nibabel/
Original Author: <NAME>
Date: Jul-24-2018
"""
h_inaparc = 'path to input aparc'
h_incort = 'path to input cortex label'
h_insurf = 'path to input surface'
h_outaparc = 'path to output aparc'
def options_parse():
"""
Command line option parser
"""
parser = optparse.OptionParser(version='$Id: smooth_aparc,v 1.0 2018/06/24 11:34:08 mreuter Exp $', usage=HELPTEXT)
parser.add_option('--insurf', dest='insurf', help=h_insurf)
parser.add_option('--incort', dest='incort', help=h_incort)
parser.add_option('--inaparc', dest='inaparc', help=h_inaparc)
parser.add_option('--outaparc', dest='outaparc', help=h_outaparc)
(options, args) = parser.parse_args()
if options.insurf is None or options.inaparc is None or options.outaparc is None:
sys.exit('ERROR: Please specify input surface, input and output aparc')
return options
def get_adjM(trias, n):
I = trias
J = I[:, [1, 2, 0]]
# flatten
I = I.flatten()
J = J.flatten()
adj = sparse.csr_matrix((np.ones(I.shape, dtype=bool), (I, J)), shape=(n, n))
# if max adj is > 1 we have non manifold or mesh trias are not oriented
# if matrix is not symmetric, we have a boundary
# in case we have boundary, make sure this is a symmetric matrix
adjM = (adj+adj.transpose()).astype(bool)
return adjM
def bincount2D_vectorized(a):
N = a.max()+1
a_offs = a + np.arange(a.shape[0])[:, None]*N
return np.bincount(a_offs.ravel(), minlength=a.shape[0]*N).reshape(-1, N)
def mode_filter(adjM, labels, fillonlylabel="", novote=[]):
# make sure labels lengths equals adjM dimension
n = labels.shape[0]
if n != adjM.shape[0] or n != adjM.shape[1]:
sys.exit("ERROR mode_filter: adjM size "+format(adjM.shape)+" does not match label length "+format(labels.shape))
# remove rows with only a single entry from adjM
# if we removed some triangles, we may have isolated vertices
# adding the eye to adjM will produce these entries
# since they are neighbors to themselves, this adds
# values to nlabels below that we don't want
counts = np.diff(adjM.indptr)
rows = np.where(counts == 1)
pos = adjM.indptr[rows]
adjM.data[pos] = 0
adjM.eliminate_zeros()
# for num rings exponentiate adjM and add adjM from step before
# we currently do this outside of mode_filter
# new labels will be the same as old almost everywhere
labels_new = labels
# find vertices to fill
# if fillonlylabels empty, fill all
if not fillonlylabel:
ids = np.arange(0, n)
else:
# select the ones with the labels
ids = np.where(labels == fillonlylabel)[0]
if ids.size == 0:
print("WARNING: No ids found with idx "+str(fillonlylabel)+" ... continue")
return labels
# of all ids to fill, find neighbors
nbrs = adjM[ids, :]
# get vertex ids (I, J ) of each edge in nbrs
[I, J, V] = sparse.find(nbrs)
# check if we have neighbors with -1 or 0
# this could produce problems in the loop below, so lets stop for now:
nlabels = labels[J]
if any(nlabels == -1) or any(nlabels == 0):
sys.exit("there are -1 or 0 labels in neighbors!")
# create sparse matrix with labels at neighbors
nlabels = sparse.csr_matrix((labels[J], (I, J)))
#print("nlabels: {}".format(nlabels))
from scipy.stats import mode
if not isinstance(nlabels, sparse.csr_matrix):
raise ValueError('Matrix must be CSR format.')
#novote = [-1,0,fillonlylabel]
# get rid of rows that have uniform vote (or are empty)
# for this to work no negative numbers should exist
# get row counts, max and sums
rmax = nlabels.max(1).A.squeeze()
sums = nlabels.sum(axis=1).A1
counts = np.diff(nlabels.indptr)
# then keep rows where max*counts differs from sums
rmax = np.multiply(rmax, counts)
rows = np.where(rmax != sums)[0]
print("rows: "+str(nlabels.shape[0])+" reduced to "+str(rows.size))
# Only after fixing the rows above, we can
# get rid of entries that should not vote
# since we have only rows that were non-uniform, they should not become empty
# rows may become unform: we still need to vote below to update this label
if novote:
rr = np.in1d(nlabels.data, novote)
nlabels.data[rr] = 0
nlabels.eliminate_zeros()
# run over all rows and compute mode (maybe vectorize later)
rempty = 0
for row in rows:
rvals = nlabels.data[nlabels.indptr[row]:nlabels.indptr[row+1]]
if rvals.size == 0:
rempty += 1
continue
#print(str(rvals))
mvals = mode(rvals)[0]
#print(str(mvals))
if mvals.size != 0:
#print(str(row)+' '+str(ids[row])+' '+str(mvals[0]))
labels_new[ids[row]] = mvals[0]
if rempty > 0:
# should not happen
print("WARNING: row empty: "+str(rempty))
#nbrs=np.squeeze(np.asarray(nbrs.todense())) # sparse matrix to dense matrix to np.array
#nlabels=labels[nbrs]
#counts = np.bincount(nlabels)
#vote=np.argmax(counts)
return labels_new
def smooth_aparc(insurfname, inaparcname, incortexname, outaparcname):
""" (string) -> None
smoothes aparc
"""
# read input files
print("Reading in surface: {} ...".format(insurfname))
surf = read_geometry(insurfname, read_metadata=True)
print("Reading in annotation: {} ...".format(inaparcname))
aparc = fs.read_annot(inaparcname)
print("Reading in cortex label: {} ...".format(incortexname))
cortex = fs.read_label(incortexname)
# set labels (n) and triangles (n x 3)
labels = aparc[0]
faces = surf[1]
nvert = labels.size
if labels.size != surf[0].shape[0]:
sys.exit("ERROR smooth_aparc: vertec count "+format(surf[0].shape[0])+" does not match label length "+format(labels.size))
# Compute Cortex Mask
mask = np.zeros(labels.shape, dtype=bool)
mask[cortex] = True
# check if we have places where non-cortex has some labels
noncortnum=np.where(~mask & (labels != -1))
print("Non-cortex vertices with labels: "+str(noncortnum[0].size)) # num of places where non cortex has some real labels
# here we need to decide how to deal with them
# either we set everything outside cortex to -1 (the FS way)
# or we keep these real labels and allow them to vote, maybe even shrink cortex label? Probably not.
# get non-cortex ids (here we could subtract the ids that have a real label)
# for now we remove everything outside cortex
noncortids = np.where(~mask)
# remove triangles where one vertex is non-cortex to avoid these edges to vote on neighbors later
rr = np.in1d(faces, noncortids)
rr = np.reshape(rr, faces.shape)
rr = np.amax(rr, 1)
faces = faces[~rr, :]
# get Edge matrix (adjacency)
adjM = get_adjM(faces, nvert)
# add identity so that each vertex votes in the mode filter below
adjM = adjM + sparse.eye(adjM.shape[0])
#print("adj shape: {}".format(adjM.shape))
#print("v shape: {}".format(surf[0].shape))
#print("labels shape: {}".format(labels.size))
#print("labels: {}".format(labels))
#print("minlab: "+str(np.min(labels))+" maxlab: "+str(np.max(labels)))
# set all labels inside cortex that are -1 or 0 to fill label
fillonlylabel = np.max(labels)+1
labels[mask & (labels == -1)] = fillonlylabel
labels[mask & (labels == 0)] = fillonlylabel
# now we do not have any -1 or 0 (except 0 outside of cortex)
# FILL HOLES
ids = np.where(labels == fillonlylabel)[0]
counter = 1
idssize = ids.size
while idssize != 0:
print("Fill Round: "+str(counter))
labels_new = mode_filter(adjM, labels, fillonlylabel, np.array([fillonlylabel]))
labels = labels_new
ids = np.where(labels == fillonlylabel)[0]
if ids.size == idssize:
# no more improvement, strange could be an island in the cortex label that cannot be filled
print("Warning: Cannot improve but still have holes. Maybe there is an island in the cortex label that cannot be filled with real labels.")
fillids = np.where(labels == fillonlylabel)[0]
labels[fillids] = 0
rr = np.in1d(faces, fillids)
rr = np.reshape(rr, faces.shape)
rr = np.amax(rr, 1)
faces = faces[~rr, :]
# get Edge matrix (adjacency)
adjM = get_adjM(faces, nvert)
# add identity so that each vertex votes in the mode filter below
adjM = adjM + sparse.eye(adjM.shape[0])
break
idssize = ids.size
counter += 1
# SMOOTH other labels (first with wider kernel then again fine-tune):
labels = mode_filter(adjM*adjM, labels)
labels = mode_filter(adjM, labels)
# set labels outside cortex to -1
labels[~mask] = -1
print ("Outputing fixed annot: {}".format(outaparcname))
fs.write_annot(outaparcname, labels, aparc[1], aparc[2])
if __name__ == "__main__":
    # Command line options and error checking are done here
options = options_parse()
smooth_aparc(options.insurf, options.inaparc, options.incort, options.outaparc)
sys.exit(0)
``` |
{
"source": "jinboci/gluon-cv",
"score": 2
} |
#### File: scripts/depth/options.py
```python
import os
import argparse
import mxnet as mx
class MonodepthOptions:
def __init__(self):
self.parser = argparse.ArgumentParser(description="Monodepthv2 options")
# PATHS
self.parser.add_argument("--data_path",
type=str,
help="path to the training data",
default=os.path.join(os.path.expanduser("~"),
".mxnet/datasets/kitti", "kitti_data"))
self.parser.add_argument("--log_dir",
type=str,
help="log directory",
default=os.path.join(os.path.expanduser("."), "tmp"))
# MODEL options
self.parser.add_argument('--model_zoo', type=str, default=None,
help='choose depth model from model zoo model')
self.parser.add_argument('--model_zoo_pose', type=str, default=None,
help='choose pose model from model zoo model')
self.parser.add_argument('--pretrained_type',
type=str,
help="use gluoncv pretrained model or customer model",
choices=["gluoncv", "customer"],
default="customer")
self.parser.add_argument('--pretrained_base', action="store_true",
help='whether to use pretrained resnet')
self.parser.add_argument("--scales",
nargs="+",
type=int,
help="scales used in the loss",
default=[0, 1, 2, 3])
self.parser.add_argument('--hybridize', action="store_true",
help='whether to turn on model hybridization')
# DATA options
self.parser.add_argument("--split",
type=str,
help="which training split to use",
choices=["eigen_zhou", "eigen_full", "odom", "benchmark"],
default="eigen_zhou")
self.parser.add_argument("--dataset",
type=str,
help="dataset to train on",
default="kitti",
choices=["kitti", "kitti_odom", "kitti_depth", "kitti_test"])
self.parser.add_argument("--png",
help="if set, trains from raw KITTI png files (instead of jpgs)",
action="store_true")
self.parser.add_argument("--height",
type=int,
help="input image height",
default=192)
self.parser.add_argument("--width",
type=int,
help="input image width",
default=640)
self.parser.add_argument("--disparity_smoothness",
type=float,
help="disparity smoothness weight",
default=1e-3)
self.parser.add_argument("--min_depth",
type=float,
help="minimum depth",
default=0.1)
self.parser.add_argument("--max_depth",
type=float,
help="maximum depth",
default=100.0)
self.parser.add_argument("--use_stereo",
help="if set, uses stereo pair for training",
action="store_true")
self.parser.add_argument("--frame_ids",
nargs="+",
type=int,
help="frames to load",
default=[0, -1, 1])
# OPTIMIZATION options
self.parser.add_argument("--batch_size",
type=int,
help="batch size",
default=12)
self.parser.add_argument("--learning_rate",
type=float,
help="learning rate",
default=1e-4)
self.parser.add_argument("--num_epochs",
type=int,
help="number of epochs",
default=20)
self.parser.add_argument('--start_epoch', type=int, default=0,
metavar='N', help='start epochs (default:0)')
self.parser.add_argument('--warmup_epochs', type=int, default=0,
help='number of warmup epochs.')
self.parser.add_argument("--scheduler_step_size",
type=int,
help="step size of the scheduler",
default=15)
# ABLATION options
self.parser.add_argument("--v1_multiscale",
help="if set, uses monodepth v1 multiscale",
action="store_true")
self.parser.add_argument("--avg_reprojection",
help="if set, uses average reprojection loss",
action="store_true")
self.parser.add_argument("--disable_automasking",
help="if set, doesn't do auto-masking",
action="store_true")
self.parser.add_argument("--no_ssim",
help="if set, disables ssim in the loss",
action="store_true")
self.parser.add_argument("--pose_model_input",
type=str,
help="how many images the pose network gets",
default="pairs",
choices=["pairs", "all"])
# SYSTEM options
self.parser.add_argument("--no_gpu",
help="if set disables gpu",
action="store_true")
self.parser.add_argument('--gpu', type=int,
default=0,
help='GPU index')
self.parser.add_argument("--num_workers",
type=int,
help="number of dataloader workers",
default=12)
# LOADING options
self.parser.add_argument("--load_weights_folder",
type=str,
help="name of model to load")
self.parser.add_argument('--eval_model', type=str, default=None,
help='the name of evaluation model')
self.parser.add_argument('--resume_depth', type=str, default=None,
help='put the path to depthnet resuming file if needed')
self.parser.add_argument('--resume_pose', type=str, default=None,
help='put the path to posenet resuming file if needed')
# LOGGING options
self.parser.add_argument("--log_frequency",
type=int,
help="number of batches between each tensorboard log",
default=250)
self.parser.add_argument("--save_frequency",
type=int,
help="number of epochs between each save",
default=1)
# EVALUATION options
self.parser.add_argument("--eval_stereo",
help="if set evaluates in stereo mode",
action="store_true")
self.parser.add_argument("--eval_mono",
help="if set evaluates in mono mode",
action="store_true")
self.parser.add_argument("--disable_median_scaling",
help="if set disables median scaling in evaluation",
action="store_true")
self.parser.add_argument("--pred_depth_scale_factor",
help="if set multiplies predictions by this number",
type=float,
default=1)
self.parser.add_argument("--ext_disp_to_eval",
type=str,
help="optional path to a .npy disparities file to evaluate")
self.parser.add_argument("--eval_split",
type=str,
default="eigen",
choices=[
"eigen", "eigen_benchmark", "benchmark", "odom_9", "odom_10"],
help="which split to run eval on")
self.parser.add_argument("--save_pred_disps",
help="if set saves predicted disparities",
action="store_true")
self.parser.add_argument("--no_eval",
help="if set disables evaluation",
action="store_true")
self.parser.add_argument("--eval_eigen_to_benchmark",
help="if set assume we are loading eigen results from npy but "
"we want to evaluate using the new benchmark.",
action="store_true")
def parse(self):
self.options = self.parser.parse_args()
# logging and checkpoint saving
if not (self.options.eval_mono or self.options.eval_stereo):
if self.options.model_zoo_pose and self.options.model_zoo:
log_path = os.path.join(self.options.log_dir, self.options.model_zoo)
if not os.path.exists(log_path):
os.makedirs(log_path)
self.options.ctx = [mx.gpu(self.options.gpu)]
if self.options.no_gpu:
self.options.ctx = [mx.cpu(0)]
return self.options
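# Quick sanity check of the option parser (illustrative addition, not part of the
# original script): running this module directly prints a few parsed defaults.
if __name__ == "__main__":
    opts = MonodepthOptions().parse()
    print("input size: {}x{}, batch size: {}, ctx: {}".format(
        opts.height, opts.width, opts.batch_size, opts.ctx))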
```
#### File: scripts/depth/trainer.py
```python
from __future__ import absolute_import, division, print_function
import os
import sys
import shutil
import copy
from tqdm import tqdm
import numpy as np
import json
import mxnet as mx
from mxnet import gluon, autograd
from gluoncv.data import KITTIRAWDataset, KITTIOdomDataset
from gluoncv.data.kitti.kitti_utils import dict_batchify_fn, readlines
from gluoncv.model_zoo import get_model
from gluoncv.model_zoo.monodepthv2.layers import *
from gluoncv.model_zoo.monodepthv2 import MonoDepth2PoseNet
from gluoncv.utils import LRScheduler, LRSequential
# Models which were trained with stereo supervision were trained with a nominal
# baseline of 0.1 units. The KITTI rig has a baseline of 54cm. Therefore,
# to convert our stereo predictions to real-world scale we multiply our depths by 5.4.
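# (0.54 m real baseline) / (0.1 nominal baseline) = 5.4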
STEREO_SCALE_FACTOR = 5.4
class Trainer:
def __init__(self, options, logger):
# configuration setting
self.opt = options
self.logger = logger
self.log_path = os.path.join(self.opt.log_dir, self.opt.model_zoo)
# checking height and width are multiples of 32
assert self.opt.height % 32 == 0, "'height' must be a multiple of 32"
assert self.opt.width % 32 == 0, "'width' must be a multiple of 32"
self.num_scales = len(self.opt.scales)
self.num_input_frames = len(self.opt.frame_ids)
assert self.opt.frame_ids[0] == 0, "frame_ids must start with 0"
self.use_pose_net = not (self.opt.use_stereo and self.opt.frame_ids == [0])
if self.opt.use_stereo:
self.opt.frame_ids.append("s")
######################### dataloader #########################
datasets_dict = {"kitti": KITTIRAWDataset,
"kitti_odom": KITTIOdomDataset}
self.dataset = datasets_dict[self.opt.dataset]
fpath = os.path.join(os.path.expanduser("~"), ".mxnet/datasets/kitti",
"splits", self.opt.split, "{}_files.txt")
train_filenames = readlines(fpath.format("train"))
val_filenames = readlines(fpath.format("val"))
img_ext = '.png' if self.opt.png else '.jpg'
train_dataset = self.dataset(
self.opt.data_path, train_filenames, self.opt.height, self.opt.width,
self.opt.frame_ids, num_scales=4, is_train=True, img_ext=img_ext)
self.train_loader = gluon.data.DataLoader(
train_dataset, batch_size=self.opt.batch_size, shuffle=True,
batchify_fn=dict_batchify_fn, num_workers=self.opt.num_workers,
pin_memory=True, last_batch='discard')
val_dataset = self.dataset(
self.opt.data_path, val_filenames, self.opt.height, self.opt.width,
self.opt.frame_ids, num_scales=4, is_train=False, img_ext=img_ext)
self.val_loader = gluon.data.DataLoader(
val_dataset, batch_size=self.opt.batch_size, shuffle=False,
batchify_fn=dict_batchify_fn, num_workers=self.opt.num_workers,
pin_memory=True, last_batch='discard')
################### model initialization ###################
# create depth network
if self.opt.model_zoo is not None:
self.model = get_model(self.opt.model_zoo, pretrained_base=self.opt.pretrained_base,
scales=self.opt.scales, ctx=self.opt.ctx)
else:
assert "Must choose a model from model_zoo, " \
"please provide depth the model_zoo using --model_zoo"
self.logger.info(self.model)
# resume checkpoint if needed
if self.opt.resume_depth is not None:
if os.path.isfile(self.opt.resume_depth):
logger.info('Resume depth model: %s' % self.opt.resume_depth)
self.model.load_parameters(self.opt.resume_depth, ctx=self.opt.ctx)
else:
raise RuntimeError("=> no checkpoint found at '{}'".format(self.opt.resume_depth))
if self.use_pose_net:
# create pose network
if self.opt.model_zoo_pose is not None:
self.posenet = get_model(
self.opt.model_zoo_pose, pretrained_base=self.opt.pretrained_base,
num_input_images=2, num_input_features=1, num_frames_to_predict_for=2,
ctx=self.opt.ctx)
else:
assert "Must choose a model from model_zoo, " \
"please provide the pose model_zoo_pose using --model_zoo_pose"
self.logger.info(self.posenet)
# resume checkpoint if needed
if self.opt.resume_pose is not None:
if os.path.isfile(self.opt.resume_pose):
logger.info('Resume pose model: %s' % self.opt.resume_pose)
                    self.posenet.load_parameters(self.opt.resume_pose, ctx=self.opt.ctx)
else:
raise RuntimeError("=> no checkpoint found at '{}'".format(
self.opt.resume_pose))
if self.opt.hybridize:
self.model.hybridize()
self.posenet.hybridize()
################### optimization setting ###################
self.lr_scheduler_depth = LRSequential([
LRScheduler('step', base_lr=self.opt.learning_rate,
nepochs=self.opt.num_epochs - self.opt.warmup_epochs,
iters_per_epoch=len(train_dataset),
step_epoch=[self.opt.scheduler_step_size - self.opt.warmup_epochs])
])
optimizer_params_depth = {'lr_scheduler': self.lr_scheduler_depth,
'learning_rate': self.opt.learning_rate}
self.depth_optimizer = gluon.Trainer(self.model.collect_params(), 'adam', optimizer_params_depth)
if self.use_pose_net:
self.lr_scheduler_pose = LRSequential([
LRScheduler('step', base_lr=self.opt.learning_rate,
nepochs=self.opt.num_epochs - self.opt.warmup_epochs,
iters_per_epoch=len(train_dataset),
step_epoch=[self.opt.scheduler_step_size - self.opt.warmup_epochs])
])
optimizer_params_pose = {'lr_scheduler': self.lr_scheduler_pose,
'learning_rate': self.opt.learning_rate}
self.pose_optimizer = gluon.Trainer(self.posenet.collect_params(), 'adam', optimizer_params_pose)
print("Training model named:\n ", self.opt.model_zoo)
print("Models are saved to:\n ", self.opt.log_dir)
print("Training is using:\n ", "CPU" if self.opt.ctx[0] is mx.cpu() else "GPU")
################### loss function ###################
if not self.opt.no_ssim:
self.ssim = SSIM()
self.backproject_depth = {}
self.project_3d = {}
for scale in self.opt.scales:
h = self.opt.height // (2 ** scale)
w = self.opt.width // (2 ** scale)
self.backproject_depth[scale] = BackprojectDepth(
self.opt.batch_size, h, w, ctx=self.opt.ctx[0])
self.project_3d[scale] = Project3D(self.opt.batch_size, h, w)
################### metrics ###################
self.depth_metric_names = [
"de/abs_rel", "de/sq_rel", "de/rms", "de/log_rms", "da/a1", "da/a2", "da/a3"]
print("Using split:\n ", self.opt.split)
print("There are {:d} training items and {:d} validation items\n".format(
len(train_dataset), len(val_dataset)))
self.save_opts()
# for save best model
self.best_delta1 = 0
self.best_model = self.model
if self.use_pose_net:
self.best_posenet = self.posenet
def train(self):
"""Run the entire training pipeline
"""
self.logger.info('Starting Epoch: %d' % self.opt.start_epoch)
self.logger.info('Total Epochs: %d' % self.opt.num_epochs)
self.epoch = 0
for self.epoch in range(self.opt.start_epoch, self.opt.num_epochs):
self.run_epoch()
self.val()
# save final model
self.save_model("final")
self.save_model("best")
def run_epoch(self):
"""Run a single epoch of training and validation
"""
print("Training")
tbar = tqdm(self.train_loader)
train_loss = 0.0
for batch_idx, inputs in enumerate(tbar):
with autograd.record(True):
outputs, losses = self.process_batch(inputs)
mx.nd.waitall()
autograd.backward(losses['loss'])
self.depth_optimizer.step(self.opt.batch_size, ignore_stale_grad=True)
if self.use_pose_net:
self.pose_optimizer.step(self.opt.batch_size, ignore_stale_grad=True)
train_loss += losses['loss'].asscalar()
tbar.set_description('Epoch %d, training loss %.3f' %
(self.epoch, train_loss / (batch_idx + 1)))
if batch_idx % self.opt.log_frequency == 0:
self.logger.info('Epoch %d iteration %04d/%04d: training loss %.3f' %
(self.epoch, batch_idx, len(self.train_loader),
train_loss / (batch_idx + 1)))
mx.nd.waitall()
def process_batch(self, inputs, eval_mode=False):
for key, ipt in inputs.items():
inputs[key] = ipt.as_in_context(self.opt.ctx[0])
        # predict the disparity map; only the image with frame_id 0 is fed
        # through the depth encoder
input_img = inputs[("color_aug", 0, 0)]
if eval_mode:
input_img = inputs[("color", 0, 0)]
input_img = input_img.as_in_context(context=self.opt.ctx[0])
decoder_output = self.model(input_img)
        # in hybridize mode, the output of a HybridBlock must be an NDArray or a list.
# Here, we have to transfer the output to dict type.
outputs = {}
idx = 0
for i in range(4, -1, -1):
if i in self.opt.scales:
outputs[("disp", i)] = decoder_output[idx]
idx += 1
if eval_mode:
_, depth = disp_to_depth(outputs[("disp", 0)],
self.opt.min_depth, self.opt.max_depth)
outputs[("depth", 0, 0)] = depth
return outputs
if self.use_pose_net:
outputs.update(self.predict_poses(inputs))
# image reconstruction
self.generate_images_pred(inputs, outputs)
# compute loss
losses = self.compute_losses(inputs, outputs)
return outputs, losses
def val(self):
"""Validate the model on a single minibatch
"""
tbar = tqdm(self.val_loader)
depth_metrics = {}
abs_rel, sq_rel, rmse, rmse_log = 0, 0, 0, 0
delta_1, delta_2, delta_3 = 0, 0, 0
for metric in self.depth_metric_names:
depth_metrics[metric] = 0
for i, inputs in enumerate(tbar):
outputs = self.process_batch(inputs, True)
if "depth_gt" in inputs:
self.compute_metrics(inputs, outputs, depth_metrics)
# print evaluation results
abs_rel = depth_metrics['de/abs_rel'] / (i + 1)
sq_rel = depth_metrics['de/sq_rel'] / (i + 1)
rmse = depth_metrics['de/rms'] / (i + 1)
rmse_log = depth_metrics['de/log_rms'] / (i + 1)
delta_1 = depth_metrics['da/a1'] / (i + 1)
delta_2 = depth_metrics['da/a2'] / (i + 1)
delta_3 = depth_metrics['da/a3'] / (i + 1)
tbar.set_description(
'Epoch %d, validation '
'abs_REL: %.3f sq_REL: %.3f '
'RMSE: %.3f, RMSE_log: %.3f '
                    'Delta_1: %.3f Delta_2: %.3f Delta_3: %.3f' %
(self.epoch, abs_rel, sq_rel, rmse, rmse_log, delta_1, delta_2, delta_3))
else:
print("Cannot find ground truth upon validation dataset!")
return
self.logger.info(
'Epoch %d, validation '
'abs_REL: %.3f sq_REL: %.3f '
'RMSE: %.3f, RMSE_log: %.3f '
            'Delta_1: %.3f Delta_2: %.3f Delta_3: %.3f' %
(self.epoch, abs_rel, sq_rel, rmse, rmse_log, delta_1, delta_2, delta_3))
mx.nd.waitall()
if self.epoch % self.opt.save_frequency == 0:
self.save_checkpoint(delta_1)
if delta_1 > self.best_delta1:
self.best_model = self.model
self.best_delta1 = delta_1
if self.use_pose_net:
self.best_posenet = self.posenet
def predict_poses(self, inputs):
outputs = {}
pose_feats = {f_i: inputs["color_aug", f_i, 0] for f_i in self.opt.frame_ids}
for f_i in self.opt.frame_ids[1:]:
if f_i != "s":
# To maintain ordering we always pass frames in temporal order
if f_i < 0:
pose_inputs = [pose_feats[f_i], pose_feats[0]]
else:
pose_inputs = [pose_feats[0], pose_feats[f_i]]
axisangle, translation = self.posenet(mx.nd.concat(*pose_inputs, dim=1))
outputs[("axisangle", 0, f_i)] = axisangle
outputs[("translation", 0, f_i)] = translation
# Invert the matrix if the frame id is negative
outputs[("cam_T_cam", 0, f_i)] = transformation_from_parameters(
axisangle[:, 0], translation[:, 0], invert=(f_i < 0))
return outputs
def generate_images_pred(self, inputs, outputs):
for scale in self.opt.scales:
disp = outputs[("disp", scale)]
if self.opt.v1_multiscale:
source_scale = scale
else:
disp = mx.nd.contrib.BilinearResize2D(disp,
height=self.opt.height,
width=self.opt.width)
source_scale = 0
_, depth = disp_to_depth(disp, self.opt.min_depth, self.opt.max_depth)
outputs[("depth", 0, scale)] = depth
for i, frame_id in enumerate(self.opt.frame_ids[1:]):
if frame_id == "s":
T = inputs["stereo_T"]
else:
T = outputs[("cam_T_cam", 0, frame_id)]
cam_points = self.backproject_depth[source_scale](depth,
inputs[("inv_K", source_scale)])
pix_coords = self.project_3d[source_scale](cam_points,
inputs[("K", source_scale)],
T)
outputs[("sample", frame_id, scale)] = pix_coords
outputs[("color", frame_id, scale)] = mx.nd.BilinearSampler(
data=inputs[("color", frame_id, source_scale)],
grid=outputs[("sample", frame_id, scale)],
name='sampler')
if not self.opt.disable_automasking:
outputs[("color_identity", frame_id, scale)] = \
inputs[("color", frame_id, source_scale)]
def compute_reprojection_loss(self, pred, target):
"""Computes reprojection loss between a batch of predicted and target images
"""
abs_diff = mx.nd.abs(target - pred)
l1_loss = abs_diff.mean(axis=1, keepdims=True)
if self.opt.no_ssim:
reprojection_loss = l1_loss
else:
ssim_loss = self.ssim(pred, target).mean(axis=1, keepdims=True)
reprojection_loss = 0.85 * ssim_loss + 0.15 * l1_loss
return reprojection_loss
def compute_losses(self, inputs, outputs):
"""Compute the reprojection and smoothness losses for a minibatch
"""
losses = {}
total_loss = 0
for scale in self.opt.scales:
loss = 0
reprojection_losses = []
if self.opt.v1_multiscale:
source_scale = scale
else:
source_scale = 0
disp = outputs[("disp", scale)]
color = inputs[("color", 0, scale)]
target = inputs[("color", 0, source_scale)]
for frame_id in self.opt.frame_ids[1:]:
pred = outputs[("color", frame_id, scale)]
reprojection_losses.append(self.compute_reprojection_loss(pred, target))
reprojection_losses = mx.nd.concat(*reprojection_losses, dim=1)
if not self.opt.disable_automasking:
identity_reprojection_losses = []
for frame_id in self.opt.frame_ids[1:]:
pred = inputs[("color", frame_id, source_scale)]
identity_reprojection_losses.append(
self.compute_reprojection_loss(pred, target))
identity_reprojection_losses = mx.nd.concat(*identity_reprojection_losses, dim=1)
if self.opt.avg_reprojection:
identity_reprojection_loss = \
identity_reprojection_losses.mean(axis=1, keepdims=True)
else:
# save both images, and do min all at once below
identity_reprojection_loss = identity_reprojection_losses
if self.opt.avg_reprojection:
reprojection_loss = reprojection_losses.mean(axis=1, keepdims=True)
else:
reprojection_loss = reprojection_losses
if not self.opt.disable_automasking:
# add random numbers to break ties
identity_reprojection_loss = \
identity_reprojection_loss + \
mx.nd.random.randn(*identity_reprojection_loss.shape).as_in_context(
identity_reprojection_loss.context) * 0.00001
combined = mx.nd.concat(identity_reprojection_loss, reprojection_loss, dim=1)
else:
combined = reprojection_loss
if combined.shape[1] == 1:
to_optimise = combined
else:
to_optimise = mx.nd.min(data=combined, axis=1)
idxs = mx.nd.argmin(data=combined, axis=1)
if not self.opt.disable_automasking:
outputs["identity_selection/{}".format(scale)] = (
idxs > identity_reprojection_loss.shape[1] - 1).astype('float')
loss += to_optimise.mean()
mean_disp = disp.mean(axis=2, keepdims=True).mean(axis=3, keepdims=True)
norm_disp = disp / (mean_disp + 1e-7)
smooth_loss = get_smooth_loss(norm_disp, color)
loss = loss + self.opt.disparity_smoothness * smooth_loss / (2 ** scale)
total_loss = total_loss + loss
losses["loss/{}".format(scale)] = loss
total_loss = total_loss / self.num_scales
losses["loss"] = total_loss
return losses
def compute_metrics(self, inputs, outputs, depth_metrics):
"""Compute depth metrics, to allow monitoring during training
This isn't particularly accurate as it averages over the entire batch,
so is only used to give an indication of validation performance
"""
depth_gt = inputs["depth_gt"].asnumpy()
gt_height, gt_width = depth_gt.shape[2:]
depth_pred = outputs[("depth", 0, 0)]
depth_pred = mx.nd.clip(
mx.nd.contrib.BilinearResize2D(depth_pred, height=gt_height, width=gt_width),
a_min=1e-3, a_max=80
)
depth_pred = depth_pred.detach().asnumpy()
# garg/eigen crop
mask = depth_gt > 0
crop_mask = np.zeros_like(mask)
crop_mask[:, :, 153:371, 44:1197] = 1
mask = np.logical_and(mask, crop_mask)
depth_gt = depth_gt[mask]
depth_pred = depth_pred[mask]
if self.opt.use_stereo:
scale_factor = STEREO_SCALE_FACTOR
else:
scale_factor = np.median(depth_gt) / np.median(depth_pred)
depth_pred *= scale_factor
depth_pred = np.clip(depth_pred, a_min=1e-3, a_max=80)
depth_errors = compute_depth_errors(depth_gt, depth_pred)
for i, metric in enumerate(self.depth_metric_names):
depth_metrics[metric] += depth_errors[i]
def save_opts(self):
"""Save options to disk so we know what we ran this experiment with
"""
models_dir = os.path.join(self.log_path, "models")
if not os.path.exists(models_dir):
os.makedirs(models_dir)
to_save = self.opt.__dict__.copy()
str_ctr = []
for ctx in to_save['ctx']:
str_ctr.append(str(ctx))
to_save['ctx'] = str_ctr
with open(os.path.join(models_dir, 'opt.json'), 'w') as f:
json.dump(to_save, f, indent=2)
def save_checkpoint(self, delta_1):
"""Save Checkpoint"""
save_folder = os.path.join(self.log_path, "models", "weights")
if not os.path.exists(save_folder):
os.makedirs(save_folder)
# depth model
filename = 'epoch_%04d_Delta1_%2.4f.params' % (self.epoch, delta_1)
filepath = os.path.join(save_folder, filename)
self.model.save_parameters(filepath)
# pose encoder model
if self.use_pose_net:
filename = 'epoch_%04d_Delta1_%2.4f_posenet.params' % (self.epoch, delta_1)
filepath = os.path.join(save_folder, filename)
self.posenet.save_parameters(filepath)
def save_model(self, model_type="final"):
"""Save Checkpoint"""
save_folder = os.path.join(self.log_path, "models", "weights")
if not os.path.exists(save_folder):
os.makedirs(save_folder)
model = self.model
if self.use_pose_net:
posenet = self.posenet
if model_type == "best":
model = self.best_model
if self.use_pose_net:
posenet = self.best_posenet
# save depth model
filename = 'depth_{}.params'
filepath = os.path.join(save_folder, filename.format(model_type))
model.save_parameters(filepath)
# save pose model
if self.use_pose_net:
filename = 'pose_{}.params'
filepath = os.path.join(save_folder, filename.format(model_type))
posenet.save_parameters(filepath)
``` |
{
"source": "jinboci/incubator-tvm",
"score": 2
} |
#### File: python/unittest/test_auto_scheduler_measure.py
```python
import tvm
from tvm import auto_scheduler
import tempfile
from test_auto_scheduler_common import get_tiled_matmul
def test_record():
dag, s = get_tiled_matmul()
if not tvm.runtime.enabled("llvm"):
return
target = tvm.target.create("llvm")
task = auto_scheduler.SearchTask(dag, "test", target)
inp = auto_scheduler.measure.MeasureInput(task, s)
res = auto_scheduler.measure.MeasureResult([0.1], 0, "", 0.2, 1)
with tempfile.NamedTemporaryFile() as fp:
auto_scheduler.save_records(fp.name, [inp], [res])
log_reader = auto_scheduler.RecordReader(fp.name)
inputs, results = log_reader.read_lines()
assert len(inputs) == 1
s1 = dag.infer_bound_from_state(s)
s2 = dag.infer_bound_from_state(inputs[0].state)
assert s1 == s2
assert not (s1 == dag.get_init_state())
def test_measure_local_builder_runner():
dag, s0 = get_tiled_matmul()
if not tvm.runtime.enabled("llvm"):
return
tgt = tvm.target.create("llvm")
task = auto_scheduler.SearchTask(dag, "test", tgt)
minp = auto_scheduler.MeasureInput(task, s0)
local_builder = auto_scheduler.LocalBuilder()
local_runner = auto_scheduler.LocalRunner()
bress = local_builder.build([minp])
assert bress[0].error_no == 0
mress = local_runner.run([minp], bress)
assert mress[0].error_no == 0
if __name__ == "__main__":
test_record()
test_measure_local_builder_runner()
``` |
{
"source": "Jinbo-Hu/L3DAS22-TASK2",
"score": 3
} |
#### File: code/learning/checkpoint.py
```python
import logging
import random
import numpy as np
import pandas as pd
import torch
import torch.distributed as dist
class CheckpointIO:
"""CheckpointIO class.
It handles saving and loading checkpoints.
"""
def __init__(self, checkpoints_dir, model, optimizer, batch_sampler, metrics_names, num_checkpoints=1, remark=None):
"""
Args:
checkpoint_dir (Path obj): path where checkpoints are saved
model: model
optimizer: optimizer
batch_sampler: batch_sampler
metrics_names: metrics names to be saved in a checkpoints csv file
num_checkpoints: maximum number of checkpoints to save. When it exceeds the number, the older
(older, smaller or higher) checkpoints will be deleted
remark (optional): to remark the name of the checkpoint
"""
self.checkpoints_dir = checkpoints_dir
self.checkpoints_dir.mkdir(parents=True, exist_ok=True)
self.model = model
self.optimizer = optimizer
self.batch_sampler = batch_sampler
self.num_checkpoints = num_checkpoints
self.remark = remark
self.value_list = []
self.epoch_list = []
self.checkpoints_csv_path = checkpoints_dir.joinpath('metrics_statistics.csv')
# save checkpoints_csv header
if dist.get_rank() == 0:
metrics_keys_list = [name for name in metrics_names]
header = ['epoch'] + metrics_keys_list
df_header = pd.DataFrame(columns=header)
df_header.to_csv(self.checkpoints_csv_path, sep='\t', index=False, mode='a+')
def save(self, epoch, it, metrics, key_rank=None, rank_order='high', max_epoch=100):
"""Save model. It will save a latest model, a best model of rank_order for value, and
'self.num_checkpoints' best models of rank_order for value.
Args:
metrics: metrics to log
key_rank (str): the key of metrics to rank
rank_order: 'low' | 'high' | 'latest'
'low' to keep the models of lowest values
'high' to keep the models of highest values
'latest' to keep the models of latest epochs
"""
        ## save checkpoints_csv
metrics_values_list = [value for value in metrics.values()]
checkpoint_list = [[epoch] + metrics_values_list]
df_checkpoint = pd.DataFrame(checkpoint_list)
df_checkpoint.to_csv(self.checkpoints_csv_path, sep='\t', header=False, index=False, mode='a+')
## save checkpoints
current_value = None if rank_order == 'latest' else metrics[key_rank]
# latest model
latest_checkpoint_path = self.checkpoints_dir.joinpath('{}_epoch_latest.pth'.format(self.remark))
self.save_file(latest_checkpoint_path, epoch, it)
# save 5 latest models
# if epoch >= max_epoch - 5:
# checkpoint_path = self.checkpoints_dir.joinpath('{}_epoch_{}th.pth'.format(self.remark, epoch))
# self.save_file(checkpoint_path, epoch, it)
# if len(self.value_list) < self.num_checkpoints:
# self.value_list.append(current_value)
# self.epoch_list.append(epoch)
# checkpoint_path = self.checkpoints_dir.joinpath('{}_epoch_{}.pth'.format(self.remark, epoch))
# self.save_file(checkpoint_path, epoch, it)
# logging.info('Checkpoint saved to {}'.format(checkpoint_path))
# elif len(self.value_list) >= self.num_checkpoints:
# value_list = np.array(self.value_list)
# if rank_order == 'high' and current_value >= value_list.min():
# worst_index = value_list.argmin()
# self.del_and_save(worst_index, current_value, epoch, it)
# elif rank_order == 'low' and current_value <= value_list.max():
# worst_index = value_list.argmax()
# self.del_and_save(worst_index, current_value, epoch, it)
# elif rank_order == 'latest':
# worst_index = 0
# self.del_and_save(worst_index, current_value, epoch, it)
# best model
# value_list = np.array(self.value_list)
# best_checkpoint_path = self.checkpoints_dir.joinpath('{}_epoch_best.pth'.format(self.remark))
# if rank_order == 'high' and current_value >= value_list.max():
# self.save_file(best_checkpoint_path, epoch, it)
# elif rank_order == 'low' and current_value <= value_list.min():
# self.save_file(best_checkpoint_path, epoch, it)
# elif rank_order == 'latest':
# self.save_file(best_checkpoint_path, epoch, it)
def del_and_save(self, worst_index, current_value, epoch, it):
"""Delete and save checkpoint
Args:
worst_index: worst index,
current_value: current value,
epoch: epoch,
it: it,
"""
worst_chpt_path = self.checkpoints_dir.joinpath('{}_epoch_{}.pth'.format(self.remark, self.epoch_list[worst_index]))
if worst_chpt_path.is_file():
worst_chpt_path.unlink()
self.value_list.pop(worst_index)
self.epoch_list.pop(worst_index)
self.value_list.append(current_value)
self.epoch_list.append(epoch)
checkpoint_path = self.checkpoints_dir.joinpath('{}_epoch_{}.pth'.format(self.remark, epoch))
self.save_file(checkpoint_path, epoch, it)
logging.info('Checkpoint saved to {}'.format(checkpoint_path))
def save_file(self, checkpoint_path, epoch, it):
"""Save a module to a file
Args:
checkpoint_path (Path obj): checkpoint path, including .pth file name
epoch: epoch,
it: it
"""
outdict = {
'epoch': epoch,
'it': it,
'model': self.model.module.state_dict(),
'optimizer': self.optimizer.state_dict(),
'sampler': self.batch_sampler.get_state(),
'rng': torch.get_rng_state(),
'cuda_rng': torch.cuda.get_rng_state(),
'random': random.getstate(),
'np_random': np.random.get_state(),
}
torch.save(outdict, checkpoint_path)
def load(self, checkpoint_path):
"""Load a module from a file
"""
state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu'))
epoch = state_dict['epoch']
it = state_dict['it']
self.model.module.load_state_dict(state_dict['model'])
self.optimizer.load_state_dict(state_dict['optimizer'])
self.batch_sampler.set_state(state_dict['sampler'])
torch.set_rng_state(state_dict['rng'])
torch.cuda.set_rng_state(state_dict['cuda_rng'])
random.setstate(state_dict['random'])
np.random.set_state(state_dict['np_random'])
logging.info('Resuming complete from {}\n'.format(checkpoint_path))
return epoch, it
```
#### File: code/learning/initialize.py
```python
import logging
import random
import shutil
import socket
from datetime import datetime
from pathlib import Path
import numpy as np
import torch
import torch.distributed as dist
import torch.optim as optim
from torch.backends import cudnn
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.tensorboard import SummaryWriter
from utils.common import create_logging
from utils.config import (get_afextractor, get_generator,
get_losses, get_metrics, get_models, get_optimizer,
get_trainer, store_config)
from learning.checkpoint import CheckpointIO
def init_train(args, cfg, dataset):
""" Training initialization.
Including Data generator, model, optimizer initialization.
"""
'''Cuda'''
args.cuda = not args.no_cuda and torch.cuda.is_available()
rank = dist.get_rank()
world_size = dist.get_world_size()
''' Reproducible seed set'''
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
    cudnn.deterministic = True  # use deterministic cuDNN algorithms for reproducibility
    cudnn.benchmark = True  # automatically find the most efficient algorithm for the current configuration; set it to False to reduce randomness
'''Sharing directories'''
out_train_dir = Path(cfg['workspace_dir']).joinpath('results').joinpath('out_train') \
.joinpath(cfg['method']).joinpath(cfg['task']).joinpath(cfg['training']['train_id'])
ckpts_dir = out_train_dir.joinpath('checkpoints')
if rank == 0:
print('Train ID is {}\n'.format(cfg['training']['train_id']))
out_train_dir.mkdir(parents=True, exist_ok=True)
'''tensorboard and logging'''
if rank == 0:
current_time = datetime.now().strftime('%b%d_%H-%M-%S')
tb_dir = out_train_dir.joinpath('tb').joinpath(current_time + '_' + socket.gethostname())
tb_dir.mkdir(parents=True, exist_ok=True)
logs_dir = out_train_dir.joinpath('logs')
create_logging(logs_dir, filemode='w')
writer = SummaryWriter(log_dir=str(tb_dir))
param_file = out_train_dir.joinpath('config.yaml')
if param_file.is_file():
param_file.unlink()
store_config(param_file, cfg)
dist.barrier(device_ids=[rank])
'''Data generator'''
train_set, train_generator, batch_sampler = get_generator(args, cfg, dataset, generator_type='train')
valid_set, valid_generator, _ = get_generator(args, cfg, dataset, generator_type='valid')
'''Loss'''
losses = get_losses(cfg)
'''Metrics'''
metrics = get_metrics(cfg, dataset)
'''Audio feature extractor'''
af_extractor = get_afextractor(cfg, args.cuda)
'''Model'''
model = get_models(cfg, dataset, args.cuda)
if dist.is_initialized():
torch.cuda.set_device(rank)
model = DDP(model, device_ids=[rank], output_device=rank)
'''Optimizer'''
optimizer = get_optimizer(cfg, af_extractor, model)
lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=cfg['training']['lr_step_size'],
gamma=cfg['training']['lr_gamma'])
'''Trainer'''
trainer = get_trainer(args=args, cfg=cfg, dataset=dataset, valid_set=valid_set,
af_extractor=af_extractor, model=model, optimizer=optimizer, losses=losses, metrics=metrics)
'''CheckpointIO'''
if not cfg['training']['valid_set']:
metrics_names = losses.names
else:
metrics_names = metrics.names
ckptIO = CheckpointIO(
checkpoints_dir=ckpts_dir,
model=model,
optimizer=optimizer,
batch_sampler=batch_sampler,
metrics_names=metrics_names,
num_checkpoints=1,
remark=cfg['training']['remark']
)
if cfg['training']['resume_model']:
resume_path = ckpts_dir.joinpath(cfg['training']['resume_model'])
logging.info('=====>> Resume from the checkpoint: {}......\n'.format(str(resume_path)))
epoch_it, it = ckptIO.load(resume_path)
for param_group in optimizer.param_groups:
param_group['lr'] = cfg['training']['lr']
else:
epoch_it, it = 0, 0
''' logging and return '''
logging.info('Train sets are: {}\n'.format(cfg['training']['train_set']))
logging.info('Valid sets are: {}\n'.format(cfg['training']['valid_set']))
logging.info('Training clip number is: {}\n'.format(len(train_set)))
logging.info('Number of batches per epoch is: {}\n'.format(len(batch_sampler)))
logging.info('Validation clip number is: {}\n'.format(len(valid_set)))
logging.info('Training loss type is: {}\n'.format(cfg['training']['loss_type']))
train_initializer = {
'writer': writer if rank == 0 else None,
'train_generator': train_generator,
'valid_generator': valid_generator,
'lr_scheduler': lr_scheduler,
'trainer': trainer,
'ckptIO': ckptIO ,
'epoch_it': epoch_it,
'it': it
}
return train_initializer
def init_infer(args, cfg, dataset):
""" Inference initialization.
Including Data generator, model, optimizer initialization.
"""
''' Cuda '''
args.cuda = not args.no_cuda and torch.cuda.is_available()
''' Directories '''
print('Inference ID is {}\n'.format(cfg['inference']['infer_id']))
out_infer_dir = Path(cfg['workspace_dir']).joinpath('results').joinpath('out_infer')\
.joinpath(cfg['method']).joinpath(cfg['inference']['infer_id'])
if out_infer_dir.is_dir():
shutil.rmtree(str(out_infer_dir))
submissions_dir = out_infer_dir.joinpath('submissions')
predictions_dir = out_infer_dir.joinpath('predictions')
submissions_dir.mkdir(parents=True, exist_ok=True)
predictions_dir.mkdir(parents=True, exist_ok=True)
train_ids = [train_id.strip() for train_id in str(cfg['inference']['train_ids']).split(',')]
models = [model.strip() for model in str(cfg['inference']['models']).split(',')]
ckpts_paths_list = []
ckpts_models_list = []
for train_id, model_name in zip(train_ids, models):
ckpts_dir = Path(cfg['workspace_dir']).joinpath('results').joinpath('out_train').joinpath(cfg['method'])\
.joinpath(cfg['task']).joinpath(train_id).joinpath('checkpoints')
ckpt_path = [path for path in sorted(ckpts_dir.iterdir()) if cfg['inference']['model_mark'] in path.stem]
print('ckpt_name: ', ckpt_path, 'model_name: ', model_name)
# ckpt_path = [path for path in sorted(ckpts_dir.iterdir()) if path.stem.split('_')[-1].isnumeric()] #
for path in ckpt_path:
ckpts_paths_list.append(path)
ckpts_models_list.append(model_name)
''' Parameters '''
param_file = out_infer_dir.joinpath('config.yaml')
if param_file.is_file():
param_file.unlink()
store_config(param_file, cfg)
''' Data generator '''
test_set, test_generator, _ = get_generator(args, cfg, dataset, generator_type='test')
''' logging and return '''
logging.info('Test clip number is: {}\n'.format(len(test_set)))
infer_initializer = {
'submissions_dir': submissions_dir,
'predictions_dir': predictions_dir,
'ckpts_paths_list': ckpts_paths_list,
'ckpts_models_list': ckpts_models_list,
'test_generator': test_generator,
'cuda': args.cuda,
'test_set': test_set
}
return infer_initializer
```
#### File: code/learning/preprocess.py
```python
import os
import shutil
from pathlib import Path
from timeit import default_timer as timer
import h5py
import librosa
import numpy as np
import pandas as pd
import torch
from methods.data import BaseDataset, collate_fn
from torch.utils.data import DataLoader
from tqdm import tqdm
from utils.common import float_samples_to_int16, find_key_from_value
from utils.config import get_afextractor
class Preprocessor_task2:
"""Preprocess the audio data.
1. Extract wav file and store to hdf5 file
2. Extract meta file and store to hdf5 file
"""
def __init__(self, args, cfg, dataset):
"""
Args:
args: parsed args
cfg: configurations
dataset: dataset class
"""
self.args = args
self.cfg = cfg
self.dataset = dataset
self.cfg_logmelIV = cfg['data']['logmelIV']
# Path for dataset
self.hdf5_dir = Path(cfg['hdf5_dir']).joinpath(cfg['dataset']).joinpath('task2')
# Path for extraction of wav
self.data_dir_list = [
dataset.dataset_dir['task2']['dev'].joinpath('data'),
dataset.dataset_dir['task2']['train'].joinpath('data'),
dataset.dataset_dir['task2']['test'].joinpath('data')
]
data_h5_dir = self.hdf5_dir.joinpath('data').joinpath('{}fs'.format(self.cfg_logmelIV['sample_rate']))
self.data_h5_dir_list = [
data_h5_dir.joinpath('dev'),
data_h5_dir.joinpath('train'),
data_h5_dir.joinpath('test')
]
# Path for extraction of scalar
self.scalar_h5_dir = self.hdf5_dir.joinpath('scalar')
fn_scalar = '{}_sr{}_nfft{}_hop{}_mel{}.h5'.format(cfg['data']['audio_feature'],
self.cfg_logmelIV['sample_rate'], self.cfg_logmelIV['n_fft'], self.cfg_logmelIV['hop_length'], self.cfg_logmelIV['n_mels'])
self.scalar_path = self.scalar_h5_dir.joinpath(fn_scalar)
# Path for extraction of meta
self.label_dir_list = [
dataset.dataset_dir['task2']['dev'].joinpath('labels'),
dataset.dataset_dir['task2']['train'].joinpath('labels'),
dataset.dataset_dir['task2']['test'].joinpath('labels'),
]
# Path for extraction of frame label
self.meta_frame_csv_dir_list = [
self.hdf5_dir.joinpath('meta').joinpath('frame').joinpath('dev'),
self.hdf5_dir.joinpath('meta').joinpath('frame').joinpath('train'),
self.hdf5_dir.joinpath('meta').joinpath('frame').joinpath('test')
]
# Path for extraction of track label
self.meta_track_h5_dir_list = [
self.hdf5_dir.joinpath('meta').joinpath('track').joinpath('dev'),
self.hdf5_dir.joinpath('meta').joinpath('track').joinpath('train'),
]
if args.dataset_type == 'train':
self.data_dir_list = self.data_dir_list[:2]
self.data_h5_dir_list = self.data_h5_dir_list[:2]
self.label_dir_list = self.label_dir_list[:2]
self.meta_frame_csv_dir_list = self.meta_frame_csv_dir_list[:2]
elif args.dataset_type == 'test':
self.data_dir_list = self.data_dir_list[2:]
self.data_h5_dir_list = self.data_h5_dir_list[2:]
self.label_dir_list = self.label_dir_list[2:]
self.meta_frame_csv_dir_list = self.meta_frame_csv_dir_list[2:]
def extract_data(self):
""" Extract wave and store to hdf5 file
"""
print('Converting wav file to hdf5 file starts......\n')
for h5_dir in self.data_h5_dir_list:
if h5_dir.is_dir():
flag = input("HDF5 folder {} is already existed, delete it? (y/n)".format(h5_dir)).lower()
if flag == 'y':
shutil.rmtree(h5_dir)
elif flag == 'n':
print("User select not to remove the HDF5 folder {}. The process will quit.\n".format(h5_dir))
return
h5_dir.mkdir(parents=True)
for idx, data_dir in enumerate(self.data_dir_list):
h5_dir = self.data_h5_dir_list[idx]
data_path = os.listdir(data_dir)
data_path_A = [i for i in data_path if i.split('.')[0].split('_')[-1]=='A']
audio_count = 0
for wav_file_A in data_path_A:
                wav_file_B = wav_file_A[:-5] + 'B' + wav_file_A[-4:]  # swap the 'A' suffix for 'B'
wav_path_A = data_dir.joinpath(wav_file_A)
wav_path_B = data_dir.joinpath(wav_file_B)
data_A, _ = librosa.load(wav_path_A, sr=self.cfg_logmelIV['sample_rate'], mono=False)
data_B, _ = librosa.load(wav_path_B, sr=self.cfg_logmelIV['sample_rate'], mono=False)
# stack two ambisonics data
data = np.concatenate((data_A, data_B), axis=0)
# save to h5py
h5_file = wav_file_A.replace('_A','').replace('.wav','.h5')
h5_path = h5_dir.joinpath(h5_file)
with h5py.File(h5_path, 'w') as hf:
hf.create_dataset(name='waveform', data=float_samples_to_int16(data), dtype=np.int16)
audio_count += 1
print('{}, {}, {}'.format(audio_count, h5_path, data.shape))
def extract_frame_label(self):
""" Extract frame label for evaluating. Store to csv file.
"""
num_frames = int(self.dataset.clip_length / self.dataset.label_resolution)
print('Converting meta file to frame label file starts......\n')
for meta_frame_dir in self.meta_frame_csv_dir_list:
if meta_frame_dir.is_dir():
flag = input("frame label folder {} is already existed, delete it? (y/n)".format(meta_frame_dir)).lower()
if flag == 'y':
shutil.rmtree(meta_frame_dir)
elif flag == 'n':
print("User select not to remove the frame label folder {}. The process will quit.\n".format(meta_frame_dir))
return
#quantize time stamp to step resolution
quantize = lambda x: round(float(x) / self.dataset.label_resolution)
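        # e.g. assuming label_resolution = 0.1 s, an event spanning 2.0 s to 2.7 s
        # quantizes to start_frame 20 and end_frame 27, i.e. active frames 20-26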
for idx, label_dir in enumerate(self.label_dir_list): # label dir
label_list = os.listdir(label_dir)
self.meta_frame_csv_dir_list[idx].mkdir(parents=True, exist_ok=True)
iterator = tqdm(enumerate(label_list), total=len(label_list), unit='it')
for idy, path in iterator: # label path
frame_label = {}
for i in range(num_frames):
frame_label[i] = []
path = label_dir.joinpath(path)
df = pd.read_csv(path)
meta_path = self.meta_frame_csv_dir_list[idx].joinpath(path.stem + '.csv')
for idz, row in df.iterrows():
#compute start and end frame position (quantizing)
start = quantize(row['Start'])
end = quantize(row['End'])
start_frame = int(start)
end_frame = int(end)
class_id = self.dataset.label_dic_task2[row['Class']] #int ID of sound class name
sound_frames = np.arange(start_frame, end_frame)
for f in sound_frames:
local_frame_label = [class_id, row['X'], row['Y'],row['Z'], idz]
frame_label[f].append(local_frame_label)
for frame in range(num_frames):
if frame_label[frame]:
for event in frame_label[frame]:
event[0] = find_key_from_value(self.dataset.label_dic_task2, event[0])[0]
with meta_path.open('a') as f:
f.write('{},{},{},{},{},{}\n'.format(frame, event[0], event[1], event[2], event[3], event[4]))
tqdm.write('{}, {}'.format(idy, meta_path))
def extract_track_label(self):
""" Extract track label for permutation invariant training. Store to h5 file
"""
num_tracks = self.dataset.max_ov
num_frames = int(self.dataset.clip_length / self.dataset.label_resolution)
num_classes = self.dataset.num_classes
#quantize time stamp to step resolution
quantize = lambda x: round(float(x) / self.dataset.label_resolution)
for idx, label_dir in enumerate(self.label_dir_list):
label_list = os.listdir(label_dir)
self.meta_track_h5_dir_list[idx].mkdir(parents=True, exist_ok=True)
iterator = tqdm(enumerate(label_list), total=len(label_list), unit='it')
for idy, path in iterator:
sed_label = np.zeros((num_frames, num_tracks, num_classes))
doa_label = np.zeros((num_frames, num_tracks, 3))
path = label_dir.joinpath(path)
df = pd.read_csv(path)
for idz, row in df.iterrows():
#compute start and end frame position (quantizing)
start = quantize(row['Start'])
end = quantize(row['End'])
start_frame = int(start)
end_frame = int(end)
class_id = self.dataset.label_dic_task2[row['Class']] #int ID of sound class name
for track_idx in range(num_tracks):
if sed_label[start_frame][track_idx].sum() == 0:
sed_label[start_frame:end_frame, track_idx, class_id] = 1
doa_label[start_frame:end_frame, track_idx, 0] = row['X']
doa_label[start_frame:end_frame, track_idx, 1] = row['Y']
doa_label[start_frame:end_frame, track_idx, 2] = row['Z']
break
else:
track_idx += 1
meta_path = self.meta_track_h5_dir_list[idx].joinpath(path.stem + '.h5')
with h5py.File(meta_path, 'w') as hf:
hf.create_dataset(name='sed_label', data=sed_label, dtype=np.float32)
hf.create_dataset(name='doa_label', data=doa_label, dtype=np.float32)
tqdm.write('{}, {}'.format(idy, meta_path))
def extract_scalar(self):
""" Extract scalar and store to hdf5 file
"""
print('Extracting scalar......\n')
self.scalar_h5_dir.mkdir(parents=True, exist_ok=True)
cuda_enabled = not self.args.no_cuda and torch.cuda.is_available()
train_set = BaseDataset(self.args, self.cfg, self.dataset)
data_generator = DataLoader(
dataset=train_set,
batch_size=16,
shuffle=False,
num_workers=self.args.num_workers,
collate_fn=collate_fn,
pin_memory=True
)
af_extractor = get_afextractor(self.cfg, cuda_enabled).eval()
iterator = tqdm(enumerate(data_generator), total=len(data_generator), unit='it')
features_A = []
features_B = []
begin_time = timer()
for it, batch_sample in iterator:
if it == len(data_generator):
break
batch_x_A = batch_sample['waveform'][:,:4]
batch_x_B = batch_sample['waveform'][:,4:]
batch_x_A.require_grad = False
batch_x_B.require_grad = False
if cuda_enabled:
batch_x_A = batch_x_A.cuda(non_blocking=True)
batch_x_B = batch_x_B.cuda(non_blocking=True)
batch_y_A = af_extractor(batch_x_A).transpose(0, 1) # (C,N,T,F)
batch_y_B = af_extractor(batch_x_B).transpose(0, 1) # (C,N,T,F)
C, _, _, F = batch_y_A.shape
features_A.append(batch_y_A.reshape(C, -1, F).cpu().numpy()) # (C, N*T, F)
features_B.append(batch_y_B.reshape(C, -1, F).cpu().numpy()) # (C, N*T, F)
iterator.close()
features_A = np.concatenate(features_A, axis=1)
features_B = np.concatenate(features_B, axis=1)
mean_A = []
mean_B = []
std_A = []
std_B = []
for ch in range(C):
mean_A.append(np.mean(features_A[ch], axis=0, keepdims=True))
std_A.append(np.std(features_A[ch], axis=0, keepdims=True))
mean_B.append(np.mean(features_B[ch], axis=0, keepdims=True))
std_B.append(np.std(features_B[ch], axis=0, keepdims=True))
mean_A = np.stack(mean_A)[None, ...]
std_A = np.stack(std_A)[None, ...]
mean_B = np.stack(mean_B)[None, ...]
std_B = np.stack(std_B)[None, ...]
mean = np.concatenate((mean_A, mean_B), axis=1)
std = np.concatenate((std_A, std_B), axis=1)
# save to h5py
with h5py.File(self.scalar_path, 'w') as hf:
hf.create_dataset(name='mean', data=mean, dtype=np.float32)
hf.create_dataset(name='std', data=std, dtype=np.float32)
print("\nScalar saved to {}\n".format(str(self.scalar_path)))
print("Extacting scalar finished! Time spent: {:.3f} s\n".format(timer() - begin_time))
```
#### File: methods/utils/dense_block.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
class _DenseLayer(nn.Module):
def __init__(self, num_input_features, growth_rate, bn_size, drop_rate, dilation):
super(_DenseLayer, self).__init__()
self.add_module('norm1', nn.BatchNorm2d(num_input_features)),
self.add_module('relu1', nn.ReLU(inplace=True)),
self.add_module('conv1', nn.Conv2d(num_input_features, bn_size *
growth_rate, kernel_size=1, stride=1,
bias=False)),
self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate)),
self.add_module('relu2', nn.ReLU(inplace=True)),
self.add_module('conv2', nn.Conv2d(bn_size * growth_rate, growth_rate,
kernel_size=3, stride=1, padding=dilation,
dilation=dilation, bias=False)),
self.drop_rate = float(drop_rate)
def bn_function(self, inputs):
        # type: (List[Tensor]) -> Tensor
concated_features = torch.cat(inputs, 1)
bottleneck_output = self.conv1(self.relu1(self.norm1(concated_features))) # noqa: T484
return bottleneck_output
def forward(self, input):
if isinstance(input, Tensor):
prev_features = [input]
else:
prev_features = input
bottleneck_output = self.bn_function(prev_features)
new_features = self.conv2(self.relu2(self.norm2(bottleneck_output)))
if self.drop_rate > 0:
new_features = F.dropout(new_features, p=self.drop_rate,
training=self.training)
return new_features
class _DenseBlock(nn.ModuleDict):
def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate):
super(_DenseBlock, self).__init__()
for i in range(num_layers):
layer = _DenseLayer(
num_input_features + i * growth_rate,
growth_rate=growth_rate,
bn_size=bn_size,
drop_rate=drop_rate,
dilation=2**i,
)
self.add_module('denselayer%d' % (i + 1), layer)
def forward(self, init_features):
features = [init_features]
for _, layer in self.items():
new_features = layer(features)
features.append(new_features)
return torch.cat(features, 1)
class _Transition(nn.Sequential):
def __init__(self, num_input_features, num_output_features):
super(_Transition, self).__init__()
self.add_module('norm', nn.BatchNorm2d(num_input_features))
self.add_module('relu', nn.ReLU(inplace=True))
self.add_module('conv', nn.Conv2d(num_input_features, num_output_features,
kernel_size=1, stride=1, bias=False))
self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2))
class DenseFeature(nn.Module):
def __init__(self, growth_rate=32, block_config=(4, 4, 4), num_init_features=64,
bn_size=4, drop_rate=0):
super(DenseFeature, self).__init__()
# Denseblocks
self.features = nn.Sequential()
num_features = num_init_features
for i, num_layers in enumerate(block_config):
block = _DenseBlock(
num_layers=num_layers,
num_input_features=num_features,
bn_size=bn_size,
growth_rate=growth_rate,
drop_rate=drop_rate
)
self.features.add_module('denseblock%d' % (i + 1), block)
num_features = num_features + num_layers * growth_rate
# if i != len(block_config) - 1:
if i < 2:
trans = _Transition(num_input_features=num_features,
num_output_features=num_features // 2)
self.features.add_module('transition%d' % (i + 1), trans)
num_features = num_features // 2
# Final batch norm
self.features.add_module('norm5', nn.BatchNorm2d(num_features))
self.features.add_module('relu5', nn.ReLU(inplace=True))
# Official init from torch repo.
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.constant_(m.bias, 0)
def forward(self, x):
features = self.features(x)
return features
if __name__ == '__main__':
layer = _DenseBlock(num_layers=4, num_input_features=8,
bn_size=4, growth_rate=16, drop_rate=0.1)
print(layer)
print(layer(torch.rand(3,8,5,5)).shape)
```
#### File: code/utils/cli_parser.py
```python
import argparse
import sys
from pathlib import Path
from ruamel.yaml import YAML
from termcolor import cprint
def parse_cli_overides():
"""Parse the command-line arguments.
Parse args from CLI and override config dictionary entries
This function implements the command-line interface of the program.
The interface accepts general command-line arguments as well as
arguments that are specific to a sub-command. The sub-commands are
    *preprocess*, *train*, *infer*, and *evaluate*. Specifying a
sub-command is required, as it specifies the task that the program
should carry out.
Returns:
args: The parsed arguments.
"""
# Parse the command-line arguments, but separate the `--config_file`
# option from the other arguments. This way, options can be parsed
# from the config file(s) first and then overidden by the other
# command-line arguments later.
parser = argparse.ArgumentParser(
description='Event Independent Network for Learning 3D Audio Sources.',
add_help=False
)
parser.add_argument('-c', '--config_file', default='./configs/ein_seld/seld.yaml', help='Specify config file', metavar='FILE')
subparsers = parser.add_subparsers(dest='mode')
parser_preproc = subparsers.add_parser('preprocess')
parser_train = subparsers.add_parser('train')
parser_infer = subparsers.add_parser('infer')
subparsers.add_parser('evaluate')
# Require the user to specify a sub-command
subparsers.required = True
parser_preproc.add_argument('--preproc_mode', choices=['extract_data', 'extract_scalar', 'extract_frame_label',
'extract_track_label', 'salsa_extractor'], required=True, help='select preprocessing mode')
parser_preproc.add_argument('--dataset_type', default='train', choices=['train', 'test'],
help='select dataset to preprocess')
parser_preproc.add_argument('--num_workers', type=int, default=8, metavar='N')
parser_preproc.add_argument('--no_cuda', action='store_true', help='Do not use cuda.')
parser_train.add_argument('--seed', type=int, default=2022, metavar='N')
parser_train.add_argument('--num_workers', type=int, default=8, metavar='N')
parser_train.add_argument('--no_cuda', action='store_true', help='Do not use cuda.')
parser_train.add_argument('--port', type=int, default=12359, metavar='N')
parser_infer.add_argument('--num_workers', type=int, default=8, metavar='N')
parser_infer.add_argument('--no_cuda', action='store_true', help='Do not use cuda.')
args = parser.parse_args()
args_dict = vars(args)
cprint("Args:", "green")
for key, value in args_dict.items():
print(f" {key:25s} -> {value}")
yaml = YAML()
yaml.indent(mapping=4, sequence=6, offset=3)
yaml.default_flow_style = False
with open(args.config_file, 'r') as f:
cfg = yaml.load(f)
cprint("Cfg:", "red")
yaml.dump(cfg, sys.stdout, transform=replace_indent)
return args, cfg
def replace_indent(stream):
stream = " " + stream
return stream.replace("\n", "\n ")
``` |
{
"source": "jinbooooom/coding-for-interview",
"score": 4
} |
#### File: dataStructure/queue/deque.py
```python
class Deque:
def __init__(self):
self.items = []
    def addFront(self, item):  # add an element at the front
        self.items.append(item)
    def addRear(self, item):  # add an element at the rear
        self.items.insert(0, item)
    def removeFront(self):  # remove and return the front element
        return self.items.pop()
    def removeRear(self):  # remove and return the rear element
        return self.items.pop(0)
def size(self):
return len(self.items)
def isEmpty(self):
return self.items == []
def clear(self):
del self.items[:]
if __name__ == "__main__":
d = Deque()
d.addRear(5)
d.addRear(6)
d.addRear(7)
d.addFront(8)
d.addFront(9)
d.addFront(10)
print(d.items)
d.removeFront()
print(d.items)
d.removeRear()
print(d.items)
d.clear()
print(d.items)
print(d.isEmpty())
```
#### File: dataStructure/queue/queue.py
```python
class Queue:
"""先进先出"""
def __init__(self):
self.items = []
def enqueue(self, item):
self.items.insert(0, item)
def dequeue(self):
return self.items.pop()
def isEmpty(self):
return self.items == []
def size(self):
return len(self.items)
    def clear(self):  # reset to an empty queue
del self.items[:]
if __name__ == "__main__":
q = Queue()
q.enqueue(9)
q.enqueue(8)
q.enqueue(5)
print(q.items)
print(q.size())
print(q.isEmpty())
q.dequeue()
print(q.items)
print(q.size())
print(q.isEmpty())
q.clear()
print(q.items)
print(q.size())
print(q.isEmpty())
```
#### File: dataStructure/tree/BinaryTree.py
```python
class BinaryTree:
def __init__(self, rootObj):
self.key = rootObj
self.left = None
self.right = None
def insertLeft(self, newNode):
        if not self.left:  # if the left child is empty
self.left = BinaryTree(newNode)
else:
t = BinaryTree(newNode)
t.left = self.left
self.left = t
def insertRight(self, newNode):
if not self.right:
self.right = BinaryTree(newNode)
else:
t = BinaryTree(newNode)
t.right = self.right
self.right = t
    def getLeft(self):  # return the left child (reference)
        return self.left
    def getRight(self):  # return the right child (reference)
        return self.right
def setRootVal(self, obj):
        self.key = obj
def getRootVal(self):
return self.key
    def preorder(self, tree):  # preorder traversal (root, left, right)
if tree:
print(tree.getRootVal())
self.preorder(tree.getLeft())
self.preorder(tree.getRight())
    def inorder(self, tree):  # inorder traversal (left, root, right)
if tree:
self.inorder(tree.getLeft())
print(tree.getRootVal())
self.inorder(tree.getRight())
    def posorder(self, tree):  # postorder traversal (left, right, root)
if tree:
self.posorder(tree.getLeft())
self.posorder(tree.getRight())
print(tree.getRootVal())
if __name__ == "__main__":
tree = BinaryTree('root')
tree.insertLeft('2a')
tree.insertRight('2b')
print(tree.getRootVal())
print(tree.getLeft().getRootVal())
print(tree.getRight().getRootVal())
tree.getLeft().insertLeft('3a')
tree.getLeft().insertRight('3b')
tree.getRight().insertLeft('3c')
    print('Preorder traversal:')
tree.preorder(tree)
    print('Inorder traversal:')
tree.inorder(tree)
    print('Postorder traversal:')
tree.posorder(tree)
``` |