Dataset schema (one record per source file):

  column                type      range
  max_stars_repo_path   string    length 3 to 269
  max_stars_repo_name   string    length 4 to 119
  max_stars_count       int64     0 to 191k
  id                    string    length 1 to 7
  content               string    length 6 to 1.05M
  score                 float64   0.23 to 5.13
  int_score             int64     0 to 5

Each record below lists these fields in order: repo path, repo name, star count, id, file content, score, int_score.
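The records are easiest to inspect programmatically. A minimal sketch, assuming the records have been exported to a JSON-lines file; the file name code_sample.jsonl is hypothetical, since this export does not name the dataset:

from datasets import load_dataset  # Hugging Face "datasets" library

# Hypothetical file name; the export above does not say where the records live.
ds = load_dataset("json", data_files="code_sample.jsonl", split="train")

record = ds[0]
print(record["max_stars_repo_path"], record["max_stars_repo_name"], record["max_stars_count"])
print(record["content"][:120])  # first part of the stored source file
print(record["score"], record["int_score"])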
orchestrator/domain/base.py
workfloworchestrator/orchestrator-core
15
12792051
<filename>orchestrator/domain/base.py # Copyright 2019-2020 SURF. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections import defaultdict from datetime import datetime from itertools import groupby, zip_longest from operator import attrgetter from sys import version_info from typing import TYPE_CHECKING, Any, Callable, ClassVar, Dict, List, Optional, Set, Tuple, Type, TypeVar, Union from uuid import UUID, uuid4 import structlog from more_itertools import first, flatten, one, only from pydantic import BaseModel, Field, ValidationError from pydantic.fields import PrivateAttr from pydantic.main import ModelMetaclass from pydantic.types import ConstrainedList from pydantic.typing import get_args, get_origin from sqlalchemy.orm import selectinload from orchestrator.db import ( ProductBlockTable, ProductTable, SubscriptionInstanceRelationTable, SubscriptionInstanceTable, SubscriptionInstanceValueTable, SubscriptionTable, db, ) from orchestrator.domain.lifecycle import ProductLifecycle, lookup_specialized_type, register_specialized_type from orchestrator.types import ( SAFE_PARENT_TRANSITIONS_FOR_STATUS, State, SubscriptionLifecycle, UUIDstr, is_list_type, is_of_type, is_optional_type, is_union_type, ) from orchestrator.utils.datetime import nowtz from orchestrator.utils.docs import make_product_block_docstring, make_subscription_model_docstring logger = structlog.get_logger(__name__) def _is_constrained_list_type(type: Type) -> bool: """Check if type is a constained list type. Example: >>> _is_constrained_list_type(List[int]) False >>> class ListType(ConstrainedList): ... min_items = 1 >>> _is_constrained_list_type(ListType) True """ # subclass on typing.List throws exception and there is no good way to test for this try: is_constrained_list = issubclass(type, ConstrainedList) except Exception: # Strip generic arguments, it still might be a subclass if get_origin(type): return _is_constrained_list_type(get_origin(type)) else: return False return is_constrained_list T = TypeVar("T") # pragma: no mutate S = TypeVar("S", bound="SubscriptionModel") # pragma: no mutate B = TypeVar("B", bound="ProductBlockModel") # pragma: no mutate class DomainModel(BaseModel): """Base class for domain models. 
Contains all common Product block/Subscription instance code """ class Config: validate_assignment = True # pragma: no mutate validate_all = True # pragma: no mutate arbitrary_types_allowed = True # pragma: no mutate __base_type__: ClassVar[Optional[Type["DomainModel"]]] = None # pragma: no mutate _product_block_fields_: ClassVar[Dict[str, Type]] _non_product_block_fields_: ClassVar[Dict[str, Type]] def __init_subclass__( cls, *args: Any, lifecycle: Optional[List[SubscriptionLifecycle]] = None, **kwargs: Any ) -> None: super().__init_subclass__() cls._find_special_fields() if kwargs.keys(): logger.warning( "Unexpected keyword arguments in domain model class", # pragma: no mutate class_name=cls.__name__, kwargs=kwargs.keys(), ) # Check if child subscription instance models conform to the same lifecycle for product_block_field_name, product_block_field_type in cls._get_child_product_block_types().items(): if lifecycle: for lifecycle_status in lifecycle: if isinstance(product_block_field_type, tuple): for field_type in product_block_field_type: specialized_type = lookup_specialized_type(field_type, lifecycle_status) if not issubclass(field_type, specialized_type): raise AssertionError( f"The lifecycle status of the type for the field: {product_block_field_name}, {specialized_type.__name__} (based on {field_type.__name__}) is not suitable for the lifecycle status ({lifecycle_status}) of this model" ) else: specialized_type = lookup_specialized_type(product_block_field_type, lifecycle_status) if not issubclass(product_block_field_type, specialized_type): raise AssertionError( f"The lifecycle status of the type for the field: {product_block_field_name}, {specialized_type.__name__} (based on {product_block_field_type.__name__}) is not suitable for the lifecycle status ({lifecycle_status}) of this model" ) @classmethod def _get_child_product_block_types( cls, ) -> Dict[str, Union[Type["ProductBlockModel"], Tuple[Type["ProductBlockModel"]]]]: """Return all the product block model types. This strips any List[..] or Optional[...] types. 
""" result = {} for product_block_field_name, product_block_field_type in cls._product_block_fields_.items(): if is_union_type(product_block_field_type) and not is_optional_type(product_block_field_type): field_type: Union[Type["ProductBlockModel"], Tuple[Type["ProductBlockModel"]]] = get_args(product_block_field_type) # type: ignore elif is_list_type(product_block_field_type) or is_optional_type(product_block_field_type): field_type = first(get_args(product_block_field_type)) else: field_type = product_block_field_type result[product_block_field_name] = field_type return result @classmethod def _find_special_fields(cls: Type) -> None: """Make and store a list of resource_type fields and product block fields.""" cls._non_product_block_fields_ = {} cls._product_block_fields_ = {} if version_info.minor < 10: annotations = cls.__dict__.get("__annotations__", {}) else: if TYPE_CHECKING: annotations = {} else: # Only available in python > 3.10 from inspect import get_annotations annotations = get_annotations(cls) for field_name, field_type in annotations.items(): if field_name.startswith("_"): continue try: is_product_block_field = ( is_union_type(field_type, DomainModel) or is_list_type(field_type, DomainModel) or is_optional_type(field_type, DomainModel) or is_of_type(field_type, DomainModel) ) except TypeError: # issubclass does not work on typing types is_product_block_field = False # We only want fields that are on this class and not on the parent if is_product_block_field: cls._product_block_fields_[field_name] = field_type else: cls._non_product_block_fields_[field_name] = field_type @classmethod def _init_instances( cls, subscription_id: UUID, skip_keys: Optional[List[str]] = None ) -> Dict[str, Union[List["ProductBlockModel"], "ProductBlockModel"]]: """Initialize default subscription instances. When a new domain model is created that is not loaded from an existing subscription. We also create all subscription instances for it. This function does that. Args: skip_keys: list of fields on the class to skip when creating dummy instances. Returns: A dict with instances to pass to the new model """ if skip_keys is None: skip_keys = [] instances: Dict[str, Union[List[ProductBlockModel], ProductBlockModel]] = {} for product_block_field_name, product_block_field_type in cls._product_block_fields_.items(): if product_block_field_name in skip_keys: continue if is_list_type(product_block_field_type): if _is_constrained_list_type(product_block_field_type): product_block_model = one(get_args(product_block_field_type)) default_value = product_block_field_type() # if constrainedlist has minimum, return that minimum else empty list if product_block_field_type.min_items: logger.debug("creating min_items", type=product_block_field_type) # pragma: no mutate for _ in range(product_block_field_type.min_items): default_value.append(product_block_model.new(subscription_id=subscription_id)) else: # a list field of ProductBlockModels without limits gets an empty list default_value = [] elif is_optional_type(product_block_field_type, ProductBlockModel): default_value = None elif is_union_type(product_block_field_type): raise ValueError( "Union Types must always be `Optional` when calling `.new().` We are unable to detect which type to intialise and Union types always cross subscription boundaries." 
) else: product_block_model = product_block_field_type # Scalar field of a ProductBlockModel expects 1 instance default_value = product_block_model.new(subscription_id=subscription_id) instances[product_block_field_name] = default_value return instances @classmethod def _load_instances( cls, db_instances: List[SubscriptionInstanceTable], status: SubscriptionLifecycle, match_domain_attr: bool = True, ) -> Dict[str, Union[Optional["ProductBlockModel"], List["ProductBlockModel"]]]: """Load subscription instances for this domain model. When a new domain model is loaded from an existing subscription we also load all subscription instances for it. This function does that. Args: db_instances: list of database models to load from status: SubscriptionLifecycle of subscription to check if models match match_domain_attr: Match domain attribute from relation (not wanted when loading product blocks directly related to subscriptions) Returns: A dict with instances to pass to the new model """ instances: Dict[str, Union[Optional[ProductBlockModel], List[ProductBlockModel]]] = {} def keyfunc(i: SubscriptionInstanceTable) -> str: return i.product_block.name sorted_instances = sorted(db_instances, key=keyfunc) grouped_instances = {k: list(g) for k, g in groupby(sorted_instances, keyfunc)} def match_domain_model_attr_if_possible(field_name: str) -> Callable: def domain_filter(instance: SubscriptionInstanceTable) -> bool: """ Match domain model attributes. This helper is necessary to filter through all relations in a subscription. Not all subscriptions have a domain model attribute that is set as it is not always necessary. However when it is set, it is necessary to filter through instances depending on that attribute. Args: instance: child instance Returns: Boolean of match. """ # We don't match on the product_blocks directly under subscriptions. They don't have parent relations to those if not match_domain_attr: return True attr_names = { relation.domain_model_attr for relation in instance.parent_relations if relation.domain_model_attr } # We can assume true is no domain_model_attr is set. 
return not attr_names or field_name in attr_names return domain_filter for product_block_field_name, product_block_field_type in cls._product_block_fields_.items(): filter_func = match_domain_model_attr_if_possible(product_block_field_name) if is_list_type(product_block_field_type): if product_block_field_name not in grouped_instances: if _is_constrained_list_type(product_block_field_type): product_block_model_list = product_block_field_type() else: product_block_model_list = [] product_block_model = one(get_args(product_block_field_type)) instance_list: List[SubscriptionInstanceTable] = list( filter( filter_func, flatten(grouped_instances.get(name, []) for name in product_block_model.__names__) ) ) product_block_model_list.extend( product_block_model.from_db(subscription_instance=instance, status=status) for instance in instance_list ) instances[product_block_field_name] = product_block_model_list elif is_union_type(product_block_field_type) and not is_optional_type(product_block_field_type): instance = only( list( filter( filter_func, flatten( grouped_instances.get(field_type.name, []) for field_type in get_args(product_block_field_type) ), ) ) ) product_block_model = None if instance is None: raise ValueError("Required subscription instance is missing in the database") for field_type in get_args(product_block_field_type): if instance.product_block.name == field_type.name: product_block_model = field_type assert ( # noqa: S101 product_block_model is not None ), "Product block model has not been resolved. Unable to continue" instances[product_block_field_name] = product_block_model.from_db( subscription_instance=instance, status=status ) else: product_block_model = product_block_field_type if is_optional_type(product_block_field_type): product_block_model = first(get_args(product_block_model)) instance = only( list( filter( filter_func, flatten(grouped_instances.get(name, []) for name in product_block_model.__names__), ) ) ) if is_optional_type(product_block_field_type) and instance is None: instances[product_block_field_name] = None elif not is_optional_type(product_block_field_type) and instance is None: raise ValueError("Required subscription instance is missing in database") else: instances[product_block_field_name] = product_block_model.from_db( subscription_instance=instance, status=status ) return instances @classmethod def _data_from_lifecycle(cls, other: "DomainModel", status: SubscriptionLifecycle, subscription_id: UUID) -> Dict: data = other.dict() for field_name, field_type in cls._product_block_fields_.items(): if is_list_type(field_type): data[field_name] = [] for item in getattr(other, field_name): data[field_name].append( one(get_args(field_type))._from_other_lifecycle(item, status, subscription_id) ) else: value = getattr(other, field_name) if is_optional_type(field_type): field_type = first(get_args(field_type)) if value: data[field_name] = field_type._from_other_lifecycle(value, status, subscription_id) else: data[field_name] = None elif is_union_type(field_type) and not is_optional_type(field_type): field_types = get_args(field_type) for f_type in field_types: if f_type.name == value.name: field_type = f_type data[field_name] = field_type._from_other_lifecycle(value, status, subscription_id) else: data[field_name] = field_type._from_other_lifecycle(value, status, subscription_id) return data def _save_instances( self, subscription_id: UUID, status: SubscriptionLifecycle ) -> Tuple[List[SubscriptionInstanceTable], Dict[str, List[SubscriptionInstanceTable]]]: """Save 
subscription instances for this domain model. When a domain model is saved to the database we need to save all child subscription instances for it. Args: subscription_id: The subscription id status: SubscriptionLifecycle of subscription to check if models match Returns: A list with instances which are saved and a dict with direct children """ saved_instances: List[SubscriptionInstanceTable] = [] child_instances: Dict[str, List[SubscriptionInstanceTable]] = {} for product_block_field, product_block_field_type in self._product_block_fields_.items(): product_block_models = getattr(self, product_block_field) if is_list_type(product_block_field_type): field_instance_list = [] for product_block_model in product_block_models: saved, child = product_block_model.save(subscription_id=subscription_id, status=status) field_instance_list.append(child) saved_instances.extend(saved) child_instances[product_block_field] = field_instance_list elif ( is_optional_type(product_block_field_type) or is_union_type(product_block_field_type) ) and product_block_models is None: pass else: saved, child = product_block_models.save(subscription_id=subscription_id, status=status) child_instances[product_block_field] = [child] saved_instances.extend(saved) return saved_instances, child_instances class ProductBlockModelMeta(ModelMetaclass): """Metaclass used to create product block instances. This metaclass is used to make sure the class contains product block metadata. This metaclass should not be used directly in the class definition. Instead a new product block model should inherit from ProductBlockModel which has this metaclass defined. You can find some examples in: :ref:`domain-models` """ __names__: Set[str] name: Optional[str] product_block_id: UUID description: str tag: str registry: Dict[str, Type["ProductBlockModel"]] = {} # pragma: no mutate def _fix_pb_data(self) -> None: if not self.name: raise ValueError(f"Cannot create instance of abstract class. Use one of {self.__names__}") # Would have been nice to do this in __init_subclass__ but that runs outside the app context so we cant access the db # So now we do it just before we instantiate the instance if not hasattr(self, "product_block_id"): product_block = ProductBlockTable.query.filter(ProductBlockTable.name == self.name).one() self.product_block_id = product_block.product_block_id self.description = product_block.description self.tag = product_block.tag def __call__(self, *args: Any, **kwargs: Any) -> B: self._fix_pb_data() kwargs["name"] = self.name return super().__call__(*args, **kwargs) class ProductBlockModel(DomainModel, metaclass=ProductBlockModelMeta): r"""Base class for all product block models. This class should have been called SubscriptionInstanceModel. ProductTable Blocks are represented as dataclasses with pydantic runtime validation. Different stages of a subscription lifecycle could require different product block definition. Mainly to support mandatory fields when a subscription is active. To support this a lifecycle specific product block definition can be created by subclassing the generic product block with keyword argument 'lifecycle' and overriding its fields. All product blocks are related to a database ProductBlockTable object through the `product_block_name` that is given as class keyword argument. Define a product block: >>> class BlockInactive(ProductBlockModel, product_block_name="Virtual Circuit"): ... int_field: Optional[int] = None ... 
str_field: Optional[str] = None >>> class Block(BlockInactive, lifecycle=[SubscriptionLifecycle.ACTIVE]): ... int_field: int ... str_field: str This example defines a product_block with two different contraints based on lifecycle. `Block` is valid only for `ACTIVE` And `BlockInactive` for all other states. `product_block_name` must be defined on the base class and need not to be defined on the others Create a new empty product block >>> example1 = BlockInactive() # doctest:+SKIP Create a new instance based on a dict in the state: >>> example2 = BlockInactive(\*\*state) # doctest:+SKIP To retrieve a ProductBlockModel from the database.: >>> BlockInactive.from_db(subscription_instance_id) # doctest:+SKIP """ registry: ClassVar[Dict[str, Type["ProductBlockModel"]]] # pragma: no mutate __names__: ClassVar[Set[str]] = set() product_block_id: ClassVar[UUID] description: ClassVar[str] tag: ClassVar[str] _db_model: SubscriptionInstanceTable = PrivateAttr() # Product block name. This needs to be an instance var because its part of the API (we expose it to the frontend) # Is actually optional since abstract classes dont have it. In practice it is always set name: str subscription_instance_id: UUID owner_subscription_id: UUID label: Optional[str] = None def __init_subclass__( cls, *, product_block_name: Optional[str] = None, lifecycle: Optional[List[SubscriptionLifecycle]] = None, **kwargs: Any, ) -> None: super().__init_subclass__(lifecycle=lifecycle, **kwargs) if product_block_name is not None: # This is a concrete product block base class (so not a abstract super class or a specific lifecycle version) cls.name = product_block_name cls.__base_type__ = cls cls.__names__ = {cls.name} ProductBlockModel.registry[cls.name] = cls elif lifecycle is None: # Abstract class, no product block name cls.name = None # type:ignore cls.__names__ = set() # For everything except abstract classes if cls.name is not None: register_specialized_type(cls, lifecycle) # Add ourself to any super class. That way we can match a superclass to an instance when loading for klass in cls.__mro__: if issubclass(klass, ProductBlockModel): klass.__names__.add(cls.name) cls.__doc__ = make_product_block_docstring(cls, lifecycle) @classmethod def diff_product_block_in_database(cls) -> Dict[str, Any]: """Return any differences between the attrs defined on the domain model and those on product blocks in the database. This is only needed to check if the domain model and database models match which would be done during testing... 
""" if not cls.name: # This is a superclass we can't check that return {} product_block_db = ProductBlockTable.query.filter(ProductBlockTable.name == cls.name).one_or_none() product_blocks_in_db = {pb.name for pb in product_block_db.children} if product_block_db else set() product_blocks_types_in_model = cls._get_child_product_block_types().values() if product_blocks_types_in_model and isinstance(first(product_blocks_types_in_model), tuple): # There may only be one in the type if it is a Tuple product_blocks_in_model = set(flatten(map(attrgetter("__names__"), one(product_blocks_types_in_model)))) # type: ignore else: product_blocks_in_model = set(flatten(map(attrgetter("__names__"), product_blocks_types_in_model))) missing_product_blocks_in_db = product_blocks_in_model - product_blocks_in_db missing_product_blocks_in_model = product_blocks_in_db - product_blocks_in_model resource_types_model = set(cls._non_product_block_fields_) resource_types_db = {rt.resource_type for rt in product_block_db.resource_types} if product_block_db else set() missing_resource_types_in_db = resource_types_model - resource_types_db missing_resource_types_in_model = resource_types_db - resource_types_model logger.debug( "ProductBlockTable blocks diff", product_block_db=product_block_db.name if product_block_db else None, product_blocks_in_db=product_blocks_in_db, product_blocks_in_model=product_blocks_in_model, resource_types_db=resource_types_db, resource_types_model=resource_types_model, missing_product_blocks_in_db=missing_product_blocks_in_db, missing_product_blocks_in_model=missing_product_blocks_in_model, missing_resource_types_in_db=missing_resource_types_in_db, missing_resource_types_in_model=missing_resource_types_in_model, ) missing_data: Dict[str, Any] = {} if product_blocks_types_in_model and isinstance(first(product_blocks_types_in_model), tuple): for product_block_model in one(product_blocks_types_in_model): # type: ignore missing_data.update(product_block_model.diff_product_block_in_database()) else: for product_block_in_model in product_blocks_types_in_model: missing_data.update(product_block_in_model.diff_product_block_in_database()) # type: ignore diff = { k: v for k, v in { "missing_product_blocks_in_db": missing_product_blocks_in_db, "missing_product_blocks_in_model": missing_product_blocks_in_model, "missing_resource_types_in_db": missing_resource_types_in_db, "missing_resource_types_in_model": missing_resource_types_in_model, }.items() if v } if diff: missing_data[cls.name] = diff return missing_data @classmethod def new(cls: Type[B], subscription_id: UUID, **kwargs: Any) -> B: """Create a new empty product block. We need to use this instead of the normal constructor because that assumes you pass in all required values. That is cumbersome since that means creating a tree of product blocks. 
This is similar to `from_product_id()` """ sub_instances = cls._init_instances(subscription_id, list(kwargs.keys())) subscription_instance_id = uuid4() # Make sure product block stuff is already set if new is the first usage of this class cls._fix_pb_data() db_model = SubscriptionInstanceTable( product_block_id=cls.product_block_id, subscription_instance_id=subscription_instance_id, subscription_id=subscription_id, ) db.session.enable_relationship_loading(db_model) model = cls(subscription_instance_id=subscription_instance_id, owner_subscription_id=subscription_id, **sub_instances, **kwargs) # type: ignore model._db_model = db_model return model @classmethod def _load_instances_values(cls, instance_values: List[SubscriptionInstanceValueTable]) -> Dict[str, str]: """Load non product block fields (instance values). Args: instance_values: List of instance values from database Returns: Dict of fields to use for constructor """ instance_values_dict: State = {} list_field_names = set() # Set default values for field_name, field_type in cls._non_product_block_fields_.items(): # Ensure that empty lists are handled OK if is_list_type(field_type): instance_values_dict[field_name] = [] list_field_names.add(field_name) for siv in instance_values: # check the type of the siv in the instance and act accordingly: only lists and scalar values supported resource_type_name = siv.resource_type.resource_type if resource_type_name in list_field_names: instance_values_dict[resource_type_name].append(siv.value) else: instance_values_dict[resource_type_name] = siv.value # Make sure values are sorted. This already happens when they come from the db. # However newly created SubscriptionInstances might not have the correct order for field_name in list_field_names: instance_values_dict[field_name] = sorted(instance_values_dict[field_name]) return instance_values_dict @classmethod def _from_other_lifecycle( cls: Type[B], other: "ProductBlockModel", status: SubscriptionLifecycle, subscription_id: UUID, ) -> B: """Create new domain model from instance while changing the status. This makes sure we always have a specific instance.. """ if not cls.__base_type__: cls = ProductBlockModel.registry.get(other.name, cls) # type:ignore cls = lookup_specialized_type(cls, status) data = cls._data_from_lifecycle(other, status, subscription_id) model = cls(**data) model._db_model = other._db_model return model @classmethod def from_db( cls: Type[B], subscription_instance_id: Optional[UUID] = None, subscription_instance: Optional[SubscriptionInstanceTable] = None, status: Optional[SubscriptionLifecycle] = None, ) -> B: """Create a product block based on a subscription instance from the database. 
This function is similar to `from_subscription()` >>> subscription_instance_id = KNOWN_UUID_IN_DB # doctest:+SKIP >>> si_from_db = db.SubscriptionInstanceTable.query.get(subscription_instance_id) # doctest:+SKIP >>> example3 = ProductBlockModel.from_db(subscription_instance=si_from_db) # doctest:+SKIP >>> example4 = ProductBlockModel.from_db(subscription_instance_id=subscription_instance_id) # doctest:+SKIP """ # Fill values from actual subscription if subscription_instance_id: subscription_instance = SubscriptionInstanceTable.query.get(subscription_instance_id) if subscription_instance: subscription_instance_id = subscription_instance.subscription_instance_id assert subscription_instance_id # noqa: S101 assert subscription_instance # noqa: S101 if not status: status = SubscriptionLifecycle(subscription_instance.subscription.status) if not cls.__base_type__: cls = ProductBlockModel.registry.get(subscription_instance.product_block.name, cls) # type:ignore cls = lookup_specialized_type(cls, status) elif not issubclass(cls, lookup_specialized_type(cls, status)): raise ValueError(f"{cls} is not valid for lifecycle {status}") label = subscription_instance.label instance_values = cls._load_instances_values(subscription_instance.values) sub_instances = cls._load_instances(subscription_instance.children, status) try: model = cls( subscription_instance_id=subscription_instance_id, owner_subscription_id=subscription_instance.subscription_id, subscription=subscription_instance.subscription, label=label, **instance_values, # type: ignore **sub_instances, # type: ignore ) model._db_model = subscription_instance return model except ValidationError: logger.exception( "Subscription is not correct in database", loaded_instance_values=instance_values, loaded_sub_instances=sub_instances, ) raise def _save_instance_values( self, product_block: ProductBlockTable, current_values: List[SubscriptionInstanceValueTable] ) -> List[SubscriptionInstanceValueTable]: """Save non product block fields (instance values). 
Returns: List of database instances values to save """ resource_types = {rt.resource_type: rt for rt in product_block.resource_types} current_values_dict: Dict[str, List[SubscriptionInstanceValueTable]] = defaultdict(list) for siv in current_values: current_values_dict[siv.resource_type.resource_type].append(siv) subscription_instance_values = [] for field_name, field_type in self._non_product_block_fields_.items(): assert ( # noqa: S101 field_name in resource_types ), f"Domain model {self.__class__} does not match the ProductBlockTable {product_block.name}, missing: {field_name} {resource_types}" resource_type = resource_types[field_name] value = getattr(self, field_name) if value is None: continue if is_list_type(field_type): for val, siv in zip_longest(value, current_values_dict[field_name]): if val: if siv: siv.value = str(val) subscription_instance_values.append(siv) else: subscription_instance_values.append( SubscriptionInstanceValueTable(resource_type=resource_type, value=str(val)) ) else: if field_name in current_values_dict: current_value = current_values_dict[field_name][0] current_value.value = str(value) subscription_instance_values.append(current_value) else: subscription_instance_values.append( SubscriptionInstanceValueTable(resource_type=resource_type, value=str(value)) ) return subscription_instance_values def _set_instance_domain_model_attrs( self, subscription_instance: SubscriptionInstanceTable, subscription_instance_mapping: Dict[str, List[SubscriptionInstanceTable]], ) -> None: """ Save the domain model attribute to the database. This function iterates through the subscription instances and stores the domain model attribute in the hierarchy relationship. Args: subscription_instance_mapping: a mapping of the domain model attribute a underlying instances Returns: None """ children_relations = [] # Set the domain_model_attrs in the database for domain_model_attr, instances in subscription_instance_mapping.items(): instance: SubscriptionInstanceTable for index, instance in enumerate(instances): relation = SubscriptionInstanceRelationTable( parent_id=subscription_instance.subscription_instance_id, child_id=instance.subscription_instance_id, order_id=index, domain_model_attr=domain_model_attr, ) children_relations.append(relation) subscription_instance.children_relations = children_relations def save( self, *, subscription_id: UUID, status: SubscriptionLifecycle, ) -> Tuple[List[SubscriptionInstanceTable], SubscriptionInstanceTable]: """Save the current model instance to the database. This means saving the whole tree of subscription instances and seperately saving all instance values for this instance. Args: status: current SubscriptionLifecycle to check if all constraints match subscription_id: Optional subscription id needed if this is a new model Returns: List of saved instances """ if not self.name: raise ValueError(f"Cannot create instance of abstract class. Use one of {self.__names__}") # Make sure we have a valid subscription instance database model subscription_instance: SubscriptionInstanceTable = SubscriptionInstanceTable.query.get( self.subscription_instance_id ) if subscription_instance: # Make sure we do not use a mapped session. 
db.session.refresh(subscription_instance) # Block unsafe status changes on domain models that have Subscription instances with parent relations for parent in subscription_instance.parents: if ( parent.subscription != self.subscription and parent.subscription.status not in SAFE_PARENT_TRANSITIONS_FOR_STATUS[status] ): raise ValueError( f"Unsafe status change of Subscription with depending subscriptions: {list(map(lambda instance: instance.subscription.description, subscription_instance.parents))}" ) # If this is a "foreign" instance we just stop saving and return it so only its relation is saved # We should not touch these themselves if self.subscription and subscription_instance.subscription_id != subscription_id: return [], subscription_instance self._db_model = subscription_instance else: subscription_instance = self._db_model # We only need to add to the session if the subscription_instance does not exist. db.session.add(subscription_instance) subscription_instance.subscription_id = subscription_id db.session.flush() # Everything is ok, make sure we are of the right class specialized_type = lookup_specialized_type(self.__class__, status) if specialized_type and not isinstance(self, specialized_type): raise ValueError( f"Lifecycle status {status} requires specialized type {specialized_type!r}, was: {type(self)!r}" ) # Actually save stuff subscription_instance.label = self.label subscription_instance.values = self._save_instance_values( subscription_instance.product_block, subscription_instance.values ) sub_instances, children = self._save_instances(subscription_id, status) # Save the subscription instances relations. self._set_instance_domain_model_attrs(subscription_instance, children) return sub_instances + [subscription_instance], subscription_instance @property def subscription(self) -> SubscriptionTable: return self.db_model.subscription @property def db_model(self) -> SubscriptionInstanceTable: return self._db_model @property def parents(self) -> List[SubscriptionInstanceTable]: return self._db_model.parents @property def children(self) -> List[SubscriptionInstanceTable]: return self._db_model.children class ProductModel(BaseModel): """Represent the product as defined in the database as a dataclass.""" class Config: validate_assignment = True # pragma: no mutate validate_all = True # pragma: no mutate arbitrary_types_allowed = True # pragma: no mutate product_id: UUID name: str description: str product_type: str tag: str status: ProductLifecycle class SubscriptionModel(DomainModel): r"""Base class for all product subscription models. Define a subscription model: >>> class SubscriptionInactive(SubscriptionModel, product_type="SP"): # doctest:+SKIP ... block: Optional[ProductBlockModelInactive] = None >>> class Subscription(BlockInactive, lifecycle=[SubscriptionLifecycle.ACTIVE]): # doctest:+SKIP ... block: ProductBlockModel This example defines a subscription model with two different contraints based on lifecycle. `Subscription` is valid only for `ACTIVE` And `SubscriptionInactive` for all other states. 
`product_type` must be defined on the base class and need not to be defined on the others Create a new empty subscription >>> example1 = SubscriptionInactive.from_product_id(product_id, customer_id) # doctest:+SKIP Create a new instance based on a dict in the state: >>> example2 = SubscriptionInactive(\*\*state) # doctest:+SKIP To retrieve a ProductBlockModel from the database: >>> SubscriptionInactive.from_subscription(subscription_id) # doctest:+SKIP """ product: ProductModel customer_id: UUID _db_model: SubscriptionTable = PrivateAttr() subscription_id: UUID = Field(default_factory=uuid4) # pragma: no mutate description: str = "Initial subscription" # pragma: no mutate status: SubscriptionLifecycle = SubscriptionLifecycle.INITIAL # pragma: no mutate insync: bool = False # pragma: no mutate start_date: Optional[datetime] = None # pragma: no mutate end_date: Optional[datetime] = None # pragma: no mutate note: Optional[str] = None # pragma: no mutate def __new__(cls, *args: Any, status: Optional[SubscriptionLifecycle] = None, **kwargs: Any) -> "SubscriptionModel": # status can be none if created during change_lifecycle if status and not issubclass(cls, lookup_specialized_type(cls, status)): raise ValueError(f"{cls} is not valid for status {status}") return super().__new__(cls) def __init_subclass__( cls, is_base: bool = False, lifecycle: Optional[List[SubscriptionLifecycle]] = None, **kwargs: Any ) -> None: super().__init_subclass__(lifecycle=lifecycle, **kwargs) if is_base: cls.__base_type__ = cls if is_base or lifecycle: register_specialized_type(cls, lifecycle) cls.__doc__ = make_subscription_model_docstring(cls, lifecycle) @classmethod def diff_product_in_database(cls, product_id: UUID) -> Dict[str, Any]: """Return any differences between the attrs defined on the domain model and those on product blocks in the database. This is only needed to check if the domain model and database models match which would be done during testing... 
""" product_db = ProductTable.query.get(product_id) product_blocks_in_db = {pb.name for pb in product_db.product_blocks} if product_db else set() product_blocks_types_in_model = cls._get_child_product_block_types().values() if product_blocks_types_in_model and isinstance(first(product_blocks_types_in_model), tuple): product_blocks_in_model = set(flatten(map(attrgetter("__names__"), one(product_blocks_types_in_model)))) # type: ignore else: product_blocks_in_model = set(flatten(map(attrgetter("__names__"), product_blocks_types_in_model))) missing_product_blocks_in_db = product_blocks_in_model - product_blocks_in_db missing_product_blocks_in_model = product_blocks_in_db - product_blocks_in_model fixed_inputs_model = set(cls._non_product_block_fields_) fixed_inputs_in_db = {fi.name for fi in product_db.fixed_inputs} if product_db else set() missing_fixed_inputs_in_db = fixed_inputs_model - fixed_inputs_in_db missing_fixed_inputs_in_model = fixed_inputs_in_db - fixed_inputs_model logger.debug( "ProductTable blocks diff", product_block_db=product_db.name if product_db else None, product_blocks_in_db=product_blocks_in_db, product_blocks_in_model=product_blocks_in_model, fixed_inputs_in_db=fixed_inputs_in_db, fixed_inputs_model=fixed_inputs_model, missing_product_blocks_in_db=missing_product_blocks_in_db, missing_product_blocks_in_model=missing_product_blocks_in_model, missing_fixed_inputs_in_db=missing_fixed_inputs_in_db, missing_fixed_inputs_in_model=missing_fixed_inputs_in_model, ) missing_data_children: Dict[str, Any] = {} for product_block_in_model in product_blocks_types_in_model: missing_data_children.update(product_block_in_model.diff_product_block_in_database()) # type: ignore diff = { k: v for k, v in { "missing_product_blocks_in_db": missing_product_blocks_in_db, "missing_product_blocks_in_model": missing_product_blocks_in_model, "missing_fixed_inputs_in_db": missing_fixed_inputs_in_db, "missing_fixed_inputs_in_model": missing_fixed_inputs_in_model, "missing_in_children": missing_data_children, }.items() if v } missing_data = {} if diff: missing_data[product_db.name] = diff return missing_data @classmethod def from_product_id( cls: Type[S], product_id: Union[UUID, UUIDstr], customer_id: Union[UUID, UUIDstr], status: SubscriptionLifecycle = SubscriptionLifecycle.INITIAL, description: Optional[str] = None, insync: bool = False, start_date: Optional[datetime] = None, end_date: Optional[datetime] = None, note: Optional[str] = None, ) -> S: """Use product_id (and customer_id) to return required fields of a new empty subscription.""" # Caller wants a new instance and provided a product_id and customer_id product_db = ProductTable.query.get(product_id) product = ProductModel( product_id=product_db.product_id, name=product_db.name, description=product_db.description, product_type=product_db.product_type, tag=product_db.tag, status=product_db.status, ) if description is None: description = f"Initial subscription of {product.description}" subscription_id = uuid4() subscription = SubscriptionTable( subscription_id=subscription_id, product_id=product_id, customer_id=customer_id, description=description, status=status.value, insync=insync, start_date=start_date, end_date=end_date, note=note, ) db.session.add(subscription) fixed_inputs = {fi.name: fi.value for fi in product_db.fixed_inputs} instances = cls._init_instances(subscription_id) if isinstance(customer_id, str): customer_id = UUID(customer_id) model = cls( product=product, customer_id=customer_id, subscription_id=subscription_id, 
description=description, status=status, insync=insync, start_date=start_date, end_date=end_date, note=note, **fixed_inputs, **instances, # type: ignore ) model._db_model = subscription return model @classmethod def from_other_lifecycle( cls: Type[S], other: "SubscriptionModel", status: SubscriptionLifecycle, ) -> S: """Create new domain model from instance while changing the status. This makes sure we always have a speficic instance. """ if not cls.__base_type__: # Import here to prevent cyclic imports from orchestrator.domain import SUBSCRIPTION_MODEL_REGISTRY cls = SUBSCRIPTION_MODEL_REGISTRY.get(other.product.name, cls) # type:ignore cls = lookup_specialized_type(cls, status) data = cls._data_from_lifecycle(other, status, other.subscription_id) data["status"] = status if data["start_date"] is None and status == SubscriptionLifecycle.ACTIVE: data["start_date"] = nowtz() if data["end_date"] is None and status == SubscriptionLifecycle.TERMINATED: data["end_date"] = nowtz() model = cls(**data) model._db_model = other._db_model return model @classmethod def from_subscription(cls: Type[S], subscription_id: Union[UUID, UUIDstr]) -> S: """Use a subscription_id to return required fields of an existing subscription.""" subscription = SubscriptionTable.query.options( selectinload(SubscriptionTable.instances) .selectinload(SubscriptionInstanceTable.product_block) .selectinload(ProductBlockTable.resource_types), selectinload(SubscriptionTable.instances).selectinload(SubscriptionInstanceTable.parent_relations), selectinload(SubscriptionTable.instances).selectinload(SubscriptionInstanceTable.values), ).get(subscription_id) product = ProductModel( product_id=subscription.product.product_id, name=subscription.product.name, description=subscription.product.description, product_type=subscription.product.product_type, tag=subscription.product.tag, status=subscription.product.status, ) status = SubscriptionLifecycle(subscription.status) if not cls.__base_type__: # Import here to prevent cyclic imports from orchestrator.domain import SUBSCRIPTION_MODEL_REGISTRY cls = SUBSCRIPTION_MODEL_REGISTRY.get(subscription.product.name, cls) # type:ignore cls = lookup_specialized_type(cls, status) elif not issubclass(cls, lookup_specialized_type(cls, status)): raise ValueError(f"{cls} is not valid for lifecycle {status}") fixed_inputs = {fi.name: fi.value for fi in subscription.product.fixed_inputs} instances = cls._load_instances(subscription.instances, status, match_domain_attr=False) try: model = cls( product=product, customer_id=subscription.customer_id, subscription_id=subscription.subscription_id, description=subscription.description, status=status, insync=subscription.insync, start_date=subscription.start_date, end_date=subscription.end_date, note=subscription.note, **fixed_inputs, **instances, # type: ignore ) model._db_model = subscription return model except ValidationError: logger.exception( "Subscription is not correct in database", loaded_fixed_inputs=fixed_inputs, loaded_instances=instances ) raise def save(self) -> None: """Save the subscription to the database.""" specialized_type = lookup_specialized_type(self.__class__, self.status) if specialized_type and not isinstance(self, specialized_type): raise ValueError( f"Lifecycle status {self.status.value} requires specialized type {specialized_type!r}, was: {type(self)!r}" ) sub = SubscriptionTable.query.options( selectinload(SubscriptionTable.instances) .selectinload(SubscriptionInstanceTable.product_block) .selectinload(ProductBlockTable.resource_types), 
selectinload(SubscriptionTable.instances).selectinload(SubscriptionInstanceTable.values), ).get(self.subscription_id) if not sub: sub = self._db_model # Make sure we refresh the object and not use an already mapped object db.session.refresh(sub) self._db_model = sub sub.product_id = self.product.product_id sub.customer_id = self.customer_id sub.description = self.description sub.status = self.status.value sub.insync = self.insync sub.start_date = self.start_date sub.end_date = self.end_date sub.note = self.note db.session.add(sub) db.session.flush() # Sends INSERT and returns subscription_id without committing transaction old_instances_dict = {instance.subscription_instance_id: instance for instance in sub.instances} saved_instances, child_instances = self._save_instances(self.subscription_id, self.status) for instances in child_instances.values(): for instance in instances: if instance.subscription_id != self.subscription_id: raise ValueError( "Attempting to save a Foreign `Subscription Instance` directly below a subscription. This is not allowed." ) sub.instances = saved_instances # Calculate what to remove instances_set = {instance.subscription_instance_id for instance in sub.instances} for instance_id in instances_set: old_instances_dict.pop(instance_id, None) # What's left should be removed for instance in old_instances_dict.values(): db.session.delete(instance) db.session.flush() @property def db_model(self) -> SubscriptionTable: return self._db_model SI = TypeVar("SI") # pragma: no mutate class SubscriptionInstanceList(ConstrainedList, List[SI]): """Shorthand to create constrained lists of product blocks.""" def __init_subclass__(cls, **kwargs: Any) -> None: super().__init_subclass__(**kwargs) # type:ignore # Copy generic argument (SI) if not set explicitly # This makes a lot of assuptions about the internals of `typing` if "__orig_bases__" in cls.__dict__ and cls.__dict__["__orig_bases__"]: generic_base_cls = cls.__dict__["__orig_bases__"][0] if not hasattr(generic_base_cls, "item_type") and get_args(generic_base_cls): cls.item_type = get_args(generic_base_cls)[0] # Make sure __args__ is set cls.__args__ = (cls.item_type,)
1.679688
2
texturizer/emoticons.py
john-hawkins/Text_Feature_Generator
3
12792052
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import codecs
import re

from .process import load_word_list
from .process import load_word_pattern
from .process import remove_urls_and_tags
from .process import remove_escapes_and_non_printable

smiles = load_word_list("emoticons-smile.dat")
laughs = load_word_list("emoticons-laugh.dat")
winks = load_word_list("emoticons-wink.dat")
cheekys = load_word_list("emoticons-wink.dat")  # NOTE: re-uses the wink list; a dedicated cheeky list may have been intended
kisses = load_word_list("emoticons-kiss.dat")
happycrys = load_word_list("emoticons-happy-cry.dat")
crys = load_word_list("emoticons-cry.dat")
sads = load_word_list("emoticons-sad.dat")
shocks = load_word_list("emoticons-shock.dat")
sceptics = load_word_list("emoticons-sceptical.dat")

fwd_regex = "[:;8BX]['’`]{0,1}[-=^oc]{0,2}[DPO0J3ox,Þþb@*\\|/()<>{}\[\]]{1,2}"
fwd_re = re.compile(fwd_regex)
bck_regex = "[@*\\|/()<>{}\[\]]{1,2}[-=^]{0,2}['’`]{0,1}[:;]"
bck_re = re.compile(bck_regex)

"""
texturizer.emoticons: Emoticon Recognition Text Features

The functions in this library will add columns to a dataframe that indicate
whether there are emoticons in certain columns of text, and whether those
emoticons represent one of the more common emotions.

NOTE: In developing these regexes I have deliberately ignored certain emoticons
because of the likelihood of false positive matches in text containing brackets.
For example, emoticons 8) or (B will not be matched.

To avoid matching characters inside document markup language tags, there is a
rudimentary regex-based tag removal and an unescaped version of the text that is
expected to have been generated in the initial simple text function run by the
program. This will remove URLs and HTML tags before trying to match emoticons.

Some references used when considering which emoticons to include:
https://www.unglobalpulse.org/2014/10/emoticon-use-in-arabic-spanish-and-english-tweets/
https://www.researchgate.net/publication/266269913_From_Emoticon_to_Universal_Symbolic_Signs_Can_Written_Language_Survive_in_Cyberspace
https://www.sciencedirect.com/science/article/abs/pii/S0950329317300939
https://www.semanticscholar.org/paper/An-Approach-towards-Text-to-Emoticon-Conversion-and-Jha/3b81505fa7fec81563b2dafae3939fa1b07f3a98
https://www.qualitative-research.net/index.php/fqs/article/view/175/391
https://www.researchgate.net/publication/221622114_M_Textual_Affect_Sensing_for_Sociable_and_Expressive_Online_Communication
"""

########################################################################################

def add_text_emoticon_features(df, columns):
    """
    Given a pandas dataframe and a set of column names.
    Add features that detect the presence of emoticons.
    """
    rez = df.copy()
    for col in columns:
        rez = add_emoticon_features(rez, col)
    return rez

########################################################################################

def add_emoticon_features(df, col):
    """
    Given a pandas dataframe and a column name.
    Check for emoticons in the column and add a set of features that
    indicate both the presence and emotional flavour of the emoticon.
    """
    def cal_features(x, col):
        emos = 0
        smiley = 0
        wink = 0
        kiss = 0
        happycry = 0
        laugh = 0
        cheeky = 0
        crying = 0
        sad = 0
        shock = 0
        sceptic = 0
        if x[col] == x[col]:
            text = remove_urls_and_tags(remove_escapes_and_non_printable(x[col]))
            matches = fwd_re.findall(text)
            bck_matches = bck_re.findall(text)
            if len(matches) > 0 or len(bck_matches) > 0:
                matches.extend(bck_matches)
                emos = len(matches)
                if set(matches).intersection(smiles):
                    smiley = 1
                if set(matches).intersection(crys):
                    crying = 1
                if set(matches).intersection(winks):
                    wink = 1
                if set(matches).intersection(kisses):
                    kiss = 1
                if set(matches).intersection(sads):
                    sad = 1
                if set(matches).intersection(shocks):
                    shock = 1
                if set(matches).intersection(sceptics):
                    sceptic = 1
                if set(matches).intersection(laughs):
                    laugh = 1
                if set(matches).intersection(cheekys):
                    cheeky = 1
                if set(matches).intersection(happycrys):
                    happycry = 1
        pos = smiley + wink + kiss + happycry + laugh + cheeky
        neg = crying + sad + shock + sceptic
        sent = pos - neg
        return emos, smiley, wink, kiss, happycry, laugh, cheeky, crying, sad, shock, sceptic, pos, neg, sent

    df[get_emoticon_col_list(col)] = df.apply(cal_features, col=col, axis=1, result_type="expand")
    return df

########################################################################################

def get_emoticon_col_list(col):
    return [col+'_emoticons', col+'_emo_smiley', col+'_emo_wink', col+'_emo_kiss',
            col+'_emo_happycry', col+'_emo_laugh', col+'_emo_cheeky', col+'_emo_cry',
            col+'_emo_sad', col+'_emo_shock', col+'_emo_sceptic', col+'_emo_pos',
            col+'_emo_neg', col+'_emo_sentiment']
2.890625
3
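A brief usage sketch for the emoticon feature module in the record above. It assumes the texturizer package and its bundled emoticon word lists (the .dat files loaded at import time) are installed; the example DataFrame is made up:

import pandas as pd
from texturizer.emoticons import add_text_emoticon_features

df = pd.DataFrame({"comment": ["love it :)", "why :-(", "no emoticon here"]})
out = add_text_emoticon_features(df, ["comment"])

# One count column plus flag and sentiment columns are appended per text column.
print(out[["comment", "comment_emoticons", "comment_emo_smiley", "comment_emo_sentiment"]])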
accounts/migrations/0002_auto_20200724_0209.py
LuizFelipeGondim/AUline
0
12792053
<filename>accounts/migrations/0002_auto_20200724_0209.py<gh_stars>0
# Generated by Django 3.0.8 on 2020-07-24 02:09

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('accounts', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='perfil',
            name='biografia',
            # Portuguese verbose_name: "Tell us a bit about yourself"
            field=models.TextField(blank=True, max_length=400, verbose_name='Fale um pouco sobre você'),
        ),
    ]
1.289063
1
src/models.py
VasudhaJha/URLShortner
0
12792054
from pydantic import BaseModel


class URL(BaseModel):
    long_url: str


class ShortURL(BaseModel):
    short_url: str
2.234375
2
HackMarathon5/HackMarathon5_Closed_Part.py
FaboBence/HackMarathon_MechaHU_5
0
12792055
<gh_stars>0
from copy import deepcopy

# Closed part

def create_path(word, dictionary):
    lista = [word]
    puffer = [dictionary[word]]
    while True:
        if len(dictionary[lista[-1]]) > 0 and dictionary[lista[-1]][0] not in lista:
            lista.append(dictionary[lista[-1]][0])
        else:
            break
    return lista

def attacher(words_in_a_list):
    tmp = words_in_a_list[0]
    for i in range(1, len(words_in_a_list)):
        for j in range(1, 1 + min(len(tmp), len(words_in_a_list[i]))):
            if tmp[-j:] == words_in_a_list[i][:j]:
                tmp += words_in_a_list[i][j:]
    return tmp

def recursive(dictionary, list_of_words, longest=[]):
    skipped = 0
    for word in dictionary[list_of_words[-1]]:
        if word in list_of_words:
            skipped += 1
            continue
        cpy = deepcopy(list_of_words)
        cpy.append(word)
        ret = recursive(dictionary, cpy, longest)
        if len(longest) < len(ret):
            longest = deepcopy(ret)
    if skipped == len(dictionary[list_of_words[-1]]):
        if len(list_of_words) > len(longest):
            longest = deepcopy(list_of_words)
        elif len(list_of_words) == len(longest) and len(attacher(list_of_words)) < len(attacher(longest)):
            longest = deepcopy(list_of_words)
    return deepcopy(longest)

def recursive_2(list_of_words, longest=[]):
    skipped = 0
    hova = []
    str_of_words = attacher(list_of_words)
    for i in words:
        if str_of_words != i:
            for j in range(1, 1 + min(len(str_of_words), len(i))):  # j: how many letters we compare
                if str_of_words[-j:] == i[:j]:
                    hova.append(i)
                    break
    #stop = 0
    for word in hova:
        if word in list_of_words:
            skipped += 1
            continue
        cpy = deepcopy(list_of_words)
        cpy.append(word)
        ret = recursive_2(cpy, longest)
        if len(ret) > len(longest):
            longest = deepcopy(ret)
        elif len(ret) == len(longest) and len(attacher(list_of_words)) < len(attacher(longest)):
            longest = deepcopy(ret)
    if skipped == len(hova):
        if len(list_of_words) > len(longest):
            longest = deepcopy(list_of_words)
        elif len(list_of_words) == len(longest) and len(attacher(list_of_words)) < len(attacher(longest)):
            print(' longest: ' + attacher(longest))
            longest = deepcopy(list_of_words)
            print(' replace: ' + attacher(longest))
    return deepcopy(longest)

words = ["blood", "zonked", "rush", "writer", "grate", "ignorant", "cloudy", "chicken", "illness", "useless",
         "challenge", "comfortable", "noxious", "desk", "shade", "error", "great", "flagrant", "cute", "plan",
         "daughter", "dare", "giraffe", "airplane", "aunt", "men", "vase", "cheap", "obsolete", "tomatoes",
         "receipt", "festive", "screeching", "moor", "ingredients", "great", "skill", "us", "expansion", "rex",
         "lesson", "one", "nemo", "sack"]
attached = []
kiir = []
dictionary = {}

# init dict
for word in words:
    hova = []
    for i in words:
        if word != i:
            for j in range(1, 1 + min(len(i), len(word))):  # j: how many letters we compare
                if word[-j:] == i[:j]:
                    hova.append(i)
                    break
    dictionary[word] = hova

for kiiras in dictionary:
    print(kiiras, dictionary[kiiras])

# Searching for the longest concatenation
print()
for i, firstword in enumerate(words):
    kiir.append([firstword])
    #kiir[i] = recursive(dictionary, kiir[i])
    kiir[i] = recursive_2(kiir[i])
    # Print the result
    print(len(kiir[i]), attacher(kiir[i]))
3.328125
3
pbk/util/descriptors.py
dnwobu/pbk
1
12792056
<filename>pbk/util/descriptors.py
import abc


class DescriptorClass(abc.ABC):

    def __init__(self, prop_name):
        self.prop_name = prop_name

    @abc.abstractmethod
    def __set__(self, instance, value):
        """
        Set needs to be defined in subclasses for this to work as a real descriptor
        :param instance:
        :param value:
        :return:
        """

    def __get__(self, instance, owner):
        return instance.__dict__[self.prop_name]

    def __delete__(self, instance):
        del instance.__dict__[self.prop_name]


class TypeChecked(DescriptorClass):

    def __init__(self, allowed_type, prop_name, allow_none=True):
        super().__init__(prop_name=prop_name)
        self.allowed_type = allowed_type
        self.allow_none = allow_none

    def __set__(self, instance, value):
        if value is None and self.allow_none:
            pass
        elif not isinstance(value, self.allowed_type):
            raise TypeError(f'Value: "{value}" is not of the required type: {str(self.allowed_type)}')
        instance.__dict__[self.prop_name] = value


class ValueChecked(DescriptorClass):

    def __init__(self, allowed_values, prop_name, allow_none=True):
        super().__init__(prop_name=prop_name)
        self.allowed_values = allowed_values
        self.allow_none = allow_none

    def __set__(self, instance, value):
        if value is None and self.allow_none:
            pass
        elif value not in self.allowed_values:
            raise ValueError(f'Value: "{value}" is not of the available options: {str(self.allowed_values)}')
        instance.__dict__[self.prop_name] = value
3.40625
3
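A short usage sketch for the descriptor classes in the record above. The class and attribute names here are hypothetical, and the import path assumes the package layout implied by the repo path:

from pbk.util.descriptors import TypeChecked, ValueChecked

class Server:
    # Each descriptor stores its value under prop_name in the instance __dict__.
    port = TypeChecked(int, "port")
    state = ValueChecked({"running", "stopped"}, "state")

    def __init__(self, port, state):
        self.port = port    # TypeError if not an int (None is allowed by default)
        self.state = state  # ValueError if not one of the allowed values

srv = Server(8080, "running")
srv.state = "stopped"      # accepted
# srv.port = "8080"        # would raise TypeError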
model/npz2pc.py
tarun738/i3DMM
33
12792057
<gh_stars>10-100
import numpy as np

npzfile = np.load("./data/SdfSamples/dataset/heads/xxxxx_exx.npz")
posSamples = npzfile['pos']
negSamples = npzfile['neg']
points = np.append(posSamples, negSamples, axis=0)

if 'pospoi' in npzfile:
    pospoiSamples = npzfile['pospoi']
    points = np.append(points, pospoiSamples, axis=0)
if 'negpoi' in npzfile:
    negpoiSamples = npzfile['negpoi']
    points = np.append(points, negpoiSamples, axis=0)

print(points.shape[0])
f = open("sample.obj", "w+")
if points.shape[1] == 4:
    for i in range(points.shape[0]):
        f.write("v " + str(points[i, 0]) + " " + str(points[i, 1]) + " " + str(points[i, 2]) + "\n")
else:
    for i in range(points.shape[0]):
        color = ((points[i, 4:7] + 0.5) * 255.0).astype('uint8')
        f.write("v " + str(points[i, 0]) + " " + str(points[i, 1]) + " " + str(points[i, 2]) + " "
                + str(color[0]) + " " + str(color[1]) + " " + str(color[2]) + "\n")
f.close()
2.109375
2
imageledger/management/commands/indexer.py
creativecommons/open-ledger
46
12792058
from collections import namedtuple
import itertools
import logging
import os
import time
from multiprocessing.dummy import Pool
import multiprocessing
import uuid

from elasticsearch import helpers
import elasticsearch
from elasticsearch_dsl import Index
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.db import connection, transaction
import requests

from imageledger import models, search

console = logging.StreamHandler()
log = logging.getLogger(__name__)
log.addHandler(console)
log.setLevel(logging.INFO)

MAX_CONNECTION_RETRIES = 50
RETRY_WAIT = 5  # Number of seconds to wait before retrying

DEFAULT_CHUNK_SIZE = 1000
DEFAULT_NUM_ITERATIONS = 10000
DEFAULT_NUM_THREADS = 4


class Command(BaseCommand):
    can_import_settings = True
    requires_migrations_checks = True

    def add_arguments(self, parser):
        parser.add_argument("--verbose",
                            action="store_true",
                            default=False,
                            help="Be very chatty and run logging at DEBUG")
        parser.add_argument("--chunk-size",
                            dest="chunk_size",
                            default=DEFAULT_CHUNK_SIZE,
                            type=int,
                            help="The number of records to batch process at once")
        parser.add_argument("--num-iterations",
                            dest="num_iterations",
                            default=DEFAULT_NUM_ITERATIONS,
                            type=int,
                            help="The number of times to loop through `chunk_size` records")
        parser.add_argument("--num-threads",
                            dest="num_threads",
                            default=DEFAULT_NUM_THREADS,
                            type=int,
                            help="The number of threads to start up at once")

    def handle(self, *args, **options):
        if options['verbose'] or settings.DEBUG:
            log.setLevel(logging.DEBUG)
        self.index_all_images(chunk_size=options['chunk_size'],
                              num_iterations=options['num_iterations'],
                              num_threads=options['num_threads'])

    def index_all_images(self, chunk_size=DEFAULT_CHUNK_SIZE, num_iterations=DEFAULT_NUM_ITERATIONS,
                         num_threads=DEFAULT_NUM_THREADS):
        """Index every record in the database with a server-side cursor"""
        index = Index(settings.ELASTICSEARCH_INDEX)
        if not index.exists():
            log.info("Creating new index %s", settings.ELASTICSEARCH_INDEX)
            search.Image.init()
            mapping = search.Image._doc_type.mapping
            mapping.save(settings.ELASTICSEARCH_INDEX)
            log.info("Done creating new index")

        with Pool(num_threads) as pool:
            starts = [i * chunk_size for i in range(0, num_iterations)]
            pool.starmap(do_index, zip(starts, itertools.repeat(chunk_size, len(starts))))


def do_index(start, chunk_size):
    end = start + chunk_size + 1
    batches = []
    retries = 0
    try:
        es = search.init(timeout=2000)
        if not settings.DEBUG:
            es.cluster.health(wait_for_status='green', request_timeout=2000)
    except (requests.exceptions.ReadTimeout, elasticsearch.exceptions.TransportError) as e:
        log.warn(e)
        log.warn("Skipping batch and retrying after wait")
        time.sleep(RETRY_WAIT)
        return
    log.debug("Starting index in range from %d to %d...", start, end)

    qs = models.Image.objects.filter(removed_from_source=False, id__gt=start).order_by('id')[0:chunk_size]
    for db_image in server_cursor_query(qs, chunk_size=chunk_size):
        log.debug("Indexing database record %s", db_image.identifier)
        image = search.db_image_to_index(db_image)
        try:
            if len(batches) >= chunk_size:
                if not settings.DEBUG:
                    log.debug("Waiting for green status...")
                    es.cluster.health(wait_for_status='green', request_timeout=2000)
                helpers.bulk(es, batches)
                log.debug("Pushed batch of %d records to ES", len(batches))
                batches = []  # Clear the batch
            # Always queue the current record; it is pushed with the next full batch or the final flush
            batches.append(image.to_dict(include_meta=True))
        except (requests.exceptions.ReadTimeout,
                elasticsearch.exceptions.TransportError,
                elasticsearch.helpers.BulkIndexError) as e:
            if retries < MAX_CONNECTION_RETRIES:
                log.warn("Got timeout: retrying with %d retries remaining", MAX_CONNECTION_RETRIES - retries)
                retries += 1
                time.sleep(RETRY_WAIT)
            else:
                raise
    helpers.bulk(es, batches)  # flush whatever is left over


def server_cursor_query(queryset, cursor_id=0, chunk_size=DEFAULT_CHUNK_SIZE):
    connection.cursor()

    compiler = queryset.query.get_compiler(using=queryset.db)
    sql, params = compiler.as_sql()

    model = compiler.klass_info['model']
    select_fields = compiler.klass_info['select_fields']
    fields = [field[0].target.attname
              for field in compiler.select[select_fields[0]:select_fields[-1] + 1]]

    cursor_name = 'cursor-large-%d' % cursor_id
    cursor = connection.connection.cursor(name=cursor_name)
    with transaction.atomic(savepoint=False):
        cursor.execute(sql, params)
        while True:
            rows = cursor.fetchmany(chunk_size)
            if not rows:
                break
            for row in rows:
                DBObj = namedtuple('DBObj', fields)
                obj = DBObj(*row[select_fields[0]:select_fields[-1] + 1])
                yield obj


def grouper_it(n, iterable):
    it = iter(iterable)
    while True:
        chunk_it = itertools.islice(it, n)
        try:
            first_el = next(chunk_it)
        except StopIteration:
            return
        yield itertools.chain((first_el,), chunk_it)
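The command above accumulates documents and pushes them to Elasticsearch in chunks via helpers.bulk. A minimal, self-contained sketch of that batching pattern follows; the node URL, index name, and document fields are illustrative assumptions, not values from the repository.

# Standalone sketch of chunked bulk indexing with the elasticsearch-py helpers.
# Assumes a reachable Elasticsearch node; index name and documents are made up.
from elasticsearch import Elasticsearch, helpers

def index_in_chunks(docs, chunk_size=1000):
    es = Elasticsearch(["http://localhost:9200"])
    batch = []
    for doc in docs:
        batch.append({"_index": "image", "_source": doc})
        if len(batch) >= chunk_size:
            helpers.bulk(es, batch)  # push a full batch, then start a new one
            batch = []
    if batch:
        helpers.bulk(es, batch)      # flush the remainder

index_in_chunks({"identifier": str(i), "title": "img-%d" % i} for i in range(2500))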
2.0625
2
src/pulse3D/constants.py
CuriBio/sdk_refactor
0
12792059
# -*- coding: utf-8 -*- """Constants for the Mantarray File Manager.""" from typing import Dict import uuid from immutabledict import immutabledict from labware_domain_models import LabwareDefinition try: from importlib import metadata except ImportError: # pragma: no cover import importlib_metadata as metadata # type: ignore PACKAGE_VERSION = metadata.version("pulse3D") CURI_BIO_ACCOUNT_UUID = uuid.UUID("73f52be0-368c-42d8-a1fd-660d49ba5604") CURI_BIO_USER_ACCOUNT_ID = uuid.UUID("<KEY>") TWENTY_FOUR_WELL_PLATE = LabwareDefinition(row_count=4, column_count=6) MIN_SUPPORTED_FILE_VERSION = "0.1.1" CURRENT_BETA1_HDF5_FILE_FORMAT_VERSION = "0.4.2" CURRENT_BETA2_HDF5_FILE_FORMAT_VERSION = "1.0.0" FILE_FORMAT_VERSION_METADATA_KEY = "File Format Version" FILE_MIGRATION_PATHS = immutabledict({"0.3.1": "0.4.1", "0.4.1": "0.4.2"}) NOT_APPLICABLE_H5_METADATA = uuid.UUID( "59d92e00-99d5-4460-9a28-5a1a0fe9aecf" ) # Eli (1/19/21): H5 files can't store the concept of `None` in their metadata, so using this value to denote that a particular piece of metadata is not available (i.e. after migrating to a newer file format version) HARDWARE_TEST_RECORDING_UUID = uuid.UUID("a2e76058-08cd-475d-a55d-31d401c3cb34") UTC_BEGINNING_DATA_ACQUISTION_UUID = uuid.UUID("98c67f22-013b-421a-831b-0ea55df4651e") START_RECORDING_TIME_INDEX_UUID = uuid.UUID("e41422b3-c903-48fd-9856-46ff56a6534c") UTC_BEGINNING_RECORDING_UUID = uuid.UUID("d2449271-0e84-4b45-a28b-8deab390b7c2") UTC_FIRST_TISSUE_DATA_POINT_UUID = uuid.UUID("b32fb8cb-ebf8-4378-a2c0-f53a27bc77cc") UTC_FIRST_REF_DATA_POINT_UUID = uuid.UUID("7cc07b2b-4146-4374-b8f3-1c4d40ff0cf7") CUSTOMER_ACCOUNT_ID_UUID = uuid.UUID("4927c810-fbf4-406f-a848-eba5308576e6") USER_ACCOUNT_ID_UUID = uuid.UUID("7282cf00-2b6e-4202-9d9e-db0c73c3a71f") SOFTWARE_BUILD_NUMBER_UUID = uuid.UUID("b4db8436-10a4-4359-932d-aa80e6de5c76") SOFTWARE_RELEASE_VERSION_UUID = uuid.UUID("432fc3c1-051b-4604-bc3d-cc0d0bd75368") MAIN_FIRMWARE_VERSION_UUID = uuid.UUID("faa48a0c-0155-4234-afbf-5e5dbaa59537") SLEEP_FIRMWARE_VERSION_UUID = uuid.UUID("3a816076-90e4-4437-9929-dc910724a49d") XEM_SERIAL_NUMBER_UUID = uuid.UUID("e5f5b134-60c7-4881-a531-33aa0edba540") MANTARRAY_NICKNAME_UUID = uuid.UUID("0cdec9bb-d2b4-4c5b-9dd5-6a49766c5ed4") MANTARRAY_SERIAL_NUMBER_UUID = uuid.UUID("83720d36-b941-4d85-9b39-1d817799edd6") REFERENCE_VOLTAGE_UUID = uuid.UUID("0b3f3f56-0cc7-45f0-b748-9b9de480cba8") WELL_NAME_UUID = uuid.UUID("6d78f3b9-135a-4195-b014-e74dee70387b") WELL_ROW_UUID = uuid.UUID("da82fe73-16dd-456a-ac05-0b70fb7e0161") WELL_COLUMN_UUID = uuid.UUID("7af25a0a-8253-4d32-98c4-3c2ca0d83906") WELL_INDEX_UUID = uuid.UUID("cd89f639-1e36-4a13-a5ed-7fec6205f779") TOTAL_WELL_COUNT_UUID = uuid.UUID("7ca73e1c-9555-4eca-8281-3f844b5606dc") REF_SAMPLING_PERIOD_UUID = uuid.UUID("48aa034d-8775-453f-b135-75a983d6b553") TISSUE_SAMPLING_PERIOD_UUID = uuid.UUID("f629083a-3724-4100-8ece-c03e637ac19c") ADC_GAIN_SETTING_UUID = uuid.UUID("a3c3bb32-9b92-4da1-8ed8-6c09f9c816f8") ADC_TISSUE_OFFSET_UUID = uuid.UUID("41069860-159f-49f2-a59d-401783c1ecb4") ADC_REF_OFFSET_UUID = uuid.UUID("dc10066c-abf2-42b6-9b94-5e52d1ea9bfc") PLATE_BARCODE_UUID = uuid.UUID("cf60afef-a9f0-4bc3-89e9-c665c6bb6941") STIM_BARCODE_UUID = uuid.UUID("6fa67db1-c8b9-4937-b93f-6fe8bdc7e6d7") BACKEND_LOG_UUID = uuid.UUID("87533deb-2495-4430-bce7-12fdfc99158e") COMPUTER_NAME_HASH_UUID = uuid.UUID("fefd0675-35c2-45f6-855a-9500ad3f100d") PLATE_BARCODE_IS_FROM_SCANNER_UUID = uuid.UUID("7d026e86-da70-4464-9181-dc0ce2d47bd1") STIM_BARCODE_IS_FROM_SCANNER_UUID = 
uuid.UUID("6e5a4b3e-f766-4638-80f7-d95c417c0fc2") IS_FILE_ORIGINAL_UNTRIMMED_UUID = uuid.UUID("52231a24-97a3-497a-917c-86c780d9993f") TRIMMED_TIME_FROM_ORIGINAL_START_UUID = uuid.UUID("371996e6-5e2d-4183-a5cf-06de7058210a") TRIMMED_TIME_FROM_ORIGINAL_END_UUID = uuid.UUID("55f6770d-c369-42ce-a437-5ed89c3cb1f8") ORIGINAL_FILE_VERSION_UUID = uuid.UUID("cd1b4063-4a87-4a57-bc12-923ff4890844") UTC_TIMESTAMP_OF_FILE_VERSION_MIGRATION_UUID = uuid.UUID("399b2148-09d4-418b-a132-e37df2721938") FILE_VERSION_PRIOR_TO_MIGRATION_UUID = uuid.UUID("11b4945b-3cf3-4f67-8bee-7abc3c449756") BOOTUP_COUNTER_UUID = uuid.UUID("b9ccc724-a39d-429a-be6d-3fd29be5037d") TOTAL_WORKING_HOURS_UUID = uuid.UUID("f8108718-2fa0-40ce-a51a-8478e5edd4b8") TAMPER_FLAG_UUID = uuid.UUID("68d0147f-9a84-4423-9c50-228da16ba895") PCB_SERIAL_NUMBER_UUID = uuid.UUID("5103f995-19d2-4880-8a2e-2ce9080cd2f5") MAGNETOMETER_CONFIGURATION_UUID = uuid.UUID("921121e9-4191-4536-bedd-03186fa1e117") UTC_BEGINNING_STIMULATION_UUID = uuid.UUID("4b310594-ded4-45fd-a1b4-b829aceb416c") STIMULATION_PROTOCOL_UUID = uuid.UUID("ede638ce-544e-427a-b1d9-c40784d7c82d") IS_CALIBRATION_FILE_UUID = uuid.UUID("9a6f90eb-fe34-423b-bfed-fb441d6d9e5f") CHANNEL_FIRMWARE_VERSION_UUID = uuid.UUID("d9694cfe-824c-41f8-915e-91e41ce7af32") BOOT_FLAGS_UUID = uuid.UUID("762f6715-ffcd-4e8d-b707-638dd5777841") INITIAL_MAGNET_FINDING_PARAMS = uuid.UUID("da5f2f6d-6874-4e53-be10-90c4bfbd3d45") METADATA_UUID_DESCRIPTIONS = immutabledict( { # General values HARDWARE_TEST_RECORDING_UUID: "Is Hardware Test Recording", START_RECORDING_TIME_INDEX_UUID: "Timepoint of Beginning of Recording", UTC_BEGINNING_DATA_ACQUISTION_UUID: "UTC Timestamp of Beginning of Data Acquisition", UTC_BEGINNING_RECORDING_UUID: "UTC Timestamp of Beginning of Recording", UTC_FIRST_TISSUE_DATA_POINT_UUID: "UTC Timestamp of Beginning of Recorded Tissue Sensor Data", UTC_FIRST_REF_DATA_POINT_UUID: "UTC Timestamp of Beginning of Recorded Reference Sensor Data", CUSTOMER_ACCOUNT_ID_UUID: "Customer Account ID", USER_ACCOUNT_ID_UUID: "User Account ID", SOFTWARE_BUILD_NUMBER_UUID: "Software Build Number", SOFTWARE_RELEASE_VERSION_UUID: "Software Release Version", MAIN_FIRMWARE_VERSION_UUID: "Firmware Version (Main Controller)", SLEEP_FIRMWARE_VERSION_UUID: "Firmware Version (Sleep Mode)", MANTARRAY_NICKNAME_UUID: "Mantarray Nickname", MANTARRAY_SERIAL_NUMBER_UUID: "Mantarray Serial Number", REFERENCE_VOLTAGE_UUID: "Reference Voltage", WELL_NAME_UUID: "Well Name", WELL_ROW_UUID: "Well Row (zero-based)", WELL_COLUMN_UUID: "Well Column (zero-based)", WELL_INDEX_UUID: "Well Index (zero-based)", TOTAL_WELL_COUNT_UUID: "Total Wells in Plate", REF_SAMPLING_PERIOD_UUID: "Reference Sensor Sampling Period (microseconds)", TISSUE_SAMPLING_PERIOD_UUID: "Tissue Sensor Sampling Period (microseconds)", ADC_GAIN_SETTING_UUID: "ADC Gain Setting", ADC_TISSUE_OFFSET_UUID: "ADC Tissue Sensor Offset", ADC_REF_OFFSET_UUID: "ADC Reference Sensor Offset", PLATE_BARCODE_UUID: "Plate Barcode", BACKEND_LOG_UUID: "Backend log file identifier", COMPUTER_NAME_HASH_UUID: "SHA512 digest of computer name", PLATE_BARCODE_IS_FROM_SCANNER_UUID: "Is this plate barcode obtained from the scanner", IS_FILE_ORIGINAL_UNTRIMMED_UUID: "Is this an original file straight from the instrument and untrimmed", TRIMMED_TIME_FROM_ORIGINAL_START_UUID: "Number of centimilliseconds that has been trimmed off the beginning of when the original data started", TRIMMED_TIME_FROM_ORIGINAL_END_UUID: "Number of centimilliseconds that has been trimmed off the end of when the original 
data ended", ORIGINAL_FILE_VERSION_UUID: "The original version of the file when recorded, prior to any migrations to newer versions/formats.", UTC_TIMESTAMP_OF_FILE_VERSION_MIGRATION_UUID: "Timestamp when this file was migrated from an earlier version.", FILE_VERSION_PRIOR_TO_MIGRATION_UUID: "File format version that this file was migrated from", # Beta 1 specific values XEM_SERIAL_NUMBER_UUID: "XEM Serial Number", # Beta 2 specific values BOOTUP_COUNTER_UUID: "The number of times this Mantarray Instrument has booted up", TOTAL_WORKING_HOURS_UUID: "The total number of hours this Mantarray Instrument has been powered on and running", TAMPER_FLAG_UUID: "Is it suspected the internals of the Mantarray enclosure have been tampered with", PCB_SERIAL_NUMBER_UUID: "The serial number of the Mantarray PCB", MAGNETOMETER_CONFIGURATION_UUID: "The state (on/off) of the board's magnetometers", UTC_BEGINNING_STIMULATION_UUID: "UTC Timestamp of Beginning of Stimulation", STIMULATION_PROTOCOL_UUID: "The stimulation protocol that was running on this well during recording. Empty string if stimulation was not active", STIM_BARCODE_UUID: "Stim Lid Barcode", STIM_BARCODE_IS_FROM_SCANNER_UUID: "Is this stim lid barcode obtained from the scanner", IS_CALIBRATION_FILE_UUID: "Is this file a calibration (empty plate) recording", CHANNEL_FIRMWARE_VERSION_UUID: "Firmware Version (Channel Controller)", BOOT_FLAGS_UUID: "Hardware/firmware flags present on device bootup", INITIAL_MAGNET_FINDING_PARAMS: "JSON string of the initial magnet finding params that should be used in Pulse3D", } ) DATETIME_STR_FORMAT = "%Y-%m-%d %H:%M:%S.%f" CENTIMILLISECONDS_PER_SECOND = int(1e5) MICRO_TO_BASE_CONVERSION = int(1e6) MICROSECONDS_PER_CENTIMILLISECOND = 10 TISSUE_SENSOR_READINGS = "tissue_sensor_readings" REFERENCE_SENSOR_READINGS = "reference_sensor_readings" STIMULATION_READINGS = "stimulation_readings" TIME_INDICES = "time_indices" TIME_OFFSETS = "time_offsets" """ constants from mantarray_waveform_analysis library """ MILLI_TO_BASE_CONVERSION = 1000 TWITCH_PERIOD_UUID = uuid.UUID("6e0cd81c-7861-4c49-ba14-87b2739d65fb") # This is just the reciprocal of twitch period, but is pre-computed to make downstream pipelines # simpler. 
Frequency is reported in Hz TWITCH_FREQUENCY_UUID = uuid.UUID("472d0707-ff87-4198-9374-c28900bb216c") AMPLITUDE_UUID = uuid.UUID("89cf1105-a015-434f-b527-4169b9400e26") AUC_UUID = uuid.UUID("e7b9a6e4-c43d-4e8b-af7e-51742e252030") WIDTH_UUID = uuid.UUID("c4c60d55-017a-4783-9600-f19606de26f3") WIDTH_VALUE_UUID = uuid.UUID("05041f4e-c77d-42d9-a2ae-8902f912e9ac") WIDTH_RISING_COORDS_UUID = uuid.UUID("2a16acb6-4df7-4064-9d47-5d27ea7a98ad") WIDTH_FALLING_COORDS_UUID = uuid.UUID("26e5637d-42c9-4060-aa5d-52209b349c84") RELAXATION_VELOCITY_UUID = uuid.UUID("0fcc0dc3-f9aa-4f1b-91b3-e5b5924279a9") CONTRACTION_VELOCITY_UUID = uuid.UUID("73961e7c-17ec-42b0-b503-a23195ec249c") IRREGULARITY_INTERVAL_UUID = uuid.UUID("61046076-66b9-4b8b-bfec-1e00603743c0") # Kristian 9/15/21 FRACTION_MAX_UUID = uuid.UUID("8fe142e2-2504-4c9e-b3dc-817b24c7447e") # Kristian 10/29/21: for contraction to % width, or peak to % relaxation TIME_VALUE_UUID = uuid.UUID("32f5ce6b-e311-4434-8a2a-c2b6bbd81ee6") RELAXATION_TIME_UUID = uuid.UUID("0ad56cd1-7bcc-4b57-8076-14366d7f3c6a") CONTRACTION_TIME_UUID = uuid.UUID("33b5b0a8-f197-46ef-a451-a254e530757b") AMPLITUDE_UUID = uuid.UUID("89cf1105-a015-434f-b527-4169b9400e26") AUC_UUID = uuid.UUID("e7b9a6e4-c43d-4e8b-af7e-51742e252030") WIDTH_UUID = uuid.UUID("c4c60d55-017a-4783-9600-f19606de26f3") WIDTH_VALUE_UUID = uuid.UUID("05041f4e-c77d-42d9-a2ae-8902f912e9ac") WIDTH_RISING_COORDS_UUID = uuid.UUID("2a16acb6-4df7-4064-9d47-5d27ea7a98ad") WIDTH_FALLING_COORDS_UUID = uuid.UUID("26e5637d-42c9-4060-aa5d-52209b349c84") RELAXATION_VELOCITY_UUID = uuid.UUID("0fcc0dc3-f9aa-4f1b-91b3-e5b5924279a9") CONTRACTION_VELOCITY_UUID = uuid.UUID("73961e7c-17ec-42b0-b503-a23195ec249c") IRREGULARITY_INTERVAL_UUID = uuid.UUID("61046076-66b9-4b8b-bfec-1e00603743c0") FRACTION_MAX_UUID = uuid.UUID("8fe142e2-2504-4c9e-b3dc-817b24c7447e") TIME_DIFFERENCE_UUID = uuid.UUID("1363817a-b1fb-468e-9f1c-ec54fce72dfe") TIME_VALUE_UUID = uuid.UUID("32f5ce6b-e311-4434-8a2a-c2b6bbd81ee6") RELAXATION_TIME_UUID = uuid.UUID("0ad56cd1-7bcc-4b57-8076-14366d7f3c6a") CONTRACTION_TIME_UUID = uuid.UUID("33b5b0a8-f197-46ef-a451-a254e530757b") # Kristian 11/9/21: full contraction or full relaxation metrics BASELINE_TO_PEAK_UUID = uuid.UUID("03ce2d30-3580-4129-9913-2fc2e35eddb7") PEAK_TO_BASELINE_UUID = uuid.UUID("1ac2589d-4713-41c0-8dd0-1e6c98600e37") ALL_METRICS = [ TWITCH_PERIOD_UUID, FRACTION_MAX_UUID, AMPLITUDE_UUID, AUC_UUID, TWITCH_FREQUENCY_UUID, CONTRACTION_VELOCITY_UUID, RELAXATION_VELOCITY_UUID, IRREGULARITY_INTERVAL_UUID, BASELINE_TO_PEAK_UUID, PEAK_TO_BASELINE_UUID, WIDTH_UUID, RELAXATION_TIME_UUID, CONTRACTION_TIME_UUID, ] PRIOR_PEAK_INDEX_UUID = uuid.UUID("80df90dc-21f8-4cad-a164-89436909b30a") PRIOR_VALLEY_INDEX_UUID = uuid.UUID("72ba9466-c203-41b6-ac30-337b4a17a124") SUBSEQUENT_PEAK_INDEX_UUID = uuid.UUID("7e37325b-6681-4623-b192-39f154350f36") SUBSEQUENT_VALLEY_INDEX_UUID = uuid.UUID("fd47ba6b-ee4d-4674-9a89-56e0db7f3d97") BESSEL_BANDPASS_UUID = uuid.UUID("0ecf0e52-0a29-453f-a6ff-46f5ec3ae783") BESSEL_LOWPASS_10_UUID = uuid.UUID("7d64cac3-b841-4912-b734-c0cf20a81e7a") BESSEL_LOWPASS_30_UUID = uuid.UUID("eee66c75-4dc4-4eb4-8d48-6c608bf28d91") BUTTERWORTH_LOWPASS_30_UUID = uuid.UUID("de8d8cef-65bf-4119-ada7-bdecbbaa897a") # General mangetic field to force conversion factor. Obtained 03/09/2021 by <NAME>, Valid as of 11/19/21 MILLIMETERS_PER_MILLITESLA = 23.25 NEWTONS_PER_MILLIMETER = 0.000159 # Beta 1 GMR to magnetic field conversion values. 
Valid as of 11/19/21 MILLIVOLTS_PER_MILLITESLA = 1073.6 # Obtained 03/09/2021 by <NAME> MIDSCALE_CODE = 0x800000 RAW_TO_SIGNED_CONVERSION_VALUE = 2 ** 23 # subtract this value from raw hardware data REFERENCE_VOLTAGE = 2.5 ADC_GAIN = 2 # Beta 2 Memsic to magnetic field conversion factors. Valid as of 11/19/21 MEMSIC_CENTER_OFFSET = 2 ** 15 MEMSIC_MSB = 2 ** 16 MEMSIC_FULL_SCALE = 16 GAUSS_PER_MILLITESLA = 10 MIN_NUMBER_PEAKS = 3 MIN_NUMBER_VALLEYS = 3 """ pulse3D constants """ METADATA_EXCEL_SHEET_NAME = "metadata" METADATA_RECORDING_ROW_START = 0 METADATA_INSTRUMENT_ROW_START = METADATA_RECORDING_ROW_START + 4 METADATA_OUTPUT_FILE_ROW_START = METADATA_INSTRUMENT_ROW_START + 6 CONTINUOUS_WAVEFORM_SHEET_NAME = "continuous-waveforms" AGGREGATE_METRICS_SHEET_NAME = "aggregate-metrics" PER_TWITCH_METRICS_SHEET_NAME = "per-twitch-metrics" NUMBER_OF_PER_TWITCH_METRICS = 45 SNAPSHOT_CHART_SHEET_NAME = "continuous-waveform-snapshots" FULL_CHART_SHEET_NAME = "full-continuous-waveform-plots" TWITCH_FREQUENCIES_CHART_SHEET_NAME = "twitch-frequencies-plots" FORCE_FREQUENCY_RELATIONSHIP_SHEET = "force-frequency-relationship" INTERPOLATED_DATA_PERIOD_SECONDS = 1 / 100 INTERPOLATED_DATA_PERIOD_US = INTERPOLATED_DATA_PERIOD_SECONDS * MICRO_TO_BASE_CONVERSION TSP_TO_DEFAULT_FILTER_UUID = { # Tissue Sampling Period (µs) to default Pipeline Filter UUID 9600: BESSEL_LOWPASS_10_UUID, 1600: BUTTERWORTH_LOWPASS_30_UUID, } DEFAULT_CELL_WIDTH = 64 CHART_ALPHA = 60 # for full/snapshots -- num pixels between left figure edge and plot area CHART_GAMMA = 150 # for full/snapshots -- num pixels between right figure edge and plot area CHART_PIXELS_PER_SECOND = 35 # for full/snapshots -- number of pixels per second CHART_MAXIMUM_SNAPSHOT_LENGTH = 10 CHART_HEIGHT = 300 CHART_HEIGHT_CELLS = 15 CHART_FIXED_WIDTH_CELLS = 8 CHART_FIXED_WIDTH = DEFAULT_CELL_WIDTH * CHART_FIXED_WIDTH_CELLS PEAK_VALLEY_COLUMN_START = 100 SECONDS_PER_CELL = 2.5 CALCULATED_METRIC_DISPLAY_NAMES = { TWITCH_PERIOD_UUID: "Twitch Period (seconds)", TWITCH_FREQUENCY_UUID: "Twitch Frequency (Hz)", AMPLITUDE_UUID: "Active Twitch Force (μN)", FRACTION_MAX_UUID: "Fraction of Maximum Active Twitch Force (μN)", AUC_UUID: "Energy (μJ)", CONTRACTION_VELOCITY_UUID: "Twitch Contraction Velocity (μN/second)", RELAXATION_VELOCITY_UUID: "Twitch Relaxation Velocity (μN/second)", IRREGULARITY_INTERVAL_UUID: "Twitch Interval Irregularity (seconds)", TIME_DIFFERENCE_UUID: "Time Difference (seconds)", WIDTH_UUID: "Twitch Width {} (seconds)", RELAXATION_TIME_UUID: "Time From Peak to Relaxation {} (seconds)", CONTRACTION_TIME_UUID: "Time From Contraction {} to Peak (seconds)", BASELINE_TO_PEAK_UUID: "Time From Baseline to Peak (seconds)", PEAK_TO_BASELINE_UUID: "Time From Peak to Baseline (seconds)", } CALCULATED_METRICS = immutabledict( { "by_width": (WIDTH_UUID, CONTRACTION_TIME_UUID, RELAXATION_TIME_UUID), "scalar": ( AMPLITUDE_UUID, AUC_UUID, BASELINE_TO_PEAK_UUID, CONTRACTION_VELOCITY_UUID, FRACTION_MAX_UUID, IRREGULARITY_INTERVAL_UUID, PEAK_TO_BASELINE_UUID, RELAXATION_VELOCITY_UUID, TWITCH_FREQUENCY_UUID, TWITCH_PERIOD_UUID, ), } ) COORDS = (10, 25, 50, 75, 90) TWITCH_WIDTH_METRIC_DISPLAY_NAMES: Dict[int, str] = immutabledict( (coord, f"Twitch Width {coord} (seconds)") for coord in reversed(COORDS) ) CONTRACTION_COORDINATES_DISPLAY_NAMES: Dict[int, str] = immutabledict( (coord, f"Contraction Coordinates {coord}") for coord in reversed(COORDS) ) RELAXATION_COORDINATES_DISPLAY_NAMES: Dict[int, str] = immutabledict( (coord, f"Relaxation Coordinates {coord}") for coord 
in COORDS ) CONTRACTION_TIME_DIFFERENCE_DISPLAY_NAMES: Dict[int, str] = immutabledict( (coord, f"Time From Contraction {coord} to Peak (seconds)") for coord in reversed(COORDS) ) RELAXATION_TIME_DIFFERENCE_DISPLAY_NAMES: Dict[int, str] = immutabledict( (coord, f"Time From Peak to Relaxation {coord} (seconds)") for coord in COORDS ) ALL_FORMATS = immutabledict({"CoV": {"num_format": "0.00%"}}) TWITCHES_POINT_UP_UUID = uuid.UUID("97f69f56-f1c6-4c50-8590-7332570ed3c5") INTERPOLATION_VALUE_UUID = uuid.UUID("466d0131-06b7-4f0f-ba1e-062a771cb280") mutable_metadata_uuid_descriptions = dict( METADATA_UUID_DESCRIPTIONS ) # create a mutable version to add in the new values specific to the SDK (.update is an in-place operation that doesn't return the dictionary, so chaining is difficult) mutable_metadata_uuid_descriptions.update( { TWITCHES_POINT_UP_UUID: "Flag indicating whether or not the twitches in the data point up or not", INTERPOLATION_VALUE_UUID: "Desired value for optical well data interpolation", } ) METADATA_UUID_DESCRIPTIONS = immutabledict(mutable_metadata_uuid_descriptions) EXCEL_OPTICAL_METADATA_CELLS = immutabledict( { WELL_NAME_UUID: "E2", UTC_BEGINNING_RECORDING_UUID: "E3", PLATE_BARCODE_UUID: "E4", TISSUE_SAMPLING_PERIOD_UUID: "E5", TWITCHES_POINT_UP_UUID: "E6", MANTARRAY_SERIAL_NUMBER_UUID: "E7", INTERPOLATION_VALUE_UUID: "E8", } ) """ Magnet Finding """ # 10 seconds at sampling rate of 100Hz BASELINE_MEAN_NUM_DATA_POINTS = 10 * 100
1.960938
2
HS110Influx.py
GiantMolecularCloud/HS110Influx
0
12792060
<filename>HS110Influx.py
####################################################################################################
# log HS110 to InfluxDB
####################################################################################################

"""
HS110 to InfluxDB
author: <NAME> (GiantMolecularCloud)

This script uses environment variables for authentication and settings:
HS110_IP        IP address of the HS110
HS110_PORT      port to use for the connection, default: 9999
INFLUX_IP       IP address of the machine InfluxDB is running on, default: 127.0.0.1
INFLUX_PORT     port to connect to InfluxDB, default: 8086
INFLUX_USER     user to access the InfluxDB database, default: root
INFLUX_PASSWD   password to access the InfluxDB database, default: <PASSWORD>
INFLUX_DB       Database to write the measurements to, default: HS110
SAMPLE_TIME     time to wait before getting the next sample, default: 60
"""

####################################################################################################
# imports
####################################################################################################

import os
import time
from datetime import datetime

from influxdb import InfluxDBClient
import influxdb.exceptions as inexc


####################################################################################################
# settings
####################################################################################################

# read in environment variables, set some defaults if env vars are not defined
HS110_IP      = os.getenv('HS110_IP')
HS110_PORT    = int(os.getenv('HS110_PORT') or 9999)
INFLUX_IP     = os.getenv('INFLUX_IP') or '127.0.0.1'
INFLUX_PORT   = int(os.getenv('INFLUX_PORT') or 8086)
INFLUX_USER   = os.getenv('INFLUX_USER') or 'root'
INFLUX_PASSWD = os.getenv('INFLUX_PASSWD') or '<PASSWORD>'
INFLUX_DB     = os.getenv('INFLUX_DB') or 'HS110'
SAMPLE_TIME   = int(os.getenv('SAMPLE_TIME') or 60)


####################################################################################################
# helper functions
####################################################################################################

class HS110:
    def __init__(self, ip, port):
        self.ip = ip
        self.port = port

    def encrypt(self, string):
        """
        Encrypt the TP-Link Smart Home Protocol: XOR Autokey Cipher with starting key = 171
        This follows: https://github.com/softScheck/tplink-smartplug
        """
        from struct import pack
        key = 171
        result = pack('>I', len(string))
        for i in string:
            a = key ^ ord(i)
            key = a
            result += bytes([a])
        return result

    def decrypt(self):
        """
        Decrypt the TP-Link Smart Home Protocol: XOR Autokey Cipher with starting key = 171
        This follows: https://github.com/softScheck/tplink-smartplug
        """
        key = 171
        self.decrypted = ""
        for i in self.encrypted[4:]:
            a = key ^ i
            key = i
            self.decrypted += chr(a)

    def get_raw(self):
        """
        connect to HS110, send payload and receive power data
        """
        import socket
        try:
            sock_tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock_tcp.settimeout(int(10))
            sock_tcp.connect((self.ip, self.port))
            sock_tcp.settimeout(None)
            sock_tcp.send(self.encrypt('{"emeter":{"get_realtime":{}}}'))
            self.encrypted = sock_tcp.recv(2048)
            sock_tcp.close()
        except:
            raise ConnectionError("Could not connect to HS110 at IP " + str(self.ip) + " on port " + str(self.port))

    def decrypt_power(self):
        """
        decrypt power data and convert to Volts, Ampere, Watt, kWh
        """
        import json
        try:
            self.decrypt()
            decrypt_dict = json.loads(self.decrypted)
            self.data = {'voltage': decrypt_dict['emeter']['get_realtime']['voltage_mv'] / 1000,       # V
                         'current': decrypt_dict['emeter']['get_realtime']['current_ma'] / 1000,       # A
                         'power': decrypt_dict['emeter']['get_realtime']['power_mw'] / 1000,           # W
                         'energy_total': decrypt_dict['emeter']['get_realtime']['total_wh'] / 1000,    # kWh
                         'error_code': decrypt_dict['emeter']['get_realtime']['err_code']
                         }
        except:
            raise TypeError("Could not decrypt returned data.")

    def error_data(self):
        """
        In case of an error set all data to None and return error code 9999
        This error code is presumably not used by TP-Link, so I hijack this metric to let '9999'
        denote errors within HS110Influx.
        """
        self.data = {'voltage': None,
                     'current': None,
                     'power': None,
                     'energy_total': None,
                     'error_code': 9999
                     }

    def poll(self):
        """
        Poll the HS110 and format the data to be sent to InfluxDB.
        """
        self.polltime = datetime.utcnow().isoformat()
        try:
            self.get_raw()
            self.decrypt_power()
        except ConnectionError:
            print(self.polltime, " Error contacting HS110.")
            self.error_data()
        except TypeError:
            print(self.polltime, " Error decrypting data")
            self.error_data()
        except Exception:
            print(self.polltime, " Unknown error.")
            self.error_data()
        return [{'measurement': 'power',
                 'tags': {'sensor': 'HS110'},
                 'time': self.polltime,
                 'fields': self.data
                 }]


####################################################################################################
# Initialize
####################################################################################################

# Set up HS110
HS = HS110(HS110_IP, HS110_PORT)

# connect to InfluxDB
client = InfluxDBClient(host=INFLUX_IP,
                        port=INFLUX_PORT,
                        username=INFLUX_USER,
                        password=INFLUX_PASSWD
                        )

# create new database if necessary
if not INFLUX_DB in [db['name'] for db in client.get_list_database()]:
    client.create_database(INFLUX_DB)

# select current database
client.switch_database(INFLUX_DB)


####################################################################################################
# Send data to influxdb
####################################################################################################

def write_database(client, data):
    """
    Writes a given data record to the database and prints unexpected results.
    Copy/paste from my homeclimate code.
    """
    try:
        iresponse = client.write_points(data)
        if not iresponse:
            print("Sending data to database failed. Response: ", iresponse)
    except inexc.InfluxDBServerError as e:
        print(datetime.utcnow().isoformat(), " Sending data to database failed due to timeout.\n", e)
        pass
    except Exception as e:
        print(datetime.utcnow().isoformat(), " Encountered unknown error.\n", e)
        pass


####################################################################################################
# Continuously take data
####################################################################################################

try:
    while True:
        try:
            write_database(client=client,
                           data=HS.poll()
                           )
        except Exception as e:
            print(e)
        finally:
            time.sleep(SAMPLE_TIME)
except KeyboardInterrupt:
    print(datetime.now(), " Program stopped by keyboard interrupt [CTRL_C] by user. ")

####################################################################################################
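encrypt() and decrypt() above implement TP-Link's XOR autokey scheme with a 4-byte length prefix. The following is a self-contained round-trip sketch of that scheme (no device or network needed) mirroring the same logic.

# Round-trip sketch of the XOR autokey scheme used by HS110.encrypt()/decrypt().
# Pure Python, runs without a smart plug.
from struct import pack

def xor_encrypt(plain: str) -> bytes:
    key = 171
    out = pack('>I', len(plain))      # 4-byte big-endian length prefix
    for ch in plain:
        key = key ^ ord(ch)           # autokey: each ciphertext byte becomes the next key
        out += bytes([key])
    return out

def xor_decrypt(cipher: bytes) -> str:
    key = 171
    plain = ""
    for byte in cipher[4:]:           # skip the length prefix
        plain += chr(key ^ byte)
        key = byte                    # autokey: previous ciphertext byte is the next key
    return plain

payload = '{"emeter":{"get_realtime":{}}}'
assert xor_decrypt(xor_encrypt(payload)) == payload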
2.5625
3
international/middleware.py
project-cece/django-international-sites
3
12792061
<reponame>project-cece/django-international-sites<filename>international/middleware.py<gh_stars>1-10
from django.utils.deprecation import MiddlewareMixin
from django.utils import translation
from django.conf import settings

from .models import CountrySite


class InternationalSiteMiddleware(MiddlewareMixin):
    """
    Middleware that sets `country` attribute to request object.
    """

    def process_request(self, request):
        request.country_site = CountrySite.objects.get_current(request)

        # Set language based on country site if wanted
        if getattr(settings, "FORCE_COUNTRY_LANGUAGE", False):
            default_language = request.country_site.default_language
            if request.LANGUAGE_CODE != default_language:
                translation.activate(default_language)
                request.LANGUAGE_CODE = translation.get_language()

    def process_response(self, request, response):
        local = request.COOKIES.get("local", "")
        country_code = request.country_site.country_code

        # For use by js frontend
        if local != country_code:
            response.set_cookie("local", country_code)
            request.session["local"] = country_code

        return response
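For the middleware above to run it has to be listed in the Django settings. The sketch below is an assumed settings.py fragment: the dotted path presumes the app is importable as international, and the ordering comments reflect only what the middleware reads (request.LANGUAGE_CODE) and writes (request.session).

# settings.py sketch (assumed project layout; only MIDDLEWARE and FORCE_COUNTRY_LANGUAGE
# come from the code above, the rest is standard Django)
MIDDLEWARE = [
    "django.contrib.sessions.middleware.SessionMiddleware",      # needed: the middleware writes request.session["local"]
    "django.middleware.common.CommonMiddleware",
    "django.middleware.locale.LocaleMiddleware",                 # sets request.LANGUAGE_CODE before it is read below
    "international.middleware.InternationalSiteMiddleware",
]

FORCE_COUNTRY_LANGUAGE = True   # opt in to switching the language to the country site's default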
2.125
2
exoskeleton/__init__.py
RuedigerVoigt/exoskeleton
22
12792062
<reponame>RuedigerVoigt/exoskeleton
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"Exoskeleton Crawler Framework for Python"

from exoskeleton.__main__ import Exoskeleton
from exoskeleton import _version

NAME = "exoskeleton"
__version__ = _version.__version__
__author__ = "<NAME>"
0.972656
1
numbamisc/utils/__init__.py
MSeifert04/numbamisc
4
12792063
<filename>numbamisc/utils/__init__.py
from ._generatefilters import *
1.078125
1
setup.py
cm107/logger
0
12792064
<gh_stars>0
from setuptools import setup, find_packages
import logger as pkg

packages = find_packages(
    where='.',
    include=['logger*']
)

with open("README.md", "r") as fh:
    long_description = fh.read()

setup(
    name='pyclay_logger',
    version=pkg.__version__,
    description='logger library',
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/cm107/logger",
    author='<NAME>',
    author_email='<EMAIL>',
    license='MIT License',
    packages=packages,
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    install_requires=[
        'pylint>=2.4.2',
        'twine>=3.1.1'
    ],
    python_requires='>=3.6'
)
1.5
2
app/examplereplacer.py
aravindvnair99/Natural-Language-Processing-Project
0
12792065
<reponame>aravindvnair99/Natural-Language-Processing-Project
from nltk.corpus import wordnet
from nltk.tokenize import word_tokenize


class AntonymReplacer(object):

    def replace(self, word):
        # collect the antonyms of every lemma of every synset of the word
        ant = list()
        for syn in wordnet.synsets(word):
            for lemma in syn.lemmas():
                if lemma.antonyms():
                    ant.append(lemma.antonyms()[0].name())
        if len(ant) >= 1:
            return ant[0]
        else:
            return None

    def negreplace(self, string):
        # replace "not <word>" with an antonym of <word> where one exists
        i = 0
        sent = word_tokenize(string)
        len_sent = len(sent)
        words = []
        while i < len_sent:
            word = sent[i]
            if word == 'not' and i + 1 < len_sent:
                ant = self.replace(sent[i + 1])
                if ant:
                    words.append(ant)
                    i += 2
                    continue
            words.append(word)
            i += 1
        return words
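A possible usage sketch for the replacer above; it assumes the NLTK wordnet and punkt corpora are installed, and the exact antonym returned depends on the WordNet data shipped with NLTK.

# Usage sketch for AntonymReplacer; download the corpora once before running.
import nltk
nltk.download('wordnet')   # needed for antonym lookups
nltk.download('punkt')     # needed for word_tokenize

replacer = AntonymReplacer()
print(replacer.replace('good'))                   # some antonym of "good", e.g. "evil" (data-dependent)
print(replacer.negreplace('the food is not good'))
# e.g. ['the', 'food', 'is', 'evil'] -- "not good" collapses to a single antonym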
3.578125
4
tests/test_api.py
jchen0506/bse-staging
0
12792066
from flask import current_app import pytest import json from base64 import b64encode import basis_set_exchange as bse headers = {'Content-Type': 'application/json'} def get_ref_formats(): return [(format) for format in bse.get_reference_formats()] @pytest.mark.usefixtures("app", "client", autouse=True) # to use fixtures from conftest class TestAPIs(object): """ Testing the APIs by connecting to the flask app from a client. """ @classmethod def setup_class(cls): cls.api_url = '/api/' cls.template_url = '/' def test_app_exists(self): assert current_app is not None def get_api_headers(self, username, password): return { 'Authorization': 'Basic ' + b64encode( (username + ':' + password).encode('utf-8')).decode('utf-8'), 'Accept': 'application/json', 'Content-Type': 'application/json' } def test_get_formats(self, client): """Get the supported formats of the basis sets """ response = client.get(self.api_url + 'formats/') assert response.status_code == 200 data = json.loads(response.get_data(as_text=True)) assert type(data) == dict assert data['gamess_us'] == 'GAMESS US' def test_get_references_formats(self, client): """Get the supported references formats """ response = client.get(self.api_url + 'reference_formats/') assert response.status_code == 200 data = json.loads(response.get_data(as_text=True)) assert type(data) == dict assert data['bib'] == 'BibTeX' def test_get_metadata(self, client): """Get the bs metadata """ response = client.get(self.api_url + 'metadata/') assert response.status_code == 200 data = json.loads(response.get_data(as_text=True)) assert type(data) == dict # get the basis data of any basis set basis_set_name = list(data.keys())[0] basis_set = data[basis_set_name] assert 'auxiliaries' in basis_set assert 'functiontypes' in basis_set assert 'latest_version' in basis_set assert 'display_name' in basis_set assert 'family' in basis_set assert 'role' in basis_set @pytest.mark.parametrize('bs_format,output',[ ('gaussian94', 'Basis set: 3-21G'), ('json', '"name": "3-21G"') ]) def test_get_simple_basis(self, bs_format, output, client): """Get a simple basis set""" bs_name = '3-21g' url = self.api_url + 'basis/{}/format/{}/'.format(bs_name, bs_format) response = client.get(url) assert response.status_code == 200 data = response.get_data(as_text=True) assert output in data if bs_format == 'json': assert json.loads(data) def test_get_basis_elements(self, client): """Get a simple basis set""" bs_name = '3-21g' bs_format = 'gaussian94' params = dict(elements='1,3') url = self.api_url + 'basis/{}/format/{}/'.format(bs_name, bs_format) response = client.get(url, query_string=params) assert response.status_code == 200 data = response.get_data(as_text=True) assert 'Basis set: 3-21G' in data assert 'H' in data and 'Li' in data @pytest.mark.parametrize('rf_format', get_ref_formats()) def test_get_references(self, rf_format, client): """Get references for a basis set with different formats""" bs_name = '3-21g' params = dict(elements='1,3') url = self.api_url + 'references/{}/format/{}/'.format(bs_name, rf_format) print(url) response = client.get(url, query_string=params) assert response.status_code == 200 data = response.get_data(as_text=True) assert data if rf_format == 'json': assert json.loads(data) # without elements response = client.get(url) assert response.status_code == 200 def test_get_notes(self, client): """Get notes of a basis set""" bs_name = '3-21g' url = self.api_url + 'notes/{}/'.format(bs_name) response = client.get(url) assert response.status_code == 200 assert 
response.get_data(as_text=True) @pytest.mark.parametrize('family_name', ['pople', 'sto']) def test_bs_family_notes(self, family_name, client): """Get basis set family notes""" url = self.api_url + 'family_notes/{}/'.format(family_name) response = client.get(url) assert response.status_code == 200 assert response.get_data(as_text=True) @pytest.mark.parametrize('bs_format', bse.get_formats().keys()) @pytest.mark.parametrize('archive_type', bse.get_archive_types().keys()) def test_download(self, bs_format, archive_type, client): """Get basis set family notes""" ver = bse.version() url = self.api_url + 'download/{}/{}/{}'.format(ver, bs_format, archive_type) response = client.head(url) assert response.status_code == 200
2.265625
2
Flask_app2/app/app.py
npayneau/gesture-detection
1
12792067
from flask import Flask, render_template, Response, request, jsonify
from video import Video
import requests

app = Flask(__name__)
vid = Video(0)
geste = ""
data = ""


def gen():
    global geste
    while True:
        frame, geste = vid.get_frame()
        r = requests.post('http://localhost:8090/getAPI', data={'geste': geste, 'position': '0123'})
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')


@app.route('/')
def video():
    return render_template('index.html', geste=geste)


@app.route('/pptDisplay')
def ppt():
    return render_template('pptDisplay.html')


@app.route('/video_feed')
def video_feed():
    return Response(gen(), mimetype='multipart/x-mixed-replace; boundary=frame')


@app.route('/geste')
def post_geste():
    # distinct function name so the view does not shadow the module-level `geste` used by gen()
    r = requests.post('http://localhost:8090/getAPI', data={'geste': "Main Ouverte", 'position': '0123'})
    return "geste"


@app.route('/data', methods=['GET', 'POST'])
def handle_data():
    # store the posted form fields and echo them back as JSON
    # (a raw request object cannot be returned from a Flask view)
    global data
    if request.method == 'POST':
        data = request.form.to_dict()
        return jsonify(data)
    if request.method == 'GET':
        return jsonify(data)


if __name__ == '__main__':
    app.run(debug=False, port=8080)
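The routes above rely on a Video class from video.py that is not included in this snippet. The stand-in below is purely a hypothetical illustration of the interface they assume, namely get_frame() returning a JPEG-encoded frame together with the detected gesture label.

# Hypothetical stand-in for video.Video, only to illustrate the assumed interface:
# get_frame() must return (jpeg_bytes, gesture_label).
import cv2

class Video:
    def __init__(self, source=0):
        self.cap = cv2.VideoCapture(source)

    def get_frame(self):
        ok, frame = self.cap.read()
        if not ok:
            return b'', ""
        ok, jpeg = cv2.imencode('.jpg', frame)
        gesture = ""                  # a real implementation would run gesture detection here
        return jpeg.tobytes(), gesture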
2.78125
3
phi/sensors/misc/dae_ip32in.py
alttch/eva-phi
1
12792068
__author__ = "Altertech Group, https://www.altertech.com/" __copyright__ = "Copyright (C) 2012-2018 Altertech Group" __license__ = "Apache License 2.0" __version__ = "2.0.0" __description__ = "Denkovi smartDEN IP-32IN" __api__ = 4 __required__ = ['value', 'events'] __mods_required__ = [] __lpi_default__ = 'sensor' __equipment__ = 'smartDEN IP-32IN' __features__ = [] __config_help__ = [{ 'name': 'host', 'help': 'module host/ip', 'type': 'str', 'required': True }, { 'name': 'community', 'help': 'snmp default community (default: public)', 'type': 'str', 'required': False }, { 'name': 'retries', 'help': 'snmp retry attemps (default: 0)', 'type': 'int', 'required': False }] __get_help__ = [] __set_help__ = [] __help__ = """ PHI for Denkovi smartDEN IP-32IN Sensors should have port set 1-16 for digital inputs, a1-a8 for analog inputs, t1-8 for temperature inputs. DIN events can be received by SNMP traps. For production it is recommended to install python "python3-netsnmp" module. """ try: import netsnmp except: netsnmp = None from eva.uc.drivers.phi.generic_phi import PHI as GenericPHI from eva.uc.driverapi import log_traceback from eva.uc.driverapi import get_timeout from eva.uc.driverapi import handle_phi_event from eva.tools import parse_host_port import eva.uc.drivers.tools.snmp as snmp import eva.traphandler from eva.uc.driverapi import phi_constructor class PHI(GenericPHI): @phi_constructor def __init__(self, **kwargs): self.snmp_host, self.snmp_port = parse_host_port( self.phi_cfg.get('host'), 161) self.port_state = {} if not self.snmp_host: self.log_error('no host specified') self.ready = False self.community = self.phi_cfg.get('community') if self.phi_cfg.get( 'community') else 'public' try: self.snmp_tries = int(self.phi_get('retries')) + 1 except: self.snmp_tries = 1 self.oid_din = '.1.3.6.1.4.1.42505.7.2.1.1.7' self.oid_ain = '.1.3.6.1.4.1.42505.7.2.2.1.6' self.oid_temp = '.1.3.6.1.4.1.42505.7.2.3.1.7' self.oid_name = '.1.3.6.1.4.1.42505.7.1.1.0' self.oid_version = '.1.3.6.1.4.1.42505.7.1.2.0' def start(self): eva.traphandler.subscribe(self) def stop(self): eva.traphandler.unsubscribe(self) def get(self, port=None, cfg=None, timeout=0): if cfg: host, snmp_port = parse_host_port(cfg.get('host'), 161) community = cfg.get('community') tries = cfg.get('retries') try: tries = int(tries) + 1 except: tries = None else: host = None community = None tries = None if not host: host = self.snmp_host snmp_port = self.snmp_port if not community: community = self.community if tries is None: tries = self.snmp_tries if not host or not community: return None _timeout = timeout / tries port = str(port) if port.startswith('a'): oid = self.oid_ain port_max = 8 port = port[1:] ret = 1 elif port.startswith('t'): oid = self.oid_temp port_max = 8 port = port[1:] ret = 2 else: oid = self.oid_din port_max = 16 ret = 0 try: port = int(port) except: return None if port < 1 or port > port_max: return None if netsnmp: try: sess = netsnmp.Session(Version=2, DestHost=host, RemotePort=snmp_port, Community=community, Timeout=int(_timeout * 1000000), Retries=self.snmp_tries - 1) o = netsnmp.VarList('%s.%u' % (oid, port - 1)) result = sess.get(o)[0].decode() except Exception as e: self.log_error(e) log_traceback() return None else: result = snmp.get('%s.%u' % (oid, port - 1), host, snmp_port, community, _timeout, tries - 1, rf=int) if ret == 0: return result elif ret == 1: return int(result) / 100 elif ret == 2: return None if result == '---' else result def get_ports(self): l = self.generate_port_list(port_max=16, 
name='DIN port #{}', description='digital input port #{}') for i in range(1, 9): l.append({ 'port': 'a{}'.format(i), 'name': 'AIN port #{}'.format(i), 'description': 'analog input port #{}'.format(i) }) for i in range(1, 9): l.append({ 'port': 't{}'.format(i), 'name': 'Temp port #{}'.format(i), 'description': 'temperature input port #{}'.format(i) }) return l def process_snmp_trap(self, host, data): if host != self.snmp_host: return if data.get('1.3.6.1.6.3.1.1.4.1.0') != '1.3.6.1.4.1.42505.7.0.1': return for i in range(16): value = data.get('1.3.6.1.4.1.42505.7.2.1.1.7.{}'.format(i)) if value: port = 'din{}'.format(i + 1) self.log_debug('event {} = {}'.format(port, value)) self.port_state[port] = value handle_phi_event(self, port, {port: value}) return def test(self, cmd=None): if cmd == 'module': return 'default' if not netsnmp else 'netsnmp' if cmd == 'self' and self.snmp_host is None: return 'OK' if cmd == 'info' or cmd == 'self': if netsnmp: try: sess = netsnmp.Session(Version=2, DestHost=self.snmp_host, RemotePort=self.snmp_port, Community=self.community, Timeout=int(get_timeout() * 1000000), Retries=self.snmp_tries - 1) except: log_traceback() sess = None if netsnmp: try: name = sess.get(netsnmp.VarList(self.oid_name))[0].decode() except: log_traceback() name = None else: name = snmp.get(self.oid_name, self.snmp_host, self.snmp_port, self.community, timeout=get_timeout(), retries=self.snmp_tries - 1) if not name: return 'FAILED' if name and cmd == 'self': return 'OK' if netsnmp: try: version = sess.get(netsnmp.VarList( self.oid_version))[0].decode() except: version = None else: version = snmp.get(self.oid_version, self.snmp_host, self.snmp_port, self.community, timeout=get_timeout()) if not version: return 'FAILED' return '%s %s' % (name.strip(), version.strip()) return { 'info': 'returns relay ip module name and version', 'module': 'current SNMP module' }
1.65625
2
app/main/forms.py
martamatos/kinetics_db
0
12792069
<filename>app/main/forms.py import re from flask_wtf import FlaskForm from wtforms import FloatField, IntegerField, SelectField, StringField, SubmitField, TextAreaField from wtforms.ext.sqlalchemy.fields import QuerySelectField, QuerySelectMultipleField from wtforms.validators import ValidationError, DataRequired, Length, Optional from flask_wtf.file import FileField, FileRequired from app.models import Compartment, Enzyme, EvidenceLevel, Mechanism, Model, Organism, Reaction, User, \ EnzymeReactionOrganism, EnzymeReactionInhibition, EnzymeReactionActivation, \ EnzymeReactionEffector, ModelAssumptions, EnzymeReactionMiscInfo, Metabolite from app.utils.parsers import parse_input_list, ReactionParser def get_compartments(): return Compartment.query def get_enzymes(): return Enzyme.query def get_enzyme_activations(): return EnzymeReactionActivation.query def get_enzyme_effectors(): return EnzymeReactionEffector.query def get_enzyme_inhibitions(): return EnzymeReactionInhibition.query def get_enzyme_misc_infos(): return EnzymeReactionMiscInfo.query def get_enzyme_reaction_organisms(): return EnzymeReactionOrganism.query def get_evidence_names(): return EvidenceLevel.query def get_mechanisms(): return Mechanism.query def get_model_assumptions(): return ModelAssumptions.query def get_models(): return Model.query def get_organisms(): return Organism.query def get_reactions(): return Reaction.query class EditProfileForm(FlaskForm): username = StringField('Username', validators=[DataRequired()]) about_me = TextAreaField('About me', validators=[Length(min=0, max=140)]) submit = SubmitField('Submit') def __init__(self, original_username, *args, **kwargs): super(EditProfileForm, self).__init__(*args, **kwargs) self.original_username = original_username def validate_username(self, username): if username.data != self.original_username: user = User.query.filter_by(username=self.username.data).first() if user is not None: raise ValidationError('Please use a different username.') class PostForm(FlaskForm): post = TextAreaField('Say something', validators=[ DataRequired(), Length(min=1, max=140)]) submit = SubmitField('Submit') class EnzymeForm(FlaskForm): def __init__(self, data=None, flag='insert'): FlaskForm.__init__(self, data=data) self.local_data = data self.flag = flag name = StringField('Enzyme name (e.g. phosphofructokinase) *', validators=[DataRequired()]) acronym = StringField('Enzyme bigg_acronym (eg. PFK) *', validators=[DataRequired()]) isoenzyme = StringField('Isoenzyme (e.g. PFK1) *', validators=[DataRequired()]) ec_number = StringField('EC number *', validators=[DataRequired()]) organism_name = QuerySelectField('Organism name (eg. E coli)', query_factory=get_organisms, allow_blank=True) number_of_active_sites = IntegerField('Number of enzyme active sites (you need to specify the organism first)', validators=[Optional()]) gene_names = StringField('Encoding gene names (you need to specify the organism first)', id='gene_bigg_ids') uniprot_id_list = StringField('Uniprot IDs (you need to specify the organism first) (e.g. Q5FKG6, P21777)') pdb_structure_ids = StringField('PDB structure IDs (you need to specify the organism first) (e.g. 
3H8A, 1UCW)') strain = StringField('Strain for the PDB structure', id='strain') submit = SubmitField('Submit') def validate_isoenzyme(self, isoenzyme): if self.flag != 'modify' or isoenzyme.data != self.local_data['isoenzyme']: enzyme_list = Enzyme.query.all() isoenzyme_list = set([enzyme.isoenzyme for enzyme in enzyme_list]) if enzyme_list else {} if isoenzyme.data in isoenzyme_list: raise ValidationError('The isoenzyme you specified already exists. Please choose a different name.') def validate_number_of_active_sites(self, number_of_active_sites): if number_of_active_sites.data and not self.organism_name.data: raise ValidationError('If you specify the number of active sites you must also specify the organism name.') def validate_gene_names(self, gene_names): if gene_names.data and not self.organism_name.data: raise ValidationError('If you specify encoding genes you must also specify the organism name.') def validate_uniprot_id_list(self, uniprot_id_list): if uniprot_id_list.data and not self.organism_name.data: raise ValidationError('If you specify uniprot IDs you must also specify the organism name') def validate_pdb_structure_ids(self, pdb_structure_ids): if pdb_structure_ids.data and not self.organism_name.data: raise ValidationError('If you specify PDB structures you must also specify the organism name') def validate_strain(self, strain): strain_list = parse_input_list(strain.data) pdb_id_list = parse_input_list(self.pdb_structure_ids.data) if len(strain_list) > 1 and len(pdb_id_list) and len(strain_list) != len(pdb_id_list): raise ValidationError( 'When providing PDB IDs either provide:\n-the corresponding strains for each PDB ID;\n-a single strain name\n-or no strain names.') class EnzymeInhibitionForm(FlaskForm): enzyme = QuerySelectField('Isoenzyme *', query_factory=get_enzymes) reaction = QuerySelectField('Reaction *', query_factory=get_reactions) organism = QuerySelectField('Organism *', query_factory=get_organisms) models = QuerySelectMultipleField('Model(s)', query_factory=get_models, allow_blank=True) inhibitor_met = StringField('Inhibiting metabolite (e.g. adp), please use bigg IDs *', validators=[DataRequired()], id='metabolite_list') affected_met = StringField('Affected metabolite (e.g. atp), please use bigg IDs', id='metabolite_list') inhibition_type = SelectField('Inhibition type', choices=[('Unknown', 'Unknown'), ('Competitive', 'Competitive'), ('Uncompetitive', 'Uncompetitive'), ('Noncompetitive', 'Noncompetitive'), ('Mixed', 'Mixed')]) inhibition_constant = FloatField('Inhibition constant (in M)', validators=[Optional()]) inhibition_evidence_level = QuerySelectField('Enzyme inhibition evidence level', query_factory=get_evidence_names, allow_blank=True) references = StringField( 'References, please use DOI (e.g. https://doi.org/10.1093/bioinformatics/bty942, http://doi.org/10.5334/jors.236)') comments = TextAreaField('Comments') submit = SubmitField('Submit') class EnzymeActivationForm(FlaskForm): enzyme = QuerySelectField('Isoenzyme *', query_factory=get_enzymes, validators=[DataRequired()]) reaction = QuerySelectField('Reaction *', query_factory=get_reactions, validators=[DataRequired()]) organism = QuerySelectField('Organism *', query_factory=get_organisms) models = QuerySelectMultipleField('Model(s)', query_factory=get_models, allow_blank=True) activator_met = StringField('Activating metabolite (e.g. 
adp), please use bigg IDs *', validators=[DataRequired()], id='metabolite_list') activation_constant = FloatField('Activation constant (in M)', validators=[Optional()]) activation_evidence_level = QuerySelectField('Activation inhibition evidence level', query_factory=get_evidence_names, allow_blank=True) references = StringField( 'References, please use DOI (e.g. https://doi.org/10.1093/bioinformatics/bty942, http://doi.org/10.5334/jors.236)') comments = TextAreaField('Comments') submit = SubmitField('Submit') class EnzymeEffectorForm(FlaskForm): enzyme = QuerySelectField('Isoenzyme *', query_factory=get_enzymes, validators=[DataRequired()]) reaction = QuerySelectField('Reaction *', query_factory=get_reactions, validators=[DataRequired()]) organism = QuerySelectField('Organism *', query_factory=get_organisms) models = QuerySelectMultipleField('Model(s)', query_factory=get_models, allow_blank=True) effector_met = StringField('Effector metabolite (e.g. adp), please use bigg IDs *', validators=[DataRequired()], id='metabolite_list') effector_type = SelectField('Effector type', choices=[('Activating', 'Activating'), ('Inhibiting', 'Inhibiting')]) effector_evidence_level = QuerySelectField('Effector evidence level', query_factory=get_evidence_names, allow_blank=True) references = StringField( 'References, please use DOI (e.g. https://doi.org/10.1093/bioinformatics/bty942, http://doi.org/10.5334/jors.236)') comments = TextAreaField('Comments') submit = SubmitField('Submit') class EnzymeMiscInfoForm(FlaskForm): enzyme = QuerySelectField('Isoenzyme *', query_factory=get_enzymes, validators=[DataRequired()]) reaction = QuerySelectField('Reaction *', query_factory=get_reactions, validators=[DataRequired()]) organism = QuerySelectField('Organism *', query_factory=get_organisms) models = QuerySelectMultipleField('Model(s)', query_factory=get_models, allow_blank=True) topic = StringField('Topic (e.g. allostery) *', validators=[DataRequired()]) description = TextAreaField('Description *', validators=[DataRequired()]) evidence_level = QuerySelectField('Evidence level', query_factory=get_evidence_names, allow_blank=True) references = StringField( 'References, please use DOI (e.g. https://doi.org/10.1093/bioinformatics/bty942, http://doi.org/10.5334/jors.236)') comments = TextAreaField('Comments') submit = SubmitField('Submit') class GeneForm(FlaskForm): name = StringField('Gene name (e.g. pfkA) *', validators=[DataRequired()]) submit = SubmitField('Submit') class MetaboliteForm(FlaskForm): def __init__(self, data=None, flag='insert'): FlaskForm.__init__(self, data=data) self.local_data = data self.flag = flag grasp_id = StringField('Grasp ID (will be shown on the model excel file) *', validators=[DataRequired()]) name = StringField('Name (e.g. pyruvate)') bigg_id = StringField('Bigg ID (e.g. pyr) *', validators=[DataRequired()]) metanetx_id = StringField('MetaNetX ID') compartments = QuerySelectMultipleField('Compartments', query_factory=get_compartments, allow_blank=True) chebi_ids = StringField('ChEBI IDs (e.g. CHEBI:86354, CHEBI:8685)') inchis = StringField( 'InChIs (e.g. 
InChI=1S/C3H4O3/c1-2(4)3(5)6/h4H,1H2,(H,5,6), InChI=1S/C3H4O4/c1-2(4)3(5)6/h4H,1H2,(H,5,6) )') submit = SubmitField('Submit') def validate_grasp_id(self, grasp_id): if self.flag != 'modify' or grasp_id.data != self.local_data['grasp_id']: metabolite_list = Metabolite.query.all() metabolite_list = set([enzyme.grasp_id for enzyme in metabolite_list]) if metabolite_list else {} if grasp_id.data in metabolite_list: raise ValidationError( 'The metabolite grasp id you specified already exists. Please choose a different one.') def validate_bigg_id(self, bigg_id): if self.flag != 'modify' or bigg_id.data != self.local_data['bigg_id']: metabolite_list = Metabolite.query.all() metabolite_list = set([enzyme.bigg_id for enzyme in metabolite_list]) if metabolite_list else {} if bigg_id.data in metabolite_list: raise ValidationError( 'The metabolite bigg id you specified already exists. Please choose a different one.') def validate_inchis(self, inchis): chebi_list = parse_input_list(self.chebi_ids.data) inchi_list = parse_input_list(inchis.data, False) if len(inchi_list) != len(chebi_list): raise ValidationError( 'The list of ChEBI ids and InChIs should have the same length. Also make sure you separated each value with a comma.') class ModelAssumptionsForm(FlaskForm): model = QuerySelectField('Model *', query_factory=get_models) assumption = StringField('Assumption *', validators=[DataRequired()]) description = TextAreaField('Description *', validators=[DataRequired()]) evidence_level = QuerySelectField('Evidence level', query_factory=get_evidence_names, allow_blank=True) included_in_model = SelectField('Is this assumption included in the model?', choices=[('True', 'True'), ('False', 'False')]) references = StringField( 'References, please use DOI (e.g. https://doi.org/10.1093/bioinformatics/bty942, http://doi.org/10.5334/jors.236)') comments = TextAreaField('Comments') submit = SubmitField('Submit') class ModelForm(FlaskForm): name = StringField('Model name (e.g. E coli - iteration 1) *', validators=[DataRequired()]) organism_name = StringField('Organism name (e.g. E coli) *', validators=[DataRequired()], id='organism_name') strain = StringField('Organism strain (e.g. MG1655)') enz_rxn_orgs = QuerySelectMultipleField('Reactions in the model', query_factory=get_enzyme_reaction_organisms, allow_blank=True) comments = TextAreaField('Comments') submit = SubmitField('Submit') def validate_name(self, name): model_db = Model.query.filter_by(name=name.data).first() if model_db: raise ValidationError('A model with that name already exists, please use another name') class ModelModifyForm(FlaskForm): def __init__(self, data): FlaskForm.__init__(self, data=data) self.local_data = data name = StringField('Model name (e.g. E coli - iteration 1) *', validators=[DataRequired()]) organism_name = StringField('Organism name (e.g. E coli) *', validators=[DataRequired()], id='organism_name') strain = StringField('Organism strain (e.g. 
MG1655)') enz_rxn_orgs = QuerySelectMultipleField('Reactions', query_factory=get_enzyme_reaction_organisms, allow_blank=True) model_inhibitions = QuerySelectMultipleField('Enzyme inhibitions', query_factory=get_enzyme_inhibitions, allow_blank=True) model_activations = QuerySelectMultipleField('Enzyme activations', query_factory=get_enzyme_activations, allow_blank=True) model_effectors = QuerySelectMultipleField('Enzyme effectors', query_factory=get_enzyme_effectors, allow_blank=True) model_misc_infos = QuerySelectMultipleField('Enzyme misc info', query_factory=get_enzyme_misc_infos, allow_blank=True) model_assumptions = QuerySelectMultipleField('Model assumptions', query_factory=get_model_assumptions, allow_blank=True) comments = TextAreaField('Comments') submit = SubmitField('Submit') def validate_name(self, name): if name.data != self.local_data['name']: model_db = Model.query.filter_by(name=name.data).first() if model_db: raise ValidationError( 'A model with that name already exists, please use either the original name or another one.') class ModelFormBase(FlaskForm): model_base = QuerySelectField('Models', query_factory=get_models, allow_blank=True) submit = SubmitField('Continue') class OrganismForm(FlaskForm): name = StringField('Organism name (e.g. E coli) *', validators=[DataRequired()]) submit = SubmitField('Submit') def validate_name(self, name): organism_db = Organism.query.filter_by(name=name.data).first() if organism_db: raise ValidationError('An organism with that name already exists, please use another name') class ReactionForm(FlaskForm): def __init__(self, data=None, flag='insert'): FlaskForm.__init__(self, data=data) self.local_data = data self.flag = flag name = StringField('Reaction name (e.g. phosphofructokinase) *', validators=[DataRequired()]) acronym = StringField('Reaction acronym (e.g. PFK) *', validators=[DataRequired()]) grasp_id = StringField('GRASP ID (e.g. PFK1) *', validators=[DataRequired()]) reaction_string = StringField('Reaction string, use Bigg IDs (e.g. 1 pep_c + 1.5 adp_c <-> 1 pyr_c + 2.0 atp_m) *', validators=[DataRequired()]) metanetx_id = StringField('Metanetx ID') bigg_id = StringField('Bigg ID') kegg_id = StringField('Kegg ID') compartment = QuerySelectField('Compartment name', query_factory=get_compartments, allow_blank=True) organism = QuerySelectField('Organism name *', query_factory=get_organisms) models = QuerySelectMultipleField('Model name', query_factory=get_models, allow_blank=True) enzymes = QuerySelectMultipleField('Isoenzyme(s) that catalyze the reaction *', query_factory=get_enzymes, validators=[DataRequired()]) mechanism = QuerySelectField( 'Enzyme mechanism name (if you add the mechanism, you also need to add the isoenzyme(s) that catalyze the reaction)', query_factory=get_mechanisms, allow_blank=True) mechanism_references = StringField( 'DOI for mechanism references (e.g. https://doi.org/10.1093/bioinformatics/bty942, http://doi.org/10.5334/jors.236) ') mechanism_evidence_level = QuerySelectField('Enzyme mechanism evidence level', query_factory=get_evidence_names, allow_blank=True) subs_binding_order = StringField('Substrate binding order (e.g. adp_c, pep_c)') prod_release_order = StringField('Product release order (e.g. 
atp_c, pyr_c)')
    std_gibbs_energy = FloatField('Standard Gibbs energy (in kJ/mol)', validators=[Optional()])
    std_gibbs_energy_std = FloatField('Standard Gibbs energy standard deviation (in kJ/mol)', validators=[Optional()])
    std_gibbs_energy_ph = FloatField('pH for Gibbs energy', validators=[Optional()])
    std_gibbs_energy_ionic_strength = FloatField('Ionic strength for Gibbs energy', validators=[Optional()])
    std_gibbs_energy_references = StringField(
        'Reference for Gibbs energy (if it is equilibrator just type equilibrator, otherwise use DOI, '
        'https://doi.org/10.1093/bioinformatics/bty942, http://doi.org/10.5334/jors.236)')
    comments = TextAreaField('Comments')
    submit = SubmitField('Submit')

    def validate_enzymes(self, enzymes):
        if self.flag == 'modify':
            if len(enzymes.data) > 1:
                raise ValidationError('Please select one and only one isoenzyme.')

    def validate_reaction_string(self, reaction_string):
        reversible, stoichiometry = ReactionParser().parse_reaction(reaction_string.data)
        # e.g. (True, OrderedDict([('m_pep_c', -1.0), ('m_adp_c', -1.5), ('m_pyr_c', 1.0), ('m_atp_m', 2.0)]))
        for met, stoich_coef in stoichiometry.items():
            met_compartment = re.findall(r'(\w+)_(\w+)', met)
            if not met_compartment:
                raise ValidationError(
                    'Please specify the metabolite ' + met + ' as metabolite_compartmentAcronym, e.g. adp_c.')

            compartment_db = Compartment.query.filter_by(bigg_id=met_compartment[0][1]).first()
            if not compartment_db:
                raise ValidationError('The specified compartment bigg_acronym ' + met_compartment[0][1] +
                                      ' is not part of the database, please insert it first.')

    def validate_mechanism(self, mechanism):
        if mechanism.data and not self.enzymes.data:
            raise ValidationError('If you add a reaction mechanism, you need to specify the catalyzing isoenzyme(s).')

    def validate_mechanism_evidence_level(self, mechanism_evidence_level):
        if mechanism_evidence_level.data and not self.mechanism.data:
            raise ValidationError('You cannot specify evidence level for the mechanism without specifying a mechanism.')

    def validate_subs_binding_order(self, subs_binding_order):
        if subs_binding_order.data and not self.enzymes.data:
            raise ValidationError(
                'If you add a substrate binding order, you also need to specify the catalyzing isoenzyme(s).')

        substrate_list = parse_input_list(subs_binding_order.data)
        for substrate in substrate_list:
            if self.reaction_string.data.find(substrate) == -1:
                raise ValidationError(
                    'The metabolite ' + substrate + ' does not match any metabolite in ' +
                    self.reaction_string.data + '.')

    def validate_prod_release_order(self, prod_release_order):
        if prod_release_order.data and not self.enzymes.data:
            raise ValidationError(
                'If you add a product release order, you also need to specify the catalyzing isoenzyme(s).')

        product_list = parse_input_list(prod_release_order.data)
        for product in product_list:
            if self.reaction_string.data.find(product) == -1:
                raise ValidationError(
                    'The metabolite ' + product + ' does not match any metabolite in ' +
                    self.reaction_string.data + '.')

    def validate_std_gibbs_energy_std(self, std_gibbs_energy_std):
        if self.std_gibbs_energy.data and not self.models.data:
            raise ValidationError(
                'Gibbs energies cannot be added to reactions alone, a model must be associated as well. '
                'Please add model name.')

        if std_gibbs_energy_std.data and not self.std_gibbs_energy.data:
            raise ValidationError('Please specify the standard Gibbs energy as well.')

    def validate_std_gibbs_energy_ph(self, std_gibbs_energy_ph):
        if std_gibbs_energy_ph.data and not self.std_gibbs_energy.data:
            raise ValidationError('Please specify the standard Gibbs energy as well.')

    def validate_std_gibbs_energy_ionic_strength(self, std_gibbs_energy_ionic_strength):
        if std_gibbs_energy_ionic_strength.data and not self.std_gibbs_energy.data:
            raise ValidationError('Please specify the standard Gibbs energy as well.')

    def validate_std_gibbs_energy_references(self, std_gibbs_energy_references):
        if self.std_gibbs_energy.data and not std_gibbs_energy_references.data:
            raise ValidationError('Please specify the reference for the above standard Gibbs energy.')

        if std_gibbs_energy_references.data and not self.std_gibbs_energy.data:
            raise ValidationError('Please specify the standard Gibbs energy as well.')


class ModifyDataForm(FlaskForm):
    submit = SubmitField('Modify')


class SelectOrganismForm(FlaskForm):
    organism = QuerySelectField('Organisms (leave blank if you only want to change the enzyme info).',
                                query_factory=get_organisms, allow_blank=True)
    submit = SubmitField('Continue')


class SelectIsoenzymeForm(FlaskForm):
    enzyme = QuerySelectMultipleField('Isoenzyme that catalyzes the reaction (select only one) *',
                                      query_factory=get_enzymes, validators=[DataRequired()])
    submit = SubmitField('Continue')

    def validate_enzyme(self, enzyme):
        if len(enzyme.data) != 1:
            raise ValidationError('Please select one and only one isoenzyme.')


class SelectModelForm(FlaskForm):
    model = QuerySelectMultipleField('Model name *', query_factory=get_models, validators=[DataRequired()])
    submit = SubmitField('Continue')

    def validate_model(self, model):
        if len(model.data) > 1:
            raise ValidationError('Please select only one model.')


class UploadModelForm(FlaskForm):
    organism = QuerySelectField('Organism', query_factory=get_organisms, validators=[DataRequired()])
    model = FileField('Model', validators=[FileRequired()])
    submit = SubmitField('Submit')

    # def validate_model(self, model):
    #     TODO before enabling this validator:
    #     - make sure all the sheets are there and are not empty
    #     - make sure the enzyme_Reaction columns have the correct names
    #     - make sure the kinetics1 sheet has the right column names
    #     - validate the model name
    #     return 0
2.421875
2
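The validate_reaction_string check above couples metabolite parsing to the database. A minimal, self-contained sketch of the same metabolite/compartment naming convention, with a hypothetical set of known compartment acronyms standing in for the Compartment table:

import re

KNOWN_COMPARTMENTS = {"c", "m", "p", "e"}  # hypothetical stand-in for the Compartment table

def check_metabolite_compartments(metabolites):
    """Return error messages for metabolites not written as <bigg_id>_<compartment>."""
    errors = []
    for met in metabolites:
        match = re.fullmatch(r"(\w+)_(\w+)", met)
        if not match:
            errors.append(f"Please specify the metabolite {met} as metabolite_compartmentAcronym, e.g. adp_c.")
        elif match.group(2) not in KNOWN_COMPARTMENTS:
            errors.append(f"The compartment acronym {match.group(2)} is not in the database, please insert it first.")
    return errors

print(check_metabolite_compartments(["adp_c", "pyr", "atp_x"]))  # one valid metabolite, two error messages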
pysrc/labstreaminglayer_ros/Converter/TransformStamped.py
guthom/labstreaminglayer_ros
0
12792070
<gh_stars>0
from Transform import Transform
from labstreaminglayer_ros.msg import LSLTransformStamped as message
from geometry_msgs.msg import TransformStamped as stdmessage


class TransformStamped(Transform):

    def __init__(self):
        super(Transform, self).__init__(
            commonType="TransformStamped",
            rosType=message,
            rosStdType=stdmessage,
            lslChannels=7,
            lslType="float32"
        )

    def ToLSL(self, data):
        # Flatten the stamped transform into 7 float channels:
        # translation x, y, z followed by quaternion x, y, z, w.
        translation = data.transform.translation
        rotation = data.transform.rotation
        return [translation.x, translation.y, translation.z,
                rotation.x, rotation.y, rotation.z, rotation.w]
2.140625
2
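A self-contained illustration of the 7-channel flattening used by ToLSL above; SimpleNamespace objects stand in for the ROS TransformStamped message so the snippet runs without ROS installed.

from types import SimpleNamespace

def transform_to_lsl(msg):
    # Same channel order as ToLSL: translation x, y, z, then quaternion x, y, z, w.
    t, r = msg.transform.translation, msg.transform.rotation
    return [t.x, t.y, t.z, r.x, r.y, r.z, r.w]

msg = SimpleNamespace(transform=SimpleNamespace(
    translation=SimpleNamespace(x=1.0, y=2.0, z=0.5),
    rotation=SimpleNamespace(x=0.0, y=0.0, z=0.0, w=1.0)))
print(transform_to_lsl(msg))  # [1.0, 2.0, 0.5, 0.0, 0.0, 0.0, 1.0]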
main.py
arvinzhang815/jd-assistant
2
12792071
<reponame>arvinzhang815/jd-assistant<filename>main.py<gh_stars>1-10 #!/usr/bin/env python # -*- coding:utf-8 -*- from jd_assistant import Assistant if __name__ == '__main__': asst = Assistant() asst.login_by_QRcode() # 扫码登陆 asst.clear_cart() # 清空购物车 asst.add_item_to_cart(sku_id='1626336666') # 添加到购物车 # 3种订单提交方式: # 1.直接提交订单 # asst.submit_order() # 2.有货时提交订单 asst.submit_order_by_stock(sku_id='1626336666', area='1_2802_2821') # 监控的商品id和地址id # 3.定时提交订单 # asst.submit_order_by_time(buy_time='2018-10-19 00:00:00.500', retry=3, interval=5) asst.get_order_info(unpaid=False) # 查询未付款订单 """ 输出实例: [2018-10-19 02:38:58] 登录成功 [2018-10-19 02:38:58] 购物车清空成功 [2018-10-19 02:38:58] 1626336666已成功加入购物车 [2018-10-19 02:38:59] 1626336666有货了,正在提交订单…… [2018-10-19 02:38:59] 订单提交成功! 订单号:811545xxxxx ************************订单列表页查询************************ 订单号:811545xxxxx----下单时间:2018-10-19 02:38:59----商品列表:1626336666 x 1----订单状态:等待付款----总金额:89.90元----付款方式:在线支付 """
2.109375
2
api/dataloader.py
clutso/sampleAPI
0
12792072
from datahandler import ConnData
import pandas
import os

# influx
import influxdb_client
from influxdb_client.client.write_api import SYNCHRONOUS


class DataLoader(ConnData):

    def init(self, fileName):
        """Load a CSV file and write each row as a point to InfluxDB."""
        data = pandas.read_csv(fileName)
        write_api = self.influx_client.write_api(write_options=SYNCHRONOUS)
        for i in range(len(data)):
            try:
                p = influxdb_client.Point("test_Mote")\
                    .tag("type", "multi_sensor_dev") \
                    .field("power", data["power"][i])\
                    .field("temp", data["temp"][i])\
                    .field("humidity", data["humidity"][i])\
                    .field("light", data["light"][i])\
                    .field("CO2", data["CO2"][i])\
                    .field("dust", data["dust"][i])\
                    .time(data["time"][i])
                write_api.write(bucket=self.influx_bucket, org=self.influx_org, record=p)
            except Exception:
                # A malformed row should not abort the whole import.
                print("failed at entry: " + str(i))
                continue
        return True
2.546875
3
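A hedged sketch of what one CSV row becomes in InfluxDB line protocol, mirroring the measurement, tag, and field names used by the loader above (assumes the influxdb-client package; the row values are made up).

import influxdb_client

row = {"time": "2021-01-01T00:00:00Z", "power": 12.5, "temp": 21.0,
       "humidity": 40.2, "light": 300.0, "CO2": 415.0, "dust": 8.1}

point = influxdb_client.Point("test_Mote").tag("type", "multi_sensor_dev").time(row["time"])
for field in ("power", "temp", "humidity", "light", "CO2", "dust"):
    point = point.field(field, row[field])

print(point.to_line_protocol())  # inspect the encoding without writing to a bucket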
02. Defining Classes - Exercise/guild_system_07/project/guild.py
elenaborisova/Python-OOP
1
12792073
<gh_stars>1-10 from guild_system_07.project.player import Player class Guild: def __init__(self, name: str): self.name = name self.players: list = [] def assign_player(self, player: Player): if player in self.players: return f"Player {player.name} is already in the guild." if not player.guild == "Unaffiliated": return f"Player {player.name} is in another guild." self.players.append(player) player.guild = self.name return f"Welcome player {player.name} to the guild {self.name}" def kick_player(self, player_name: str): if player_name not in [player.name for player in self.players]: return f"Player {player_name} is not in the guild." player = [player for player in self.players if player.name == player_name][0] self.players.remove(player) return f"Player {player.name} has been removed from the guild." def guild_info(self): players_details = "".join([player.player_info() for player in self.players]) return f"Guild: {self.name}\n" \ f"{players_details}" player = Player("George", 50, 100) print(player.add_skill("Shield Break", 20)) print(player.player_info()) guild = Guild("UGT") print(guild.assign_player(player)) print(guild.guild_info())
3.359375
3
test/test_get_transaction_details_by_transaction_id_response_item_blockchain_specific.py
xan187/Crypto_APIs_2.0_SDK_Python
0
12792074
<gh_stars>0 """ CryptoAPIs Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501 The version of the OpenAPI document: 2.0.0 Contact: <EMAIL> Generated by: https://openapi-generator.tech """ import sys import unittest import cryptoapis from cryptoapis.model.get_transaction_details_by_transaction_id_response_item_blockchain_specific_bitcoin import GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificBitcoin from cryptoapis.model.get_transaction_details_by_transaction_id_response_item_blockchain_specific_bitcoin_cash import GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificBitcoinCash from cryptoapis.model.get_transaction_details_by_transaction_id_response_item_blockchain_specific_dash import GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificDash from cryptoapis.model.get_transaction_details_by_transaction_id_response_item_blockchain_specific_dash_vin import GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificDashVin from cryptoapis.model.get_transaction_details_by_transaction_id_response_item_blockchain_specific_dash_vout import GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificDashVout from cryptoapis.model.get_transaction_details_by_transaction_id_response_item_blockchain_specific_dogecoin import GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificDogecoin from cryptoapis.model.get_transaction_details_by_transaction_id_response_item_blockchain_specific_ethereum import GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificEthereum from cryptoapis.model.get_transaction_details_by_transaction_id_response_item_blockchain_specific_ethereum_classic import GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificEthereumClassic from cryptoapis.model.get_transaction_details_by_transaction_id_response_item_blockchain_specific_ethereum_classic_gas_price import GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificEthereumClassicGasPrice from cryptoapis.model.get_transaction_details_by_transaction_id_response_item_blockchain_specific_litecoin import GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificLitecoin globals()['GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificBitcoin'] = GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificBitcoin globals()['GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificBitcoinCash'] = GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificBitcoinCash globals()['GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificDash'] = GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificDash globals()['GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificDashVin'] = GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificDashVin globals()['GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificDashVout'] = GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificDashVout globals()['GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificDogecoin'] = 
GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificDogecoin globals()['GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificEthereum'] = GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificEthereum globals()['GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificEthereumClassic'] = GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificEthereumClassic globals()['GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificEthereumClassicGasPrice'] = GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificEthereumClassicGasPrice globals()['GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificLitecoin'] = GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecificLitecoin from cryptoapis.model.get_transaction_details_by_transaction_id_response_item_blockchain_specific import GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecific class TestGetTransactionDetailsByTransactionIDResponseItemBlockchainSpecific(unittest.TestCase): """GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecific unit test stubs""" def setUp(self): pass def tearDown(self): pass def testGetTransactionDetailsByTransactionIDResponseItemBlockchainSpecific(self): """Test GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecific""" # FIXME: construct object with mandatory attributes with example values # model = GetTransactionDetailsByTransactionIDResponseItemBlockchainSpecific() # noqa: E501 pass if __name__ == '__main__': unittest.main()
1.585938
2
src/a_tuin/metadata/field_group.py
gordon-elliott/glod
0
12792075
<gh_stars>0 __copyright__ = 'Copyright(c) <NAME> 2017' """ """ import logging from collections import OrderedDict from operator import getitem, setitem from a_tuin.metadata.exceptions import FieldAssignmentError, field_errors_check, DATA_LOAD_ERRORS from a_tuin.metadata.field_transformations import copy_field LOG = logging.getLogger(__name__) class FieldGroup(object): def __init__(self, fields, container_type): self._fields = fields self._container_type = container_type def __iter__(self): self._current_field_index = 0 return self def __next__(self): if self._current_field_index >= len(self._fields): raise StopIteration result = self._fields[self._current_field_index] self._current_field_index += 1 return result def __len__(self): return len(self._fields) def __getitem__(self, key): for field in self._fields: if field.name == key: return field raise KeyError('{} not found.'.format(key)) def __setitem__(self, key, replacement_field): assert key == replacement_field.name, 'Key does not match field name.' # replace field for i, field in enumerate(self._fields): if field.name == key: self._fields[i] = replacement_field break def derive(self, transformation=None, field_group_class=None): transformation = copy_field if transformation is None else transformation field_group_class = field_group_class if field_group_class else self.__class__ return field_group_class( [ field.derive(transformation) for field in self._fields ] ) def fill_instance_from_dict(self, input_dict): raise NotImplementedError def update_instance_from_dict(self, instance, input_dict): raise NotImplementedError def iterate_instance(self, instance, per_field_map): with field_errors_check() as errors: for field, destination_field in per_field_map.items(): try: yield field, self._get_value(instance, field), destination_field except FieldAssignmentError as field_error: errors.append(field_error) except Exception as ex: errors.append(FieldAssignmentError(field, ex)) def _get_field_index(self, field): for i, item in enumerate(self._fields): if item == field: return i, item return None, None def _type_cast(self, input_dict): cast_values = [] with field_errors_check() as errors: for field in self._fields: try: if field.name in input_dict: raw_value = input_dict[field.name] else: raw_value = field.get_value(self, input_dict) cast_values.append((field.name, field.type_cast(raw_value))) except FieldAssignmentError as fae: errors.append(fae) except DATA_LOAD_ERRORS as ex: errors.append(FieldAssignmentError(field, ex)) return OrderedDict(cast_values) def _group_get_value(self, instance, field): raise NotImplementedError def _get_value(self, instance, field): if field.is_computed: return field.get_value(self, instance) else: return self._group_get_value(instance, field) def _accessor(self, instance, key): raise NotImplementedError # # Used in unittests # def as_dict(self, instance): return { field.name: self._get_value(instance, field) for field in self._fields } def instances_differ(self, instance, other): return any( self._get_value(instance, field) != self._get_value(other, field) for field in self._fields ) class SequenceFieldGroup(FieldGroup): def fill_instance_from_dict(self, input_dict): values_in_order = self._type_cast(input_dict) return self._container_type(values_in_order.values()) def _accessor(self, instance, key): return getitem(instance, key) def _group_get_value(self, instance, field): index, _ = self._get_field_index(field) return self._accessor(instance, index) class TupleFieldGroup(SequenceFieldGroup): def __init__(self, 
fields): super().__init__(fields, tuple) class MutableSequenceFieldGroup(FieldGroup): def update_instance_from_dict(self, instance, input_dict): with field_errors_check() as errors: for field_name, value in input_dict.items(): field = self[field_name] try: self.set_value(instance, field, value) except FieldAssignmentError as fae: if isinstance(fae.original_exception, AttributeError): LOG.warning('Unable to assign {} to {}'.format(value, field_name)) else: errors[field] = fae def set_value(self, instance, field, value): raise NotImplementedError def _mutator(self, instance, key, value): raise NotImplementedError def empty_instance(self): return self._container_type() class ListFieldGroup(MutableSequenceFieldGroup, SequenceFieldGroup): def __init__(self, fields): super().__init__(fields, list) def set_value(self, instance, field, value): try: if not field.is_computed: value = field.prepare_value(value) index, _ = self._get_field_index(field) return self._mutator(instance, index, value) except FieldAssignmentError: raise except Exception as ex: raise FieldAssignmentError(field, ex) def _mutator(self, instance, key, value): return setitem(instance, key, value) def empty_instance(self): return [None] * len(self) class DictFieldGroup(MutableSequenceFieldGroup): def __init__(self, fields, container_type=None): container_type = dict if not container_type else container_type super().__init__(fields, container_type) def fill_instance_from_dict(self, input_dict): return self._container_type(self._type_cast(input_dict)) def _group_get_value(self, instance, field): return self._accessor(instance, field.name) def _accessor(self, instance, key): return getitem(instance, key) def set_value(self, instance, field, value): try: if not field.is_computed: value = field.prepare_value(value) return self._mutator(instance, field.name, value) except FieldAssignmentError: raise except Exception as ex: raise FieldAssignmentError(field, ex) def _mutator(self, instance, key, value): return setitem(instance, key, value) class ObjectFieldGroup(MutableSequenceFieldGroup): def fill_instance_from_dict(self, input_dict): return self._container_type(**self._type_cast(input_dict)) def _group_get_value(self, instance, field): return self._accessor(instance, field.name) def _accessor(self, instance, key): return getattr(instance, key) def set_value(self, instance, field, value): try: if not field.is_computed: value = field.prepare_value(value) return self._mutator(instance, field.name, value) except FieldAssignmentError: raise except Exception as ex: raise FieldAssignmentError(field, ex) def _mutator(self, instance, key, value): return setattr(instance, key, value) class PartialDictFieldGroup(DictFieldGroup): def __init__(self, fields): super().__init__(fields, OrderedDict) def iterate_instance(self, instance, per_field_map): with field_errors_check() as errors: for fieldname, value in instance.items(): field = self[fieldname] try: yield field, value, per_field_map.get(field) except FieldAssignmentError as fae: errors[field] = fae def _type_cast(self, input_dict): field_and_value = ( (fieldname, self[fieldname], value) for fieldname, value in input_dict.items() ) key_values = [] with field_errors_check() as errors: for fieldname, field, value in field_and_value: try: key_values.append((fieldname, field.type_cast(value))) except FieldAssignmentError as fae: errors[field] = fae for field in self._fields: try: computed_value = field.get_value(self, input_dict) key_values.append((field.name, computed_value)) except KeyError: pass 
return OrderedDict(key_values) def as_dict(self, instance): return OrderedDict( (field.name, self._get_value(instance, field)) for field in self._fields if field.name in instance ) def instances_differ(self, instance, other): return any( self._get_value(instance, field) != self._get_value(other, field) for field in self._fields if field.name in instance and field.name in other )
2.078125
2
cwt_vs_STFT.py
mn270/Human-Activity-Recognize-HARdataset-
0
12792076
""" Show differences between WT and STFT """ from scipy import signal import matplotlib.pyplot as plt import numpy as np import pywt waveletname = 'morl' scales = range(1,200) t = np.linspace(-1, 1, 200, endpoint=False) sig = np.cos(2 * np.pi * 7 * t) + signal.gausspulse(t - 0.4, fc=2) t = np.linspace(-1, 1, 50, endpoint=False) sig1 = np.sin(2 * np.pi * 16 * t)+100*np.sin(2 * np.pi *0.1 * t) for i in range(50): sig[50+i] = sig1[i] + sig[50+i] coeff, freq = pywt.cwt(sig, scales, waveletname, 1) t = np.linspace(0, 200, 200, endpoint=False) plt.plot(t,sig,color='k') plt.title('Transformed signal') plt.ylabel('Amplitude') plt.xlabel('t [s]') plt.figure() plt.pcolormesh(coeff, cmap='plasma') plt.title('Wavelet Transform (Morlett kernel)') plt.ylabel('f [Hz]') plt.xlabel('t [s]') f, t, Zxx = signal.stft(sig, fs=400,nperseg = 8) t = t*400 plt.figure() plt.pcolormesh(t, f, np.abs(Zxx), cmap='plasma') plt.title('Short Time Fourier Transform (STFT)') plt.ylabel('f [Hz]') plt.xlabel('t [s]') plt.show()
2.59375
3
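One detail the comparison above glosses over: pywt.cwt returns coefficients indexed by scale, so the scaleogram's y-axis is in scales, not Hz. A small sketch, assuming the same Morlet wavelet and a nominal sampling rate, converts scales to approximate frequencies:

import numpy as np
import pywt

fs = 100.0                       # assumed sampling rate in Hz
scales = np.arange(1, 200)
# Pseudo-frequency of each scale for the Morlet wavelet, in Hz.
freqs = np.array([pywt.scale2frequency('morl', s) for s in scales]) * fs
print(freqs[:3], freqs[-3:])     # small scales -> high frequencies, large scales -> low frequencies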
linear_algebra/exercise1.py
coherent17/physics_calculation
1
12792077
import math import numpy as np #1-a def function1(): value=0 for i in range(1,1000+1): value+=i return value #1-b def function2(m): value=0 for i in range(1,m+1): value+=i return value #2 def function3(): value=0 for i in range(1,100+1): value+=math.sqrt(i*math.pi/100)*math.sin(i*math.pi/100) return value print(function1()) print(function2(1000)) print(function3()) # 500500 # 500500 # 77.51389798916512 #oriented object programming class physic_calculation: def __init__(self): pass def function_a(self): value1=0 for i in range(1,1000+1): value1+=i return value1 def function_b(self,m): self.m=m value2=0 for i in range(1,self.m+1): value2+=i return value2 def function_c(self): value3=0 for i in range(1,100+1): value3+=math.sqrt(i*math.pi/100)*math.sin(i*math.pi/100) return value3 pc=physic_calculation() print("---------------OOP----------------") print(pc.function_a()) print(pc.function_b(1000)) print(pc.function_c()) # 500500 # 500500 # 77.51389798916512 print("---------------numpy----------------") a=np.arange(1,26).reshape(5,5) print(a) # [[ 1 2 3 4 5] # [ 6 7 8 9 10] # [11 12 13 14 15] # [16 17 18 19 20] # [21 22 23 24 25]]
3.59375
4
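The three loops above can also be written as vectorized NumPy one-liners that reproduce the same values:

import numpy as np

print(np.arange(1, 1001).sum())            # 500500, same as function1 / function2(1000)
m = 1000
print(m * (m + 1) // 2)                    # closed form, also 500500
x = np.arange(1, 101) * np.pi / 100
print(np.sum(np.sqrt(x) * np.sin(x)))      # ≈ 77.51389798916512, same as function3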
test/add_test_contact_new.py
Yevhen-Code/Python_Sample
0
12792078
<gh_stars>0 # -*- coding: utf-8 -*- from model.contact import Contact def test_add_contact(app): app.session.login(username="admin", password="<PASSWORD>") app.contact.create_contact( Contact(name="Yevhen", lastname="Hurtovyi", company="Sidley", address="Chicago", mobile="7733311608", email="<EMAIL>")) app.session.logout() def test_add_digits_contact(app): app.session.login(username="admin", password="<PASSWORD>") app.contact.create_contact(Contact(name="1", lastname="2", company="3", address="4", mobile="5", email="6")) app.session.logout()
2.203125
2
packages/bazel/src/utils/webpack.bzl
jeffbcross/nx
0
12792079
<filename>packages/bazel/src/utils/webpack.bzl def _collect_es5_sources_impl(target, ctx): result = set() if hasattr(ctx.rule.attr, "srcs"): for dep in ctx.rule.attr.srcs: if hasattr(dep, "es5_sources"): result += dep.es5_sources if hasattr(target, "typescript"): result += target.typescript.es5_sources return struct(es5_sources = result) _collect_es5_sources = aspect( _collect_es5_sources_impl, attr_aspects = ["deps", "srcs"], ) def _webpack_bundle_impl(ctx): inputs = set() for s in ctx.attr.srcs: if hasattr(s, "es5_sources"): inputs += s.es5_sources config = ctx.attr.config.files.to_list()[0] if ctx.attr.mode == 'prod': main = ctx.new_file('bundles/main.bundle.prod.js') polyfills = ctx.new_file('bundles/polyfills.bundle.prod.js') vendor = ctx.new_file('bundles/vendor.bundle.prod.js') styles = ctx.new_file('bundles/styles.bundle.prod.js') else: main = ctx.new_file('bundles/main.bundle.js') polyfills = ctx.new_file('bundles/polyfills.bundle.js') vendor = ctx.new_file('bundles/vendor.bundle.js') styles = ctx.new_file('bundles/styles.bundle.js') inputs += [config] args = [] if ctx.attr.mode == 'prod': args += ['-p'] args += ['--config', config.path] args += ['--env.bin_dir', ctx.configuration.bin_dir.path] args += ['--env.package', ctx.label.package] args += ['--env.mode', ctx.attr.mode] ctx.action( progress_message = "Webpack bundling %s" % ctx.label, inputs = inputs.to_list(), outputs = [main, polyfills, vendor, styles], executable = ctx.executable._webpack, arguments = args, ) return DefaultInfo(files=depset([main, polyfills, vendor, styles])) webpack_bundle = rule(implementation = _webpack_bundle_impl, attrs = { "srcs": attr.label_list(allow_files=True, aspects=[_collect_es5_sources]), "config": attr.label(allow_single_file=True, mandatory=True), "mode": attr.string(default="dev"), "_webpack": attr.label(default=Label("@nrwl//:webpack"), executable=True, cfg="host") } )
1.78125
2
simplemotionpiano.py
arpruss/motionpiano
1
12792080
<reponame>arpruss/motionpiano<filename>simplemotionpiano.py import time, cv2 import numpy as np import rtmidi NOTES = [ 60, 62, 64, 65, 67, 69, 71, 72, 74 ] # , 76, 77, 79 ] NOTE_VELOCITY = 127 WINDOW_NAME = "MotionPiano" KEY_HEIGHT = 0.25 RECOGNIZER_WIDTH = 500 KERNEL_SIZE = 0.042 RESET_TIME = 5 SAVE_CHECK_TIME = 1 THRESHOLD = 25 COMPARISON_VALUE = 128 numKeys = len(NOTES) playing = numKeys * [False] midiout = rtmidi.MidiOut() assert(midiout.get_ports()) portNumber = 0 if len(midiout.get_ports()) == 1 or 'through' not in str(midiout.get_ports()[0]).lower() else 1 midiout.open_port(portNumber) video = cv2.VideoCapture(0) frameWidth = int(video.get(cv2.CAP_PROP_FRAME_WIDTH)) frameHeight = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT)) if RECOGNIZER_WIDTH >= frameWidth: scaledWidth = frameWidth scaledHeight = frameHeight else: aspect = frameWidth / frameHeight scaledWidth = RECOGNIZER_WIDTH scaledHeight = int(RECOGNIZER_WIDTH / aspect) kernelSize = 2*int(KERNEL_SIZE*scaledWidth/2)+1 blankOverlay = np.zeros((frameHeight,frameWidth,3),dtype=np.uint8) cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_AUTOSIZE) cv2.resizeWindow(WINDOW_NAME, frameWidth, frameHeight) scaledRects = [] frameRects = [] for i in range(numKeys): x0 = scaledWidth*i//numKeys x1 = scaledWidth*(i+1)//numKeys-1 r = [(x0,0),(x1,int(KEY_HEIGHT*scaledHeight))] scaledRects.append(r) x0 = frameWidth*i//numKeys x1 = frameWidth*(i+1)//numKeys-1 r = [(x0,0),(x1,int(KEY_HEIGHT*frameHeight))] frameRects.append(r) keysTopLeftFrame = (min(r[0][0] for r in frameRects),min(r[0][1] for r in frameRects)) keysBottomRightFrame = (max(r[1][0] for r in frameRects),max(r[1][1] for r in frameRects)) keysTopLeftScaled = (min(r[0][0] for r in scaledRects),min(r[0][1] for r in scaledRects)) keysBottomRightScaled = (max(r[1][0] for r in scaledRects),max(r[1][1] for r in scaledRects)) keysWidthScaled = keysBottomRightScaled[0]-keysTopLeftScaled[0] keysHeightScaled = keysBottomRightScaled[1]-keysTopLeftScaled[1] keys = np.zeros((keysHeightScaled,keysWidthScaled),dtype=np.uint8) def adjustToKeys(xy): return (xy[0]-keysTopLeftScaled[0],xy[1]-keysTopLeftScaled[1]) for i in range(numKeys): r = scaledRects[i] cv2.rectangle(keys, adjustToKeys(r[0]), adjustToKeys(r[1]), i+1, cv2.FILLED) comparisonFrame = None savedFrame = None savedTime = 0 lastCheckTime = 0 def compare(a,b): return cv2.threshold(cv2.absdiff(a, b), THRESHOLD, COMPARISON_VALUE, cv2.THRESH_BINARY)[1] while True: ok, frame = video.read() if not ok: time.sleep(0.05) continue frame = cv2.flip(frame, 1) keysFrame = frame[keysTopLeftFrame[1]:keysBottomRightFrame[1], keysTopLeftFrame[0]:keysBottomRightFrame[0]] if scaledWidth != frameWidth: keysFrame = cv2.resize(keysFrame, (keysWidthScaled,keysHeightScaled)) keysFrame = cv2.cvtColor(keysFrame, cv2.COLOR_BGR2GRAY) blurred = cv2.GaussianBlur(keysFrame, (kernelSize, kernelSize), 0) t = time.time() save = False if savedFrame is None: save = True lastCheckTime = t else: if t >= lastCheckTime + SAVE_CHECK_TIME: if COMPARISON_VALUE in compare(savedFrame, blurred): print("saving") save = True lastCheckTime = t if t >= savedTime + RESET_TIME: print("resetting") comparisonFrame = blurred save = True if save: savedFrame = blurred savedTime = t if comparisonFrame is None: comparisonFrame = blurred continue delta = compare(comparisonFrame, blurred) sum = keys+delta overlay = blankOverlay.copy() for i in range(numKeys): r = frameRects[i] if 1+i+COMPARISON_VALUE in sum: cv2.rectangle(overlay, r[0], r[1], (255,255,255), cv2.FILLED) if not playing[i]: midiout.send_message([0x90, 
NOTES[i], NOTE_VELOCITY]) playing[i] = True else: if playing[i]: midiout.send_message([0x80, NOTES[i], 0]) playing[i] = False cv2.rectangle(overlay, r[0], r[1], (0,255,0), 2) cv2.imshow(WINDOW_NAME, cv2.addWeighted(frame, 1, overlay, 0.25, 1.0)) if (cv2.waitKey(1) & 0xFF) == 27 or cv2.getWindowProperty(WINDOW_NAME, 0) == -1: break video.release() cv2.destroyAllWindows() del midiout
2.390625
2
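A minimal sketch of the MIDI note-on/note-off pattern used above, assuming the python-rtmidi package; it opens the first available output port (or a virtual one where supported) and plays middle C briefly.

import time
import rtmidi

midiout = rtmidi.MidiOut()
if midiout.get_ports():
    midiout.open_port(0)
else:
    midiout.open_virtual_port("demo")          # not available on all platforms

NOTE, VELOCITY = 60, 127
midiout.send_message([0x90, NOTE, VELOCITY])   # note on, channel 1
time.sleep(0.5)
midiout.send_message([0x80, NOTE, 0])          # note off
del midiout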
notebooks/griffin_lim.py
bigpo/TensorFlowTTS_chinese
0
12792081
<filename>notebooks/griffin_lim.py # %% import glob import tempfile import time import librosa.display import yaml import tensorflow as tf import numpy as np import matplotlib.pyplot as plt from tensorflow_tts.utils import TFGriffinLim, griffin_lim_lb # %config InlineBackend.figure_format = 'svg' # %% [markdown] # Get mel spectrogram example and corresponding ground truth audio. # %% mel_spec = np.load("dump/train/raw-feats/1-raw-feats.npy") gt_wav = np.load("dump/train/wavs/1-wave.npy") stats_path = "dump/stats.npy" dataset_config_path = "preprocess/baker_preprocess.yaml" config = yaml.load(open(dataset_config_path), Loader=yaml.Loader) griffin_lim_tf = TFGriffinLim(stats_path, config) # %% print("mel_spec shape: ", mel_spec.shape) print("gt_wav shape: ", gt_wav.shape) print("config\n", config) # %% [markdown] # TF version has GPU compatibility and supports batch dimension. # %% # inv_wav_tf = griffin_lim_tf(mel_spec[tf.newaxis, :], n_iter=32) # [1, mel_len] -> [1, audio_len] inv_wav_lb = griffin_lim_lb(mel_spec, stats_path, config) # [mel_len] -> [audio_len] # %% np.min(inv_wav_lb) # %% [markdown] # Time comparison between both implementations. # %% get_ipython().run_line_magic('timeit', 'griffin_lim_tf(mel_spec[tf.newaxis, :])') # %% get_ipython().run_line_magic('timeit', 'griffin_lim_lb(mel_spec, stats_path, config)') # %% tf_wav = tf.audio.encode_wav(inv_wav_tf[0, :, tf.newaxis], config["sampling_rate"]) lb_wav = tf.audio.encode_wav(inv_wav_lb[:, tf.newaxis], config["sampling_rate"]) gt_wav_ = tf.audio.encode_wav(gt_wav[:, tf.newaxis], config["sampling_rate"]) # %% items = [ Audio(value=x.numpy(), autoplay=False, loop=False) for x in [gt_wav_, lb_wav, tf_wav] ] labels = [Label("Ground Truth"), Label("Librosa"), Label("TensorFlow")] GridBox( children=[*labels, *items], layout=Layout(grid_template_columns="25% 25% 25%", grid_template_rows="30px 30px"), ) # %% _, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=(10, 8), sharey=True, sharex=True) librosa.display.waveplot(gt_wav, sr=config["sampling_rate"], color="b", ax=ax1) ax1.set_title("Ground truth") ax1.set_xlabel("") librosa.display.waveplot(inv_wav_lb*100, sr=config["sampling_rate"], color="g", ax=ax2) ax2.set_title("Griffin-Lim reconstruction (librosa)") ax2.set_xlabel("") librosa.display.waveplot( inv_wav_tf[0].numpy()*100, sr=config["sampling_rate"], color="r", ax=ax3 ) ax3.set_title("Griffin-Lim reconstruction (TF)"); # %% def gen(): file_list = glob.glob("../dump/train/norm-feats/*-norm-feats.npy") for file in file_list: yield np.load(file) mel_ds = tf.data.Dataset.from_generator( gen, (tf.float32), tf.TensorShape([None, config["num_mels"]]) ).padded_batch(10) for mel_batch in mel_ds.take(5): start_batch = time.perf_counter() inv_wav_tf_batch = griffin_lim_tf(mel_batch) print( f"Iteration time: {time.perf_counter() - start_batch:.4f}s, output shape: {inv_wav_tf_batch.shape}" ) # %% [markdown] # Saving outputs with both implementations. # %% # Single file griffin_lim_lb(mel_spec, stats_path, config, output_dir="../tmp", wav_name="lb") griffin_lim_tf.save_wav(inv_wav_tf, output_dir="../tmp", wav_name="tf") # # Batch files # griffin_lim_tf.save_wav(inv_wav_tf_batch, tempfile.gettempdir(), [x for x in range(10)]) # %ls {tempfile.gettempdir()} | grep '.wav' # %% tempfile.gettempdir() # %%
2.265625
2
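For comparison with the project's TFGriffinLim, a hedged sketch of mel-spectrogram inversion using only librosa (librosa >= 0.7 assumed; the STFT parameters here are made up, the real ones live in the preprocessing config).

import librosa

sr, n_fft, hop_length, n_mels = 22050, 1024, 256, 80   # assumed parameters

# Round-trip a test tone: waveform -> mel power spectrogram -> waveform.
y = librosa.tone(440, sr=sr, length=sr)
mel = librosa.feature.melspectrogram(y=y, sr=sr, n_fft=n_fft,
                                     hop_length=hop_length, n_mels=n_mels)
lin = librosa.feature.inverse.mel_to_stft(mel, sr=sr, n_fft=n_fft)   # approximate linear magnitude
y_inv = librosa.griffinlim(lin, n_iter=32, hop_length=hop_length)
print(y_inv.shape)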
templatedir/nice/objective.py
Kooper95/Shape-optimiser
0
12792082
<filename>templatedir/nice/objective.py
import sys

#if len(sys.argv) == 1:
#    print(200000)
#else:
Powerout = (float(sys.argv[1]) - 298.15) * 3.14159265 * 0.1 * 0.1 * 0.22 / 0.005
#p = 611.21 * 2.718281828 ** ((18.678 - (float(sys.argv[1])-273.15)/234.5)*((float(sys.argv[1])-273.15)/(float(sys.argv[1])-16.01)))
#Powerin = float(sys.argv[2]) * 2256600 * 18.01528 * p/(1000 * 8.31446261815324 * float(sys.argv[1]))
print(Powerout + float(sys.argv[2]))
2.46875
2
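The first term in the objective above has the form of steady-state conduction, Q = k·A·ΔT/d with A = πr²; a small sketch with the constants named (values copied from the script, their physical interpretation is an assumption):

import math

def conduction_power(T_hot, T_ref=298.15, radius=0.1, k=0.22, thickness=0.005):
    """Heat flow through a disc of the given radius and thickness, in watts."""
    area = math.pi * radius ** 2
    return k * area * (T_hot - T_ref) / thickness

print(conduction_power(350.0))   # ≈ 71.7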
qtplotlib/barplot.py
jeremiedecock/qtplotlib-python
0
12792083
from PyQt5.QtWidgets import QWidget from PyQt5.QtGui import QPainter, QBrush, QPen, QColor from PyQt5.QtCore import Qt import math class QBarPlot(QWidget): def __init__(self): super().__init__() self.horizontal_margin = 10 self.vertical_margin = 10 self.data = None self.data_index = None self.data_color = None self.title = None self.title_size = 32 self.title_margin = 5 self.hlines = None self.hlines_style = None self.ymin = None self.ymax = None self.x_label_height = 50 # Set window background color self.setAutoFillBackground(True) palette = self.palette() palette.setColor(self.backgroundRole(), Qt.white) self.setPalette(palette) def paintEvent(self, event): qp = QPainter(self) try: num_bar = len(self.data) except: num_bar = 0 if self.data_index is not None and len(self.data_index) != len(self.data): raise ValueError("len(data_index) != len(data)") # TODO size = self.size() widget_width = size.width() widget_height = size.height() if num_bar > 0: plot_area_width = max(0, widget_width - 2 * self.horizontal_margin) plot_area_height = max(0, widget_height - 2 * self.vertical_margin) # Set antialiasing ################################################ # Set anti-aliasing See https://wiki.python.org/moin/PyQt/Painting%20and%20clipping%20demonstration qp.setRenderHint(QPainter.Antialiasing) # Set Font ######################################################## font = qp.font() font.setPointSize(self.title_size) qp.setFont(font) # Draw title ###################################################### title_x_start = self.title_margin title_y_start = self.title_margin title_width = widget_width - 2 * self.title_margin title_height = self.title_size title_x_end = title_x_start + title_width title_y_end = title_y_start + title_height qp.drawText(title_x_start, title_y_start, title_width, title_height, Qt.AlignCenter, self.title) # Prepare coordinates transform ################################### filtered_data = [data_value for data_value in self.data if data_value is not None] self.top_ordinate_value = max(filtered_data) if self.ymax is None else self.ymax self.bottom_ordinate_value = min(filtered_data) if self.ymin is None else self.ymin plot_area_x_start = self.horizontal_margin plot_area_x_end = widget_width - self.horizontal_margin plot_area_width = plot_area_x_end - plot_area_x_start self.plot_area_y_start = title_y_end + self.title_margin + self.vertical_margin self.plot_area_y_end = widget_height - self.vertical_margin - self.x_label_height plot_area_height = self.plot_area_y_end - self.plot_area_y_start brush = QBrush(Qt.white, Qt.SolidPattern) qp.setBrush(brush) qp.drawRect(plot_area_x_start, self.plot_area_y_start, plot_area_width, plot_area_height) # TODO # Set Pen and Brush ############################################### #see https://hci.isir.upmc.fr/wp-content/uploads/2018/03/PyQt-Dessin.pdf #pen = QPen(Qt.black, 3, Qt.SolidLine) pen = QPen() pen.setStyle(Qt.SolidLine) # Qt.DotLine Qt.DashLine Qt.DashDotLine pen.setWidth(2) pen.setBrush(Qt.black) # Qt.green pen.setCapStyle(Qt.RoundCap) pen.setJoinStyle(Qt.RoundJoin) qp.setPen(pen) # See https://en.wikipedia.org/wiki/Tango_Desktop_Project#Palette and https://web.archive.org/web/20160202102503/http://tango.freedesktop.org/Tango_Icon_Theme_Guidelines black_pen = QPen(Qt.black, Qt.SolidLine) green_pen = QPen(QColor("#4e9a06"), Qt.SolidLine) yellow_pen = QPen(QColor("#c4a000"), Qt.SolidLine) red_pen = QPen(QColor("#a40000"), Qt.SolidLine) white_brush = QBrush(Qt.white, Qt.SolidPattern) green_brush = QBrush(QColor("#73d216"), Qt.SolidPattern) 
yellow_brush = QBrush(QColor("#edd400"), Qt.SolidPattern) red_brush = QBrush(QColor("#cc0000"), Qt.SolidPattern) #green_brush = QBrush(QColor("#8ae234"), Qt.SolidPattern) #yellow_brush = QBrush(QColor("#fce94f"), Qt.SolidPattern) #red_brush = QBrush(QColor("#ef2929"), Qt.SolidPattern) # Draw horizontal lines ########################################### if self.hlines is not None: for hline_index, hline_value in enumerate(self.hlines): hline_position = self.ordinateTransform(hline_value) if hline_position is not None: try: hline_style = self.hlines_style[hline_index] if hline_style == ":": pen = qp.pen() pen.setStyle(Qt.DotLine) qp.setPen(pen) else: pen = qp.pen() pen.setStyle(Qt.SolidLine) qp.setPen(pen) except: pen = qp.pen() pen.setStyle(Qt.SolidLine) qp.setPen(pen) qp.drawLine(plot_area_x_start, hline_position, plot_area_x_end, hline_position) # x_start, y_start, x_end, y_end # Draw bars ####################################################### pen = qp.pen() pen.setStyle(Qt.SolidLine) qp.setPen(pen) if self.data_color is None: self.data_color = [None for data_value in self.data] for data_index, (data_value, data_color) in enumerate(zip(self.data, self.data_color)): if data_value is not None: if data_color == "green": qp.setBrush(green_brush) qp.setPen(green_pen) elif data_color == "yellow": qp.setBrush(yellow_brush) qp.setPen(yellow_pen) elif data_color == "red": qp.setBrush(red_brush) qp.setPen(red_pen) else: qp.setBrush(white_brush) qp.setPen(black_pen) x_length = math.floor(plot_area_width / num_bar) x_start = self.horizontal_margin + data_index * x_length y_start = self.ordinateTransform(data_value) # TODO: what if y_start is None ? if y_start is None: if data_value > self.bottom_ordinate_value: y_start = self.plot_area_y_start else: y_start = self.plot_area_y_end y_end = self.ordinateTransform(0) if y_end is None: y_end = self.plot_area_y_end y_length = y_end - y_start # Draw bar qp.drawRect(x_start, y_start, x_length, y_length) def ordinateTransform(self, data_ordinate): # self.top_ordinate_value -> self.plot_area_y_start # self.bottom_ordinate_value -> self.plot_area_y_end if self.bottom_ordinate_value <= data_ordinate <= self.top_ordinate_value: data_ordinate_ratio = (self.top_ordinate_value - data_ordinate) / (self.top_ordinate_value - self.bottom_ordinate_value) data_ordinate_position = self.plot_area_y_start + data_ordinate_ratio * (self.plot_area_y_end - self.plot_area_y_start) return math.floor(data_ordinate_position) else: return None
3.125
3
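ordinateTransform above is a linear map from the data range onto the pixel range of the plot area; the same mapping as a standalone function:

import math

def ordinate_to_pixel(value, v_top, v_bottom, y_top, y_bottom):
    """Map value in [v_bottom, v_top] to a pixel row in [y_top, y_bottom]; None if out of range."""
    if not (v_bottom <= value <= v_top):
        return None
    ratio = (v_top - value) / (v_top - v_bottom)
    return math.floor(y_top + ratio * (y_bottom - y_top))

print(ordinate_to_pixel(50, v_top=100, v_bottom=0, y_top=60, y_bottom=460))  # 260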
gui.py
hardpenguin/ReplaySorceryGUI
4
12792084
#!/usr/bin/python3 import sys import os import signal from PyQt5.QtWidgets import * from PyQt5.QtCore import * from PyQt5.QtGui import * from replay_sorcery import ReplaySorcery signal.signal(signal.SIGINT, signal.SIG_DFL) app = QApplication([]) dir_name = os.path.dirname(sys.argv[0]) full_path = os.path.abspath(dir_name) icon_filename = 'icon.png' icon_path = os.path.join(full_path, icon_filename) class ReplaySorceryGUI(QWidget): def __init__(self, debug): self.debug = debug self.rs = ReplaySorcery(self.debug) QWidget.__init__(self) self.setWindowTitle('ReplaySorceryGUI') self.setWindowIcon(QIcon(icon_path)) self.setMinimumWidth(300) app_layout = QHBoxLayout() # left side left_layout = QVBoxLayout() left_layout.setAlignment(Qt.AlignCenter) self.icon = QPixmap(icon_path) self.icon = self.icon.scaled(92, 92) self.icon_label = QLabel() self.icon_label.setPixmap(self.icon) self.icon_label.setAlignment(Qt.AlignCenter) left_layout.addWidget(self.icon_label) self.instructions_text = QLabel() self.instructions_text.setText("Ctrl+Super+R to save\nthe last 30 seconds\n") left_layout.addWidget(self.instructions_text) self.status_text = QLabel() self.update_status_text() left_layout.addWidget(self.status_text) self.timer = QTimer(self) self.timer.timeout.connect(self.update_status_text) self.timer.start(1000) button_size = QSize(150, 40) buttons = [] turn_on_button = QPushButton("Turn on") buttons.append(turn_on_button) turn_on_button.clicked.connect(self.turn_on_action) turn_off_button = QPushButton("Turn off") buttons.append(turn_off_button) turn_off_button.clicked.connect(self.turn_off_action) refresh_button = QPushButton("Refresh") buttons.append(refresh_button) refresh_button.clicked.connect(self.refresh_action) quit_button = QPushButton("Quit") buttons.append(quit_button) quit_button.clicked.connect(self.quit_action) for button in buttons: button.setFixedSize(button_size) left_layout.addWidget(button) # right side right_layout = QVBoxLayout() right_layout.setAlignment(Qt.AlignCenter) # both sides app_layout.addLayout(left_layout) app_layout.addLayout(right_layout) self.setLayout(app_layout) def update_status_text(self): text_string = "ReplaySorcery: %s" % self.rs.current_status["name"] self.status_text.setText(text_string) color_string = 'color: %s' % self.rs.current_status["color"] self.status_text.setStyleSheet(color_string) self.rs.get_status() def turn_on_action(self): self.rs.turn_on() def turn_off_action(self): self.rs.turn_off() def refresh_action(self): self.rs.get_status() def quit_action(self): if self.debug > 0: print("Exiting ReplaySorceryGUI") sys.exit() window = ReplaySorceryGUI(1) window.show() if window.debug > 0: print("ReplaySorceryGUI started") app.exec_()
2.265625
2
Hessian/PGGAN_hess_spectrum.py
Animadversio/Visual_Neuro_InSilico_Exp
2
12792085
import torch import numpy as np from time import time from os.path import join import lpips from Hessian.GAN_hessian_compute import hessian_compute #%% ImDist = lpips.LPIPS(net='squeeze').cuda() use_gpu = True if torch.cuda.is_available() else False model = torch.hub.load('facebookresearch/pytorch_GAN_zoo:hub', 'PGAN', model_name='celebAHQ-256', pretrained=True, useGPU=use_gpu) num_images = 1 noise, _ = model.buildNoiseData(num_images) noise.requires_grad_(True) # with torch.no_grad(): generated_images = model.test(noise) #%% img = model.avgG.forward(noise) #%% class PGGAN_wrapper(): # nn.Module def __init__(self, PGGAN, ): self.PGGAN = PGGAN def visualize(self, code, scale=1): imgs = self.PGGAN.forward(code,) # Matlab version default to 0.7 return torch.clamp((imgs + 1.0) / 2.0, 0, 1) * scale G = PGGAN_wrapper(model.avgG) #%% feat = noise.detach().clone().cuda() EPS = 1E-2 T0 = time() eva_BI, evc_BI, H_BI = hessian_compute(G, feat, ImDist, hessian_method="BackwardIter") print("%.2f sec" % (time() - T0)) # 95.7 sec T0 = time() eva_FI, evc_FI, H_FI = hessian_compute(G, feat, ImDist, hessian_method="ForwardIter") print("%.2f sec" % (time() - T0)) # 61.8 sec T0 = time() eva_BP, evc_BP, H_BP = hessian_compute(G, feat, ImDist, hessian_method="BP") print("%.2f sec" % (time() - T0)) # 95.4 sec #%% print("Correlation of Flattened Hessian matrix BP vs BackwardIter %.3f" % np.corrcoef(H_BP.flatten(), H_BI.flatten())[0, 1]) print("Correlation of Flattened Hessian matrix BP vs ForwardIter %.3f" % np.corrcoef(H_BP.flatten(), H_FI.flatten())[0, 1]) print("Correlation of Flattened Hessian matrix ForwardIter vs BackwardIter %.3f"% np.corrcoef(H_FI.flatten(), H_BI.flatten())[0, 1]) # Correlation of Flattened Hessian matrix BP vs BackwardIter 1.000 # Correlation of Flattened Hessian matrix BP vs ForwardIter 0.877 # Correlation of Flattened Hessian matrix ForwardIter vs BackwardIter 0.877 #%% H_col = [] for EPS in [1E-5, 1E-4, 1E-3, 1E-2, 1E-1, 1, 2, 10]: T0 = time() eva_FI, evc_FI, H_FI = hessian_compute(G, feat, ImDist, hessian_method="ForwardIter", EPS=EPS) print("%.2f sec" % (time() - T0)) # 325.83 sec print("EPS %.1e Correlation of Flattened Hessian matrix BP vs ForwardIter %.3f" % (EPS, np.corrcoef(H_BP.flatten(), H_FI.flatten())[0, 1])) H_col.append((eva_FI, evc_FI, H_FI)) # EPS 1.0e-05 Correlation of Flattened Hessian matrix BP vs ForwardIter 1.000 # EPS 1.0e-04 Correlation of Flattened Hessian matrix BP vs ForwardIter 0.999 # EPS 1.0e-03 Correlation of Flattened Hessian matrix BP vs ForwardIter 0.989 # EPS 1.0e-02 Correlation of Flattened Hessian matrix BP vs ForwardIter 0.901 # EPS 1.0e-01 Correlation of Flattened Hessian matrix BP vs ForwardIter 0.398 # EPS 1.0e+00 Correlation of Flattened Hessian matrix BP vs ForwardIter 0.046 # EPS 2.0e+00 Correlation of Flattened Hessian matrix BP vs ForwardIter 0.008 # EPS 1.0e+01 Correlation of Flattened Hessian matrix BP vs ForwardIter -0.003 #%% #%% Visualize Spectra figdir = r"E:\OneDrive - Washington University in St. 
Louis\Hessian_summary\PGGAN" savedir = r"E:\Cluster_Backup\PGGAN" # eva_col = [] # evc_col = [] # for triali in tqdm(range(400)): # data = np.load(join(savedir, "Hessian_cmp_%d.npz" % triali)) # eva_BP = data["eva_BP"] # evc_BP = data["evc_BP"] # eva_col.append(eva_BP) # evc_col.append(evc_BP) # # eva_col = np.array(eva_col) from Hessian.hessian_analysis_tools import plot_spectra, compute_hess_corr, plot_consistency_example, plot_consistentcy_mat, average_H, scan_hess_npz eva_col, evc_col, feat_col, meta = scan_hess_npz(savedir, "Hessian_cmp_(\d*).npz", featkey="feat") feat_col = np.array(feat_col).squeeze() H_avg, eva_avg, evc_avg = average_H(eva_col, evc_col) np.savez(join(figdir, "H_avg_%s.npz"%"PGGAN"), H_avg=H_avg, eva_avg=eva_avg, evc_avg=evc_avg, feats=feat_col) #%% fig = plot_spectra(eva_col, figdir=figdir, titstr="PGGAN", ) #%% corr_mat_log, corr_mat_lin = compute_hess_corr(eva_col, evc_col, figdir=figdir, use_cuda=True) # without cuda 12:11 mins, with cuda 8:21 # corr_mat_log, corr_mat_lin = compute_hess_corr(eva_col, evc_col, figdir=figdir, use_cuda=False) #%% fig1, fig2 = plot_consistentcy_mat(corr_mat_log, corr_mat_lin, figdir=figdir, titstr="PGGAN") #%% fig3 = plot_consistency_example(eva_col, evc_col, figdir=figdir, nsamp=5, titstr="PGGAN",) fig3.show()
2.265625
2
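A self-contained sketch of the "BP" route benchmarked above: the Hessian of a scalar objective with respect to a latent vector via double backprop, using a toy quadratic in place of the LPIPS + GAN pipeline so the answer can be checked analytically.

import torch

torch.manual_seed(0)
A = torch.randn(8, 8)
M = A.T @ A                                    # symmetric, so the Hessian below is exactly 2*M

def scalar_objective(z):
    # Stand-in for d(G(z0), G(z0 + dz)): any smooth scalar function of z works here.
    return z @ M @ z

z0 = torch.zeros(8)
H = torch.autograd.functional.hessian(scalar_objective, z0)
eva, evc = torch.linalg.eigh(H)                # eigendecomposition, as in hessian_compute
print(torch.allclose(H, 2 * M, atol=1e-4), eva[-3:])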
infinisdk/infinibox/tenant.py
Infinidat/infinisdk
5
12792086
from ..core import Field, SystemObject, MillisecondsDatetimeType from ..core.api.special_values import Autogenerate from ..core.translators_and_types import MunchType class Tenant(SystemObject): FIELDS = [ Field("id", type=int, is_identity=True, is_filterable=True, is_sortable=True), Field("short_tenant_key", type=int, cached=True), Field("name", type=str, is_filterable=True, mutable=True, creation_parameter=True, default=Autogenerate("tenant_{uuid}")), Field("visible_to_sysadmin", type=bool, default=True, creation_parameter=True, optional=True, cached=True), Field("capacity", type=MunchType), Field("entity_counts", type=MunchType), Field("created_at", type=MillisecondsDatetimeType, is_sortable=True, is_filterable=True), Field("updated_at", type=MillisecondsDatetimeType, is_sortable=True, is_filterable=True), Field("anonymous_gid", type=int, creation_parameter=True, optional=True, mutable=True, is_filterable=True, is_sortable=True), Field("anonymous_uid", type=int, creation_parameter=True, optional=True, mutable=True, is_filterable=True, is_sortable=True), Field("nfs_allow_unmapped_users", type=str, mutable=True, is_filterable=True, is_sortable=True, creation_parameter=True, optional=True), Field("nfs_group_policy", type=str, mutable=True, is_filterable=True, is_sortable=True, creation_parameter=True, optional=True) ] @classmethod def is_supported(cls, system): return system.compat.has_tenants()
2.078125
2
docs/examples/use_cases/tensorflow/efficientdet/dataset/create_tfrecord_indexes.py
cyyever/DALI
3,967
12792087
<gh_stars>1000+
# Copyright 2021 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Generate TFRecord index files necessary when using DALI preprocessing.

Example usage:
    python create_tfrecord_indexes.py --tfrecord2idx_script=~/DALI/tools/tfrecord2idx \
        --tfrecord_file_pattern=tfrecord/pascal*.tfrecord
"""
from absl import app
from absl import flags
from absl import logging

from glob import glob
from subprocess import call
import os.path

flags.DEFINE_string("tfrecord_file_pattern", None, "Glob for tfrecord files.")
flags.DEFINE_string(
    "tfrecord2idx_script", None, "Absolute path to tfrecord2idx script."
)

FLAGS = flags.FLAGS


def main(_):
    if FLAGS.tfrecord_file_pattern is None:
        raise RuntimeError("Must specify --tfrecord_file_pattern.")
    if FLAGS.tfrecord2idx_script is None:
        raise RuntimeError("Must specify --tfrecord2idx_script")

    tfrecord_files = glob(FLAGS.tfrecord_file_pattern)
    tfrecord_idxs = [filename + "_idx" for filename in tfrecord_files]

    if not os.path.isfile(FLAGS.tfrecord2idx_script):
        raise ValueError(
            f"{FLAGS.tfrecord2idx_script} does not lead to valid tfrecord2idx script."
        )

    for tfrecord, tfrecord_idx in zip(tfrecord_files, tfrecord_idxs):
        logging.info(f"Generating index file for {tfrecord}")
        call([FLAGS.tfrecord2idx_script, tfrecord, tfrecord_idx])


if __name__ == "__main__":
    app.run(main)
2.125
2
cn2an/transform_test.py
guoqchen1001/cn2an
1
12792088
<gh_stars>1-10 import unittest from .transform import Transform class TransformTest(unittest.TestCase): def setUp(self) -> None: self.strict_data_dict = { "小王捡了100块钱": "小王捡了一百块钱", "用户增长最快的3个城市": "用户增长最快的三个城市", "小王的生日是2001年3月4日": "小王的生日是二零零一年三月四日", "小王的生日是2012年12月12日": "小王的生日是二零一二年十二月十二日", "今天股价上涨了8%": "今天股价上涨了百分之八", "第2天股价下降了-3.8%": "第二天股价下降了百分之负三点八", "抛出去的硬币为正面的概率是1/2": "抛出去的硬币为正面的概率是二分之一", "现在室内温度为39℃,很热啊!": "现在室内温度为三十九摄氏度,很热啊!", "创业板指9月9日早盘低开1.57%": "创业板指九月九日早盘低开百分之一点五七" } self.smart_data_dict = { "约2.5亿年~6500万年": "约250000000年~65000000年", "廿二日,日出东方": "22日,日出东方", "大陆": "大陆" } self.t = Transform() def test_transform(self) -> None: for strict_item in self.strict_data_dict.keys(): self.assertEqual(self.t.transform(strict_item, "an2cn"), self.strict_data_dict[strict_item]) self.assertEqual(self.t.transform(self.strict_data_dict[strict_item], "cn2an"), strict_item) for smart_item in self.smart_data_dict.keys(): self.assertEqual(self.t.transform(smart_item, "cn2an"), self.smart_data_dict[smart_item]) if __name__ == '__main__': unittest.main()
2.859375
3
foodgram/urls.py
dronsovest/foodgram-project
0
12792089
from django.contrib import admin
from django.urls import include, path
from django.conf.urls.static import static
from django.conf import settings
from django.conf.urls import handler404, handler500

from . import views

handler404 = "foodgram.views.page_not_found"
handler500 = "foodgram.views.server_error"

urlpatterns = [
    path('admin/', admin.site.urls),
    path("auth/", include("users.urls")),
    path("auth/", include("django.contrib.auth.urls")),
    path("favorites/", include("favorites.urls")),
    path("followings/", include("follows.urls")),
    path("shopping-list/", include("shopping_list.urls")),
    path("", include("recipes.urls")),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)

if settings.DEBUG:  # new
    urlpatterns += static(
        settings.MEDIA_URL, document_root=settings.MEDIA_ROOT
    )
1.953125
2
src/panels/Panel.py
severus21/PycLi
0
12792090
from tkinter import * import sys class Panel(Frame): def __init__(self, master, *args, **kw): super().__init__(master, *args, **kw) def hide(self): self.grid_forget() def show(self): self.grid() # http://tkinter.unpythonic.net/wiki/VerticalScrolledFrame #https://code.activestate.com/recipes/578894-mousewheel-binding-to-scrolling-area-tkinter-multi/ class ScrollingArea: def __init__(self, root, factor = 2): self.activeArea = None if type(factor) == int: self.factor = factor else: raise Exception("Factor must be an integer.") if sys.platform.startswith('linux') : root.bind_all('<4>', self._on_mouse_wheel, add='+') root.bind_all('<5>', self._on_mouse_wheel, add='+') else: root.bind_all("<MouseWheel>", self._on_mouse_wheel, add='+') def _on_mouse_wheel(self,event): if self.activeArea and self.activeArea != self: self.activeArea._on_mouse_wheel(event) def _mouse_wheel_bind(self, widget): self.activeArea = widget def _mouse_wheel_unbind(self): self.activeArea = None def build_function__on_mouse_wheel(self, widget, orient, factor = 1): view_command = getattr(widget, orient+'view') if sys.platform.startswith('linux'): def _on_mouse_wheel(event): if event.num == 4: view_command("scroll",(-1)*factor,"units" ) elif event.num == 5: view_command("scroll",factor,"units" ) elif sys.platform == 'win32' or sys.platform == 'cygwin': def _on_mouse_wheel(event): view_command("scroll",(-1)*int((event.delta/120)*factor),"units" ) elif sys.platform == 'darwin': def _on_mouse_wheel(event): view_command("scroll",event.delta,"units" ) return _on_mouse_wheel def add_scrolling(self, scrollingArea, xscrollbar=None, yscrollbar=None): if yscrollbar: scrollingArea.configure(xscrollcommand=yscrollbar.set) yscrollbar['command']=scrollingArea.yview if xscrollbar: scrollingArea.configure(yscrollcommand=xscrollbar.set) xscrollbar['command']=scrollingArea.xview scrollingArea.bind('<Enter>',lambda event: self._mouse_wheel_bind(scrollingArea)) scrollingArea.bind('<Leave>', lambda event: self._mouse_wheel_unbind()) if xscrollbar and not hasattr(xscrollbar, '_on_mouse_wheel'): xscrollbar._on_mouse_wheel = self.build_function__on_mouse_wheel(scrollingArea,'x', self.factor) if yscrollbar and not hasattr(yscrollbar, '_on_mouse_wheel'): yscrollbar._on_mouse_wheel = self.build_function__on_mouse_wheel(scrollingArea,'y', self.factor) main_scrollbar = yscrollbar or xscrollbar if main_scrollbar: scrollingArea._on_mouse_wheel = main_scrollbar._on_mouse_wheel for scrollbar in (xscrollbar, yscrollbar): if scrollbar: scrollbar.bind('<Enter>', lambda event, scrollbar=scrollbar: self._mouse_wheel_bind(scrollbar) ) scrollbar.bind('<Leave>', lambda event: self._mouse_wheel_unbind()) class VScrolledPanel(Panel): """A pure Tkinter scrollable frame that actually works! 
* Use the 'interior' attribute to place widgets inside the scrollable frame * Construct and pack/place/grid normally * This frame only allows vertical scrolling """ def __init__(self, master, *args, **kw): super().__init__(master, *args, **kw) # create a canvas object and a vertical scrollbar for scrolling it self.vscrollbar = Scrollbar(self, orient=VERTICAL) self.vscrollbar.pack(fill=Y, side=RIGHT, expand=FALSE) self.canvas = Canvas(self, bd=0, highlightthickness=0, yscrollcommand=self.vscrollbar.set) self.canvas.pack(side=LEFT, fill=BOTH, expand=TRUE) self.vscrollbar.config(command=self.canvas.yview) # reset the view self.canvas.xview_moveto(0) self.canvas.yview_moveto(0) # create a frame inside the canvas which will be scrolled with it self.interior = interior = Frame(self.canvas, bg="white",) interior_id = self.canvas.create_window(0, 0, window=interior, anchor=NW) # track changes to the canvas and frame width and sync them, # also updating the scrollbar def _configure_interior(event): # update the scrollbars to match the size of the inner frame size = (interior.winfo_reqwidth(), interior.winfo_reqheight()) self.canvas.config(scrollregion="0 0 %s %s" % size) if interior.winfo_reqwidth() != self.canvas.winfo_width(): # update the canvas's width to fit the inner frame self.canvas.config(width=interior.winfo_reqwidth()) interior.bind('<Configure>', _configure_interior) def _configure_canvas(event): if interior.winfo_reqwidth() != self.canvas.winfo_width(): # update the inner frame's width to fill the canvas self.canvas.itemconfigure(interior_id, width=self.canvas.winfo_width()) self.canvas.bind('<Configure>', _configure_canvas) ScrollingArea(self).add_scrolling(self.canvas, yscrollbar=self.vscrollbar) def reset(self): self.canvas.xview_moveto(0) self.canvas.yview_moveto(0)
3.21875
3
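A brief usage sketch for the scrollable panel above: child widgets must be placed in the .interior frame, not in the panel itself (the import path is an assumption based on the file location).

from tkinter import Tk, Label
from panels.Panel import VScrolledPanel   # assumed import path

root = Tk()
panel = VScrolledPanel(root)
panel.pack(fill="both", expand=True)

for i in range(100):
    Label(panel.interior, text="row %d" % i, bg="white").pack(anchor="w")

root.mainloop()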
src/thirdparty/harfbuzz/src/check-symbols.py
devbrain/neutrino
0
12792091
#!/usr/bin/env python3 import sys, os, shutil, subprocess, re, difflib os.environ['LC_ALL'] = 'C' # otherwise 'nm' prints in wrong order builddir = os.getenv('builddir', os.path.dirname(__file__)) libs = os.getenv('libs', '.libs') IGNORED_SYMBOLS = '|'.join(['_fini', '_init', '_fdata', '_ftext', '_fbss', '__bss_start', '__bss_start__', '__bss_end__', '_edata', '_end', '_bss_end__', '__end__', '__gcov_.*', 'llvm_.*', 'flush_fn_list', 'writeout_fn_list', 'mangle_path']) nm = os.getenv('NM', shutil.which('nm')) if not nm: print('check-symbols.py: \'nm\' not found; skipping test') sys.exit(77) cxxflit = shutil.which('c++filt') tested = False stat = 0 for soname in ['harfbuzz', 'harfbuzz-subset', 'harfbuzz-icu', 'harfbuzz-gobject']: for suffix in ['so', 'dylib']: so = os.path.join(builddir, libs, 'lib%s.%s' % (soname, suffix)) if not os.path.exists(so): continue # On macOS, C symbols are prefixed with _ symprefix = '_' if suffix == 'dylib' else '' EXPORTED_SYMBOLS = [s.split()[2] for s in re.findall(r'^.+ [BCDGIRST] .+$', subprocess.check_output(nm.split() + [so]).decode('utf-8'), re.MULTILINE) if not re.match(r'.* %s(%s)\b' % (symprefix, IGNORED_SYMBOLS), s)] # run again c++flit also if is available if cxxflit: EXPORTED_SYMBOLS = subprocess.check_output( [cxxflit], input='\n'.join(EXPORTED_SYMBOLS).encode() ).decode('utf-8').splitlines() prefix = (symprefix + os.path.basename(so)).replace('libharfbuzz', 'hb').replace('-', '_').split('.')[0] print('Checking that %s does not expose internal symbols' % so) suspicious_symbols = [x for x in EXPORTED_SYMBOLS if not re.match(r'^%s(_|$)' % prefix, x)] if suspicious_symbols: print('Ouch, internal symbols exposed:', suspicious_symbols) stat = 1 def_path = os.path.join(builddir, soname + '.def') if not os.path.exists(def_path): print('\'%s\' not found; skipping' % def_path) else: print('Checking that %s has the same symbol list as %s' % (so, def_path)) with open(def_path, 'r', encoding='utf-8') as f: def_file = f.read() diff_result = list(difflib.context_diff( def_file.splitlines(), ['EXPORTS'] + [re.sub('^%shb' % symprefix, 'hb', x) for x in EXPORTED_SYMBOLS] + # cheat: copy the last line from the def file! [def_file.splitlines()[-1]] )) if diff_result: print('\n'.join(diff_result)) stat = 1 tested = True if not tested: print('check-symbols.sh: no shared libraries found; skipping test') sys.exit(77) sys.exit(stat)
2
2
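To make the symbol-filtering logic in check-symbols.py above concrete, here is a small standalone sketch that applies the same regular expressions to a few hand-written nm-style lines instead of a real shared library; the sample symbol names are invented for illustration.

import re

# Invented sample of 'nm' output lines (address, type, name); not from a real build.
nm_output = """\
0000000000012345 T hb_buffer_create
0000000000012346 T hb_shape
0000000000012347 T _fini
0000000000012348 T secret_internal_helper
0000000000012349 B __bss_start"""

IGNORED_SYMBOLS = '|'.join(['_fini', '_init', '__bss_start'])
symprefix = ''          # would be '_' for a macOS dylib
prefix = 'hb'           # derived from the library file name in the real script

exported = [s.split()[2]
            for s in re.findall(r'^.+ [BCDGIRST] .+$', nm_output, re.MULTILINE)
            if not re.match(r'.* %s(%s)\b' % (symprefix, IGNORED_SYMBOLS), s)]
suspicious = [x for x in exported if not re.match(r'^%s(_|$)' % prefix, x)]

print(exported)    # ['hb_buffer_create', 'hb_shape', 'secret_internal_helper']
print(suspicious)  # ['secret_internal_helper']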
pcommon/gdb/pprint/utils.py
mmalyutin/libpcomn
6
12792092
<filename>pcommon/gdb/pprint/utils.py
###############################################################################
# A printer object must provide the following field:
#
#  - printer_name:  A subprinter name used by gdb (required).
#  - template_name: A string or a list of strings.
###############################################################################
import sys
import collections
import operator
import itertools

import gdb
import gdb.types
import gdb.printing

from gdb import lookup_type, parse_and_eval

__all__ = [
    'GDBValue',
    'consume',
    'strip_qualifiers',
    'template_name'
]

def consume(iterator):
    """Consume the iterator entirely, return None."""
    collections.deque(iterator, maxlen=0)

###############################################################################
# GDBValue derives from gdb.Value and provides the following extra members:
#  v.qualifiers
#  v.basic_type
#  v.type_name
#  v.template_name
###############################################################################
class GDBValue(gdb.Value):
    """Wrapper class for gdb.Value"""
    __slots__ = (
        'qualifiers',
        'basic_type',
        'type_name',
        'template_name'
    )

    def __init__(self, value):
        gdb.Value.__init__(value)

        self.qualifiers = type_qualifiers(value.type)
        self.basic_type = gdb.types.get_basic_type(value.type)
        self.type_name = str(self.basic_type)
        self.template_name = template_name(self.basic_type)

###############################################################################
# Type qualifiers and template names handling
###############################################################################
#
# Get the template name of gdb.Type
#
def template_name(t):
    """Get the template name of gdb.Type. Only for struct/union/enum."""
    assert isinstance(t, gdb.Type)
    basic_type = gdb.types.get_basic_type(t)

    return (basic_type.code in [gdb.TYPE_CODE_STRUCT, gdb.TYPE_CODE_UNION, gdb.TYPE_CODE_ENUM]
            and str(basic_type).split('<')[0] or '')

#
# Get a string, which encodes const, volatile, and reference qualifiers of a gdb.Type
#
def type_qualifiers(t):
    """Get string encoding the qualifiers of a gdb.Type: const, volatile, reference.

    The result is a string where 'c' designates const, 'v' volatile, '&' reference.
    So e.g. 'const &foo' will return 'c&', 'const foo' will return 'c', etc.
    """
    assert isinstance(t, gdb.Type)
    t = t.strip_typedefs()

    qualifiers = t.code == gdb.TYPE_CODE_REF and '&' or ''
    if qualifiers:
        t = t.target()

    if t != t.unqualified():
        qualifiers += ('c'  if t == t.unqualified().const() else
                       'v'  if t == t.unqualified().volatile() else
                       'cv' if t == t.unqualified().const().volatile() else '')

    return qualifiers

def strip_qualifiers(typename):
    """Remove const/volatile qualifiers, references, and pointers of a type"""
    qps = []

    while True:
        typename = typename.rstrip()
        qual = next(itertools.dropwhile(lambda q: not typename.endswith(q), ('&', '*', 'const', 'volatile', '')))
        if not qual:
            break
        typename = typename[:-len(qual)]
        qps.append(qual)

    while True:
        typename = typename.lstrip()
        qual = next(itertools.dropwhile(lambda q: not typename.startswith(q), ('const', 'volatile', '')))
        if not qual:
            break
        typename = typename[len(qual):]
        qps.append(qual)

    return typename, qps[::-1]
2.5625
3
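strip_qualifiers from the gdb pretty-printer utilities above is pure string manipulation, so its behaviour can be read off directly; the expected results in the comments below follow from tracing its two peeling loops. The snippet is meant to be sourced inside a gdb session (the module imports gdb at the top), and the import path pprint.utils is an assumption about the package layout.

# Sketch intended to run inside gdb (e.g. `gdb -batch -x demo.py`), since the
# module above imports gdb at import time.  The import path below is an assumption.
from pprint.utils import strip_qualifiers

print(strip_qualifiers('const Foo*'))           # expected: ('Foo', ['const', '*'])
print(strip_qualifiers('volatile Bar const&'))  # expected: ('Bar', ['volatile', 'const', '&'])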
pretix_regex_validation/__init__.py
pretix-unofficial/pretix-regex-validation
1
12792093
<reponame>pretix-unofficial/pretix-regex-validation<filename>pretix_regex_validation/__init__.py
from django.utils.translation import gettext_lazy

try:
    from pretix.base.plugins import PluginConfig
except ImportError:
    raise RuntimeError("Please use pretix 2.7 or above to run this plugin!")

__version__ = "1.0.1"


class PluginApp(PluginConfig):
    name = "pretix_regex_validation"
    verbose_name = "Regex Validation"

    class PretixPluginMeta:
        name = gettext_lazy("Regex Validation")
        author = "pretix team"
        description = gettext_lazy("Allows to add arbitrary regex validation to fields")
        visible = True
        version = __version__
        category = "CUSTOMIZATION"
        compatibility = "pretix>=3.18.0.dev0"

    def ready(self):
        from . import signals  # NOQA


default_app_config = "pretix_regex_validation.PluginApp"
2.046875
2
python3-json.py
evmarh/python3-json
0
12792094
<gh_stars>0
#!/usr/bin/env python3

import datetime
import time
import json
import requests

epoch = round(time.time())
date = time.ctime()

payload = {'query': 'bucket_usage_size{bucket="airtindibucket",instance="at001-s3.managed-dr.com:443",job="minio-job"}',
           'time': '%s' % epoch}

# Prometheus URL
response = requests.get('http://10.2.180.52:9090/api/v1/query', params=payload)

data = json.loads(json.dumps(response.json()))
dictData = (data[u'data'][u'result'])
dictData1 = dictData[0]

# Writing to file in append mode
file1 = open("TBUsed.txt", "a")
file1.write('Total usage: ' + str(int(dictData1[u'value'][1])/1000000000000.0) + ' TB as of ' + str(date) + '\n')
file1.close()
2.21875
2
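A slightly more defensive sketch of the same Prometheus instant-query call made in the script above: the HTTP status is checked, the sample value is parsed as a float, and the output file is handled by a context manager. The endpoint and query string are copied from the script; whether that Prometheus instance is reachable is obviously environment-specific.

import time
import requests

# Endpoint and query copied from the script above.
PROMETHEUS_URL = 'http://10.2.180.52:9090/api/v1/query'
QUERY = 'bucket_usage_size{bucket="airtindibucket",instance="at001-s3.managed-dr.com:443",job="minio-job"}'

payload = {'query': QUERY, 'time': str(round(time.time()))}
response = requests.get(PROMETHEUS_URL, params=payload, timeout=10)
response.raise_for_status()                      # fail loudly on HTTP errors

results = response.json()['data']['result']
if results:
    usage_bytes = float(results[0]['value'][1])  # Prometheus returns the sample value as a string
    with open("TBUsed.txt", "a") as out:         # append mode, closed automatically
        out.write('Total usage: %.3f TB as of %s\n' % (usage_bytes / 1e12, time.ctime()))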
chr/utils.py
MrHuff/general_ds_pipeline
0
12792095
import torch
from torch.utils.data import Dataset
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import collections
from chr import coverage
import pdb


class RegressionDataset(Dataset):

    def __init__(self, X_data, y_data):
        self.X_data = torch.from_numpy(X_data).float()
        self.y_data = torch.from_numpy(y_data).float()

    def __getitem__(self, index):
        return self.X_data[index], self.y_data[index]

    def __len__(self):
        return len(self.X_data)


def evaluate_predictions(pred, Y, X=None):
    # Extract lower and upper prediction bands
    pred_l = np.min(pred, 1)
    pred_h = np.max(pred, 1)
    # Marginal coverage
    cover = (Y >= pred_l) * (Y <= pred_h)
    marg_coverage = np.mean(cover)
    if X is None:
        wsc_coverage = None
    else:
        # Estimated conditional coverage (worst-case slab)
        wsc_coverage = coverage.wsc_unbiased(X, Y, pred, M=100)

    # Marginal length
    lengths = pred_h - pred_l
    length = np.mean(lengths)
    # Length conditional on coverage
    idx_cover = np.where(cover)[0]
    length_cover = np.mean([lengths[i] for i in idx_cover])

    # Combine results
    out = pd.DataFrame({'Coverage': [marg_coverage], 'Conditional coverage': [wsc_coverage],
                        'Length': [length], 'Length cover': [length_cover]})
    return out


def plot_histogram(breaks, weights, S=None, fig=None, limits=None, i=0, colors=None, linestyles=None, xlim=None,
                   filename=None):
    if colors is None:
        if limits is not None:
            colors = ['tab:blue'] * len(limits)
    if linestyles is None:
        if limits is not None:
            linestyles = ['-'] * len(limits)

    if fig is None:
        fig = plt.figure()

    plt.step(breaks, weights[i], where='pre', color='black')

    if S is not None:
        idx = S[i]
        z = np.zeros(len(breaks),)
        z[idx] = weights[i, idx]
        plt.fill_between(breaks, z, step="pre", alpha=0.4, color='gray')

    if limits is not None:
        for q_idx in range(len(limits[i])):
            q = limits[i][q_idx]
            plt.axvline(q, 0, 1, linestyle=linestyles[q_idx], color=colors[q_idx])

    plt.xlabel('$Y$')
    plt.ylabel('Density')

    if xlim is not None:
        plt.xlim(xlim)

    if filename is not None:
        fig.set_size_inches(4.5, 3)
        plt.savefig(filename, bbox_inches='tight', dpi=300)

    plt.show()
2.578125
3
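A small synthetic example of evaluate_predictions from the module above, assuming the package is importable as chr.utils (that is the path given for this entry) and that its dependencies (torch, pandas, matplotlib) are installed. Passing X=None skips the worst-case-slab conditional-coverage estimate, so only the numpy/pandas path is exercised.

import numpy as np
from chr.utils import evaluate_predictions   # assumes the 'chr' package is importable

rng = np.random.default_rng(0)
n = 200
Y = rng.normal(size=n)

# Fake prediction bands: two columns per sample, interpreted via per-row min/max.
pred = np.stack([Y - 1.0 + 0.2 * rng.normal(size=n),
                 Y + 1.0 + 0.2 * rng.normal(size=n)], axis=1)

results = evaluate_predictions(pred, Y, X=None)   # X=None skips the conditional-coverage estimate
print(results)   # one-row DataFrame: Coverage, Conditional coverage (None), Length, Length cover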
Cuadro_Magico/code/bin/processSingleFile.py
sigmadg/sigmadg.github.io
0
12792096
<reponame>sigmadg/sigmadg.github.io<filename>Cuadro_Magico/code/bin/processSingleFile.py<gh_stars>0 #!/Usr/bin/env python """ Create a daemon process that listens to send messages and reads a DICOM file, extracts the header information and creates a Study/Series symbolic link structure. """ import sys, os, time, atexit, stat, tempfile, copy import dicom, json, re from signal import SIGTERM from dicom.filereader import InvalidDicomError class Daemon: """ A generic daemon class. Usage: subclass the Daemon class and override the run() method """ def __init__(self, pidfile, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'): self.stdin = stdin self.stdout = stdout self.stderr = stderr self.pidfile = pidfile self.pipename = '/tmp/.processSingleFilePipe' def daemonize(self): """ do the UNIX double-fork magic, see Stevens' "Advanced Programming in the UNIX Environment" for details (ISBN 0201563177) http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16 """ try: pid = os.fork() if pid > 0: # exit first parent sys.exit(0) except OSError, e: sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror)) sys.exit(1) # decouple from parent environment os.chdir("/") os.setsid() os.umask(0) # do second fork try: pid = os.fork() if pid > 0: # exit from second parent sys.exit(0) except OSError, e: sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror)) sys.exit(1) # redirect standard file descriptors sys.stdout.flush() sys.stderr.flush() #si = file(self.stdin, 'r') #so = file(self.stdout, 'a+') #se = file(self.stderr, 'a+', 0) #os.dup2(si.fileno(), sys.stdin.fileno()) #os.dup2(so.fileno(), sys.stdout.fileno()) #os.dup2(se.fileno(), sys.stderr.fileno()) # write pidfile atexit.register(self.delpid) pid = str(os.getpid()) file(self.pidfile,'w+').write("%s\n" % pid) def delpid(self): try: os.remove(self.pidfile) except: pass def delpipe(self): try: os.remove(self.pipename) except: pass def start(self): """ Start the daemon """ # Check for a pidfile to see if the daemon already runs try: pf = file(self.pidfile,'r') pid = int(pf.read().strip()) pf.close() except IOError: pid = None if pid: message = "pidfile %s already exist. Daemon already running?\n" sys.stderr.write(message % self.pidfile) # Maybe the pid file exits - but the process is not running (crashed). try: os.kill(pid, 0) except OSError: # The process does not exist, forget the pid and wait to be restarted.. pid = None os.remove(self.pidfile) sys.exit(1) # Start the daemon print(' start the daemon') self.daemonize() print ' done' self.run() def send(self,arg): """ Send a message to the daemon via pipe """ # open a named pipe and write to it if stat.S_ISFIFO(os.stat(self.pipename).st_mode): try: wd = open(self.pipename, 'w') wd.write(arg + "\n") wd.flush() wd.close() except IOError: print 'Error: could not open the pipe %s' % self.pipename else: sys.stderr.write(self.pipename) sys.stderr.write("Error: the connection to the daemon does not exist\n") sys.exit(1) def stop(self): """ Stop the daemon """ # Get the pid from the pidfile try: pf = file(self.pidfile,'r') pid = int(pf.read().strip()) pf.close() except IOError: pid = None if not pid: message = "pidfile %s does not exist. 
Daemon not running?\n" sys.stderr.write(message % self.pidfile) return # not an error in a restart # Try killing the daemon process try: while 1: os.kill(pid, SIGTERM) time.sleep(0.1) except OSError, err: err = str(err) if err.find("No such process") > 0: if os.path.exists(self.pidfile): os.remove(self.pidfile) os.remove(self.pipename) else: print str(err) sys.exit(1) def restart(self): """ Restart the daemon """ self.stop() self.start() def run(self): """ You should override this method when you subclass Daemon. It will be called after the process has been daemonized by start() or restart(). """ class ProcessSingleFile(Daemon): def init(self): self.classify_rules = 0 self.rulesFile = '/data/code/bin/classifyRules.json' if os.path.exists(self.rulesFile): with open(self.rulesFile,'r') as f: self.classify_rules = json.load(f) # we should resolve dependencies between rules, this could introduce a problem with termination, # Todo: add a check to the program to make sure that the rules are ok # we need to be able to reference a specific rule (id tag?) self.classify_rules = self.resolveClassifyRules(self.classify_rules) else: print "Warning: no /data/code/bin/classifyRules.json file could be found" def resolveClassifyRules(self, classify_rules ): # add recursively rules back until no more changes can be done for attempt in range(100): didChange = False for rule in range(len(classify_rules)): for entry in range(len(classify_rules[rule]['rules'])): r = classify_rules[rule]['rules'][entry] negate = ("notrule" in r) ruleornotrule = "rule" if negate: ruleornotrule = "notrule" if ruleornotrule in r: # find the rule with that ID findID = False for rule2 in range(len(classify_rules)): if "id" in classify_rules[rule2] and classify_rules[rule2]['id'] == r[ruleornotrule]: # found the id this rule refers to # copy the rules and append instead of the reference rule findID = True classify_rules[rule]['rules'].remove(r) cr = copy.deepcopy(classify_rules[rule2]['rules']) if negate: # add the negate flag to this rule for i in cr: i['negate'] = "yes" classify_rules[rule]['rules'].extend(cr) didChange = True if not findID: print "Error: could not find a rule with ID %s" % r[ruleornotrule] continue if not didChange: break return classify_rules def resolveValue(self,tag,dataset,data): # a value can be a tag (array of string length 1) or a tag (array of string length 2) or a specific index into a tag (array of string length 3) v = '' taghere = True if len(tag) == 1: if not tag[0] in data: if not tag[0] in dataset: taghere = False else: v = dataset[tag[0]] else: v = data[tag[0]] elif len(tag) == 2: if not ( int(tag[0],0), int(tag[1],0) ) in dataset: taghere = False else: v = dataset[int(tag[0],0), int(tag[1],0)].value elif len(tag) == 3: if not ( int(tag[0],0), int(tag[1],0) ) in dataset: taghere = False else: v = dataset[int(tag[0],0), int(tag[1],0)].value[int(tag[2],0)] else: raise ValueError('Error: tag with unknown structure, should be 1, 2, or 3 entries in array') print("Error: tag with unknown structure, should be 1, 2, or 3 entries in array") return taghere, v def classify(self,dataset,data,classifyTypes): # read the classify rules if self.classify_rules == 0: print "Warning: no classify rules found in %s, ClassifyType tag will be empty" % self.rulesFile return classifyTypes for rule in range(len(self.classify_rules)): t = self.classify_rules[rule]['type'] # if we check on the series level all rules have to be true for every image in the series (remove at the end) seriesLevelCheck = False if ('check' in 
self.classify_rules[rule]) and (self.classify_rules[rule]['check'] == "SeriesLevel"): seriesLevelCheck = True ok = True for entry in range(len(self.classify_rules[rule]['rules'])): r = self.classify_rules[rule]['rules'][entry] # we could have a negated rule here def isnegate(x): return x if ('negate' in r) and (r['negate'] == "yes"): def isnegate(x): return not x # check if this regular expression matches the current type t taghere = True try: taghere, v = self.resolveValue(r['tag'],dataset,data) except ValueError: continue # the 'value' could in some cases be a tag, that would allow for relative comparisons in the classification v2 = r['value'] taghere2 = True try: taghere2, v2 = self.resolveValue(v2,dataset,data) except ValueError: v2 = r['value'] if taghere2 == False: v2 = r['value'] if not "operator" in r: r["operator"] = "regexp" # default value op = r["operator"] if op == "notexist": if isnegate(tagthere): ok = False break elif op == "regexp": pattern = re.compile(v2) vstring = v if isinstance(v, (int, float)): #print "v is : ", v, " and v2 is: ", v2 vstring = str(v) if isnegate(not pattern.search(vstring)): # this pattern failed, fail the whole type and continue with the next ok = False break elif op == "==": try: if isnegate(not float(v2) == float(v)): ok = False break except ValueError: pass elif op == "!=": try: if isnegate(not float(v2) != float(v)): ok = False break except ValueError: pass elif op == "<": try: if isnegate(not float(v2) > float(v)): ok = False break except ValueError: pass elif op == ">": try: if isnegate(not float(v2) < float(v)): ok = False break except ValueError: pass elif op == "exist": if isnegate(not tagthere): ok = False break elif op == "contains": if isnegate(v2 not in v): ok = False break elif op == "approx": # check each numerical entry if its close to a specific value approxLevel = 1e-4 if 'approxLevel' in r: approxLevel = float(r['approxLevel']) if (not isinstance(v, list)) and (not isinstance( v, (int, float) )): # we get this if there is no value in v, fail in this case ok = False break if isinstance( v, list ) and isinstance(v2, list) and len(v) == len(v2): for i in range(len(v)): if isnegate(abs(float(v[i])-float(v2[i])) > approxLevel): #print "approx does not fit here" ok = False break if isinstance( v, (int, float) ): if isnegate(abs(float(v)-float(v2)) > approxLevel): ok = False break else: ok = False break # ok nobody failed, this is it if ok: classifyTypes = classifyTypes + list(set([t]) - set(classifyTypes)) if seriesLevelCheck and not ok and (t in classifyTypes): classifyTypes = [y for y in classifyTypes if y != t] return classifyTypes def run(self): try: os.mkfifo(self.pipename) atexit.register(self.delpipe) except OSError: print 'OSERROR on creating the named pipe %s' % self.pipename pass try: rp = open(self.pipename, 'r') except OSError: print 'Error: could not open named pipe for reading commands' sys.exit(1) while True: response = rp.readline()[:-1] if not response: time.sleep(0.1) continue else: try: dataset = dicom.read_file(response) except IOError: print("Could not find file:", response) continue except InvalidDicomError: print("Not a DICOM file: ", response) continue indir = '/data/scratch/archive/' if not os.path.exists(indir): print("Error: indir does not exist") continue outdir = '/data/scratch/views/raw' if not os.path.exists(outdir): os.makedirs(outdir) infile = os.path.basename(response) fn = os.path.join(outdir, dataset.StudyInstanceUID, dataset.SeriesInstanceUID) if not os.path.exists(fn): os.makedirs(fn) if not 
os.path.exists(fn): print "Error: creating path ", fn, " did not work" fn2 = os.path.join(fn, dataset.SOPInstanceUID) if not os.path.isfile(fn2): os.symlink(response, fn2) #else: # continue # don't do anything because the file exists already # lets store some data in a series specific file fn3 = os.path.join(outdir, dataset.StudyInstanceUID, dataset.SeriesInstanceUID) + ".json" data = {} try: data['Manufacturer'] = dataset.Manufacturer except: pass try: data['Modality'] = dataset.Modality except: pass try: data['StudyInstanceUID'] = dataset.StudyInstanceUID except: pass try: data['SeriesInstanceUID'] = dataset.SeriesInstanceUID except: pass try: data['PatientID'] = dataset.PatientID except: pass try: data['PatientName'] = dataset.PatientName except: pass try: data['StudyDate'] = dataset.StudyDate except: pass try: data['StudyDescription'] = dataset.StudyDescription except: pass try: data['SeriesDescription'] = dataset.SeriesDescription except: pass try: data['EchoTime'] = str(dataset.EchoTime) except: pass try: data['RepetitionTime'] = str(dataset.RepetitionTime) except: pass try: data['SeriesNumber'] = str(dataset.SeriesNumber) except: pass try: data['InstanceNumber'] = str(dataset.InstanceNumber) except: pass try: data['SliceThickness'] = str(dataset[0x18,0x50].value) except: pass try: data['ImageType'] = str(dataset[0x08,0x08].value) except: pass try: data['SliceSpacing'] = str(dataset[0x18,0x88].value) except: pass try: data['ScanningSequence'] = str(dataset[0x18,0x20].value) except: pass try: data['PulseSequenceName'] = str(dataset[0x19,0x109c].value) except: pass try: data['SliceLocation'] = str(dataset[0x20,0x1041].value) except: pass try: data['AccessionNumber'] = str(dataset[0x08,0x50].value) except: pass try: data['StudyTime'] = str(dataset[0x08,0x30].value) except: pass data['NumFiles'] = str(0) try: data['Private0019_10BB'] = str(dataset[0x0019,0x10BB].value) except: pass try: data['Private0043_1039'] = dataset[0x0043,0x1039].value except: pass # keep the slice location (use the maximum values for all slice locations) currentSliceLocation = None try: currentSliceLocation = data['SliceLocation'] except: pass if os.path.exists(fn3): with open(fn3, 'r') as f: data = json.load(f) if currentSliceLocation != None: try: if float(data['SliceLocation']) > float(currentSliceLocation): data['SliceLocation'] = currentSliceLocation; except: pass if not 'ClassifyType' in data: data['ClassifyType'] = [] data['StudyInstanceUID'] = dataset.StudyInstanceUID data['NumFiles'] = str( int(data['NumFiles']) + 1 ) # add new types as they are found (this will create all type that map to any of the images in the series) data['ClassifyType'] = self.classify(dataset, data, data['ClassifyType']) #data['ClassifyType'] = data['ClassifyType'] + list(set(self.classify(dataset, data)) - set(data['ClassifyType'])) with open(fn3,'w') as f: json.dump(data,f,indent=2,sort_keys=True) rp.close() # There are two files that make this thing work, one is the .pid file for the daemon # the second is the named pipe in /tmp/.processSingleFile # Hauke, July 2015 if __name__ == "__main__": pidfilename = '/data/.pids/processSingleFile.pid' p = os.path.abspath(pidfilename) if not os.path.exists(p): pidfilename = tempfile.gettempdir() + '/processSingleFile.pid' daemon = ProcessSingleFile(pidfilename) daemon.init() if len(sys.argv) == 2: if 'start' == sys.argv[1]: try: daemon.start() except: print "Error: could not create processing daemon: ", sys.exc_info()[0] sys.exit(-1) elif 'stop' == sys.argv[1]: daemon.stop() elif 'restart' 
== sys.argv[1]: daemon.restart() elif 'test' == sys.argv[1]: r = daemon.resolveClassifyRules( daemon.classify_rules ) print json.dumps(r, sort_keys=True, indent=2) else: print "Unknown command" sys.exit(2) sys.exit(0) elif len(sys.argv) == 3: if 'send' == sys.argv[1]: daemon.send(sys.argv[2]) sys.exit(0) else: print "Process DICOM files fast using a daemon process that creates study/series directories with symbolic links." print "Use 'start' to start the daemon in the background. Send file names for processing using 'send'." print "Test the rules by running test:" print " python2.7 %s test" % sys.argv[0] print "" print "Usage: %s start|stop|restart|send|test" % sys.argv[0] print "" print "For a simple test send a DICOM directory by:" print " find <dicomdir> -type f -print | grep -v .json | xargs -i echo \"/path/to/input/{}\" >> /tmp/.processSingleFilePipe" print "" sys.exit(2)
2.4375
2
WikiParser/NEDParser.py
imyoungmin/NED
1
12792097
<reponame>imyoungmin/NED import importlib import bz2 import os import time import re from multiprocessing import Pool from pymongo import MongoClient import pymongo from urllib.parse import unquote import html import sys from . import Parser as P importlib.reload( P ) class NEDParser( P.Parser ): """ Parsing Wikipedia extracted and multistream archives to construct the surface forms dictionary and the inter-links table. """ # Static members. _TitlePattern = re.compile( r"<title>\s*(.+?)\s*</title>", re.I ) _RedirectTitlePattern = re.compile( r"<redirect\s+title=\"\s*(.+?)\s*\"\s*/>", re.I ) def __init__( self ): """ Constructor. """ P.Parser.__init__( self ) # Defining connections to collections for entity disambiguation. self._mNed_Dictionary = self._mNED["ned_dictionary"] # {_id:str, m:{"e_1":int, "e_2":int,..., "e_n":int}}. -- m stands for 'mapping.' self._mNed_Linking = self._mNED["ned_linking"] # {_id:int, f:{"e_1":true, "e_2":true,..., "e_3":true}}. -- f stands for 'from.' def parseSFFromEntityNames( self ): """ Grab the entity names from entity_id collection and insert their surface forms in ned_dictionary. """ nEntities = self._mEntity_ID.count() # Retrieve number of entities in DB. startTime = time.time() print( "------- Creating surface forms from Wikipedia titles -------" ) print( "[!] Detected", nEntities, "entities in entity_id collection" ) requests = [] # We'll use bulk writes to speed up process. BATCH_SIZE = 10000 totalRequests = 0 for t in self._mEntity_ID.find(): # Surface forms are in lowercase. requests.append( pymongo.UpdateOne( { "_id": t["e_l"] }, { "$inc": { "m." + str( t["_id"] ): +1 } }, upsert=True ) ) totalRequests += 1 if len( requests ) == BATCH_SIZE: # Send lots of update requests. self._mNed_Dictionary.bulk_write( requests ) print( "[*]", totalRequests, "processed" ) requests = [] if requests: self._mNed_Dictionary.bulk_write( requests ) # Process remaining requests. print( "[*]", totalRequests, "processed" ) endTime = time.time() print( "[!] Done after", endTime - startTime, "secs." ) def parseSFsAndLsFromWikilinks( self, extractedDir ): """ Grab surface forms and link relationships from wikilinks in valid entity pages. Skip processing disambiguation pages, lists, and Wikipedia templates, files, etc. Use this method for incremental analysis of extracted Wikipedia BZ2 files. :param extractedDir: Directory where the individual BZ2 files are located: must end in "/". """ print( "------- Creating surface forms and links from Wikilinks and Disambiguation pages -------" ) startTotalTime = time.time() directories = os.listdir( extractedDir ) # Get directories of the form AA, AB, AC, etc. chunks = [] MAX_CHUNK_SIZE = 20 for directory in directories: fullDir = extractedDir + directory if os.path.isdir( fullDir ): print( "[*] Processing", directory ) files = os.listdir( fullDir ) # Get all files in current parsing directory, e.g. AA/wiki_00.bz2. files.sort() for file in files: fullFile = fullDir + "/" + file if os.path.isfile( fullFile ) and P.Parser._FilenamePattern.match( file ): # Read bz2 file and process it. with bz2.open( fullFile, "rt", encoding="utf-8" ) as bz2File: documents = self._extractWikiPagesFromBZ2( bz2File.readlines(), keepDisambiguation=True, lowerCase=False ) # Add documents to a chunk list in preparation for multiprocessing. chunks.append( documents ) if len( chunks ) == MAX_CHUNK_SIZE: self._extractAndProcessWikilinks( chunks ) # Extract Wikilinks and update collections in DB. 
chunks = [] if chunks: self._extractAndProcessWikilinks( chunks ) # Process remaining chunks of documents. endTotalTime = time.time() print( "[!] Completed process in", endTotalTime - startTotalTime, "secs" ) def _extractAndProcessWikilinks( self, chunks ): """ Extract wikilinks from regular and disambiguation pages, and write results to ned_dictionary and ned_linking collections. :param chunks: A list of lists of extracted wiki documents of the form {id:int, title:str, lines:[str]}. """ startTime = time.time() pool = Pool() rChunks = pool.map( NEDParser._extractWikilinks, chunks ) # Each chunk of document objects in its own thread. pool.close() pool.join() # Close pool and wait for work to finish. # Split rChunks' lists of tuples into surface form dicts and linking dicts. sfDocuments = [] linkDocuments = [] for chunk in rChunks: for doc in chunk: if doc[0]: sfDocuments.append( doc[0] ) # Surface forms. if doc[1]["to"]: linkDocuments.append( doc[1] ) # Link information. # Update DB collections. self._updateSurfaceFormsDictionary( sfDocuments ) self._updateLinkingCollection( linkDocuments ) endTime = time.time() print( "[**] Processed", len( chunks ), "chunks in", endTime - startTime, "secs" ) def parseSFFromRedirectPages( self, msIndexFilePath, msDumpFilePath ): """ Extract surface form from redirect pages. We don't modify the ned_linking collection in this function since redirect pages are only aliases for entity pages. :param msIndexFilePath: Multistream index file path (e.g. enwiki-20141106-pages-articles-multistream-index.txt). :param msDumpFilePath: Multistream dump file path (e.g. enwiki-20141106-pages-articles-multistream.xml.bz2). """ print( "------- Creating surface forms from redirect pages -------" ) startTotalTime = time.time() startTime = time.time() blockRequests = [] # Accumulate blocks for multithreading processing. REQUESTS_BLOCK_COUNT = 1000 requestId = 0 with open( msDumpFilePath, "rb" ) as bz2File: with open( msIndexFilePath, "r", encoding="utf-8" ) as indexFile: seekByte = -1 for lineNumber, line in enumerate( indexFile ): # Read index line by line. components = line.strip().split( ":" ) # [ByteStart, DocID, DocTitle] newSeekByte = int( components[0] ) # Find the next seek byte start that is different to current (defines a block). if seekByte == -1: # First time reading seek byte from file. seekByte = newSeekByte continue if newSeekByte != seekByte: # Changed seek-byte? requestId += 1 count = newSeekByte - seekByte # Number of bytes to read from bz2 stream. bz2File.seek( seekByte ) # Read block of data. block = bz2File.read( count ) dData = bz2.decompress( block ).decode( "utf-8" ) blockRequests.append( (requestId, dData) ) # Append new block to requests. if len( blockRequests ) == REQUESTS_BLOCK_COUNT: # Accumulate REQUESTS_BLOCK_COUNT requests for incremental parsing. self._parseMSBZ2BlockRequests( blockRequests ) print( "[*]", len( blockRequests ), "request blocks starting at byte", seekByte, "parsed after", time.time() - startTime, "secs" ) blockRequests = [] startTime = time.time() seekByte = newSeekByte # Add the last seek byte with count = -1 to read all until EOF. requestId += 1 bz2File.seek( seekByte ) # Read block of data. block = bz2File.read( -1 ) dData = bz2.decompress( block ).decode( "utf-8" ) blockRequests.append( (requestId, dData) ) # Append new block to requests. self._parseMSBZ2BlockRequests( blockRequests ) # And parse this last block. 
print( "[*]", len( blockRequests ), "request blocks starting at byte", seekByte, "parsed after", time.time() - startTime, "secs" ) endTotalTime = time.time() print( "[!] Completed process for redirect surface forms in", endTotalTime - startTotalTime, "secs" ) def _parseMSBZ2BlockRequests( self, requests ): """ Read surface forms from blocks of Wikipedia documents obtained from the BZ2 multi-stream dump file. :param requests: A list of tuples of the form (requestID, blockStringData) """ pool = Pool() sfDocuments = pool.map( NEDParser._processMSBZ2Block, requests ) # Each block request tuple in its own thread. pool.close() pool.join() # Update ned_dictionary collection. self._updateSurfaceFormsDictionary( sfDocuments ) @staticmethod def _processMSBZ2Block( block ): """ Read and process data from a block in multi-stream Wikipedia dump file. :param block: A tuple containing (requestId, string block of data). :return: A dictionary of the form {"sf1":{"m.EID1":int,..., "m.EIDn":int}, "sf2":{"m.EID1":int,..., "m.EIDn":int}, ...} """ startTime = time.time() mClient = MongoClient( "mongodb://localhost:27017/" ) # One single connection to rule all requests in this block. mNED = mClient.ned mEntity_ID = mNED["entity_id"] requestId = block[0] dData = block[1] # Obtain the title and redirect titles from read block. lines = dData.split( "\n" ) skipLine = False # To avoid unnecessary checks. title = None nDoc = {} # Output dictionary. for line in lines: line = line.strip() if not line: # Skip empty lines. continue if line == "</page>": # End of document? skipLine = False title = None elif not skipLine: if not title: # Find page title first. m = NEDParser._TitlePattern.search( line ) if m: title = m.group( 1 ).lower() # Now reading a <page>. Make title a lowercase surface form. else: m = NEDParser._RedirectTitlePattern.search( line ) # Find redirect title. if m: entity = m.group( 1 ) # Check against DB that the referenced real-world entity exists in entity_id collection. # Skip links to another disambiguation page or an invalid entity page. if P.Parser._DisambiguationPattern.match( entity ) is None and P.Parser._SkipTitlePattern.match( entity ) is None: record = None # Sentinel for found entity in DB. # First check how many entities match the lowercase version given in link: we may have ALGOL and Algol... n = mEntity_ID.find( { "e_l": entity.lower() } ).count() if n == 1: # One match? Then retrieve entity ID. record = mEntity_ID.find_one( { "e_l": entity.lower() }, projection={ "_id": True } ) elif n > 1: # If more than one record, then Wikilink must match the true entity name: case sensitive. record = mEntity_ID.find_one( { "e": entity }, projection={ "_id": True } ) if record: # Process only those entities existing in entity_id collection. eId = "m." + str( record["_id"] ) # Creating entry in output dict. if nDoc.get( title ) is None: nDoc[title] = {} if nDoc[title].get( eId ) is None: nDoc[title][eId] = 0 nDoc[title][eId] += 1 # Entity is referred to by this surface form one more time (i.e. increase count). # else: # print( "[!] Entity", entity, "doesn't exist in the DB!", file=sys.stderr ) # else: # print( "[W] Skipping entry", title, "pointing to invalid entity", entity, file=sys.stderr ) skipLine = True # Found what we wanted... skip the rest of the page. elif line.find( "<text", 0, 6 ) != -1: # Reached the body of the page? skipLine = True # No need to continue checking for redirect until page ends in </page>. 
mClient.close() endTime = time.time() print( "[**] Processed", len( nDoc ), "redirect entries in block", requestId, "in", endTime - startTime ) return nDoc def initDBCollections( self ): """ Reset the DB collections to start afresh. """ self._mNed_Dictionary.drop() # Note that we don't drop the entity_id collection here: use the TFIDFParser for that. self._mNed_Linking.drop() print( "[!] Collections for surface forms computations have been dropped" ) @staticmethod def _extractWikilinks( docs ): """ Parse inter-Wikilinks from an entity page or disambiguation page to obtain surface forms (by convention these will be lowercased). Also, collect the IDs of entities that current document points to, as long as current doc is a non-disambiguatio page. :param docs: List or chunk of document dictionaries to process: {id:int, title:str, lines:[str]}. :return: A list of tuples with two dicts: one of the form {"sf1":{"m.EID1":int,..., "m.EIDn":int}, "sf2":{"m.EID1":int,..., "m.EIDn":int}, ...}, \ and another of the form {from:int, to: set{int, int, ..., int}}. """ mClient = MongoClient( "mongodb://localhost:27017/" ) # One single connection to rule all docs' DB requests. mNED = mClient.ned mEntity_ID = mNED["entity_id"] result = [] for doc in docs: nDoc = {} # This dict stores the surface forms and their corresponding entity mappings with a reference count. nSet = set() # Stores the IDs of pages pointed to by this non-disambiguation document (e.g. nSet is empty for a disambiguation page). # Treat disambiguation pages differently than regular valid pages. surfaceForm = "" isDisambiguation = False m = P.Parser._DisambiguationPattern.match( doc["title"] ) if m is not None: isDisambiguation = True # We'll be processing a disambiguation page. surfaceForm = m.group( 1 ).strip().lower() # This will become the common surface name for all wikilinks within current disambiguation page. for line in doc["lines"]: line = P.Parser._ExternalLinkPattern.sub( r"\3", line ) # Remove external links to avoid false positives. for matchTuple in P.Parser._LinkPattern.findall( line ): entity = html.unescape( unquote( matchTuple[0] ) ).strip() # Clean entity name: e.g. "B%20%26amp%3B%20W" -> "B &amp; W" -> "B & W". if not isDisambiguation: surfaceForm = matchTuple[1].lower() # For non disambiguation pages, anchor text is the surface form. if len( surfaceForm ) > 128: # Skip too long of a surface form. continue # Skip links to another disambiguation page or an invalid entity page. if P.Parser._DisambiguationPattern.match( entity ) is None and P.Parser._SkipTitlePattern.match( entity ) is None: record = None # Sentinel for found entity in DB. # First check how many entities match the lowercase version given in link: we may have ALGOL and Algol... n = mEntity_ID.find( { "e_l": entity.lower() } ).count() if n == 1: # One match? Then retrieve entity ID. record = mEntity_ID.find_one( { "e_l": entity.lower() }, projection={ "_id": True } ) elif n > 1: # If more than one record, then Wikilink must match the true entity name: case sensitive. record = mEntity_ID.find_one( { "e": entity }, projection={ "_id": True } ) if record: # Process only those entities existing in entity_id collection. eId = "m." + str( record["_id"] ) # Creating entry in output dict. if nDoc.get( surfaceForm ) is None: nDoc[surfaceForm] = {} if nDoc[surfaceForm].get( eId ) is None: nDoc[surfaceForm][eId] = 0 nDoc[surfaceForm][eId] += 1 # Entity is referred to by this surface form one more time (i.e. increase count). 
if not isDisambiguation: nSet.add( record["_id"] ) # Keep track of page IDs pointed to by this non-disambiguation document. else: print( "[!] Entity", entity, "doesn't exist in the DB!", file=sys.stderr ) else: print( "[W] Skipping entry", surfaceForm, "pointing to invalid entity", entity, file=sys.stderr ) # print( "[***]", doc["id"], doc["title"], "... Done!" ) result.append( ( nDoc, { "from": doc["id"], "to": nSet } ) ) mClient.close() return result def _updateSurfaceFormsDictionary( self, sfDocuments ): """ Update the NED dictionary of surface forms. :param sfDocuments: List of (possibly empty) surface form docs of the form {"sf1":{"m.EID1":int,..., "m.EIDn":int}, "sf2":{"m.EID1":int,..., "m.EIDn":int}, ...}. """ print( "[*] Updating ned_dictionary collection... ", end="" ) requests = [] # We'll use bulk writes to speed up process. BATCH_SIZE = 10000 totalRequests = 0 for sfDoc in sfDocuments: if not sfDoc: continue # Skip empty sf dictionaries. for sf in sfDoc: # Iterate over surface forms in current dict. requests.append( pymongo.UpdateOne( { "_id": sf }, { "$inc": sfDoc[sf] }, upsert=True ) ) totalRequests += 1 if len( requests ) == BATCH_SIZE: # Send lots of update requests. self._mNed_Dictionary.bulk_write( requests ) requests = [] if requests: self._mNed_Dictionary.bulk_write( requests ) # Process remaining requests. print( "Done with", totalRequests, "requests sent!" ) def _updateLinkingCollection( self, linkDocuments ): """ Add more link references to the ned_linking collection. :param linkDocuments: A list of dicts of the form {from:int, to: set{int, int, ..., int}}. """ print( "[*] Updating ned_linking collection... ", end="" ) # Conform input to the following format: {"eId1":{"f.eId2":True, "f.eId3":True}, ..., "eIdn":{"f.eId1":True,...}} toFrom = {} for doc in linkDocuments: fId = "f." + str( doc["from"] ) # --> from 12 to "f.12". for to in doc["to"]: if toFrom.get( to ) is None: toFrom[to] = {} toFrom[to][fId] = True # Now, upsert ned_linking collection with UpdateOne bulk writes. requests = [] BATCH_SIZE = 10000 totalRequests = 0 for to in toFrom: requests.append( pymongo.UpdateOne( { "_id": int( to ) }, { "$set": toFrom[to] }, upsert=True ) ) totalRequests += 1 if len( requests ) == BATCH_SIZE: # Send lots of update requests at once. self._mNed_Linking.bulk_write( requests ) requests = [] if requests: self._mNed_Linking.bulk_write( requests ) # Process remaining requests. print( "Done with", totalRequests, "requests sent!" )
2.625
3
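A sketch of how the NEDParser above appears intended to be driven, based only on the public method names and docstrings in this entry. The import path, the file paths, and the presence of a populated entity_id collection in a local MongoDB (which the class says is built elsewhere, by the TFIDFParser) are all assumptions.

# Hypothetical driver; paths are placeholders and MongoDB must be running locally.
from WikiParser.NEDParser import NEDParser   # import path inferred from this entry's file path

parser = NEDParser()
parser.initDBCollections()          # drops ned_dictionary / ned_linking (entity_id is left alone)

parser.parseSFFromEntityNames()     # surface forms from Wikipedia titles
parser.parseSFsAndLsFromWikilinks("/data/wiki/extracted/")            # AA/, AB/, ... with wiki_NN.bz2 files
parser.parseSFFromRedirectPages(
    "/data/wiki/enwiki-pages-articles-multistream-index.txt",         # placeholder index file
    "/data/wiki/enwiki-pages-articles-multistream.xml.bz2")           # placeholder dump file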
profile_app_mod/apps.py
kurniantoska/medicalwebapp_project
1
12792098
<filename>profile_app_mod/apps.py
from django.apps import AppConfig


class ProfileAppModConfig(AppConfig):
    name = 'profile_app_mod'
1.359375
1
Assignment2/Matlab - Group 9/datasets/split.py
Tchinmai7/CSE535
0
12792099
import os

if not os.path.exists("data"):
    os.mkdir("data")
if not os.path.exists("data/about"):
    os.mkdir("data/about")
if not os.path.exists("data/father"):
    os.mkdir("data/father")

for filename in os.listdir("raw_data"):
    full_filename = f"raw_data/{filename}"
    if "About" in filename:
        dest_file = f"data/about/{filename}"
    elif "Father" in filename:
        dest_file = f"data/father/{filename}"
    else:
        # Skip files matching neither pattern; otherwise dest_file would be
        # undefined (or stale from a previous iteration) at the rename below.
        continue
    os.rename(full_filename, dest_file)
3.03125
3
shipment/admin.py
Medinaaz/Vodafone-Payment
0
12792100
from django.contrib import admin

from shipment.models import Shipment

# Register your models here.


@admin.register(Shipment)
class ShipmentAdmin(admin.ModelAdmin):
    list_display = ("name", "surname", "email", "phone", "city", "district", "neighborhood", "others")
1.859375
2
test/linkloading.py
fbernhart/xhtml2pdf
0
12792101
# -*- coding: utf-8 -*- # Copyright 2010 <NAME>, h<EMAIL> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __version__ = "$Revision: 194 $" __author__ = "$Author: holtwick $" __date__ = "$Date: 2008-04-18 18:59:53 +0200 (Fr, 18 Apr 2008) $" import ho.pisa as pisa import os import logging log = logging.getLogger(__file__) def dummyLoader(name): return '\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00F\x00\x00\x00\x89\x04\x03\x00\x00\x00c\xbeS\xd6\x00\x00' \ '\x000PLTE\x00\x00\x00\n\x06\x04\x18\x14\x0f-&\x1eLB6w`E\x8f\x80q\xb2\x9c\x82\xbe\xa1{' \ '\xc7\xb0\x96\xd1\xbd\xa9\xd9\xd0\xc6\xef\xeb\xe6\xf8\xf3\xef\xff\xfb\xf7\xff\xff\xffZ\x83\x0b|\x00\x00' \ '\x0c\xedIDATx^u\x97]l\x1bWv\xc7g\xe2`\x81\xbe\xcd%Gr\xd3\xa7P\x12e\xb7\x01\x8a\xd0")E\x01\x02\x8f\xf8' \ '!\x8bI\x17\x10\xc5!))5`\xf1C\xb4\xb25`S\xb2l\xb95\x90H\xa4.\xb9/u$K3\xe3\xa2\x80W\x12\xc59L\xf6a\xb3' \ '\x8dcN\xd6@\xb7\x1f\x01\x8a\x85\x16\x9b-\xfa\x81M\xb8@\x83l\xd1\xd8\xbc|)\xd0\x97\x82\xea\xb93\x92\xec' \ '"\xce\x11 \t3?\xfe\xcf\xff\x9e{\xce\x01(' \ '\x1c>7\x18\xfb\xc2\xfaE\xffk_\xb6\x18\xeb\x1e>\x8f\xe92d\xfe%T\xa8\x98\xfa\x07\x1f ' \ '$<\x0f\xe1\x91\xabT\xc1\xacT\xf2\xbfd\xec\xbb\x98\xdfM\xeb\x86aYP\xfa\xd3\xd6\xf3\x98C[' \ '\xa6\xaaU\xa1a5\xe9\x1b\xad\xef\xd0i}\x91\xccy+\xc8X\xf5E\xf6]:\xff0\xd8\x97\xce7\xb9P\xf1\xd1\xb7\x98' \ '\xaec\xe7/\xd3\xa1\xeb\x81{\x96e5\xd7.\xb6\x85\xe7\x99aO\x94\xf1R(' \ '\xfeC\xce\xd4F\xbf\xc50\x1b\xfa\xefS\xa9\xb2\x12p\x98({' \ '\x8eN\x9b\xb1\xbf\xf5O\xa5\xd7\x0b\xb4\xc9\x0f\x96\xec<G\xa7\xc5\x1e\xbf\xfa\xe2b\x90\x16\xb2\x00\x96E' \ '\x93O\x9e\xe7\xe77\x8b\xd2@ \xa3\xa7\x96\xe6\r\xab\xb9\x97\xfc\xf6\xb90WV\x0e\x8d(' \ '\xa1\xa5dd*\x06PL\xa2\xe7g\xdfw\xba\xe8\xe6o\x06\xc6\xd5\x80\xc7\xe5s\xbb|\xbd\x91\xd2\xb9 ' \ '\x13\x9e1\xc2\x13\xb5\xfeN\rn\xa5\xd5a\xc5+\xe7\xb7\xf5\xa2\xcbC\xde>a\x9c\xd2\xb5\xad\x07\xdbS\x0b\xb0' \ '\xa5z\xeb\x94\xd2y\x80kD\xee<e\x10h\x7fs]\xf4g\xa7\x01\xb6\x12\x91z\xa9P\x8a\\\xcfg\xfdQ\xf6\x0c\x83' \ '\xb1CD?\x05\x80\xf2\xa4;z)\xb8\x11\xf1\x11\xf7\xe5\x8b\x9d\xff\xcf\\\x92H\x846\x80f\x91Ys/\x11\xe2r\x85' \ '\xfe\x98u\x9e\xf5\xf3_\x1eB\xd2U\x00\x9a\xf3\xc9\xc92\xb9\xbc\xbc\xec\x93N?:\xce\xd59\xect\xdb\xec_\xbdC' \ '\xa4\x1f\x99\xb9\x81\x97\xddj\xb9g\x8c\xf4\xaf\xe8\x8f\xba\xc8\x1cwy\xbb\xd3\xb8\xab.\xfb\x0bU\xd03S\xa2' \ '\xac\x96\x03k\xe1\x02\xe4\x19\xbe\x12N\xcc|3<U\xd8O\x02\xd4iQ\x12\\j\x81R\x80\xbd\x14\x16\xed\x88\xc1' \ '\xfavw&\x02isj\xa2\xa9\xd1\x12\x91\xc4\xfe$\xa5\xe1\xbc\xf2f\xbbs\xcc ' \ '\xc2\xb2\xc6\xcd\xec\xe8\xfe\xa2\x05\xb4F$A\x0c\x94\n\xee\x9b\xc5\xec_\xb3\xa7\x0c\xfb\xf7q\xad\xb2\xb6b5' \ '?h\xea\xe6$\x11\t\xe9\xebs\r\xbdv\xf5\xf6\t\xd3a\xec#5\xb8\x9c\x08\xdf\xb4\xc0J\xc1\x9a$\x11\x7f8\x1c\x01' \ '\xb8\xf4\x17\xec\xb0s\xe29\x93\x18\x08\xa5\xcc\xa4eA\xaep\xd7#\xca\xa0\xeb\xd7o\xd5\x8a\xb7\x19;a:.\x1f' \ '\x11\xdd7\x1b8R\xcb\x83\xf5\xac<\xbf\x1e.,\xce~<\xff\xe3N\x9b\x1d3m\x0f\xea\x8b\x85{' \ '\xd6\xa7\xd6\xc3\xf8e}\xd9\xdc C\xd1\xd9f\xfe\x9d\x16;f\xba\x7f/\x12A\x10\xce\xe2\x88[' \ 
'\xffT\x9a\x99\xc8\x0co\xf5\xf5\x05g\xad\xda\x0fX\xeb\xa4\xceqQ\x10$\xb1\xb7\xd2@\xa86x\x7f8>h._\x9dh4\x8d' \ '\xa7:\x8f#X\x13At\xdb3nF\xee\xc8\x19wV^\xf4\x1b\xd6\xdc\xed\x13\xe6w\x01I\x90\x90\xa1F\x05\x99\xdc}B\x88(' \ '\x87}\xb7\xac\xda\x99\x13\xe6\xa7\xa1\xf3\x02fs\xa5)\xbd\xd70\r\xceH"\x91\xc2\x15\xc8\x1e\x9f\xbd\xbd\x17' \ '\xf7\x8b\x04m\x07\xd2\xb4\x02\xc8 !\xcf\xe1\x83\x0b\xc6\x9d+\\\x87u;\xedl\xdc{' \ '^\x12\x05\x89$\x0b\xd40\xef\x12\tu\xd2\x99!\xec\xc4\xab\x17\x8f\x98\xc7/\xc6\x07\xc6$;\xc1YZ\xd1+\n\x11E' \ '\x12\xa0\xe0\x1b\x18G\xd3\x0e\xf3\xb57\xeeN\xbc,\x89\xa2@z\xd0\x12]\xc34C\x11d\xbct\x809\x0c\xfbU ' \ 'N"\x1eA\x92\xf0l\x03\xd8]\xeb\nq/\xc9\xb4\xe6\x91\x13\xf2\x97\xc8t\x1dF\xea#\xa2\xc0\xebH\x06)\x98\x8b' \ '\xc4\xbd\xd73\x12\x17e\xe5\x956g\xb0C~\x15P\x89(' \ '\t<\x08\xe9\xbda\xc0]\xcf\x1f\xed\x91\xbcBd\xe5\rv\xc4\xfc:\xac\xe2Qlf\xc8G\x82\x95\xc6\'\xf1\x18(' \ '><\xa6\xfb\xc0\xf6\x83\xcc\xe7\t\xd5G\x1c&\x8d\xc3E\x1b\x0fK\x00\x8a"\xc8\xd9\xde\x93\xfb\xfa\\U\xa7\x08' \ '\xcf\x85\x96\xd3\xf9\xb1\xf4\x0f\x9b\x9c\x11\xa4q_\xf8\xe0)3\xa5\x9e\x97\x1c;^\xbaU\xa8Z[' \ '1x\x9f\xbcX$3_v9\xd3\xedt?W\xe3^\x14r\xa04T\xc0\xfad\x14\xc6r\x83\xf7\xa5\xc4\x91\x1f\xc6\x90!r\x9fs0\xb1' \ '\xa76\xdd\xb0\x1e\xc66\xcf\\\x9ay\xf5\x85\xc4\xc1aW\xb0\x97\xd355A\x88,' \ '8AjA\x1d\x1b-S\x98Ly\xe4\xe4m\xe7\xec-\xe6WU\x82%\x94\x1cF\xed\xa1Uk/\xa2\xb9\xb3\xe4T\xee\r\xf6[' \ 'dZ-\x16@F\xc2{w\x92\x05C#\xd4\x1a\x1f\xae\xcbe\x8f\xff\\\xaf\xe3\xa7\xfd\xf5\xd9\xb2:\x89wu\x14\xb2\xe2' \ '\xbeqO_\xa9\x0f\xaf\xfb\xfa\x06\xe7\xae\xb4m?\xff\xdc[\x8a\xa8\xca1$\x8a!\xf2Zc\x13\xea\x17\xd6\\I(' \ '\xcd\xb4\x84\xeea\x9b}\xe4\xce\x8f\x85\x13\xce\x8d\x89\xc8HR\x10\xb2P\xa7\x19w\x0c\xf6\x93\xbf\xe4L\xeb' \ '\x12\x89\x95\\\x11\xc5\xbe1" *\xca\xc6\x80Ik\xbe\xf0\x02\xd4s\x8f\xb8\x9fo|\xbd\x83\xda\x80+\xc7\xdbPD' \ '\x10\x8f\xf8\xc2B?\xadlD\x8b\x00\x943]\xf6?\xa9\xfe\x1e\xdc\xd6\x83\x08\t\xbc\x00\xc3\x8aH\xd2\xfd\x85' \ '\x8a_\x1b?a~\xb4\xb0\x99\xf1-g\xfc\x86\x11\x1a\x1a:\xd7G\x00\xce\x8b\xbd\xef\x176a\xed\xb5f\xb3\x9e{' \ '\x9b\xe7\xda\xbde\xc1^h\x1cj\x97s*\xc69\x80]B2\x05]\xcb.\x00\xd4\xcb\xafs\x9d\xfb\xef\xe0\x90\xefG\r\x8d' \ '\xaa\xe10\x9aA\x8eH\xee\x02-\xab^\x00\xd3f\xba\xbb\xc6\xa7V\xb3\xa9Uu]\xcf\x86\xb1\xda\xf6\x8c\xbe\x90,' \ '\xe4\x16]Q\xd08s\xd8\xde\xc5=\xd0\x040\xa0\x01e\x1f\x8e\xab\xcd\x90Hr\xdd\xf4yS\xb0\xc5\x99\xc71\x04@\xdf' \ '\x1c6\x00\xeeb\x89$\xde\xb5\xc4C\xfa\x01v\x86\xd2\xb0\x8f\x9e\xbb\xffV\x05\x93\x96\t\x99\x9b\x013DPG$R' \ '\xdf\xa9bx\x85\x7f\x12\xac\x07\x9c\xf9\xa4\n:\x8d\xe3h\xcfC.\xcb\xcbH\xdc\x03j\x90\xa2]\xdd\xc0\x9de\xfe' \ '\x00\x99T\x15\xa0\xe6!\x0159\x9f\xcf\xc7\t"I\x7f\xb9@\xab\x1a\xa5Z\xf5SK{\x13\x99\xf1*\xd4\xe7\xc8 ' \ '\x8e\xf0\xe5\x89p\xde#{\xe3\xe9<\xb5\xa3R\xbfgY\x9a\x1f=GQg{' \ '\xfe\x06\xc5X\xd0\xebD.\xac\xf3\xff\xcb\xaa\x9a\xac\\\xc0\x9a\x94\\\x8e\x0e\x0f\xcd\xf9\xa4G.P\x8cuU' \ '\x8dxw\x0b\r0Koq\x86\x1aO!\x9a\x90\xd3\x1c\xc9*\x84\x8c\x16/7\xabu\xfa\xe7\xc8Di\xc5fL\x8a&\xe9v8\x89' \ '\x7fscD\x92\x17&W\x1e\xde\xd3J\xaf\xd8\x0c\xad\xd8\x14\xbe\x03C_T\xf3\xf9\\\xe2eB\xdc\xb1\x84F\xf5\xf0' \ '\x1a?{\x84[D\xa4\x01u\x8a\xbf\xf6T\x1e\xb83\xce\x04\xbd\xa6\xaa\xcd\xaf}\x88\xe7:?L\xb5\xfcM\'\x1b`(' \ 'X*\xf5UQL-\xf5>\x18\xce\x8c$\x99\xc0\x98\x12\xa4tJ\xbd\xac\xeb<\x1bX\xcd\x1d{w\xf2\xae\x1d\xfeI\x94,' \ 'q\xa6\xa3\x04\n\xebJ\x00\x97.\xcc\xeb\xb4\n\xf0>2|d%\x12\xfbI\xbe\'\x94\xecp\x9d@j]q\x0f\x8d\xd3\x9a?\xa6' \ '\x1b\x00\xef\x11I\xe0\xbb\x91\xb8\xa6wj\xd3\xc1 \xcf\xf5sY\xcdM\x11\x12(' \ '\x94\x88\\\xb1>K\xbf\xe7\x91\x88\xc8\xb5\xdc\xc9\xd0\xb5\xec\x99\xb78\xf3\xebS\xaa\x8a\x03\x88\x8c\x87' \ 
'\\\xf8\xf4\xfe\xcc5\xb4\x83\x86\x029\xf7\xd4\xe9\x9b\xa1\xa5/\xb9\x9f\xff\x15#jbh(' \ '\x92\xc6\x06\t6\xe6.\xfb\xb1\xc4\xfdb\x8fV\xf2\x89\xa2\x1c\xb9\xd2\xe6\xcc\x93\xc9\x80\x8a\x81\xf5\xc5d' \ '\xd5D\xed\x0f\xefr\xdd\x0b\xb4<\x89\xae\xc8\x15\xc6\x84\x0e\xeb~\x16Bh\x8a\xa8\xe5\xb0+Y\xd9\xdc\x9b\xb5,' \ 'S!7hi\nG\x92\x1cp\xe6\xf0\xb7\x1fo\xf7\xf5\xf5\xbdL\x06K\x02\xb9P\x9d\xd8\xbbeY;\xa4\x07\xef,' \ '!\x89\xd2\xe9N\xf7\x10\x99v\x13\xee\xa0K\xd2[' \ '"nZ\x81M\xec\xab;\x9e42\x93\x82$\xbe\xd29\xe4\xcc\x93\x18lp\xd5`\x89\x04\x0bU\x98Z\xb1\x9a\xfex\x9a\x96' \ '\xf9\xfa#\xb79\xc3\xba\xc8\x94\xf9|\xde(' \ '\x91\xe84@\xb2a}\x9c\x0c\xdb\xa9\x04\xe1\xd4#\x9ba\xc8`k\x89\xb2^"\x91\n\xec\xa7,' \ 'kiKFF\xc1\x91\xc5m\x88\xcc!{2\x08\xb4\xe4\x11\'\x00sU\xeb\xc5\xd9fx\xa6&\xd3r\x02\'Q|\xb3c3\x87\xed\xbbP_' \ '#d\xc6\x98\x93\xd3\xd5\xd5\xc0\xec\xc3\x01(' \ '\xcbeu\n\x19r\x91ul\xa6\xb3\x07u\xac\xde\xeeK\x97\x08\xf6Vpv\'\x06\xef\x8e\xe4T\x85\x88\x92\xcc\x1c\xa6' \ '\xcb\x90YC\xe6\xb4B\xc2!wa=\x07\xf5w\xc7U,\x0e\x91\xfe\xa4\xd5:a\xcc\xb2O\xde\xed%\x18=t{' \ '\x06\xb4w\x83\t\x9f\x84%\xfbY\xf7(\x17\xdbY\x00\xaa\xc8\xbbI>\xea\x11\xdee\x9a\x12T\xb0b\xe2\xf7\x0eP\xc7' \ '\xf1|\x9f3$Q\xe4\xdb9J\rd\xce\xe5}\x9c\xf9\xb36;\xd6\xb9?\x83\x8c\x18\xbe\x86\x0c\x19__\x01s\xcd\xbd\xf8' \ '\x02\xf6*\x16\x87\xb5\x8f\xfc\xd8:b\xe2\x9a$H\xaedy\x01\xccLOv@\xb2\xdb\x82u\x1d\xa6\xbd\xb3b3s(' \ '\xe3N\xa1\x9fm_$\x11\x97D^c\xac\xa0\xe3g\x0f\x00\xeb<4\x87\x1f\x95SK\xbcX\xc3XA\xe9-4s\xc4t\x9f\xf8\x01' \ '\xd6\xf0H\xd8\xc7DNfM:\xd7sF\x9d\x12\xe5\x1f?\xcb\x8c\xa2K\x91\xb8\xe6DI\x94\xd3\xa3Z\x9ex\x83\x81\xb1' \ '\x84\xf7g\xfcP\xc7L\x8c\xdf\xa9\xf0\xa2\xffUQ\x08\xa4\xce\xe6|$\x91\x95U5\xf8\x08\x99\xae\xc3`\x8f\x99' \ '\x94*\x828\x91\x11p\x80\x06}\xe2)\xf5\xd2@^M\x7f\x88\x9e\x9f\xea\xd4)\x9d#\xe2BV\x10\x02\xd9~\\\x18\xd7' \ '\xc7\x92TM\xbf\xdd:a\x0e\xbf\x18EfU ' \ '+\x8b\xc8d\xb0\xbe\xc1\xa4/J\xf37^G\xe4X\xe7q\xcc\x04Z&\xc2K\x0eC\\Y\x1a\xb8`,' \ '\x9a\xb7Z\xad\xa7\xb9Fu\x13u\xa4\x97\xb26#}\xcfK#\xd4\xd85W\xdb\xec\x19\xc6\x00\r\xeb\xfaR\xc9a\xc6F\xea' \ '\xab\x9aQ\x87U\xf6\x8cN\x0c\x1a\xday"\xfe\x9e\xc3\x90k#\xf52gJWX\x17\xef\xeb\x98\x01\x9a\xc7\xfa\x95\x88' \ '\xcd\xcc\x05\xa3U\xce\xd4\xdf\xc0+\xed:3\xf8x\x14\x99u\t\xbd\x12\x11\x19W1\xd0c\xd8\x8c\xcaX\x8b9\xf3\xf5' \ '\x1f1\xa8\xd3UIt\xe1p\xb8\xb3~Z\xf1\x91\r\xcd\xa85\xcc\xdc\x01k\x1f33\x00\xda\xaa\xe4\x0e/\x12\x89\xa4' \ '\xb1V\x8b\xbe\xa2\x06\xc5\x15(\xf1\x9b?\xb4\x99\xaf\x00\x80\xc6\xdd)\xc8\x12B\xfc\xcd\n\xad\x14s\xbay\x15' \ '\'|\x98\xb1\x13\x1d\x03h$U\x1b?\'\x86C\xa4\x01\x94\xee\x8e\xe8p\x15\x1b8\x8c\xd7\xeax\xfe\xeaF\xb5^\xd1k' \ '\xe7z\xb13\xae\xfb\x1aVS\xd39\x13\x03\x9ayttv\x16\xa2\x06\x98EQ\xec\x15"xo\xb8\xa1\x00Ftc\xaf\x17\x05\xdf' \ '\xec:\xf3\xce\xa2\x94\xc2&\x1f?\x92\xa6\xd5\xcd3M\x1d`\xa62\xbf\x13Df\x03\r\xd9~\xc2i\n\x97H8\xac\x88i' \ '\xdd0\x07,]\xdfZ\xd9^\xd9\xcf\x1b\x94\x96n\x1f1\xf7\xbdUXR)}\xcf\xfe\xa27`\x81V6\xf6rZn\x85\xd2\xf2\xf7' \ '\x8f\xcf%\xc3\x05\n\xf8@\xec\x1f1`\xee\x9df}j\xc5\xdc\x18Voit\xf5\xfb-\xc7\xf3\xcf\'\x8a\x7f\x00\x1a\xa5' \ '\xeb\xc4C&\xe0\xfdY\x0b&\x0bK\x99A\xafQ\xa7k\x07-\x9e\xab\xc3\xc6\xb6\x94\xd3\x00uZ\x96T%X\xd9\x8b!\x93t' \ '\'\x06\xaf\x83I\xd7o\xb7\x9c\\\x91\xc5p\xbfa\xeat]I\xff\xc8O\xf7\x83M\xc8\x10w\xc0\xbb\xb4b\xd2\xf2\xa8' \ '\xc3\xfc\xe7|\x94\xc6\xa7ML\x86_m\xb3\x14\x96\x8cz9G\xc8\xd9\xaca\x96\xe6C\x1fr\xa6\xf5@+\x18\xa5A\xd3' \ '\x04\x9a\xed\xd9\xc8j\xb0\x1f\xa6\xd4X"\xeei0\xd6\n\xea\x01g\xday\x8dB=~\x06\x1d\x95zV\xb7\xab`\xea\x1aB' \ '\xba\xc9\x1d\x06\xdf\xb6\xeb\xf3\x9b\n4\xf9N\xd8\xc6c(Y\xb3\x02{\xf3\x0f\n\x15@\xc3\x18\xfeN\xd7f(' \ 
'>\xc0\x9e\xbf3\x0e\x1a\xda\xd2\xa1\xe6\xc9O\xa0\xa8\x81H\xeeb\xdb\xd6\xf9G.\x0c\xb0zU\x9e\x81\xcd\xdf7' \ '\x00\x96<\xde( \xab\xd1l\xe0\xc0\xe9\xc3\x8f\x90G\xa9\xf8\xc6\xbc\x1fv\xe5J\xb5\xba\xd9#\'\x81K\xaf\xc5' \ '>hu\xed>\xfc)\xe5a\x8cm\xc2F\xcc\x1cZ\xde\xdc\x9f\x0ef\xd1\xf8:-\xfd\xd5\x01;\xea\xc3S\xd4\x8e\xdd\xe5' \ '\x19\x80\x86\x8fd\xca\x13\xd1\x1e\xa3\x9e\x0fEX\x1b\x7f\x1c\x1dU-\xd8\xd9F5t\x95 ' \ '\xa1\xa5\x89\xa8:\xddTg\xf9N\xc5\xc9\xb1\x99\xc7J\xc4\x16\x9a\xd6\xd0\x95\x99 ' \ 'J4\xb5\x7f\xab\x85D\x8b\xffr\xf6<{\xb8\x1d\x0e\xf9\xa9\x13\xb0GnZ\xd6/Z\xfc%\xb3\x99\xae\xcd0f\xe1c\x1e' \ '\x9f\r\r\x05\xad\x16{&\x10\xc0\xf8?Z\n\xf1+\xfb\x81\xd5F\x00\x00\x00\x00IEND\xaeB`\x82 ' class myLinkLoader: """ This object is just a wrapper to track additional informations and handle temporary files after they are not needed any more. """ def __init__(self, **kw): """ The self.kw could be used in getFileName if you like """ self.kw = kw self.tmpFileList = [] def __del__(self): for path in self.tmpFileList: os.remove(path) self.tmpFileList = [] def getFileName(self, path, relative=None): import os import tempfile log.info("myLinkLoader.getFileName: %r %r %r", path, relative, self.kw) try: if "." in path: new_suffix = "." + path.split(".")[-1].lower() if new_suffix in (".css", ".gif", ".jpg", ".png"): suffix = new_suffix tmpPath = tempfile.mktemp(prefix="pisa-", suffix=suffix) tmpFile = file(tmpPath, "wb") try: # Here you may add your own stuff tmpFile.write(dummyLoader(path)) finally: tmpFile.close() self.tmpFileList.append(tmpPath) return tmpPath except Exception as e: log.exception("myLinkLoader.getFileName") return None def helloWorld(): filename = __file__ + ".pdf" lc = myLinkLoader(database="some_name", port=666).getFileName pdf = pisa.CreatePDF( u""" <p> Hello <strong>World</strong> <p> <img src="apath/some.png"> """, file(filename, "wb"), link_callback=lc, ) if not pdf.err: pisa.startViewer(filename) if __name__ == "__main__": pisa.showLogging() helloWorld() # print repr(open("img/denker.png", "rb").read())
1.960938
2
libs/models.py
eaplab/RaSeedGAN
1
12792102
<filename>libs/models.py # -*- coding: utf-8 -*- """ Created on Sat Apr 10 11:38:43 2021 @author: guemesturb """ import numpy as np import tensorflow as tf import tensorflow.keras as keras import tensorflow.keras.layers as layers def SubpixelConv2D(input_shape, scale=4): """ Custom layer to shuffle abd reduce tensor filters (last dimension) to upsample their height and width. :param input_shape: Tensorflow object containing the tensor size (batch x height x width x filters). :param scale: Integer containing the ratio to increase/decrease the heigth and width/number of filters. :return: Tensorflow tensor with rescaled dimensions """ def subpixel_shape(input_shape): """ Function to compute the new tensor size after pixel shuffling. :param input_shape: Tensorflow object containing the tensor size (batch x height x width x filters). :return: Tuple containing the rescaled tensor size (batch x height x width x filters). """ # Compute new dimensions dims = [input_shape[0], input_shape[1] * scale, input_shape[2] * scale, int(input_shape[3] / (scale ** 2))] # Transform list into tuple output_shape = tuple(dims) return output_shape def subpixel(x): """ Function to change tensor size. :return: Tensorflow tensor with rescaled dimensions """ return tf.nn.depth_to_space(x, scale, data_format='NHWC') return layers.Lambda(subpixel, output_shape=subpixel_shape) class GANPIV(object): def __init__(self, model_name, us, nx, ny, channels=2, n_residual_blocks=16): """ Python class to generate the Tensorflow models for resolution enhancement of PIV images through GANs. :param model_name: String containing the assigned name to the model, for storage purposes. :param us: Integer containing the upsampling ratio between the low- and high-resolution data. :param nx: Integer containing the grid points in the streamwise direction for the low-resolution data. :param ny: Integer containing the grid points in the wall-normal direction for the low-resolution data. :param channels: Integer containing the number of velocity components present in the data. Default is 2. :param n_residual_blocks : Integer containing the number residual blocks to be applied in the GAN generator. Default is 16. :return: """ # Declare variable inside the class self.us = us self.nx = nx self.ny = ny self.channels = channels self.model_name = model_name self.n_residual_blocks = n_residual_blocks return def build_architecture(self): if (self.model_name == 'architecture-01') or (self.model_name == 'architecture01'): return self.architecture01() else: return self.architecture01() def architecture01(self): """ Function to generate the SRGAN architecture as Ledig et al. (2017). This version is modified with respect the original one by removing the batch normalization layers. :return generator: Tensorflow object containing the generator model. :return discriminator: Tensorflow object containing the discriminator model. :return generator_loss: Self-defined Python function containing the generator loss :return discriminator_loss: Self-defined Python function containing the discriminator loss. """ """ Generator model """ def res_block_gen(model, kernal_size, filters, strides): """ Function to generate a residual block :param model: Tensorflow tensor containing the internal model state. :param kernel_size: Integer containing the kernel (or filter) size for the convolutional operations. :param filters: Integer containing the number of filters to apply in the convolution operation. 
:param strides: Integer containing the stride value to ba applied in the convolutional operations. :return model: Tensorflow tensor """ # Copy model for skip-connection purposes gen = model # Apply convolutional operation model = layers.Conv2D(filters = filters, kernel_size = kernal_size, strides = strides, padding = "same", data_format='channels_last')(model) # Apply Parametric ReLU activation function model = layers.PReLU(alpha_initializer='zeros', alpha_regularizer=None, alpha_constraint=None, shared_axes=[1,2])(model) # Apply convolutional operation model = layers.Conv2D(filters = filters, kernel_size = kernal_size, strides = strides, padding = "same", data_format='channels_last')(model) # Add model with input model (skip connection operation) model = layers.Add()([gen, model]) return model def up_sampling_block(model, kernel_size, filters, strides): """ Function to upsample the wight and height dimensions of the data. :param model: Tensorflow tensor containing the internal model state. :param kernel_size: Integer containing the kernel (or filter) size for the convolutional operations. :param filters: Integer containing the number of filters to apply in the convolution operation. :param strides: Integer containing the stride value to ba applied in the convolutional operations. :return model: Tensorflow tensor """ # Apply convolutional operation model = layers.Conv2D(filters = filters, kernel_size = kernel_size, strides = strides, padding = "same", data_format='channels_last')(model) # Apply Pixxle Shuffle layer model = SubpixelConv2D(model.shape, scale=2)(model) # Apply Parametric ReLU activation function model = layers.PReLU(alpha_initializer='zeros', alpha_regularizer=None, alpha_constraint=None, shared_axes=[1,2])(model) return model # Define input layer inputs = keras.Input(shape=(self.ny, self.nx, self.channels), name='low-res-input') # Apply a convolutional layer conv_1 = layers.Conv2D(filters=64, kernel_size=9, strides=1, activation='linear', data_format='channels_last', padding='same')(inputs) # conv_1 = layers.Conv2D(filters=128, kernel_size=9, strides=1, activation='linear', data_format='channels_last', padding='same')(inputs) # Apply a Parametric ReLU activation function prelu_1 = layers.PReLU(alpha_initializer='zeros', alpha_regularizer=None, alpha_constraint=None, shared_axes=[1,2])(conv_1) # Copy model for its use in a residual-block loop res_block = prelu_1 # Apply N residual blocks for index in range(self.n_residual_blocks): res_block = res_block_gen(res_block, 3, 64, 1) # res_block = res_block_gen(res_block, 3, 128, 1) # Apply a convolutional layer conv_2 = layers.Conv2D(filters = 64, kernel_size = 3, strides = 1, padding = "same", data_format='channels_last')(res_block) # conv_2 = layers.Conv2D(filters = 128, kernel_size = 3, strides = 1, padding = "same", data_format='channels_last')(res_block) # Apply one last skip connection between the input and output of the residual-block loop up_sampling = layers.Add()([prelu_1, conv_2]) # Upsample the data to the high-resolution dimensions for index in range(int(np.log2(self.us))): up_sampling = up_sampling_block(up_sampling, 3, 256, 1) # Apply the last convolutional layer to assert the number of filter is equal to the desired number of channels. 
outputs = layers.Conv2D(filters = self.channels, kernel_size = 9, strides = 1, padding = "same", data_format='channels_last')(up_sampling) # Connect input and output layers generator = keras.Model(inputs, outputs, name='SRGAN-Generator') """ Discriminator model """ def discriminator_block(model, filters, kernel_size, strides): """ Function to generate discriminator blocks. :param model: Tensorflow tensor containing the internal model state. :param filters: Integer containing the number of filters to apply in the convolution operation. :param kernel_size: Integer containing the kernel (or filter) size for the convolutional operations. :param strides: Integer containing the stride value to be applied in the convolutional operations. :return model: Tensorflow tensor """ # Apply convolutional operation model = layers.Conv2D(filters = filters, kernel_size = kernel_size, strides = strides, padding = "same", data_format='channels_last')(model) # Apply Leaky ReLU activation function model = layers.LeakyReLU(alpha = 0.2)(model) return model # Define input layer inputs = keras.Input(shape=(self.ny*self.us, self.nx*self.us, self.channels), name='high-res-input') # Apply a convolutional layer model = layers.Conv2D(filters = 64, kernel_size = 3, strides = 1, padding = "same", data_format='channels_last')(inputs) # Apply a Leaky ReLU activation function model = layers.LeakyReLU(alpha = 0.2)(model) # Apply 7 discriminator blocks model = discriminator_block(model, 64, 3, 4) model = discriminator_block(model, 128, 3, 1) model = discriminator_block(model, 128, 3, 2) model = discriminator_block(model, 256, 3, 1) model = discriminator_block(model, 256, 3, 2) model = discriminator_block(model, 512, 3, 1) model = discriminator_block(model, 512, 3, 2) # Flatten the tensor into a vector model = layers.Flatten()(model) # Apply a fully-connected layer model = layers.Dense(1024)(model) # Apply a Leaky ReLU activation function model = layers.LeakyReLU(alpha = 0.2)(model) # Apply a fully-connected layer model = layers.Dense(1)(model) # Apply a sigmoid activation function model = layers.Activation('sigmoid')(model) # Connect input and output layers discriminator = keras.Model(inputs=inputs, outputs = model, name='SRGAN-Discriminator') """ Generator loss """ # Define generator loss as a function to be returned for its later use during the training def custom_mse_loss(y_pred, y_true, flag): """ Custom function to compute the mean-squared error between target and predicted data only for a certain number of bins. :param y_pred: Tensorflow tensor containing the predicted high-resolution fields. :param y_true: Tensorflow tensor containing the target high-resolution fields. :param flag: Tensorflow tensor containing boolean information regarding the bins in the target data that have information. :return loss: Float containing the mean-squared error """ # Compute the number of bins in the target high-resolution data that contain information N = tf.reduce_sum(flag) # Compute the conditional mean-squared error loss = tf.math.divide( tf.reduce_sum( tf.math.square( tf.math.subtract( y_true, y_pred ) ) ), N ) return loss def generator_loss(fake_Y, hr_predic, hr_target, fl_target): """ Function to compute the generator loss as the mean-squared error between the target and the predicted high-resolution fields plus an adversarial error for the predicted fields. :param fake_Y: Tensorflow vector of dimensions batch size x 1 containing the labels of the predicted data. 
:param hr_predic: Tensorflow tensor containing the predicted high-resolution fields :param hr_target: Tensorflow tensor containing the target high-resolution fields :param fl_target: Tensorflow tensor containing the information about which bins in the high-resolution target data contain information. :return loss: Float containing the generator loss """ # Define binary cross-entropy function cross_entropy = tf.keras.losses.BinaryCrossentropy() # Compute the capability of the generator to cause the discriminator to misidentify the predicted data as real, adding a small perturbation for stability issues adversarial_loss = cross_entropy( np.ones(fake_Y.shape) - np.random.random_sample(fake_Y.shape) * 0.2, fake_Y ) # Compute the mean-squared error between the target and predicted data only as a function of the bins in the target data that contain information content_loss = custom_mse_loss( hr_target, tf.math.multiply(hr_predic, fl_target), fl_target ) # Compute loss loss = content_loss + 1e-3*adversarial_loss return loss """ Discriminator loss """ # Define discriminator loss as a function to be returned for its later use during the training def discriminator_loss(real_Y, fake_Y): """ Function to compute the discriminator loss as the mean value of the binary cross-entropy for the target and predicted labels. :param real_Y: Tensorflow vector of dimensions batch size x 1 containing the labels of the target data. :param fake_Y: Tensorflow vector of dimensions batch size x 1 containing the labels of the predicted data. :return total_loss: Float containing the mean value of the binary cross-entropy for the target and predicted labels. """ # Define binary cross-entropy function cross_entropy = tf.keras.losses.BinaryCrossentropy() # Compute the capability of the discriminator to identify the target data as real, adding a small perturbation for stability issues real_loss = cross_entropy(np.ones(real_Y.shape) - np.random.random_sample(real_Y.shape)*0.2, real_Y) # Compute the capability of the discriminator to identify the predicted data as fake, adding a small perturbation for stability issues fake_loss = cross_entropy(np.random.random_sample(fake_Y.shape)*0.2, fake_Y) # Compute mean value total_loss = 0.5 * (real_loss + fake_loss) return total_loss return generator, discriminator, generator_loss, discriminator_loss def optimizer(self, learning_rate): generator_optimizer = tf.keras.optimizers.Adam(learning_rate) discriminator_optimizer = tf.keras.optimizers.Adam(learning_rate) return generator_optimizer, discriminator_optimizer
2.578125
3
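A minimal usage sketch for the GANPIV class in libs/models.py above. The grid size, batch size and upsampling ratio are made-up values chosen only to illustrate the expected tensor shapes; it builds both networks and runs a single forward pass of the generator on random data.

import numpy as np
from libs.models import GANPIV  # import path assumed from the file location above

# Hypothetical settings: 2-component PIV fields on a 32 x 32 low-resolution grid, 4x upsampling
gan = GANPIV(model_name='architecture01', us=4, nx=32, ny=32, channels=2, n_residual_blocks=16)
generator, discriminator, generator_loss, discriminator_loss = gan.build_architecture()

# Forward pass on a random low-resolution batch to check the output shape
lr_batch = np.random.rand(8, 32, 32, 2).astype('float32')
hr_pred = generator(lr_batch)
print(hr_pred.shape)  # expected: (8, 128, 128, 2)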
backend/twilio_account.py
jeff-vincent/Slingshot
2
12792103
import asyncio import subprocess from quart import session from async_http import AsyncHTTP from config import twilio_available_sms_numbers_base_uri from config import twilio_purchase_sms_number_base_uri from config import twilio_create_subaccount_base_uri from config import twilio_assign_new_number_base_uri from config import auth_token from config import admin_sid class CreateTwilioAccount: def __init__(self, friendly_name): self.friendly_name = friendly_name self.async_http = AsyncHTTP() async def create_user_account(self): """A method for creating a system user account. Args: self: an instance of the CreateTwilioAccount class """ params = { 'friendly_name': self.friendly_name, 'auth_token': auth_token } request_uri = twilio_create_subaccount_base_uri print(request_uri) print(params) user_account = await self.async_http.post( base_uri=request_uri, params=params) user_sid = user_account.sid try: signed_up_user = await self._get_sms_user(user_sid) except Exception as e: print('Twilio sign up error: {}'.format(str(e))) return signed_up_user async def _get_sms_user(self, user_sid): """A private method for getting a list of available, sms-enabled phone numbers in a given area code. Calls private helper methods to complete the process of purchasing a number from the list, and assigning it to the Twilio subaccount. NOTE: the area code is set on the session. Args: self: an instance of the CreateTwilioAccount class user_sid: string """ request_uri = twilio_available_sms_numbers_base_uri.format( auth_token=auth_token) response = await self.async_http.get(base_uri=request_uri) sms_number = response.available_phone_numbers[0].friendly_name response = await self._purchase_sms_number(user_sid, sms_number) return response async def _purchase_sms_number(self, user_sid, sms_number): """A private method for purchasing a given sms-enabled number. Args: self: an instance of the CreateTwilioAccount class user_sid: string sms_number: string: the sms number to buy """ params = {'phone_number':sms_number} request_uri = twilio_purchase_sms_number_base_uri.format( auth_token=auth_token) response = await self.async_http.post( base_uri=request_uri, params=params) response = await self._assign_sms_number_to_user(user_sid, sms_number) return response async def _assign_sms_number_to_user(self, user_sid, sms_number): """A private method for assigning a sms-enabled number to a Twilio subaccount. Args: self: an instance of the CreateTwilioAccount class user_sid: string sms_number: string: the number that was just purchased. """ params = { 'phone_number':sms_number, 'auth_token': auth_token, 'AddressSid': user_sid } request_uri = twilio_assign_new_number_base_uri.format( admin_sid=admin_sid, sms_number=sms_number) response = await self.async_http.post( base_uri=request_uri, params=params) return response
2.65625
3
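A hedged sketch of how the CreateTwilioAccount class in backend/twilio_account.py above might be driven. It assumes the config module exposes valid Twilio credentials and endpoint URIs, and that the module is importable under the name used here; the friendly name is made up.

import asyncio
from twilio_account import CreateTwilioAccount  # import path assumed

async def sign_up(friendly_name):
    # Creates the subaccount, then buys and assigns an SMS number to it
    account = CreateTwilioAccount(friendly_name)
    return await account.create_user_account()

if __name__ == '__main__':
    result = asyncio.run(sign_up('example-subaccount'))
    print(result)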
Competitive Programming/Heap/Building Heap from Array.py
shreejitverma/GeeksforGeeks
2
12792104
'''https://www.geeksforgeeks.org/building-heap-from-array/ Given an array of N elements. The task is to build a Binary Heap from the given array. The heap can be either Max Heap or Min Heap. Example: Input: arr[] = {4, 10, 3, 5, 1} Output: Corresponding Max-Heap: 10 / \ 5 3 / \ 4 1 Input: arr[] = {1, 3, 5, 4, 6, 13, 10, 9, 8, 15, 17} Output: Corresponding Max-Heap: 17 / \ 15 13 / \ / \ 9 6 5 10 / \ / \ 4 8 3 1 Suppose the given input elements are: 4, 10, 3, 5, 1. The corresponding complete binary tree for this array of elements [4, 10, 3, 5, 1] will be: 4 / \ 10 3 / \ 5 1 Note: Root is at index 0 in array. Left child of i-th node is at (2*i + 1)th index. Right child of i-th node is at (2*i + 2)th index. Parent of i-th node is at (i-1)/2 index. Simple Approach: Suppose, we need to build a Max-Heap from the above-given array elements. It can be clearly seen that the above complete binary tree formed does not follow the Heap property. So, the idea is to heapify the complete binary tree formed from the array in reverse level order following a top-down approach. That is first heapify, the last node in level order traversal of the tree, then heapify the second last node and so on. Time Complexity: Heapify a single node takes O(log N) time complexity where N is the total number of Nodes. Therefore, building the entire Heap will take N heapify operations and the total time complexity will be O(N*logN). In reality, building a heap takes O(n) time depending on the implementation which can be seen here. Optimized Approach: The above approach can be optimized by observing the fact that the leaf nodes need not to be heapified as they already follow the heap property. Also, the array representation of the complete binary tree contains the level order traversal of the tree. So the idea is to find the position of the last non-leaf node and perform the heapify operation of each non-leaf node in reverse level order. Last non-leaf node = parent of last-node. or, Last non-leaf node = parent of node at (n-1)th index. or, Last non-leaf node = Node at index ((n-1) - 1)/2. = (n/2) - 1. Illustration: Array = {1, 3, 5, 4, 6, 13, 10, 9, 8, 15, 17} Corresponding Complete Binary Tree is: 1 / \ 3 5 / \ / \ 4 6 13 10 / \ / \ 9 8 15 17 The task to build a Max-Heap from above array. Total Nodes = 11. Last Non-leaf node index = (11/2) - 1 = 4. Therefore, last non-leaf node = 6. To build the heap, heapify only the nodes: [1, 3, 5, 4, 6] in reverse order. Heapify 6: Swap 6 and 17. 1 / \ 3 5 / \ / \ 4 17 13 10 / \ / \ 9 8 15 6 Heapify 4: Swap 4 and 9. 1 / \ 3 5 / \ / \ 9 17 13 10 / \ / \ 4 8 15 6 Heapify 5: Swap 13 and 5. 1 / \ 3 13 / \ / \ 9 17 5 10 / \ / \ 4 8 15 6 Heapify 3: First Swap 3 and 17, again swap 3 and 15. 1 / \ 17 13 / \ / \ 9 15 5 10 / \ / \ 4 8 3 6 Heapify 1: First Swap 1 and 17, again swap 1 and 15, finally swap 1 and 6. 17 / \ 15 13 / \ / \ 9 6 5 10 / \ / \ 4 8 3 1''' # Python3 program for building Heap from Array # To heapify a subtree rooted with node i # which is an index in arr[]. 
N is size of heap def heapify(arr, n, i): largest = i # Initialize largest as root l = 2 * i + 1 # left = 2*i + 1 r = 2 * i + 2 # right = 2*i + 2 # If left child is larger than root if l < n and arr[l] > arr[largest]: largest = l # If right child is larger than largest so far if r < n and arr[r] > arr[largest]: largest = r # If largest is not root if largest != i: arr[i], arr[largest] = arr[largest], arr[i] # Recursively heapify the affected sub-tree heapify(arr, n, largest) # Function to build a Max-Heap from the given array def buildHeap(arr, n): # Index of last non-leaf node startIdx = n // 2 - 1 # Perform reverse level order traversal # from last non-leaf node and heapify # each node for i in range(startIdx, -1, -1): heapify(arr, n, i) # A utility function to print the array # representation of Heap def printHeap(arr, n): print("Array representation of Heap is:") for i in range(n): print(arr[i], end=" ") print() # Driver Code if __name__ == '__main__': # Binary Tree Representation # of input array # 1 # / \ # 3 5 # / \ / \ # 4 6 13 10 # / \ / \ # 9 8 15 17 arr = [1, 3, 5, 4, 6, 13, 10, 9, 8, 15, 17] n = len(arr) buildHeap(arr, n) printHeap(arr, n) # Final Heap: # 17 # / \ # 15 13 # / \ / \ # 9 6 5 10 # / \ / \ # 4 8 3 1
3.71875
4
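A short check of the buildHeap routine above on the example array from the write-up; it verifies the max-heap property directly instead of relying on the printed tree.

arr = [1, 3, 5, 4, 6, 13, 10, 9, 8, 15, 17]
n = len(arr)
buildHeap(arr, n)

# In a max-heap every parent is at least as large as each of its children
for i in range(n // 2):
    left, right = 2 * i + 1, 2 * i + 2
    assert arr[i] >= arr[left]
    if right < n:
        assert arr[i] >= arr[right]

print(arr[0])  # 17, the maximum element ends up at the root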
google/easy/roman_to_integer.py
salman-kgp/leetcode
0
12792105
<reponame>salman-kgp/leetcode<filename>google/easy/roman_to_integer.py class Solution(object): def romanToInt(self, s): """ :type s: str :rtype: int """ roman_to_int_map = {"I":1,"V":5,"X":10,"L":50,"C":100,"D":500,"M":1000} current_ptr = 0 result = 0 while current_ptr<len(s): if current_ptr+1<len(s) and roman_to_int_map[s[current_ptr]]<roman_to_int_map[s[current_ptr+1]]: result+=(roman_to_int_map[s[current_ptr+1]]-roman_to_int_map[s[current_ptr]]) current_ptr+=2 continue result+=roman_to_int_map[s[current_ptr]] current_ptr+=1 return result
3.4375
3
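A quick sanity check of the Solution class above on a few standard Roman numerals.

s = Solution()
print(s.romanToInt("III"))      # 3
print(s.romanToInt("LVIII"))    # 58
print(s.romanToInt("MCMXCIV"))  # 1994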
visualize_classifer/visualize_model.py
a-n-rose/language-classifier
3
12792106
import keras from keras.models import model_from_json from keras.utils import plot_model import pandas as pd import numpy as np from ann_visualizer.visualize import ann_viz import glob import os #batch visualize: for model in glob.glob('./models/*.json'): #load json model: classifier_name = os.path.splitext(model)[0] classifier_weights = classifier_name+'.h5' model = classifier_name+'.json' json_file = open(model,'r') classifier_json = json_file.read() json_file.close() classifier = model_from_json(classifier_json) #load weights: classifier.load_weights(classifier_weights) print("Loaded model from disk") #try visualizing it #view = True will result in .pdf files of visualization ann_viz(classifier,view=True, filename='Visualize_'+classifier_name,title='English German Classifier: Simple ANN') plot_model(classifier,to_file='VisualizeLayers_'+classifier_name+'.png',show_shapes=True,show_layer_names=True) print("Models have been visualized")
3.015625
3
python/setup.py
gitter-badger/rikai
0
12792107
import pathlib import re from setuptools import find_packages, setup about = {} with open(pathlib.Path("rikai") / "__version__.py", "r") as fh: exec(fh.read(), about) with open( pathlib.Path(__file__).absolute().parent.parent / "README.md", "r", ) as fh: long_description = fh.read() # extras test = ["pytest"] torch = ["torch>=1.5.0", "torchvision"] jupyter = ["matplotlib", "jupyterlab"] aws = ["boto"] docs = ["sphinx"] youtube = ["pafy", "youtube_dl", "ffmpeg-python"] all = test + torch + jupyter + aws + docs + youtube setup( name="rikai", version=about["version"], license="Apache License, Version 2.0", author="<NAME>", author_email="<EMAIL>", long_description=long_description, long_description_content_type="text/markdown", url="https://github.com/eto-ai/rikai", packages=find_packages(), include_package_data=True, python_requires=">=3.7", install_requires=[ "antlr4-python3-runtime", "ipython", "jsonschema", "numpy", "opencv-python", "pandas", "Pillow", "pyarrow>=2.0", "pyspark>=3.1,<3.2", "pyyaml", "requests", ], extras_require={ "test": test, "pytorch": torch, "jupyter": jupyter, "aws": aws, "docs": docs, "youtube": youtube, "all": all, }, classifiers=[ "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Programming Language :: Python :: 3", "Topic :: Software Development :: Libraries", ], )
1.554688
2
visualization/panda/filtermanager.py
takuya-ki/wrs
23
12792108
<reponame>takuya-ki/wrs<filename>visualization/panda/filtermanager.py from direct.filter import FilterManager as pfm from panda3d.core import Texture, CardMaker, NodePath, AuxBitplaneAttrib, LightRampAttrib, Camera, OrthographicLens, \ GraphicsOutput, WindowProperties, FrameBufferProperties, GraphicsPipe class FilterManager(pfm.FilterManager): def __init__(self, win, cam): super().__init__(win, cam) def renderSceneInto(self, depthtex=None, colortex=None, auxtex=None, auxbits=0, textures=None, fbprops=None, clamping=None): """ overload direct.filters.FilterManager.renderSceneInto :param depthtex: :param colortex: :param auxtex: :param auxbits: :param textures: :param fbprops: :param clamping: :return: """ if (textures): colortex = textures.get("color", None) depthtex = textures.get("depth", None) auxtex = textures.get("aux", None) auxtex0 = textures.get("aux0", auxtex) auxtex1 = textures.get("aux1", None) else: auxtex0 = auxtex auxtex1 = None if (colortex == None): colortex = Texture("filter-base-color") colortex.setWrapU(Texture.WMClamp) colortex.setWrapV(Texture.WMClamp) texgroup = (depthtex, colortex, auxtex0, auxtex1) # Choose the size of the offscreen buffer. (winx, winy) = self.getScaledSize(1, 1, 1) if fbprops is not None: buffer = self.createBuffer("filter-base", winx, winy, texgroup, fbprops=fbprops) else: buffer = self.createBuffer("filter-base", winx, winy, texgroup) if (buffer == None): return None cm = CardMaker("filter-base-quad") cm.setFrameFullscreenQuad() quad = NodePath(cm.generate()) quad.setDepthTest(0) quad.setDepthWrite(0) quad.setTexture(colortex) quad.setColor(1, 0.5, 0.5, 1) cs = NodePath("dummy") cs.setState(self.camstate) # Do we really need to turn on the Shader Generator? # cs.setShaderAuto() if (auxbits): cs.setAttrib(AuxBitplaneAttrib.make(auxbits)) if clamping is False: # Disables clamping in the shader generator. 
cs.setAttrib(LightRampAttrib.make_identity()) self.camera.node().setInitialState(cs.getState()) quadcamnode = Camera("filter-quad-cam") lens = OrthographicLens() lens.setFilmSize(2, 2) lens.setFilmOffset(0, 0) lens.setNearFar(-1000, 1000) quadcamnode.setLens(lens) quadcam = quad.attachNewNode(quadcamnode) self.region.setCamera(quadcam) self.setStackedClears(buffer, self.rclears, self.wclears) if (auxtex0): buffer.setClearActive(GraphicsOutput.RTPAuxRgba0, 1) buffer.setClearValue(GraphicsOutput.RTPAuxRgba0, (0.5, 0.5, 1.0, 0.0)) if (auxtex1): buffer.setClearActive(GraphicsOutput.RTPAuxRgba1, 1) self.region.disableClears() if (self.isFullscreen()): self.win.disableClears() dr = buffer.makeDisplayRegion() dr.disableClears() dr.setCamera(self.camera) dr.setActive(1) self.buffers.append(buffer) self.sizes.append((1, 1, 1)) return quad def createBuffer(self, name, xsize, ysize, texgroup, depthbits=1, fbprops=None): """ overload direct.filters.FilterManager.createBuffer :param name: :param xsize: :param ysize: :param texgroup: :param depthbits: :param fbprops: :return: """ winprops = WindowProperties() winprops.setSize(xsize, ysize) props = FrameBufferProperties(FrameBufferProperties.getDefault()) props.setBackBuffers(0) props.setRgbColor(1) props.setDepthBits(depthbits) props.setStereo(self.win.isStereo()) if fbprops is not None: props.addProperties(fbprops) depthtex, colortex, auxtex0, auxtex1 = texgroup if (auxtex0 != None): props.setAuxRgba(1) if (auxtex1 != None): props.setAuxRgba(2) buffer=base.graphicsEngine.makeOutput( self.win.getPipe(), name, -1, props, winprops, GraphicsPipe.BFRefuseWindow | GraphicsPipe.BFResizeable, self.win.getGsg(), self.win) if (buffer == None): return buffer if (colortex): buffer.addRenderTexture(colortex, GraphicsOutput.RTMBindOrCopy, GraphicsOutput.RTPColor) if (depthtex): buffer.addRenderTexture(depthtex, GraphicsOutput.RTMBindOrCopy, GraphicsOutput.RTPDepth) if (auxtex0): buffer.addRenderTexture(auxtex0, GraphicsOutput.RTMBindOrCopy, GraphicsOutput.RTPAuxRgba0) if (auxtex1): buffer.addRenderTexture(auxtex1, GraphicsOutput.RTMBindOrCopy, GraphicsOutput.RTPAuxRgba1) buffer.setSort(self.nextsort) buffer.disableClears() self.nextsort += 1 return buffer
2.109375
2
tests/testprettify.py
davidseibert/docxperiments
1
12792109
<gh_stars>1-10 # coding: utf-8 import context, testutils, unittest from docxperiments import prettify class PrettifyTest(unittest.TestCase): def test_prettify_makes_xml_pretty(self): xml_file_path = 'testdata/ugly.xml' pretty_xml_str = prettify.prettify_xml(xml_file_path) with open('testdata/pretty.xml') as f: txt = f.read().decode('utf-8') self.assertEqual(pretty_xml_str, txt[:-1]) def test(): runner = unittest.TextTestRunner(resultclass=testutils.CustomResult) unittest.main(verbosity=4, testRunner=runner) if __name__ == '__main__': test()
2.84375
3
apps/users/adminx.py
Airren/mxonline-python
6
12792110
<reponame>Airren/mxonline-python<filename>apps/users/adminx.py # _*_ encoding:utf-8 _*_ __author__ = 'wuqy' __date__ = '2021/7/7 11:09' import xadmin from xadmin import views from .models import EmailVerifyRecord,Banner class BaseSetting(object): enable_themes = True use_bootswatch = True class GlobalSetting(object): site_title = 'wuqy的管理系统' site_footer = 'wuqy的在线课程网' menu_style = "accordion" class EmailVerifyRecordAdmin(object): list_display = ['code','email','send_type','send_time'] search_fields = ['code','email','send_type'] list_filter = ['code', 'email', 'send_type', 'send_time'] class BannerAdmin(object): list_display = ['title', 'image', 'url', 'index','add_time'] search_fields =['title', 'image', 'url', 'index'] list_filter = ['title', 'image', 'url', 'index','add_time'] xadmin.site.register(EmailVerifyRecord, EmailVerifyRecordAdmin) xadmin.site.register(Banner, BannerAdmin) xadmin.site.register(views.BaseAdminView,BaseSetting) xadmin.site.register(views.CommAdminView,GlobalSetting)
1.671875
2
tests/unit/test_song.py
DakaraProject/dakara-feeder
0
12792111
from datetime import timedelta from unittest import TestCase from unittest.mock import patch from path import Path from dakara_feeder.directory import SongPaths from dakara_feeder.metadata import FFProbeMetadataParser, MediaParseError from dakara_feeder.song import BaseSong from dakara_feeder.subtitle.parsing import Pysubs2SubtitleParser, SubtitleParseError class BaseSongTestCase(TestCase): """Test the BaseSong class.""" @patch.object(Pysubs2SubtitleParser, "parse", autoset=True) @patch.object(FFProbeMetadataParser, "parse", autoset=True) def test_subtitle_parser_error(self, mocked_metadata_parse, mocked_subtitle_parse): """Test an invalid subtitle file raises no exception but logs error.""" # setup mocks mocked_metadata_parse.return_value.get_duration.return_value = timedelta( seconds=1 ) mocked_metadata_parse.return_value.get_audio_tracks_count.return_value = 1 mocked_subtitle_parse.side_effect = SubtitleParseError("invalid") # create paths paths = SongPaths(Path("file.mp4"), subtitle=Path("file.ass")) # create BaseSong instance song = BaseSong(Path("/base-dir"), paths) # get song representation with self.assertLogs("dakara_feeder.song") as logger: representation = song.get_representation() # check no lyrics has been found self.assertEqual(representation["lyrics"], "") # assert logs self.assertListEqual( logger.output, ["ERROR:dakara_feeder.song:Lyrics not parsed: invalid"] ) @patch.object(Pysubs2SubtitleParser, "parse", autoset=True) @patch.object(FFProbeMetadataParser, "parse", autoset=True) def test_metadata_error(self, mocked_metadata_parse, mocked_subtitle_parse): """Test an invalid video file raises no exception but logs error.""" # setup mocks mocked_metadata_parse.side_effect = MediaParseError("invalid") mocked_subtitle_parse.return_value.get_lyrics.return_value = "" # create paths paths = SongPaths(Path("file.mp4"), subtitle=Path("file.ass")) # create BaseSong instance song = BaseSong(Path("/base-dir"), paths) # get song representation with self.assertLogs("dakara_feeder.song") as logger: representation = song.get_representation() # check duration defaults to zero self.assertEqual(representation["duration"], 0) # assert logs self.assertListEqual( logger.output, ["ERROR:dakara_feeder.song:Cannot parse metadata: invalid"] )
2.59375
3
train.py
226wyj/You-Draw-I-Guess
0
12792112
<gh_stars>0 # -*- coding: utf-8 -*- import torch as t from torch.autograd import Variable from tqdm import tqdm class Trainer(): def __init__(self, net, criterion, optimizer, scheduler, train_loader, test_loader, model_path, args): self.net = net self.criterion = criterion self.optimizer = optimizer self.scheduler = scheduler self.train_loader = train_loader self.test_loader = test_loader self.model_path = model_path self.args = args self.best_acc = 0.0 self.device = t.device("cuda:1" if t.cuda.is_available() and not args.no_cuda else "cpu") self.net.to(self.device) # Evaluate on the test set to decide whether the model should be saved def _evaluate(self): correct = 0 # number of correctly predicted images total = 0 # total number of images self.net.eval() # switch net to eval mode print('Evaluating...') for data in tqdm(self.test_loader, desc="Eval Iteration", ncols=70): images, labels = data images, labels = images.to(self.device), labels.to(self.device) outputs = self.net(Variable(images)) _, predicted = t.max(outputs.data, 1) # torch.max returns (values, indices) total += labels.size(0) correct += (predicted == labels).sum() accuracy = 100. * correct / total return accuracy def save_model(self, epoch): accuracy = self._evaluate() if accuracy > self.best_acc: print('Accuracy: %f' % accuracy) print('Saving model...') state = { 'net': self.net.state_dict(), 'acc': accuracy, 'epoch': epoch } t.save(state, self.model_path) self.best_acc = accuracy def train(self, epochs): self.net.train() # switch net to train mode for epoch in range(epochs): print("\n******** Epoch %d / %d ********\n" % (epoch + 1, epochs)) running_loss = 0.0 epoch_iterator = tqdm(self.train_loader, desc="Train Iteration", ncols=70) for i, data in enumerate(epoch_iterator): # input data inputs, labels = data inputs, labels = Variable(inputs), Variable(labels) inputs, labels = inputs.to(self.device), labels.to(self.device) # zero the gradients self.optimizer.zero_grad() # forward + backward outputs = self.net(inputs) loss = self.criterion(outputs, labels).to(self.device) loss.backward() # update the parameters self.optimizer.step() # update the learning rate self.scheduler.step() # accumulate the training loss for logging running_loss += loss.item() # if i % 2000 == 1999: # print('[%d, %5d] loss: %3f' % (epoch + 1, i + 1, running_loss / 2000)) # running_loss = 0.0 print('\nEpoch {} finished, loss: {}\n'.format(epoch + 1, running_loss / (i + 1))) self.save_model(epoch) print('\nFinished training\n')
2.453125
2
ics_demo/dao/__init__.py
lielongxingkong/ics-demo
0
12792113
<filename>ics_demo/dao/__init__.py from db import init_db from interfaces.demo import rabbit as rabbit_dao from interfaces.demo import carrot as carrot_dao from interfaces.demo import carrot as corps_dao from interfaces import host as host_dao from interfaces import blockdevice as block_dao from interfaces.vsan import mon as mon_dao from interfaces.vsan import osd as osd_dao from interfaces.vsan import vsan as vsan_dao
1.523438
2
utils/cache/meeting.py
ttppren-github/MeetingSample-Backend
7
12792114
<reponame>ttppren-github/MeetingSample-Backend from meeting_sample.settings import REDIS_PREFIX from utils.cache.connection import client MEETING_KEY = f'{REDIS_PREFIX}:meeting:' def open_meeting(meeting_id: int, ex: int): key = MEETING_KEY + str(meeting_id) value = { 'sharing_user': 0, } client.hset(key, mapping=value) client.expire(key, ex) def close_meeting(meeting_id: int): key = MEETING_KEY + str(meeting_id) client.delete(key) def get_sharing_user(meeting_id: int) -> int: key = MEETING_KEY + str(meeting_id) val = client.hget(key, 'sharing_user') if val is None: return -1 return int(val) def start_share(meeting_id: int, user_id: int): key = MEETING_KEY + str(meeting_id) client.hset(key, 'sharing_user', user_id) def stop_share(meeting_id: int): key = MEETING_KEY + str(meeting_id) client.hset(key, 'sharing_user', 0) def is_meeting_open(meeting_id: int) -> bool: key = MEETING_KEY + str(meeting_id) return bool(client.exists(key))
2.328125
2
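A hedged usage sketch of the meeting cache helpers above; it assumes a reachable Redis instance behind utils.cache.connection.client, and the meeting and user IDs are made up for illustration.

from utils.cache.meeting import (
    open_meeting, close_meeting, start_share, stop_share,
    get_sharing_user, is_meeting_open,
)

MEETING_ID = 42    # hypothetical meeting id
HOST_ID = 7        # hypothetical user id

open_meeting(MEETING_ID, ex=3600)            # keep the meeting key alive for one hour
assert is_meeting_open(MEETING_ID)

start_share(MEETING_ID, HOST_ID)
assert get_sharing_user(MEETING_ID) == HOST_ID

stop_share(MEETING_ID)                       # 0 means nobody is sharing
close_meeting(MEETING_ID)
assert get_sharing_user(MEETING_ID) == -1    # key deleted, helper reports -1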
user/plot.py
yashbidasaria/CS537-p2
0
12792115
<filename>user/plot.py #!/usr/bin/python import argparse import matplotlib.pyplot as plt # Specify cmdline args parser = argparse.ArgumentParser() default = 'You should really put in a title and label axes!' parser.add_argument('-i', '--input', nargs='+', help='Input File(s)') parser.add_argument('-o', '--output', help='Output File') parser.add_argument('-t', '--title', default=default, help='Plot title') parser.add_argument('-x', '--xlabel', default=default, help='Label for x axis') parser.add_argument('-y', '--ylabel', default=default, help='Label for y axis') # Parse arguments args = parser.parse_args() inFiles = args.input outFile = args.output title = args.title xlabel = args.xlabel ylabel = args.ylabel # Extract info from each input file xs = [] ys = [] for fname in inFiles: x = [] y = [] with open(fname, 'r') as f: for line in f: data = line.strip().split(',') x.append(float(data[0])) y.append(float(data[1])) xs.append(x) ys.append(y) # Generate plots plots = [] min_x = min(xs[0]) min_y = min(ys[0]) max_x = max(xs[0]) max_y = max(ys[0]) for x, y in zip(xs, ys): min_x = min(min_x, min(x)) min_y = min(min_y, min(y)) max_x = max(max_x, max(x)) max_y = max(max_y, max(y)) plot, = plt.step(x, y) plots.append(plot) # Set 5% margin plt.xlim((min_x - .05*(max_x - min_x), max_x + .05*(max_x - min_x))) plt.ylim((min_y - .05*(max_y - min_y), max_y + .05*(max_y - min_y))) # Set specified title/axes labels & legend plt.legend(plots, inFiles) plt.title(title) plt.xlabel(xlabel) plt.ylabel(ylabel) # Either save or show plot if outFile != None: plt.savefig(outFile) else: plt.show()
3.015625
3
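A small end-to-end example for the plotting script above: it writes two throwaway CSV files in the x,y format the script expects and then invokes it to produce a PNG. The script path is assumed from the repository layout shown above.

import csv
import subprocess

# Hypothetical input data: each row is "x,y"
for name, rows in [('a.csv', [(0, 0), (1, 1), (2, 4)]),
                   ('b.csv', [(0, 0), (1, 2), (2, 3)])]:
    with open(name, 'w', newline='') as f:
        csv.writer(f).writerows(rows)

# Run the script and save the figure instead of showing it interactively
subprocess.run(['python', 'user/plot.py', '-i', 'a.csv', 'b.csv',
                '-o', 'out.png', '-t', 'Demo plot', '-x', 'time', '-y', 'value'],
               check=True)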
hangman.py
juank27/Hangman_python
0
12792116
<gh_stars>0 print("hola a todos") for i in range(0,100): print("Hola") print("Hola otra vez")
3.21875
3
Aula14/app.py
refatorando/curso_python_iniciantes
3
12792117
<filename>Aula14/app.py from carro import Carro fusca = Carro("Volks","Fusca","Azul","Gasolina") fusca.ligar() # fusca.ligar() # fusca.acelerar() # fusca.acelerar() # fusca.acelerar() # fusca.acelerar() # fusca.acelerar() # fusca.acelerar() # fusca.acelerar() # fusca.acelerar() # fusca.frear() # fusca.frear() # fusca.frear() # fusca.frear() # fusca.frear() # fusca.frear() # fusca.frear() # fusca.frear() # fusca.desligar() ferrari = Carro("Ferrari","Ferrari 911","Vermelho","Gasolina") tesla = Carro("Tesla","Cybertruck","cinza","Eletrico") ferrari.ligar() tesla.ligar()
2.046875
2
evaluation/iccv19/transfer.py
kumasento/gconv-prune
8
12792118
""" Training from scratch with different conditions. """ import os import sys import argparse import copy import time import shutil import json import logging logging.getLogger().setLevel(logging.DEBUG) import numpy as np import torch import torch.nn as nn import torch.nn.parallel import torch.backends.cudnn as cudnn import torch.optim as optim import torch.utils.data as data import torchvision.transforms as transforms import torchvision.datasets as datasets from gumi import model_utils from gumi.ops import * from gumi.pruning.export import GroupExporter from gumi.model_runner import utils from gumi.model_runner.model_runner import ModelRunner from gumi.model_runner.parser import create_cli_parser # CLI parser parser = create_cli_parser(prog="CLI tool for pruning") parser.add_argument( "--skip-train", action="store_true", default=False, help="Whether to skip the training step.", ) parser.add_argument( "--fine-tune", action="store_true", default=False, help="Whether to fine-tune ONLY the linear classifiers.", ) args = parser.parse_args() # CUDA os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id use_cuda = torch.cuda.is_available() cudnn.benchmark = True class TransferRunner(ModelRunner): """ Runner for transfer learning. """ def validate_args(self, args): pass def create_update_state_dict_fn(): def update_state_dict(state_dict): """ Here are several update rules: - In this new script, we won't have "module." prefix - There won't be any '.conv2d' in the module """ state_dict_ = copy.deepcopy(state_dict) for key, val in state_dict.items(): key_ = key if "module" in key_: del state_dict_[key_] key_ = key_.replace("module.", "") state_dict_[key_] = val if "fc" in key_: del state_dict_[key_] return state_dict_ return update_state_dict def main(): """ Main """ # initialise runner logging.info("==> Initializing TransferRunner ...") runner = TransferRunner(args) # load model logging.info("==> Loading model ...") model = runner.load_model( update_state_dict_fn=create_update_state_dict_fn(), fine_tune=args.fine_tune ) # Validate logging.info("==> Validating the loaded model ...") loss1, acc1 = runner.validate(model) # Train if args.skip_train: logging.info("==> Training has been skipped.") else: logging.info("==> Run training ...") best_acc = runner.train(model) # parameters are in args # Validate again logging.info("==> Validating the trained model ...") loss2, acc2 = runner.validate(model) if __name__ == "__main__": main()
2.234375
2
AutomatedParking/AutomatedParking/models.py
COMP-SCI-72/Automated-Parking-System
0
12792119
from django.db import models class User(models.Model): pass class Car(models.Model): pass class Parking(models.Model): pass
1.632813
2
datam/__init__.py
bengranett/datam
0
12792120
<reponame>bengranett/datam import subprocess, os __version__ = '' try: # if we are running in a git repo, look up the hash __version__ = subprocess.Popen( ('git','--git-dir',os.path.dirname(__file__),'describe','--always'), stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()[0] assert __version__ except: # otherwise check for a version file try: from . version import version as __version__ except: pass
2.203125
2
Hackerrank/World Cup 2016/World Cup/Problem F/gen.py
VastoLorde95/Competitive-Programming
170
12792121
from math import * from fractions import * from random import * n = 1000000000 q = 200000 print n, q for _ in xrange(q): t = randrange(1,4) l,r,c = randrange(1,n+1), randrange(1,n+1), 1000000000 if t < 3: print t,l,r,c else: print t,l,r #print 3, 1, 1000000000
3.03125
3
metric_learn/jde.py
saviorabelo/metric-learn
0
12792122
""" Using jDE for evolving a triangular Mahalanobis matrix based on Fukui 2013: Evolutionary Distance Metric Learning Approach to Semi-supervised Clustering with Neighbor Relations There are some notable differences between the paper and this implementation, please refer to https://github.com/svecon/thesis-distance-metric-learning/releases/tag/1.0 """ from .evolution import MetricEvolution from .evolution import fitness as fit from .evolution import strategy as st from .evolution import transformer as tr class JDE(MetricEvolution): """ Using jDE for evolving a triangular Mahalanobis matrix. """ def __init__(self, n_gen=25, split_size=0.33, train_subset_size=1.0, max_workers=1, random_state=None, verbose=False): """Initialize the learner. Parameters ---------- n_gen : int, optional Number of generations for the evolution strategy. split_size : double, optional Ratio of train:test sample size during evolution. train_subset_size : double, optional Ratio of samples used in training the model during evolution. max_workers : int, optional Number of workers for parallelization. random_state : int, optional If provided, controls random number generation. verbose : bool, optional If true then outputs debugging information. """ super(JDE, self).__init__( strategy=st.SelfAdaptingDifferentialEvolution( n_gen=n_gen, split_size=split_size, train_subset_size=train_subset_size, max_workers=max_workers, random_state=random_state, verbose=verbose ), fitness_list=[fit.WeightedFMeasureFitness()], transformer_func=tr.TriangularMatrixTransformer(), random_state=random_state, verbose=verbose )
2.671875
3
app.py
gibbsie/cdk-graviton2-alb-aga-route53
2
12792123
#!/usr/bin/env python3 from aws_cdk import core import os from ec2_ialb_aga_custom_r53.network_stack import NetworkingStack from ec2_ialb_aga_custom_r53.aga_stack import AgaStack from ec2_ialb_aga_custom_r53.alb_stack import ALBStack from ec2_ialb_aga_custom_r53.certs_stack import CertsStack from ec2_ialb_aga_custom_r53.ec2_stack import EC2Stack deploy_env = core.Environment( account=os.environ["CDK_DEFAULT_ACCOUNT"], region=os.environ["CDK_DEFAULT_REGION"]) # These need to be injected at synth/deployment time CIDR = os.getenv("VPC_CIDR", "") DOMAIN = os.getenv("R53_DOMAIN", "") SUB_DOMAIN = "code-server" app = core.App() net = NetworkingStack(app, "GravitonBlog-NetworkingStack", CIDR, env=deploy_env) ec2 = EC2Stack(app, "GravitonBlog-EC2Stack", net.vpc, env=deploy_env) ec2.add_dependency(net) cert = CertsStack(app, "GravitonBlog-CertsStack", DOMAIN, SUB_DOMAIN, env=deploy_env) alb = ALBStack(app, "GravitonBlog-ALBStack", net.vpc, ec2.instance, cert.domain_cert, env=deploy_env) alb.add_dependency(net) alb.add_dependency(ec2) alb.add_dependency(cert) aga = AgaStack(app, "GravitonBlog-AGAStack", net.vpc, alb.alb, cert.blog_hosted_zone, SUB_DOMAIN, env=deploy_env) aga.add_dependency(net) aga.add_dependency(cert) aga.add_dependency(alb) app.synth()
1.90625
2
core/views.py
HortenciaArliane/speakerfight
369
12792124
from django.contrib.auth.models import User from django.utils.translation import ugettext as _ from django.http import Http404, HttpResponseRedirect from django.contrib import messages from django.shortcuts import get_object_or_404 from django.utils import translation from vanilla import TemplateView, DetailView, UpdateView from deck.models import Event, Proposal from core.models import Profile from core.forms import ProfileForm, ProfilePictureForm, ProfileChangeLanguageForm from core.mixins import LoginRequiredMixin, FormValidRedirectMixing class IndexView(TemplateView): template_name = 'index.html' def get_context_data(self, **kwargs): context = super(IndexView, self).get_context_data(**kwargs) context.update( events=Event.objects.count(), proposals=Proposal.objects.count(), users=User.objects.count() ) return context class AboutView(TemplateView): template_name = 'about.html' class ProfileView(DetailView): template_name = 'account/profile.html' model = Profile lookup_field = 'user__username' def get_object(self, **kwargs): queryset = self.get_queryset() username = self.kwargs.get('user__username') if not username and self.request.user.is_authenticated(): return self.request.user.profile else: return get_object_or_404(queryset, user__username=username) def get_context_data(self, **kwargs): context = super(ProfileView, self).get_context_data(**kwargs) self.object = self.get_object() context.update( profile_form=ProfileForm(instance=self.object), language_form=ProfileChangeLanguageForm(instance=self.object), events=self.object.get_profile_events(), proposals=self.object.get_profile_proposals(), ) return context class ProfileUpdateView(LoginRequiredMixin, FormValidRedirectMixing, UpdateView): template_name = 'account/profile.html' model = Profile form_class = ProfileForm lookup_field = 'user__username' def get_object(self, **kwargs): queryset = self.get_queryset() username = self.kwargs.get('user__username') if not username and self.request.user.is_authenticated(): return self.request.user.profile elif (username == self.request.user.username or self.request.user.is_superuser): return get_object_or_404(queryset, user__username=username) else: raise Http404 def form_valid(self, form): self.object = form.save() return self.success_redirect(_(u'Profile updated.')) def get(self, *args, **kwargs): self.object = self.get_object() return HttpResponseRedirect( self.object.get_absolute_url() ) def form_invalid(self, form): for error in form.errors.itervalues(): messages.error(self.request, error.as_data()[0].message) return self.get() class ProfileUpdatePictureView(ProfileUpdateView): form_class = ProfilePictureForm def form_valid(self, form): self.object = form.save() return self.success_redirect(_(u'Photo changed.')) class ProfileChangeLanguageView(ProfileUpdateView): form_class = ProfileChangeLanguageForm def form_valid(self, form): self.object = form.save() translation.activate(self.object.language) self.request.session[ translation.LANGUAGE_SESSION_KEY ] = self.object.language return self.success_redirect(_(u'Language changed.'))
1.945313
2
positive no in range.py
KrutikaSoor/print-all-positive-no.in-range
0
12792125
list1=[12,-7,5,64,-14] list2=[12,14,-95,3] for i in list1: if i>0: print(i) for j in list2: if j>0: print(j)
3.75
4
make_mozilla/postgis/base.py
Mozilla-GitHub-Standards/54c69db06ef83bda60e995a6c34ecfd168ca028994e40ce817295415bb409f0c
4
12792126
<reponame>Mozilla-GitHub-Standards/54c69db06ef83bda60e995a6c34ecfd168ca028994e40ce817295415bb409f0c<filename>make_mozilla/postgis/base.py<gh_stars>1-10 from django.db.backends.postgresql_psycopg2.base import * from django.db.backends.postgresql_psycopg2.base import DatabaseWrapper as Psycopg2DatabaseWrapper from django.contrib.gis.db.backends.postgis.creation import PostGISCreation from django.contrib.gis.db.backends.postgis.introspection import PostGISIntrospection from django.contrib.gis.db.backends.postgis.operations import PostGISOperations from django.contrib.gis.db.backends.postgis.adapter import PostGISAdapter import psycopg2 class PatchedPostGISAdapter(PostGISAdapter): def __init__(self, geom): super(PatchedPostGISAdapter, self).__init__(geom) self._adapter = psycopg2.Binary(self.ewkb) def prepare(self, conn): """ This method allows escaping the binary in the style required by the server's `standard_conforming_string` setting. """ self._adapter.prepare(conn) def getquoted(self): "Returns a properly quoted string for use in PostgreSQL/PostGIS." # psycopg will figure out whether to use E'\\000' or '\000' return 'ST_GeomFromEWKB(%s)' % self._adapter.getquoted() class PatchedPostGISOperations(PostGISOperations): Adapter = PatchedPostGISAdapter Adaptor = Adapter # Backwards-compatibility alias. class DatabaseWrapper(Psycopg2DatabaseWrapper): def __init__(self, *args, **kwargs): super(DatabaseWrapper, self).__init__(*args, **kwargs) self.creation = PostGISCreation(self) self.ops = PatchedPostGISOperations(self) self.introspection = PostGISIntrospection(self)
2.03125
2
bot.py
davidchooo/dst-bot
0
12792127
import os from discord.ext import commands from dotenv import load_dotenv import json import shutil from datetime import datetime load_dotenv() TOKEN = os.getenv('DISCORD_TOKEN') DST_DIR = os.getenv('DST_DIR') BACKUP_DIR = os.getenv('BACKUP_DIR') bot = commands.Bot(command_prefix='!') @bot.event async def on_ready(): print(f'{bot.user} has connected to Discord!') @bot.command() async def ping(ctx): await ctx.send(f'Pong! {round(bot.latency * 1000)}ms') @bot.command(aliases=['clops']) async def deerclops(ctx, *args): with open('data/deerclops.txt', 'r+') as f: day = f.readlines()[0].strip() if (not len(args)): await ctx.send(f'Deerclops will spawn on Day {day}.') elif (args[0] == 'help'): await ctx.send('Deerclops Usage: ') elif (len(args) == 1): # Update file day += 71.8 await ctx.send(f'Updated: Deerclops will spawn on Day {args[0]}.') else: await ctx.send('Only provide 1 argument! e.g. "!deerclops 10"') @bot.command(aliases=['mod']) async def mods(ctx, *args): with open('data/mods.json', 'r+') as f: data = json.load(f) # Display mods if (not len(args)): message = '' # Add server mods message += '__**Server Mods:**__\n' for mod in data['server']: message += f'- {mod}\n' # Add client mods message += '\n__**Client Mods:**__\n' for mod in data['client']: message += f'- {mod}\n' await ctx.send(message) # Add new mod elif (args[0] == 'server' or args[0] == 'client'): mod_type = args[0] # Format the mod and add it to json mod = ' '.join(args[1:]) data[mod_type].append(mod) # Clear the json file before dumping the updated contents f.seek(0) f.truncate() json.dump(data, f, indent=4) # Send confirmation! await ctx.send(f'Added "{mod}" to {mod_type} mods!') # Help elif (args[0] == 'help'): await ctx.send('Mods usage:') @bot.command(aliases=['backup']) async def save(ctx): # TODO: take server name as argument src = f'{DST_DIR}/Cluster_5' server_name = 'the rust buster' dest = f'{BACKUP_DIR}/{server_name}/Backup {datetime.now().strftime("%b-%d-%y %H%M")}' try: shutil.copytree(src, dest) await ctx.send('Server saved!') print(f'Server saved to {dest}') except Exception as e: await ctx.send('Backup failed :( Check console for error') print(e) bot.run(TOKEN)
2.34375
2
bokeh/models/tests/test_renderers.py
gully/bokeh
4
12792128
<reponame>gully/bokeh from __future__ import absolute_import from bokeh.models import Circle, MultiLine, ColumnDataSource from bokeh.models.renderers import GlyphRenderer, GraphRenderer def test_graphrenderer_init_props(): renderer = GraphRenderer() assert renderer.x_range_name == "default" assert renderer.y_range_name == "default" assert renderer.node_renderer.data_source.data == dict(index=[]) assert renderer.edge_renderer.data_source.data == dict(start=[], end=[]) assert renderer.layout_provider is None def test_graphrenderer_check_malformed_graph_source_no_errors(): renderer = GraphRenderer() check = renderer._check_malformed_graph_source() assert check == [] def test_graphrenderer_check_malformed_graph_source_no_node_index(): node_source = ColumnDataSource() node_renderer = GlyphRenderer(data_source=node_source, glyph=Circle()) renderer = GraphRenderer(node_renderer=node_renderer) check = renderer._check_malformed_graph_source() assert check != [] def test_graphrenderer_check_malformed_graph_source_no_edge_start_or_end(): edge_source = ColumnDataSource() edge_renderer = GlyphRenderer(data_source=edge_source, glyph=MultiLine()) renderer = GraphRenderer(edge_renderer=edge_renderer) check = renderer._check_malformed_graph_source() assert check != []
2.1875
2
project_euler/121.py
huangshenno1/project_euler
0
12792129
<reponame>huangshenno1/project_euler def add(p, b, r, x): if (b, r) in p: p[(b, r)] += x else: p[(b, r)] = x n = 15 dp = [] dp.append({(0,0): 1.0}) for rnd in xrange(1, n+1): p = {} for ((b, r), v) in dp[rnd-1].items(): add(p, b+1, r, v/(rnd+1)) add(p, b, r+1, v*rnd/(rnd+1)) dp.append(p) s = 0 for ((b, r), v) in dp[n].items(): if b > r: s += v ans = int(1/s) print ans
2.65625
3
fireup/algos/ppo/actorcritic.py
arnaudvl/firedup
1
12792130
import torch import torch.nn as nn from gym.spaces import Box, Discrete from .agents import MLP, CNN, WorldModel from .policies import CategoricalPolicyMLP, GaussianPolicyMLP from .policies import CategoricalPolicyCNN, GaussianPolicyCNN, GaussianPolicyWM class ActorCriticWM(nn.Module): def __init__(self, action_space=None, activation=torch.relu, output_activation=None): super(ActorCriticWM, self).__init__() if isinstance(action_space, Box): self.policy = GaussianPolicyWM( action_space.shape[0], activation=activation, output_activation=output_activation ) self.value_function = WorldModel( action_space.shape[0], 1, activation=activation, output_activation=None, output_squeeze=True ) def forward(self, x, a): pi, logp, logp_pi = self.policy(x, a) v = self.value_function(x, a) return pi, logp, logp_pi, v class ActorCriticCNN(nn.Module): def __init__(self, cnn_layers, mlp_layers, action_space=None, activation=torch.tanh, output_activation=None, flatten_type='flatten', policy=None): super(ActorCriticCNN, self).__init__() if policy is None and isinstance(action_space, Box): self.policy = GaussianPolicyCNN( cnn_layers, mlp_layers, activation, action_space.shape[0], output_activation=output_activation, flatten_type=flatten_type ) elif policy is None and isinstance(action_space, Discrete): self.policy = CategoricalPolicyCNN( cnn_layers, mlp_layers, activation, action_space.n, output_activation=output_activation, flatten_type=flatten_type ) self.value_function = CNN( cnn_layers, mlp_layers + [1], activation=activation, flatten_type=flatten_type, output_activation=None, output_squeeze=True ) def forward(self, x, a=None): pi, logp, logp_pi = self.policy(x, a) v = self.value_function(x) return pi, logp, logp_pi, v class ActorCriticMLP(nn.Module): def __init__(self, in_features, action_space, hidden_sizes=(64, 64), activation=torch.tanh, output_activation=None, policy=None): super(ActorCriticMLP, self).__init__() if policy is None and isinstance(action_space, Box): self.policy = GaussianPolicyMLP( in_features, hidden_sizes, activation, output_activation, action_dim=action_space.shape[0]) elif policy is None and isinstance(action_space, Discrete): self.policy = CategoricalPolicyMLP( in_features, hidden_sizes, activation, output_activation, action_dim=action_space.n) else: self.policy = policy(in_features, hidden_sizes, activation, output_activation, action_space) self.value_function = MLP( layers=[in_features] + list(hidden_sizes) + [1], activation=activation, output_squeeze=True) def forward(self, x, a=None): pi, logp, logp_pi = self.policy(x, a) v = self.value_function(x) return pi, logp, logp_pi, v
2.34375
2
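A minimal sketch of how the ActorCriticMLP module above might be instantiated for a continuous-control task; the observation size and action bounds are made up, and the printed shapes assume the Gaussian policy returns sampled actions as its first output.

import numpy as np
import torch
from gym.spaces import Box
from fireup.algos.ppo.actorcritic import ActorCriticMLP  # import path assumed

# Hypothetical 8-dimensional observations and a 2-dimensional continuous action space
action_space = Box(low=-1.0, high=1.0, shape=(2,), dtype=np.float32)
ac = ActorCriticMLP(in_features=8, action_space=action_space, hidden_sizes=(64, 64))

obs = torch.randn(16, 8)          # batch of 16 observations
pi, logp, logp_pi, v = ac(obs)    # sampled actions, log-probs and state values
print(pi.shape, v.shape)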
ml_models/classify_images/infer.py
siruku6/ml_sample
0
12792131
from typing import List, Tuple import numpy as np from PIL import Image import pytorch_lightning as pl import torch from torchvision.models import resnet18 from torchvision import transforms from ml_models.model_initializer import ModelInitializer class PredPostureNet(pl.LightningModule): def __init__(self): super().__init__() self.resnet = resnet18(pretrained=True) self.fc = torch.nn.Linear(1000, 4) def forward(self, x): h0 = self.resnet(x) h1 = self.fc(h0) return h1 class Inference: def __init__(self): BUCKET_NAME: str = 'classify-posture' MODEL_SOURCE_NAME: str = 'posture_4_classes_model.pt' MODEL_FILE_PATH: str = f'ml_models/classify_images/{MODEL_SOURCE_NAME}' initializer: ModelInitializer = ModelInitializer( BUCKET_NAME, MODEL_SOURCE_NAME, MODEL_FILE_PATH ) self.net: PredPostureNet = initializer.init_model(network_class=PredPostureNet) self.class_names: List[str] = ['handstand', 'lying_down', 'sit', 'stand'] def run(self, image_name: str) -> Tuple[np.ndarray, str]: path: str = self._image_file_path(image_name) image = self._prepare_image(path) with torch.no_grad(): y = self.net(image) # NOTE: the batch holds a single row, so index 0 drops the batch dimension result: np.ndarray = y.softmax(dim=-1).detach().numpy()[0] cls: int = np.argmax(result) return np.round(result, decimals=4), self.class_names[cls] def _image_file_path(self, image_name: str) -> str: return f'media/images/{image_name}' def _prepare_image(self, path: str): transform = transforms.Compose([ # the 256 -> 224 resize/crop is the usual convention for models pretrained on ImageNet transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), # mean and std values recommended in the official PyTorch documentation transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) img = Image.open(path).convert('RGB') transformed_img = transform(img) img_torch = torch.stack([transformed_img]) return img_torch
2.4375
2
ok_cart/api/utils.py
LowerDeez/ok-cart
3
12792132
from typing import Dict, TYPE_CHECKING, Type from django.apps import apps from django.utils.translation import ugettext_lazy as _ from ..settings import settings if TYPE_CHECKING: from django.db.models import Model __all__ = ( 'cart_element_representation_serializer', 'get_base_api_view' ) def cart_element_representation_serializer( value: Type['Model'], serializer_context: Dict ): serializers = settings.ELEMENT_REPRESENTATION_SERIALIZERS for model_path, serializer_class in serializers.items(): model_class = apps.get_model(model_path) if isinstance(value, model_class): return serializer_class( instance=value, context=serializer_context ) raise Exception(_('Unexpected type of cart element')) def get_base_api_view(): """ Returns custom pagination class, set in settings """ BaseAPIView = settings.BASE_API_VIEW if BaseAPIView is None: class BaseAPIView: pass return BaseAPIView
2.015625
2
tools/pre_parser/lua_bind/lua_bind.py
maoxiezhao/Cjing3D-Test
0
12792133
<reponame>maoxiezhao/Cjing3D-Test import json import sys import os import traceback import glob import subprocess import code_gen_utils.code_gen_utils as cgu filter_path = "filter.json" class ArgsInfo: input_files = [] output_dir = "" input_dir = "" class MetaInfo: input_head_files = [] class_base_mapping = dict() class_meta_infos = list() def is_empty(self): return len(self.class_meta_infos) <= 0 def sort_dependencies(self): queue = list() in_map = dict() edge_map = dict() need_sort = False class_map = dict() # refresh class base mapping for class_meta_info in self.class_meta_infos: base_classes = class_meta_info["base_class"] class_name = class_meta_info["name"] if base_classes: class_base_info = self.class_base_mapping[class_name] for base_class in base_classes: if base_class not in self.class_base_mapping.keys(): continue class_base_info.append(base_class) # set edge map if not base_class in edge_map.keys(): edge_map[base_class] = list() edge_map[base_class].append(class_name) need_sort = True in_map[class_name] = len(base_classes) else: queue.append(class_name) in_map[class_name] = 0 class_map[class_name] = class_meta_info if not need_sort: return # topological sort new_queue = list() while len(queue) > 0: class_name = queue[len(queue) - 1] queue.pop() new_queue.append(class_name) if class_name in edge_map.keys(): for derived_class_name in edge_map[class_name]: in_map[derived_class_name] = in_map[derived_class_name] - 1 if in_map[derived_class_name] == 0: queue.append(derived_class_name) new_list = list() for class_name in new_queue: new_list.append(class_map[class_name]) self.class_meta_infos = new_list def get_path_file_name(dir): return os.path.basename(dir) def change_path_extension(dir, ext): return os.path.splitext(dir)[0] + ext def check_path_directory(dir): return len(os.path.splitext(dir)[1]) <= 0 ######################################################################################################### # parse ######################################################################################################### def print_help(): print("lua_bind_parser v0.1.0") print("usage: lua_bind [option] [cmd] [params]") print("cmd arguments:") print("-help: display help") print("-i : list of input files") print("-d : input directory") print("-o : ouput directory") def parse_args(args): args_info = ArgsInfo() if len(args) == 1: print_help() return args_info for i in range(1, len(args)): if args[i] == "-i": file_index = i + 1 while file_index < len(args) and args[file_index][0] != "-": args_info.input_files.append(args[file_index]) file_index = file_index + 1 i = file_index elif args[i] == "-o": args_info.output_dir = args[i + 1] elif args[i] == "-d": args_info.input_dir = args[i + 1] return args_info def parse_jsn_meta_info(jsn, meta_info): if "head_name" in jsn.keys(): meta_info.input_head_files.append(jsn["head_name"]) if "class" in jsn.keys(): class_meta_infos = jsn["class"] for i in range(0, len(class_meta_infos)): class_meta_info = class_meta_infos[i] meta_info.class_meta_infos.append(class_meta_info) meta_info.class_base_mapping[class_meta_info["name"]] = list() def parse_meta_info(input_file_name, meta_info): # 0. make temp dir if not os.path.exists("./temp"): os.makedirs("./temp") # 1. use cppparser to generate meta.json cmd = "..\cppParser.exe" cmd += " -i " + input_file_name cmd += " -f " + filter_path cmd += " -od .\\temp" cmd += " -on temp.json" subprocess.call(cmd, shell=True) # 2. 
parse meta.json to generate lua_register code temp_path = "./temp/temp.json" if not os.path.exists(temp_path): return buffer = open(temp_path).read() if not buffer: return try: jsn = json.loads(buffer) except: traceback.print_exc() exit(1) if not jsn or len(jsn) <= 0: return parse_jsn_meta_info(jsn, meta_info) # 3. remove temp json file os.remove(temp_path) def get_class_register_name(jsn): name = jsn["name"] attributes = jsn["attributes"] if attributes: for i in range(0, len(attributes)): attribute = attributes[i] if attribute.find("LUA_BINDER_NAME") != -1: npos = attribute.find("(") epos = attribute.find(")") name = attribute[npos + 1:epos] break return name ######################################################################################################### # generate ######################################################################################################### def generate_class_contruct(jsn): code = ".AddConstructor(_LUA_ARGS_(" # params params = jsn["params"] if params: for i in range(0, len(params)): param = params[i] if i > 0: code += " ," if param["is_const"]: code += "const " code += param["type"] code += "))" return cgu.src_line(code) def generate_class_function(jsn, class_name): function_name = jsn["name"] function_str = "&" + class_name + "::" + function_name # static function if jsn["is_static"]: code = cgu.src_line(".AddFunction(\"" + function_name+ "\", " + function_str + ")") else: code = cgu.src_line(".AddMethod(\"" + function_name+ "\", " + function_str + ")") return code def generate_class_meta(mete_info, jsn): code = "" class_name = jsn["name"] # base classf class_base_mapping = mete_info.class_base_mapping[class_name] if len(class_base_mapping) > 0: for base_class_name in class_base_mapping: # multiple inheritance is not supported string = ".BeginExtendClass<" string += class_name + ", " + base_class_name + ">" string += "(\"" + get_class_register_name(jsn) + "\")" code += cgu.src_line(string) break else: code += cgu.src_line(".BeginClass<" + class_name + ">(\"" + get_class_register_name(jsn) + "\")") # construct constuctors = jsn["constuctors"] if constuctors: for constuctor in constuctors: code += generate_class_contruct(constuctor) # function functions = jsn["member_function"] if functions: for function in functions: code += generate_class_function(function, class_name) code += cgu.src_line(".EndClass();") return code def generate(meta_info, output_file_name): print("start to generate:", output_file_name) # 1. generate include heads codes code = cgu.src_line("// codegen_lua_bind") code += cgu.src_line("#pragma once") code += cgu.src_line("") code += cgu.src_line('#include ' + cgu.in_quotes("luaHelper\luaBinder.h")) for input_head in meta_info.input_head_files: code += cgu.src_line('#include ' + cgu.in_quotes(input_head)) code += cgu.src_line("") # 2. sort classes by dependencies meta_info.sort_dependencies() # 3. generate lua registering codes code += cgu.src_line("using namespace Cjing3D;") code += cgu.src_line("") code += cgu.src_line("void luabind_registers_AutoBindFunction(lua_State* l) {") for i in range(0, len(meta_info.class_meta_infos)): class_jsn = meta_info.class_meta_infos[i] code += cgu.src_line("LuaBinder(l)") code += generate_class_meta(meta_info, class_jsn) code += cgu.src_line("") code += cgu.src_line("}") # 4. 
write file output_file = open(output_file_name, "w") output_file.write(cgu.format_source(code, 4)) ######################################################################################################### # main ######################################################################################################### def run(args): args_info = parse_args(args) output_dir = args_info.output_dir if not output_dir or not check_path_directory(output_dir): return input_dir = args_info.input_dir if input_dir and check_path_directory(input_dir): ret = glob.glob(os.path.join(input_dir, "**/*.h"), recursive=True) if len(ret) > 0: for path in ret: args_info.input_files.append(path) meta_info = MetaInfo() # parse input file for i in args_info.input_files: if not os.path.exists(i): continue parse_meta_info(i, meta_info) # generate lua bind codes if not meta_info.is_empty(): if not os.path.exists(output_dir): os.makedirs(output_dir) generate(meta_info, os.path.join(output_dir, "lua_bind_generated.h")) if __name__ == '__main__': run(sys.argv)
2.171875
2
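The sort_dependencies method in the generator above orders classes with a topological sort over the inheritance graph, so base classes are registered with LuaBinder before the classes that extend them. As a rough, self-contained sketch of that idea (the class names below are invented for illustration, not taken from the project):

from collections import deque

def topo_sort(edges):
    """edges maps a base class name to the classes derived from it."""
    indegree = {}
    for base, derived_list in edges.items():
        indegree.setdefault(base, 0)
        for derived in derived_list:
            indegree[derived] = indegree.get(derived, 0) + 1
    queue = deque(name for name, deg in indegree.items() if deg == 0)
    order = []
    while queue:
        name = queue.popleft()
        order.append(name)
        for derived in edges.get(name, ()):
            indegree[derived] -= 1
            if indegree[derived] == 0:
                queue.append(derived)
    return order

print(topo_sort({"Object": ["Component"], "Component": ["Renderable"]}))
# -> ['Object', 'Component', 'Renderable']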
src/old_code/RandomForestDriver.py
amirjankar/graph-analysis
1
12792134
<filename>src/old_code/RandomForestDriver.py<gh_stars>1-10 import pandas as pd, numpy, math, random, DecisionTree from collections import Counter """ @author: anmirjan """ class RandomForest: """ Creates random forest from a decision tree Trains model based on given dataset """ def __init__(self, count=5): testDat= {} testDat['following'] = 0 testDat['followers'] = 0 testDat['tweets_count'] = 0 testDat['sentiment'] = 0 testDat['classification'] = 0 tmp = pd.DataFrame(testDat, index = [0]) tmp = tmp.drop(0) self.data = {i:tmp.copy() for i in range(count)} self.labels = {i:[] for i in range(count)} self.forest = [None for i in range(count)] self.count = count def round_to(self, n, prec): return prec * int( n/prec+.5 ) def round_to_5(self, n): return self.round_to(n, 0.05) def train(self, data, labels, bootstrapping = True): # for i, data in enumerate(data): for i in data.iterrows(): index, rowdata = i assigned_tree = math.floor(random.random() * self.count) # adds key value pair to data, labels # self.data[assigned_tree].append((index, rowdata)) self.data[assigned_tree] = self.data[assigned_tree].append(rowdata) self.labels[assigned_tree].append((index, labels[index])) if bootstrapping: treesPerForest = int(len(data)/3) for i in range(0, self.count): data = data.sample(frac=1) self.data[i] = self.data[i].append(data.iloc[1:treesPerForest, :]) index = data.index.values.astype(int)[1:treesPerForest] for r in index: self.labels[i].append((r, labels[r])) for i, tree in enumerate(self.forest): x = pd.DataFrame(self.labels[i]).drop(0, axis=1) self.forest[i] = DecisionTree.DecisionTree(self.data[i].reset_index(drop=True), x.squeeze()) self.forest[i].build_tree() def predict(self, data, distinct): output = [] output = [dt.classify(data) for dt in self.forest] print(output) # for x in self.forest: # output.append(x.classify(data)) if distinct: return [word for word, word_count in Counter(output).most_common(2)][0] return self.round_to_5(sum(output)/len(output)) if __name__ == "__main__": forest = RandomForest(count = 49) ## train forest csv_data = pd.read_csv('final_data.csv') labels = csv_data['retweet_count'] data = csv_data.drop(['retweet_count'], axis=1) forest.train(data, labels, bootstrapping=True) # prediction # test_data = pd.read_csv('verified_test.csv') # labels = test_data['retweet_count'] # data = test_data.drop(['retweet_count']) testDat= {} testDat['following'] = 62620 testDat['followers'] = 5987 testDat['tweets_count'] = 43101 testDat['sentiment'] = -1 testDat['classification'] = .8 test = pd.Series(testDat) forest.predict(test, True) # for i, data_point in enumerate(data): # assert forest.predict(data) is labels[i] print("Completed") td = pd.read_csv('Final_Test_Data.csv') incorrect = 0 for x in range(0, len(td)): row = td.iloc[x] val = row['retweet_count'] row = row.drop('retweet_count') retVal = forest.predict(row, True) if retVal != val: incorrect = incorrect + 1 print('Real: ', str(val), 'Discovered: ', str(retVal), 'err rate so far: ', str(incorrect)) print("Error rate: " + str(incorrect/len(td)))
3.28125
3
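The RandomForest driver above combines bootstrap sampling of training rows per tree with a majority vote across trees at prediction time. A minimal sketch of those two pieces in isolation, assuming nothing about the project's DecisionTree class:

import random
from collections import Counter

def bootstrap_indices(n_rows, sample_size, seed=None):
    # sample row indices with replacement, one bag per tree
    rng = random.Random(seed)
    return [rng.randrange(n_rows) for _ in range(sample_size)]

def majority_vote(predictions):
    # ties resolve to whichever label Counter saw first among the most common
    return Counter(predictions).most_common(1)[0][0]

print(bootstrap_indices(10, 5, seed=0))          # five indices drawn from range(10), repeats allowed
print(majority_vote(["a", "b", "a", "c", "a"]))  # -> 'a'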
COMP-2080/Week-9/matrixMult.py
kbrezinski/Candidacy-Prep
0
12792135
<gh_stars>0 import numpy as np A = np.random.randint(10, size=[4, 4]) B = np.random.randint(10, size=[4, 4]) def matMul(A: np.ndarray, B: np.ndarray): shape = A.shape[0] mid = shape // 2 C = np.empty([shape, shape]) matMulAux(0, mid) matMulAux(mid + 1, shape) def matMulAux(lo: int, hi: int): if lo == hi: pass
2.671875
3
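The matMul/matMulAux stub above is left unfinished. As a guess at where a divide-and-conquer version might go, here is a self-contained block matrix multiplication for square matrices whose size is a power of two; this is an assumption about the intent, not the author's solution:

import numpy as np

def block_matmul(A: np.ndarray, B: np.ndarray) -> np.ndarray:
    """Recursive block matrix multiply for square matrices with power-of-two size."""
    n = A.shape[0]
    if n == 1:
        return A * B
    mid = n // 2
    A11, A12, A21, A22 = A[:mid, :mid], A[:mid, mid:], A[mid:, :mid], A[mid:, mid:]
    B11, B12, B21, B22 = B[:mid, :mid], B[:mid, mid:], B[mid:, :mid], B[mid:, mid:]
    C = np.empty((n, n), dtype=A.dtype)
    C[:mid, :mid] = block_matmul(A11, B11) + block_matmul(A12, B21)
    C[:mid, mid:] = block_matmul(A11, B12) + block_matmul(A12, B22)
    C[mid:, :mid] = block_matmul(A21, B11) + block_matmul(A22, B21)
    C[mid:, mid:] = block_matmul(A21, B12) + block_matmul(A22, B22)
    return C

A = np.random.randint(10, size=[4, 4])
B = np.random.randint(10, size=[4, 4])
assert np.array_equal(block_matmul(A, B), A @ B)  # check against NumPy's matrix product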
OpenAttack/data/test.py
e-tornike/OpenAttack
444
12792136
NAME = "test" DOWNLOAD = "/TAADToolbox/test.pkl"
1.054688
1
sktime/transformations/panel/tests/test_PCATransformer.py
biologioholic/sktime
1
12792137
# -*- coding: utf-8 -*- """Tests for PCATransformer.""" import numpy as np import pytest from sktime.transformations.panel.pca import PCATransformer from sktime.utils._testing.panel import _make_nested_from_array @pytest.mark.parametrize("bad_components", ["str", 1.2, -1.2, -1, 11]) def test_bad_input_args(bad_components): """Check that exception is raised for bad input args.""" X = _make_nested_from_array(np.ones(10), n_instances=10, n_columns=1) if isinstance(bad_components, str): with pytest.raises(TypeError): PCATransformer(n_components=bad_components).fit(X) else: with pytest.raises(ValueError): PCATransformer(n_components=bad_components).fit(X)
2.515625
3
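The test above only exercises the failure modes. For contrast, a sketch of the happy path, assuming PCATransformer follows the usual fit/transform interface; the exact shape of the returned frame depends on the sktime version, so treat the comment as an expectation rather than documented output:

import numpy as np
from sktime.transformations.panel.pca import PCATransformer
from sktime.utils._testing.panel import _make_nested_from_array

X = _make_nested_from_array(np.arange(10, dtype=float), n_instances=10, n_columns=1)
Xt = PCATransformer(n_components=3).fit_transform(X)
print(Xt.shape)  # expected: one row per instance, one column per retained component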
src/apps/books/views.py
k0t3n/stepic_drf_tests
1
12792138
<gh_stars>1-10 from rest_framework.permissions import IsAuthenticated from rest_framework.viewsets import ModelViewSet from src.apps.books.models import Book from src.apps.books.serializers import BookSerializer class BookViewSet(ModelViewSet): queryset = Book.objects.all() serializer_class = BookSerializer permission_classes = (IsAuthenticated,)
1.734375
2
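A sketch of how a ModelViewSet like this is commonly wired into URLs with a DRF router; the "books" prefix and basename are illustrative choices, not taken from the project:

from rest_framework.routers import DefaultRouter

from src.apps.books.views import BookViewSet

router = DefaultRouter()
router.register("books", BookViewSet, basename="book")

urlpatterns = router.urls  # exposes /books/ (list, create) and /books/{pk}/ (retrieve, update, destroy)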
samples/actions/main.py
DiamondsBattle/panda3d-openvr
0
12792139
from direct.showbase.ShowBase import ShowBase from panda3d.core import ExecutionEnvironment from p3dopenvr.p3dopenvr import P3DOpenVR import openvr import os class MinimalOpenVR(P3DOpenVR): def __init__(self): P3DOpenVR.__init__(self) self.left_hand = None self.right_hand = None def init_action(self): main_dir = ExecutionEnvironment.getEnvironmentVariable("MAIN_DIR") filename = os.path.join(main_dir, "demo_actions.json") self.load_action_manifest(filename, "/actions/demo") self.action_haptic_left = self.vr_input.getActionHandle('/actions/demo/out/Haptic_Left') self.source_left = self.vr_input.getInputSourceHandle('/user/hand/left') self.action_pose_left = self.vr_input.getActionHandle('/actions/demo/in/Hand_Left') self.action_haptic_right = self.vr_input.getActionHandle('/actions/demo/out/Haptic_Right') self.source_right = self.vr_input.getInputSourceHandle('/user/hand/right') self.action_pose_right = self.vr_input.getActionHandle('/actions/demo/in/Hand_Right') self.action_left_trigger = self.vr_input.getActionHandle('/actions/demo/in/left_trigger') self.action_right_trigger = self.vr_input.getActionHandle('/actions/demo/in/right_trigger') def update_action(self): left_trigger_state, device = self.get_digital_action_rising_edge(self.action_left_trigger) if left_trigger_state: print("LEFT") self.vr_input.triggerHapticVibrationAction(self.action_haptic_left, 0, 1, 4, 1, openvr.k_ulInvalidInputValueHandle) right_trigger_state, device = self.get_digital_action_rising_edge(self.action_right_trigger) if right_trigger_state: print("RIGHT") self.vr_input.triggerHapticVibrationAction(self.action_haptic_right, 0, 1, 4, 1, openvr.k_ulInvalidInputValueHandle) left_pose = self.get_action_pose(self.action_pose_left) if left_pose.pose.bPoseIsValid: left_matrix = self.get_pose_modelview(left_pose.pose) if self.left_hand is None: self.left_hand = self.tracking_space.attach_new_node('left-hand') model = loader.loadModel("box") model.reparent_to(self.left_hand) model.set_scale(0.1) self.left_hand.show() self.left_hand.set_mat(left_matrix) else: if self.left_hand is not None: self.left_hand.hide() right_pose = self.get_action_pose(self.action_pose_right) if right_pose.pose.bPoseIsValid: right_matrix = self.get_pose_modelview(right_pose.pose) if self.right_hand is None: self.right_hand = self.tracking_space.attach_new_node('right-hand') model = loader.loadModel("box") model.reparent_to(self.right_hand) model.set_scale(0.1) self.right_hand.show() self.right_hand.set_mat(right_matrix) else: if self.right_hand is not None: self.right_hand.hide() base = ShowBase() base.setFrameRateMeter(True) myvr = MinimalOpenVR() myvr.init() model = loader.loadModel("panda") model.reparentTo(render) model.setPos(0, 10, -5) base.accept('d', myvr.list_devices) base.accept('b', base.bufferViewer.toggleEnable) base.run()
2.21875
2
seqp/integration/fairseq/dictionary.py
noe/seqp
1
12792140
<filename>seqp/integration/fairseq/dictionary.py # Copyright (c) 2019-present, <NAME> # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. try: from fairseq.data import Dictionary except ImportError: assert False, "Fairseq is needed for seqp integration with fairseq!" from seqp.vocab import Vocabulary def vocab_to_dictionary(vocab: Vocabulary) -> Dictionary: """ Creates a fairseq Dictionary from a seqp Vocabulary. It manipulates the Dictionary's internal state to avoid reserving token 0 for Lua compatibility in order to respect the token ID associations in the original Vocabulary. :param vocab: Vocabulary to convert to Dictionary. :return: Resulting Dictionary. """ pad_symbol = vocab.idx2symbol[vocab.pad_id] eos_symbol = vocab.idx2symbol[vocab.eos_id] unk_symbol = vocab.idx2symbol[vocab.unk_id] dictionary = Dictionary(pad=pad_symbol, unk=unk_symbol, eos=eos_symbol) # We clear up the internal state to write it from scratch (and without # the Lua heritage token zero, to keep token IDs) dictionary.symbols = [] dictionary.count = [] dictionary.indices = {} dictionary.nspecial = 3 for symbol in vocab.idx2symbol: unknown_frequency = 1 # frequency info is not available dictionary.add_symbol(symbol, unknown_frequency) dictionary.pad_index = vocab.pad_id dictionary.eos_index = vocab.eos_id dictionary.unk_index = vocab.unk_id return dictionary
2.25
2
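A sketch of how the conversion above could be sanity-checked: every symbol should keep the ID it had in the seqp Vocabulary, and the special tokens should line up. The helper below only assumes the attributes already used in vocab_to_dictionary plus fairseq's standard Dictionary accessors:

from seqp.vocab import Vocabulary
from seqp.integration.fairseq.dictionary import vocab_to_dictionary

def check_roundtrip(vocab: Vocabulary) -> None:
    dictionary = vocab_to_dictionary(vocab)
    for idx, symbol in enumerate(vocab.idx2symbol):
        assert dictionary.index(symbol) == idx, symbol
    assert dictionary.pad() == vocab.pad_id
    assert dictionary.eos() == vocab.eos_id
    assert dictionary.unk() == vocab.unk_id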
tests/functional/test_adcm_upgrade.py
arenadata/adcm
16
12792141
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for ADCM upgrade""" # pylint:disable=redefined-outer-name, no-self-use, too-many-arguments import random from contextlib import contextmanager from pathlib import Path from typing import Tuple, Union, List, Iterable, Any import allure import pytest from adcm_client.base import ObjectNotFound from adcm_client.objects import ADCMClient, Cluster, Host, Service, Bundle, Component, Provider, Task, Job, Upgrade from adcm_pytest_plugin import params from adcm_pytest_plugin.docker_utils import ADCM from adcm_pytest_plugin.plugin import parametrized_by_adcm_version from adcm_pytest_plugin.steps.actions import ( run_cluster_action_and_assert_result, run_service_action_and_assert_result, run_component_action_and_assert_result, run_provider_action_and_assert_result, ) from adcm_pytest_plugin.utils import catch_failed, get_data_dir, random_string from tests.upgrade_utils import upgrade_adcm_version from tests.functional.conftest import only_clean_adcm from tests.functional.plugin_utils import build_objects_checker, build_objects_comparator from tests.functional.tools import AnyADCMObject, get_config, get_objects_via_pagination from tests.library.utils import previous_adcm_version_tag pytestmark = [only_clean_adcm] AVAILABLE_ACTIONS = { "single_state-available", "state_list-available", "state_any-available", } @pytest.fixture(scope="session") def upgrade_target(cmd_opts) -> Tuple[str, str]: """Actual ADCM version""" if not cmd_opts.adcm_image: pytest.fail("CLI parameter adcm_image should be provided") return tuple(cmd_opts.adcm_image.split(":", maxsplit=2)) # type: ignore def old_adcm_images() -> Tuple[List[Tuple[str, str]], Any]: """A list of old ADCM images""" return parametrized_by_adcm_version(adcm_min_version="2019.10.08")[0] def _create_cluster(sdk_client_fs: ADCMClient, bundle_dir: str = "cluster_bundle") -> Cluster: bundle = sdk_client_fs.upload_from_fs(get_data_dir(__file__, bundle_dir)) cluster_name = f"test_{random_string()}" return bundle.cluster_prototype().cluster_create(name=cluster_name) def _create_host(sdk_client_fs: ADCMClient, bundle_dir: str = "hostprovider") -> Host: bundle = sdk_client_fs.upload_from_fs(get_data_dir(__file__, bundle_dir)) provider = bundle.provider_create(name=f"test_{random_string()}") return provider.host_create(fqdn=f"test_host_{random_string()}") @allure.step("Check actions availability") def _assert_available_actions(obj: AnyADCMObject): obj.reread() actions = {action.name for action in obj.action_list()} assert ( actions == AVAILABLE_ACTIONS ), f"Unexpected list of available actions!\nExpected: {AVAILABLE_ACTIONS}\nActual:{actions}" @allure.step("Check that previously created cluster exists") def _check_that_cluster_exists(sdk_client_fs: ADCMClient, cluster: Cluster) -> None: assert len(sdk_client_fs.cluster_list()) == 1, "Only one cluster expected to be" with catch_failed(ObjectNotFound, "Previously created cluster not found"): sdk_client_fs.cluster(name=cluster.name) @allure.step("Check that previously created service 
exists") def _check_that_host_exists(cluster: Cluster, host: Host) -> None: assert len(cluster.host_list()) == 1, "Only one host expected to be" with catch_failed(ObjectNotFound, "Previously created host not found"): cluster.host(fqdn=host.fqdn) @allure.step("Check encryption") def _check_encryption(obj: Union[Cluster, Service]) -> None: assert obj.action(name="check-password").run().wait() == "success" @pytest.mark.parametrize("adcm_is_upgradable", [True], indirect=True) @pytest.mark.parametrize("image", old_adcm_images(), ids=repr, indirect=True) def test_upgrade_adcm( adcm_fs: ADCM, sdk_client_fs: ADCMClient, adcm_api_credentials: dict, adcm_image_tags: Tuple[str, str], ) -> None: """Test adcm upgrade""" cluster = _create_cluster(sdk_client_fs) host = _create_host(sdk_client_fs) cluster.host_add(host) upgrade_adcm_version(adcm_fs, sdk_client_fs, adcm_api_credentials, adcm_image_tags) _check_that_cluster_exists(sdk_client_fs, cluster) _check_that_host_exists(cluster, host) @pytest.mark.parametrize("adcm_is_upgradable", [True], indirect=True) @pytest.mark.parametrize("image", old_adcm_images(), ids=repr, indirect=True) def test_pass_in_config_encryption_after_upgrade( adcm_fs: ADCM, sdk_client_fs: ADCMClient, adcm_api_credentials: dict, adcm_image_tags: Tuple[str, str], ) -> None: """Test adcm upgrade with encrypted fields""" cluster = _create_cluster(sdk_client_fs, "cluster_with_pass_verify") service = cluster.service_add(name="PassCheckerService") config_diff = dict(password="<PASSWORD>") cluster.config_set_diff(config_diff) service.config_set_diff(config_diff) upgrade_adcm_version(adcm_fs, sdk_client_fs, adcm_api_credentials, adcm_image_tags) _check_encryption(cluster) _check_encryption(service) @pytest.mark.parametrize("adcm_is_upgradable", [True], indirect=True) @pytest.mark.parametrize("image", [["hub.arenadata.io/adcm/adcm", "2021.06.17.06"]], ids=repr, indirect=True) def test_actions_availability_after_upgrade( adcm_fs: ADCM, sdk_client_fs: ADCMClient, adcm_api_credentials: dict, upgrade_target: Tuple[str, str], ) -> None: """Test that actions availability from old DSL remains the same after update""" cluster = _create_cluster(sdk_client_fs, "cluster_with_actions") _assert_available_actions(cluster) upgrade_adcm_version(adcm_fs, sdk_client_fs, adcm_api_credentials, upgrade_target) _assert_available_actions(cluster) # !===== Dirty ADCM upgrade =====! class TestUpgradeFilledADCM: """ Check that ADCM filled with different objects can upgrade correctly: - objects didn't loose their configs and "stable" properties - objects can be manipulated (you can run actions on them) """ LONG_TEXT = f'{"Many" * 200}Words\nTo \"Say\"\n To (me)\n"' * 20 # Services CHEESE_SERVICE = 'cheese_service' SAUCE_SERVICE = 'sauce_service' BREAD_SERVICE = 'bread_service' # Components # on cheese MILK_COMPONENT = 'milk' # on sauce SPICE_COMPONENT = 'spice' TOMATO_COMPONENT = 'tomato' LEMON_COMPONENT = 'lemon' # fixtures @pytest.fixture() def dirty_adcm(self, sdk_client_fs: ADCMClient) -> dict: """ Fill ADCM with many different objects: bundles, clusters, providers, hosts, jobs. All jobs are waited to be finished before returning result dictionary. :returns: Dictionary with providers, clusters and sometimes bundles. 
""" dirty_dir = Path(get_data_dir(__file__)) / "dirty_upgrade" simple_provider_bundle, simple_providers, simple_hosts, all_tasks = self.create_simple_providers( sdk_client_fs, dirty_dir ) simple_cluster_bundle, simple_clusters, tasks = self.create_simple_clusters(sdk_client_fs, dirty_dir) complex_objects = self.create_complex_providers_and_clusters(sdk_client_fs, dirty_dir) upgraded_cluster, not_upgraded_cluster = self.create_upgradable_clusters(sdk_client_fs, dirty_dir) all_tasks.extend(tasks) _wait_for_tasks(all_tasks) with allure.step('Delete one of simple clusters with jobs'): self._delete_simple_cluster_with_job(simple_clusters) return { 'simple': { 'providers': tuple(simple_providers), 'hosts': tuple(simple_hosts), 'clusters': tuple(simple_clusters), 'provider_bundle': simple_provider_bundle, 'cluster_bundle': simple_cluster_bundle, }, 'complex': { 'providers': {'host_supplier': complex_objects[0], 'free_hosts': complex_objects[1]}, 'clusters': { 'all_services': complex_objects[2], 'config_history': complex_objects[3], 'not_configured': complex_objects[4], }, }, 'upgrade': {'upgraded': upgraded_cluster, 'not_upgraded': not_upgraded_cluster}, } # Test itself @params.including_https @pytest.mark.parametrize("adcm_is_upgradable", [True], indirect=True) @pytest.mark.parametrize("image", [previous_adcm_version_tag()], indirect=True) def test_upgrade_dirty_adcm( self, adcm_fs: ADCM, sdk_client_fs: ADCMClient, adcm_api_credentials: dict, upgrade_target: Tuple[str, str], dirty_adcm: dict, ): """ Create previous version ADCM with a lot of different objects. Upgrade ADCM. Run actions on ADCM. """ objects_are_not_changed = build_objects_checker(changed=None, extractor=_get_object_fields) with allure.step('Upgrade ADCM and expect all objects to be same'), objects_are_not_changed( sdk_client_fs ), self.check_job_related_objects_are_not_changed(sdk_client_fs): upgrade_adcm_version(adcm_fs, sdk_client_fs, adcm_api_credentials, upgrade_target) self.run_actions_after_upgrade( dirty_adcm['complex']['clusters']['all_services'], dirty_adcm['complex']['clusters']['config_history'], dirty_adcm['simple']['providers'][0], ) # Steps and helpers @contextmanager def check_job_related_objects_are_not_changed(self, adcm_client: ADCMClient): """Freeze jobs and check that they aren't changed after upgrade""" def extract_job_info(job: Job) -> dict: return { 'task_id': job.task_id, 'status': job.status, 'start_date': job.start_date, 'finish_date': job.finish_date, 'log_ids': {log.id for log in job.log_list()}, } comparator = build_objects_comparator( get_compare_value=extract_job_info, field_name='Job info', name_composer=lambda obj: f"Job with id {obj.id}" ) jobs: List[Job] = get_objects_via_pagination(adcm_client.job_list) frozen_objects = {job.job_id: extract_job_info(job) for job in jobs} yield with allure.step('Assert that Jobs have correct info'): for job_id, job_info in frozen_objects.items(): comparator(adcm_client.job(id=job_id), job_info) @allure.step('Create simple providers') def create_simple_providers( self, adcm_client: ADCMClient, bundle_dir: Path ) -> Tuple[Bundle, List[Provider], List[Host], List[Task]]: """ Upload simple_provider bundle Create 10 providers and 20 hosts on each provider Change config of one of providers and one of hosts Run failed actions on 3 of providers Run install action on hosts of 2 providers """ provider_bundle = adcm_client.upload_from_fs(bundle_dir / "simple_provider") providers = [provider_bundle.provider_create(f'Provider {random_string(6)}') for _ in range(10)] 
one_of_providers = providers[-2] one_of_providers.config_set_diff({'ssh_key': self.LONG_TEXT}) hosts = [ provider.host_create(f'{random_string(6)}-{random_string(6)}') for _ in range(20) for provider in providers ] one_of_providers.host_list()[-1].config_set_diff({'hosts_file': self.LONG_TEXT}) tasks = [provider.action(name='validate').run() for provider in providers[:3]] + [ host.action(name='install').run() for provider in providers[-2:] for host in provider.host_list() ] return provider_bundle, providers, hosts, tasks @allure.step('Create a lot of simple clusters') def create_simple_clusters( self, adcm_client: ADCMClient, bundle_dir: Path ) -> Tuple[Bundle, List[Cluster], List[Task]]: """ Upload simple_cluster bundle Create many clusters: - With one service and launched action on component - With one service and altered config of cluster, service and component - With two services and launched cluster install action :returns: Bundle, created clusters and tasks """ amount_of_clusters = 34 params = { 'cluster_altered_config': {'number_of_segments': 2, 'auto_reboot': False, 'textarea': self.LONG_TEXT}, 'service_altered_config': {'simple-is-best': False, 'mode': 'fast'}, 'component_altered_config': {'simpler-is-better': True}, 'cluster_action': 'install', 'service_with_component': 'Tchaikovsky', 'lonely_service': 'Shostakovich', 'component_with_action': 'mazepa', 'component_with_config': 'symphony', 'component_action': 'no_sense_to_run_me', } cluster_bundle = adcm_client.upload_from_fs(bundle_dir / "simple_cluster") tasks = [] with allure.step(f'Create {amount_of_clusters} clusters'): clusters = [cluster_bundle.cluster_create(f'Cluster {random_string(8)}') for _ in range(amount_of_clusters)] with allure.step('Add one service to clusters and run action on component'): for one_service_cluster in clusters[:4]: service = one_service_cluster.service_add(name=params['service_with_component']) component: Component = service.component(name=params['component_with_action']) tasks.append(component.action(name=params['component_action']).run()) with allure.step('Change config of clusters'): for cluster_to_change_config in clusters[6:10]: cluster_to_change_config.config_set_diff(params['cluster_altered_config']) service = cluster_to_change_config.service_add(name=params['service_with_component']) service.config_set_diff(params['service_altered_config']) service.component(name=params['component_with_config']).config_set_diff( params['component_altered_config'] ) with allure.step('Add two services to clusters and run action on them'): for install_cluster_with_two_services in clusters[12:30]: install_cluster_with_two_services.service_add(name=params['service_with_component']) install_cluster_with_two_services.service_add(name=params['lonely_service']) tasks.append(install_cluster_with_two_services.action(name=params['cluster_action']).run()) return cluster_bundle, clusters, tasks @allure.step('Create complex provider and {amount_of_hosts} hosts with prefix "{template}" by action') def create_complex_provider( self, provider_bundle: Bundle, template: str = 'complex-host', amount_of_hosts: int = 18 ) -> Tuple[Provider, Task]: """ Create provider, bunch of hosts via action (provide template if you want to use it more than 1 time). 
:returns: Create provider and hosts create tasks """ provider = provider_bundle.provider_create(name=f'Complex Provider {random_string(6)}') provider.config_set_diff({'very_important_flag': 54.4}) task = provider.action(name='create_hosts').run(config={'count': amount_of_hosts, 'template': template}) return provider, task @allure.step('Create two complex providers and three complex clusters') def create_complex_providers_and_clusters( self, adcm_client: ADCMClient, bundles_directory: Path ) -> Tuple[Provider, Provider, Cluster, Cluster, Cluster]: """ Upload complex_provider and complex_cluster Create two complex providers: 1. Provider that supply hosts for complex clusters (all hosts created by provider action and taken by clusters) 2. Provider that create multiple hosts via action, run actions on some of hosts and then delete multiple of them by host delete action And three complex clusters: 1. Cluster with all services and finished jobs 2. Cluster with config history (on cluster, one service and its components) 3. Not configured cluster just with hosts and one service added :returns: Tuple with provider and cluster objects in order that is declared above """ provider_bundle = adcm_client.upload_from_fs(bundles_directory / "complex_provider") provider_bundle.license_accept() provider, host_create_task = self.create_complex_provider(provider_bundle) provider_with_free_hosts, _ = self.create_complex_provider(provider_bundle, template='doomed-host') self._run_actions_on_host_and_delete_with_action(provider) cluster_bundle = adcm_client.upload_from_fs(bundles_directory / "complex_cluster") cluster_bundle.license_accept() cluster_with_history = self._create_cluster_with_config_history(cluster_bundle) # we want to wait for tasks on provider to be finished (for hosts to be created) host_create_task.wait() cluster_with_all_services = self._create_cluster_with_all_services( cluster_bundle, tuple(provider.host_list())[:3] ) cluster_with_hosts = self._create_cluster_with_hosts(cluster_bundle, tuple(provider.host_list())[3:]) return provider, provider_with_free_hosts, cluster_with_all_services, cluster_with_history, cluster_with_hosts @allure.step('Create two upgradable clusters, upgrade one of them') def create_upgradable_clusters(self, adcm_client: ADCMClient, bundles_directory: Path) -> Tuple[Cluster, Cluster]: """ 1. Upload two bundles with old and new version with possibility of upgrade 2. Create two clusters of previous version 3. Run dummy actions on both of them 4. Upgrade one of clusters :returns: Tuple with upgraded cluster and old-version cluster """ old_version_bundle = adcm_client.upload_from_fs(bundles_directory / "cluster_to_upgrade") adcm_client.upload_from_fs(bundles_directory / "cluster_greater_version") cluster_to_upgrade = old_version_bundle.cluster_create('I will be upgraded') good_old_cluster = old_version_bundle.cluster_create('I am good the way I am') _wait_for_tasks((cluster_to_upgrade.action(name='dummy').run(), good_old_cluster.action(name='dummy').run())) upgrade: Upgrade = cluster_to_upgrade.upgrade() upgrade.do() return cluster_to_upgrade, good_old_cluster @allure.step('Run some actions in upgraded ADCM') def run_actions_after_upgrade( self, cluster_all_services: Cluster, cluster_config_history: Cluster, simple_provider: Provider ) -> None: """ Run successful actions on: cluster, service, component. Run failed action on provider. 
""" sauce_service = cluster_config_history.service(name=self.SAUCE_SERVICE) run_cluster_action_and_assert_result(cluster_all_services, 'eat_sandwich') run_service_action_and_assert_result(sauce_service, 'put_on_bread') run_component_action_and_assert_result(sauce_service.component(name=self.SPICE_COMPONENT), 'add_more') run_provider_action_and_assert_result(simple_provider, 'validate', status='failed') @allure.step('Create complex cluster with all services') def _create_cluster_with_all_services(self, cluster_bundle: Bundle, hosts: Tuple[Host, Host, Host]) -> Cluster: """ Create cluster with three services Add three hosts on it Set components on hosts Run some actions """ with allure.step('Create cluster and add services'): cluster = cluster_bundle.cluster_create(name='With all services') cluster.config_set_diff({'very_important_flag': 1.6}) cheese_service = cluster.service_add(name=self.CHEESE_SERVICE) sauce_service = cluster.service_add(name=self.SAUCE_SERVICE) bread_service = cluster.service_add(name=self.BREAD_SERVICE) components = { self.MILK_COMPONENT: cheese_service.component(name=self.MILK_COMPONENT), self.TOMATO_COMPONENT: sauce_service.component(name=self.TOMATO_COMPONENT), self.LEMON_COMPONENT: sauce_service.component(name=self.LEMON_COMPONENT), self.SPICE_COMPONENT: sauce_service.component(name=self.SPICE_COMPONENT), } with allure.step('Add hosts'): for host in hosts: cluster.host_add(host) with allure.step('Run actions on the cluster, all components and services'): self._run_actions_on_components(cluster, sauce_service, components, hosts) _wait_for_tasks(service.action().run() for service in (cheese_service, sauce_service, bread_service)) cluster.action(name='make_sandwich').run().wait() return cluster @allure.step('Create cluster with config history') def _create_cluster_with_config_history(self, bundle: Bundle) -> Cluster: """Create cluster with one service and config history""" def get_random_config_map() -> dict: return { 'a_lot_of_text': {'simple_string': random_string(25), 'file_pass': random_<PASSWORD>(16)}, 'from_doc': { 'memory_size': random.randint(2, 64), 'person': { 'name': random_string(13), 'age': str(random.randint(14, 80)), 'custom_field': random_string(12), }, }, 'country_codes': [ {'country': random_string(12), 'code': int(random.randint(1, 200))} for _ in range(4) ], } def get_component_random_config_map() -> dict: return {'illicium': random.random()} config_change_iterations = 100 cluster = bundle.cluster_create(name='Config history') cluster.config_set_diff({'very_important_flag': 1.6}) with allure.step(f"Change cluster's config {config_change_iterations} times"): for _ in range(config_change_iterations): cluster.config_set_diff(get_random_config_map()) with allure.step(f"Add service and change its config {config_change_iterations} times"): service = cluster.service_add(name=self.SAUCE_SERVICE) for _ in range(config_change_iterations): service.config_set_diff(get_random_config_map()) with allure.step(f"Change component's config {config_change_iterations} times"): component = service.component() for _ in range(config_change_iterations): component.config_set_diff(get_component_random_config_map()) return cluster @allure.step('Create cluster, add service {service_name} and add hosts to cluster') def _create_cluster_with_hosts( self, cluster_bundle: Bundle, hosts: Tuple[Host, ...], service_name: str = SAUCE_SERVICE ) -> Cluster: """ Create cluster with given amount of hosts. Cluster is not configured (can't run actions on it). Cluster has 1 service added. 
""" cluster = cluster_bundle.cluster_create(name='Cluster with hosts') cluster.service_add(name=service_name) for host in hosts: cluster.host_add(host) return cluster @allure.step("Run actions on provider's hosts and remove every 4th host by action on host") def _run_actions_on_host_and_delete_with_action(self, provider: Provider) -> None: """Run dummy actions on each second host and delete each fourth host after tasks are finished""" hosts = tuple(provider.host_list()) _wait_for_tasks(tuple((host.action(name='dummy_action').run() for host in hosts[::2]))) _wait_for_tasks(tuple((host.action(name='remove_host').run() for host in hosts[::4]))) def _run_actions_on_components(self, cluster: Cluster, service: Service, components: dict, hosts: tuple): """Utility function to run actions on components (host actions too)""" cluster.action(name='make_sauce').run( hc=tuple( ( {'host_id': host_id, 'service_id': service.id, 'component_id': component_id} for host_id, component_id in ( (hosts[1].id, components[self.SPICE_COMPONENT].id), (hosts[1].id, components[self.LEMON_COMPONENT].id), (hosts[2].id, components[self.TOMATO_COMPONENT].id), ) ) ) ).wait() cluster.hostcomponent_set( (hosts[0], components[self.MILK_COMPONENT]), *[ (cluster.host(id=hc['host_id']), service.component(id=hc['component_id'])) for hc in cluster.hostcomponent() ], ) _wait_for_tasks( ( components[self.TOMATO_COMPONENT].action(name='add_more').run(), components[self.SPICE_COMPONENT].action(name='add_more').run(), ) ) def _delete_simple_cluster_with_job(self, simple_clusters: List[Cluster]) -> None: """Delete one of simple clusters where at least one job was ran""" cluster_with_job = next( filter(lambda cluster: any(len(action.task_list()) for action in cluster.action_list()), simple_clusters), None, ) if cluster_with_job is None: raise ValueError('At least on of simple clusters should have a job') cluster_with_job.delete() def _get_object_fields(adcm_object: AnyADCMObject) -> dict: """ Save all common fields of an object to one big dict Useful for dirty upgrade """ return { 'name_or_fqdn': adcm_object.name if hasattr(adcm_object, 'name') else adcm_object.fqdn, 'display_name': getattr(adcm_object, 'display_name', None), 'edition': getattr(adcm_object, 'edition', None), 'state': adcm_object.state, 'config': get_config(adcm_object), # if visibility is changed, it may break 'actions': set(action.id for action in adcm_object.action_list()), } @allure.step('Wait for tasks') def _wait_for_tasks(tasks_to_wait: Iterable[Task]): """Iterate over `tasks_to_wait` and wait for each to be finished (results aren't checked)""" for task in tasks_to_wait: task.wait()
1.617188
2
application.py
NisseOscar/Spotify-RampUp-2020
0
12792142
import numpy as np from flask import Flask, render_template, redirect, url_for, jsonify, make_response, request from Frontend import application # Imports application details from a private file from details import client_id, client_secret import requests from Backend.RequestError import RequestError from Backend.SptfyApiHndler import SptfyApiHndler application.config.from_object('configurations.ProductionConfig') @application.route('/getPlaylists', methods = ['GET']) def getPlaylists(): try: assert request.path == '/getPlaylists' assert request.method == 'GET' tkn = request.args.get('tkn') ## Get user_id req = requests.get( "https://api.spotify.com/v1/me", headers={ 'authorization': "Bearer " + tkn }) if req.status_code != 200: print('An error occured getting user id occured, error code: '+str(req.status_code)) raise RequestError('An Error has occured') req_Json = req.json() usr_id = req_Json['id'] ## Get user Playlists playlists = [] i = 0 while(len(playlists)==i): req = requests.get("https://api.spotify.com/v1/users/"+usr_id+"/playlists?limit="+str(50)+"&offset="+str(i), headers={ 'authorization': "Bearer " + tkn }) if req.status_code != 200: print('An error occured getting user playlists, error code: '+str(req.status_code)) raise RequestError('An Error has occured') req_Json = req.json() for lst in req_Json['items']: images = lst['images'] if(len(images)==0): continue if(len(images)>=2): image_url = images[1]['url'] else: image_url = images[0]['url'] playlists.append({'id':lst['id'], 'isActive':False,'image_url':image_url, 'name':lst['name'], 'tracks':lst['tracks']['total']}) i = i+50 return jsonify({'ok':True, 'playlists':playlists}) except RequestError: return jsonify({'ok':False, 'message':"A requesterror has occured"}) except AssertionError: return jsonify({'ok':False, 'message':"An invalid request has been made"}) except Exception: return jsonify({'ok':False, 'message':"An unexpected error has occured"}) @application.route('/CheckMood', methods = ['GET']) def checkMood(): try: assert request.path == '/CheckMood' assert request.method == 'GET' tkn = request.args.get('tkn') mood = request.args.get('mood') ## Get playlists on mood req = requests.get( "https://api.spotify.com/v1/search?q="+mood+"&type=playlist&limit=5", headers={ 'authorization': "Bearer " + tkn }) if req.status_code != 200: raise RequestError('An Error has occured') req_Json = req.json() playlists = req_Json['playlists']['items'] if(len(playlists)<5): return jsonify({'ok':True, 'valid':False}) else: return jsonify({'ok':True, 'valid':True}) except RequestError as e: return jsonify({'ok':False, 'message':"A requesterror has occured"}) except AssertionError as e: return jsonify({'ok':False, 'message':"An invalid type of request has been made"}) except Exception as e: return jsonify({'ok':False, 'message':"An unexpected error has occured"}) @application.route('/createPlaylist', methods = ['GET']) def createPlaylist(): try: assert request.path == '/createPlaylist' assert request.method == 'GET' tkn = request.args.get('tkn') mood = request.args.get('mood') playlists = request.args.get('playlistIDs').split(',') ####### PUT BACKEND CODE METHOD HERE ################# sptfyApi = SptfyApiHndler() newPlaylistID = sptfyApi.filterPlaylists(client_id,tkn,mood,playlists) ######################## # newPlaylistID = 'https://open.spotify.com/embed/playlist/7xB5RIoWhp2RHVCT43GwWg?si=9XxgO-g9QIS0v4GcIaCH9Q' return jsonify({'ok':True, 'newPlaylistID':newPlaylistID}) except RequestError as e: print(e) return jsonify({'ok':False, 
'message':"A requesterror has occured"}) except AssertionError as e: return jsonify({'ok':False, 'message':"An invalid type of request has been made"}) except Exception as e: print(e) return jsonify({'ok':False, 'message':"An unexpected error has occured"}) @application.route('/') def login(): scopes = ['user-read-private','user-read-email','playlist-read-private','playlist-read-collaborative','playlist-modify-public'] scope = '%20'.join(scopes) redirect_url = url_for('index') redirect_url = 'http://127.0.0.1:5000'+redirect_url url = 'https://accounts.spotify.com/authorize?client_id='+client_id+'&redirect_uri='+redirect_url+'&scope='+scope+'&response_type=token&state=123' return redirect(url) @application.route('/welcome') def index(): resp = make_response(render_template("index.html")) resp.set_cookie('cross-site-cookie', 'spotify1', domain='.spotify.com', samesite=None, secure=True); resp.set_cookie('cross-site-cookie', 'spotify2', domain='.accounts.spotify.com', samesite=None, secure=True); resp.set_cookie('cross-site-cookie', 'spotify3', domain='.community.spotify.com', samesite=None, secure=True); resp.set_cookie('cross-site-cookie', 'spotify4', domain='.www.spotify.com', samesite=None, secure=True); resp.set_cookie('cross-site-cookie', 'goadjust', domain='go.adjust.com', samesite=None, secure=True); resp.set_cookie('cross-site-cookie', 'applicationadjust', domain='application.adjust.<EMAIL>', samesite=None, secure=True); resp.set_cookie('cross-site-cookie', 'general', samesite=None, secure=True); resp.headers.add('Set-Cookie','cross-site-cookie=spotify; SameSite=None; Secure') return resp if __name__=='__main__': application.run()
2.75
3
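getPlaylists above pages through the Spotify playlists endpoint with limit/offset. A generic sketch of that pattern, written against a stand-in fetch_page callable rather than the real API; stopping when a page comes back short is a slightly more robust end condition than comparing the collected length to the offset:

def fetch_all(fetch_page, page_size=50):
    items, offset = [], 0
    while True:
        page = fetch_page(limit=page_size, offset=offset)
        items.extend(page)
        if len(page) < page_size:  # a short page means we reached the end
            break
        offset += page_size
    return items

data = list(range(123))
assert fetch_all(lambda limit, offset: data[offset:offset + limit]) == data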
timberr/core/migrations/0006_auto_20200116_1824.py
luckyadogun/timberinvoice
1
12792143
<filename>timberr/core/migrations/0006_auto_20200116_1824.py # Generated by Django 2.1.15 on 2020-01-16 17:24 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('core', '0005_auto_20200116_1747'), ] operations = [ migrations.AlterField( model_name='invoice', name='invoice_id', field=models.CharField(max_length=10, unique=True), ), ]
1.328125
1
tests/test_context_manager.py
timgates42/tasktiger
1,143
12792144
<reponame>timgates42/tasktiger """Child context manager tests.""" import redis from tasktiger import Worker from .tasks import exception_task, simple_task from .test_base import BaseTestCase from .config import TEST_DB, REDIS_HOST class ContextManagerTester(object): """ Dummy context manager class. Uses Redis to track number of enter/exit calls """ def __init__(self, name): self.name = name self.conn = redis.Redis( host=REDIS_HOST, db=TEST_DB, decode_responses=True ) self.conn.set('cm:{}:enter'.format(self.name), 0) self.conn.set('cm:{}:exit'.format(self.name), 0) self.conn.set('cm:{}:exit_with_error'.format(self.name), 0) def __enter__(self): self.conn.incr('cm:{}:enter'.format(self.name)) def __exit__(self, exc_type, exc_val, exc_tb): self.conn.incr('cm:{}:exit'.format(self.name)) if exc_type is not None: self.conn.incr('cm:{}:exit_with_error'.format(self.name)) self.conn.close() class TestChildContextManagers(BaseTestCase): """Child context manager tests.""" def _get_context_managers(self, number): return [ContextManagerTester('cm' + str(i)) for i in range(number)] def _test_context_managers(self, num, task, should_fail=False): cms = self._get_context_managers(num) self.tiger.config['CHILD_CONTEXT_MANAGERS'] = cms self.tiger.delay(task) Worker(self.tiger).run(once=True) for i in range(num): assert self.conn.get('cm:{}:enter'.format(cms[i].name)) == '1' assert self.conn.get('cm:{}:exit'.format(cms[i].name)) == '1' if should_fail: assert ( self.conn.get('cm:{}:exit_with_error'.format(cms[i].name)) == '1' ) else: assert ( self.conn.get('cm:{}:exit_with_error'.format(cms[i].name)) == '0' ) def test_fixture(self): cms = self._get_context_managers(1).pop() with cms: pass assert self.conn.get('cm:{}:enter'.format(cms.name)) == '1' assert self.conn.get('cm:{}:exit'.format(cms.name)) == '1' def test_single_context_manager(self): self._test_context_managers(1, simple_task) self._test_context_managers(1, exception_task, should_fail=True) def test_multiple_context_managers(self): self._test_context_managers(10, simple_task) self._test_context_managers(10, exception_task, should_fail=True)
2.390625
2
docs/examples/save_geotiff.py
carderne/descarteslabs-python
167
12792145
""" ================================================== Save image to GeoTIFF ================================================== This example demonstrates how to save an image to your local machine in GeoTiff format. """ import descarteslabs as dl # Create an aoi feature to clip imagery to box = { "type": "Polygon", "coordinates": [ [ [-108.64292971398066, 33.58051349561343], [-108.27082685426221, 33.58051349561343], [-108.27082685426221, 33.83925599538719], [-108.64292971398066, 33.83925599538719], [-108.64292971398066, 33.58051349561343], ] ], } # Two predefined image IDs for mosaic and download. These can be obtained through a Metadata or Scenes API search images = [ "landsat:LC08:01:RT:TOAR:meta_LC08_L1TP_035037_20180602_20180602_01_RT_v1", "landsat:LC08:01:RT:TOAR:meta_LC08_L1TP_035036_20180602_20180602_01_RT_v1", ] # The Raster API call to download an image mosaic. Other parameters are available # The file is written in to the same directory as the script. raster_client = dl.Raster() raster_client.raster( inputs=images, bands=["red", "green", "blue", "alpha"], scales=[[0, 5500], [0, 5500], [0, 5500], None], data_type="Byte", cutline=box, save=True, outfile_basename="save_local", resolution=60, )
2.921875
3
setup.py
MedleyLabs/DeepAutonomic
0
12792146
<reponame>MedleyLabs/DeepAutonomic from setuptools import setup with open('README.md', 'r') as f: long_description = f.read() with open('LICENSE', 'r') as f: license_text = f.read() setup( name='DeepSomatics', version='0.0.1', description='An experimental, chatbot-driven therapist for somatic experiencing with biofeedback', long_description=long_description, license=license_text, author='<NAME>', author_email='<EMAIL>', url='https://github.com/MedleyLabs/DeepSomatics', packages=[], )
1.195313
1
c2wl_rocket/__main__.py
KerstenBreuer/C2WL-Rocket
0
12792147
from __future__ import absolute_import import os import argparse import cwltool.main import cwltool.argparser import cwltool.utils from .exec_profile import ExecProfileBase, LocalToolExec from cwltool.executors import MultithreadedJobExecutor, SingleJobExecutor from . import worker from .tool_handling import make_custom_tool from .log_handling import error_message from copy import copy import typing_extensions from inspect import isclass import importlib import functools import yaml ## get cwltool default args: cwltool_ap = cwltool.argparser.arg_parser() cwltool_default_args = cwltool_ap.parse_args([]) def main(args=None): if args is None: parser = argparse.ArgumentParser( prog="C2WL-Rocket", description='Customizable CWL Rocket - A highly flexible CWL execution engine.' ) subparser = parser.add_subparsers( help="CWLab sub-commands", dest='subcommand' ) ## subcommand launch: parser_launch = subparser.add_parser( "launch", help="Start execution of a CWL workflow given run input parameter." ) parser_launch.add_argument("--debug", action="store_true", help="Print debugging level messages." ) parser_launch.add_argument('-p', '--exec-profile', help="""Specify an exec profile. Please specify the name to a python module and a contained exec profile class sperated by \":\" (e.g. the default \"c2wl_rocket.exec_profile:LocalToolExec\"). Alternatively you can specify the full path to a python file containing an exec profile class (e.g. \"/path/to/my/exec_profiles.py:CustomExec\"). """, default="c2wl_rocket.exec_profile:LocalToolExec" ) parser_launch.add_argument('cwl_document', help="Provide a CWL workflow or tool." ) parser_launch.add_argument('input_params', nargs=argparse.REMAINDER, help="Provide input parameters in YAML or JSON format." ) parser_launch.add_argument("--outdir", type=typing_extensions.Text, help="Output directory, default current directory", default=os.path.abspath('.') ) exgroup = parser_launch.add_mutually_exclusive_group() exgroup.add_argument("--tmp-outdir-prefix", type=typing_extensions.Text, help="Path prefix for intermediate output directories", default=cwltool.utils.DEFAULT_TMP_PREFIX ) exgroup.add_argument("--cachedir", type=typing_extensions.Text, help="Directory to cache intermediate workflow outputs to avoid recomputing steps.", default="" ) exgroup = parser_launch.add_mutually_exclusive_group() exgroup.add_argument("--move-outputs", action="store_const", const="move", default="move", help="Move output files to the workflow output directory and delete " "intermediate output directories (default).", dest="move_outputs" ) exgroup.add_argument("--leave-outputs", action="store_const", const="leave", default="move", help="Leave output files in intermediate output directories.", dest="move_outputs" ) exgroup.add_argument("--copy-outputs", action="store_const", const="copy", default="move", help=""" Copy output files to the workflow output directory, don't delete intermediate output directories. """, dest="move_outputs" ) # subcommand start_worker: parser_start_worker = subparser.add_parser( "start_worker", help="Start a worker service instance." ) parser_start_worker.add_argument("-H", "--web_server_host", type=typing_extensions.Text, help=""" IP of webserver host. Specify \"0.0.0.0\" for remote availablity within the current network. """, default="localhost" ) parser_start_worker.add_argument("-P", "--web_server_port", type=typing_extensions.Text, help=""" Port of webserver. 
""", default="5000" ) args = parser.parse_args() if args.subcommand == "launch": if isinstance(args.exec_profile, str): exec_profile_invalid_message = error_message( "main", """ The specified exec profile is invalid. Please either specify a class inheriting from ExecProfileBase at c2wl_rocket.execprofile or if using the commandline specify the name or path to a module that containes such a class. Please see the commandline help for details. """, is_known=True ) assert ":" in args.exec_profile, \ exec_profile_invalid_message exec_profile_module_name = args.exec_profile.split(":")[0] exec_profile_class_name = args.exec_profile.split(":")[1] try: exec_profile_module = importlib.import_module(exec_profile_module_name) except: try: spec = importlib.util.spec_from_file_location( "exec_profile_module", exec_profile_module_name ) exec_profile_module = importlib.util.module_from_spec(spec) spec.loader.exec_module(exec_profile_module) except: raise AssertionError( error_message( "main", """ The specified exec profile module \"{exec_profile_module_name}\" could not be imported. """, is_known=True ) ) assert hasattr(exec_profile_module, exec_profile_class_name), \ error_message( "main", f""" The specified exec profile module \"{exec_profile_module_name}\" has no class \"{exec_profile_class_name}\". """, is_known=True ) args.exec_profile = getattr(exec_profile_module, exec_profile_class_name) assert isclass(args.exec_profile) and issubclass(args.exec_profile, ExecProfileBase), \ error_message( "main", """ The specified exec profile class does not inherit from ExecProfileBase at c2wl_rocket.execprofile. """, is_known=True ) cwltool_args = copy(cwltool_default_args) cwltool_args.workflow = args.cwl_document cwltool_args.job_order = args.input_params cwltool_args.outdir = args.outdir cwltool_args.tmp_outdir_prefix = args.tmp_outdir_prefix cwltool_args.cachedir = args.cachedir cwltool_args.move_outputs = args.move_outputs cwltool_args.debug = args.debug loading_context = cwltool.main.LoadingContext(vars(cwltool_args)) with open(args.cwl_document, mode="r") as cwl: cwl_content = yaml.load(cwl) assert "cwlVersion" in cwl_content.keys(), error_message( "main", "No cwlVersion as specified in the CWL document.", is_known=True ) workflow_metadata = {"cwlVersion": cwl_content["cwlVersion"]} loading_context.construct_tool_object = functools.partial( make_custom_tool, exec_profile_class=args.exec_profile, workflow_metadata=workflow_metadata ) runtime_context = cwltool.main.RuntimeContext(vars(cwltool_args)) job_executor = MultithreadedJobExecutor() if cwltool_args.parallel \ else SingleJobExecutor() job_executor.max_ram = job_executor.max_cores = float("inf") # hand arguments over to main exec function: cwltool.main.main( args=cwltool_args, executor=job_executor, loadingContext=loading_context, runtimeContext=runtime_context ) elif args.subcommand == "start_worker": worker.start( web_server_host=args.web_server_host, web_server_port=int(args.web_server_port) ) def run( cwl_document:str, input_params:str, exec_profile=LocalToolExec, # please note here class not # the path to the module is required outdir=os.path.abspath('.'), tmp_outdir_prefix=cwltool.utils.DEFAULT_TMP_PREFIX, cachedir="", move_outputs="move", # one of "move", "copy", or "leave" debug=False ): """ Main API entry point. 
Executes c2wl_rocket.__main__.main. """ args = argparse.Namespace( debug=debug, exec_profile=exec_profile, cwl_document=cwl_document, input_params=[input_params], outdir=outdir, tmp_outdir_prefix=tmp_outdir_prefix, cachedir=cachedir, move_outputs=move_outputs ) main(args) if __name__ == "__main__": main()
2.09375
2
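The launch subcommand above resolves its --exec-profile argument by splitting a "module:Class" string, importing the module by name and falling back to loading it from a file path. A reduced sketch of that resolution step, with the error reporting stripped out and illustrative names:

import importlib
import importlib.util

def load_class(spec_str: str):
    module_part, class_name = spec_str.split(":", 1)
    try:
        module = importlib.import_module(module_part)
    except ImportError:
        # fall back to treating the left-hand side as a path to a .py file
        spec = importlib.util.spec_from_file_location("dynamic_module", module_part)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
    return getattr(module, class_name)

print(load_class("collections:OrderedDict"))  # -> <class 'collections.OrderedDict'>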
models/feature.py
Thesharing/lfesm
6
12792148
<reponame>Thesharing/lfesm # -*- coding: utf-8 -*- import re subject_pat = { 'lender_replace': re.compile(r'原告(?!:)'), 'borrower_replace': re.compile(r'被告(?!:)'), 'legal_represent_replace': re.compile(r'法定代表人(?!:)'), 'lender': re.compile(r'原告:(.+?)[。|,]'), 'borrower': re.compile(r'被告:(.+?)[。|,]'), 'legal_represent': re.compile(r'法定代表人:(.+?)[。|,]') } interest_pat = [ (re.compile(r'(月利息|月息|月利率|月利息率)按?(\d+(\.\d{1,2})?)([%分])'), 12), (re.compile(r"(月利息|月息|月利率|月利息率)按?(\d+(\.\d{1,2})?)毛"), 120), (re.compile(r"(月)(\d+(\.\d{1,2})?)%(利息|息|利率|利息率)"), 12), (re.compile(r"(年利息|年息|年利率|年利息率)按?(\d+(\.\d{1,2})?)([%分])"), 1), (re.compile(r"(年利息|年息|年利率|年利息率)按?(\d+(\.\d{1,2})?)毛"), 10), (re.compile(r"(年)(\d+(\.\d{1,2})?)%(利息|息|利率|利息率)"), 1) ] interest_interval = [0.36, 0.24, 0] payment_list = [['微信转账', '微信支付', '支付宝'], ['银行转账', '手机银行'], ['现金']] agreement_list = [['合同', '协议'], ['收据', '凭据', '借条', '书面承诺', '承诺书'], ['流水'], [], ['微信', '短信', '聊天']] def extract_features_and_replace(text): # 1. Borrower, lender and legal representative info, main = text.split('\n\n') info = subject_pat['legal_represent_replace']. \ sub('法定代表人:', subject_pat['borrower_replace']. sub('被告:', subject_pat['lender_replace']. sub('原告:', info))) idx = 0 lenders = [] lender_pos = [] for item in subject_pat['lender'].finditer(info): lenders.append(item.group(1)) if idx: lender_pos.append((idx, item.span()[0])) idx = item.span()[1] lender_type = [0] * len(lenders) first = True borrowers = [] borrower_pos = [] for item in subject_pat['borrower'].finditer(info): borrowers.append(item.group(1)) if first: lender_pos.append((idx, item.span()[0])) first = False else: borrower_pos.append((idx, item.span()[0])) idx = item.span()[1] borrower_pos.append((idx, len(info))) borrower_type = [0] * len(borrowers) represents = [] for item in subject_pat['legal_represent'].finditer(info): represents.append(item.group(1)) span = item.span() for idx, pos in enumerate(lender_pos): if span[0] > pos[0] and span[1] <= pos[1]: lender_type[idx] = 1 for idx, pos in enumerate(borrower_pos): if span[0] >= pos[0] and span[1] <= pos[1]: borrower_type[idx] = 1 for name in lenders: main = main.replace('原告' + name, '原告').replace(name, '原告') for name in borrowers: main = main.replace('被告' + name, '被告').replace(name, '被告') for name in represents: main = main.replace('法定代表人' + name, '法定代表人').replace(name, '法定代表人') lender_type = [int(not all(lender_type)), int(any(lender_type))] borrower_type = [int(not all(borrower_type)), int(any(borrower_type))] lender_count = len(lenders) borrower_count = len(borrowers) # 2. guarantee guarantee = [0, 0, 0] if '抵押' in main: guarantee[1] = 1 if '担保' in main: guarantee[2] = 1 if not any(guarantee): guarantee[0] = 1 # 3. interest interest = 0 interest_value = 0 interest_type = [0] * 4 for pattern, factor in interest_pat: for item in pattern.finditer(main): interest = 1 interest_value = max(interest_value, round(float(item.group(2)) * factor, 2)) for idx, interval in enumerate(interest_interval): if interest_value > interval: interest_type[3 - idx] = 1 break if interest == 0: interest_type[0] = 1 # TODO: replace the interest? # 4. payment methods payment = [0] * 4 for idx, methods in enumerate(payment_list): for method in methods: if method in main: payment[idx + 1] = 1 break if not any(payment): payment[0] = 1 # 5. repayment repayment = [0] * 3 if '已还款' in main: repayment[1] = 1 if '尚未还款' in main: repayment[2] = 1 if not any(repayment): repayment[0] = 1 # 5. 
agreements agreement = [0] * 6 for idx, methods in enumerate(agreement_list): for method in methods: if method in main: agreement[idx + 1] = 1 break if guarantee[1] or guarantee[2]: agreement[4] = 1 if not any(agreement): agreement[0] = 1 # TODO: concat info + main ? return main.strip('\n'), lender_type + [lender_count] + borrower_type + [borrower_count] + guarantee + [interest] + [ interest_value] + interest_type + payment + repayment + agreement # TODO: QuantileTransformer?
2.28125
2
haplotype/dataset.py
zxChouSean/crispr_bedict_reproduce
5
12792149
import numpy as np
import torch
from torch import nn
from torch.utils.data import Dataset
from tqdm import tqdm


class SeqMaskGenerator(object):
    def __init__(self, seqconfig):
        self.seqconfig = seqconfig

    def create_enc_mask(self, enc_inp):
        # enc_inp = [N, inp_seq_len] where N is the total number of input sequences
        bsize, seq_len = enc_inp.shape
        # enc_mask.shape = [bsize, 1, inp_seq_len, inp_seq_len]
        enc_mask = np.full((bsize, 1, seq_len, seq_len), 1)
        return enc_mask

    def create_enc_dec_mask(self, num_samples):
        inp_seqlen = self.seqconfig.seq_len
        outp_seqlen = self.seqconfig.ewindow_end + 1
        # enc_dec_mask.shape = [num_samples, 1, outp_seqlen, inp_seqlen]
        enc_dec_mask = np.full((num_samples, 1, outp_seqlen, inp_seqlen), 1)
        return enc_dec_mask

    def create_dec_mask(self, mask_targetbase):
        # mask_targetbase = [num_haplotypes, outcome_seq_len] where outcome_seq_len is the
        # length of the haplotype outcome sequence; generates a causal mask over the
        # editable window
        seqconfig = self.seqconfig
        num_haplotypes = mask_targetbase.shape[0]
        ewindow_st, ewindow_end = seqconfig.ewindow_st, seqconfig.ewindow_end

        tm = mask_targetbase[:, ewindow_st:ewindow_end + 1]
        # tindx is a (row indices, column indices) pair where the mask has 1 entries
        tindx = np.where(tm.astype(bool))
        # start of the target base occurrence in the sequence
        target_pos_st = tindx[1][0]
        ew_seqlen = ewindow_end - (target_pos_st + ewindow_st) + 1

        sub_mask = np.ones((ew_seqlen, ew_seqlen))
        sub_mask_ind = np.triu_indices(ew_seqlen, k=0)
        sub_mask[sub_mask_ind[0], sub_mask_ind[1]] = 0

        dec_causal_mask = np.ones((ewindow_end + 1, ewindow_end + 1))
        offset = target_pos_st + ewindow_st
        for i in range(ewindow_end + 1):
            if i < offset:
                dec_causal_mask[i, offset:] = 0
            else:
                dec_causal_mask[i, offset:] = sub_mask[i - offset, :]

        # reshape to [1, ewindow_end+1, ewindow_end+1] and repeat across haplotypes
        dec_causal_mask = dec_causal_mask.reshape(1, dec_causal_mask.shape[0], dec_causal_mask.shape[1])
        dec_causal_mask = np.repeat(dec_causal_mask, num_haplotypes, axis=0)
        return dec_causal_mask


class HaplotypeDataTensor(Dataset):
    def __init__(self, seqconfig):
        self.seqconfig = seqconfig

    def generate_tensor_from_df(self, proc_df, tb_cb_nucl, outcome_prop_col):
        # create the tensors we need; N is the total number of input sequences
        print('Generating tensors using sequence config:\n', self.seqconfig)
        Xinp_enc = []             # list, (N x inp_sequence_len)
        Xinp_dec = []             # list of arrays, (N x num_haplotypes x outp_sequence_len)
        mask_inp_targetbase = []  # list of arrays, (N x num_haplotypes x outp_sequence_len)
        target_conv = []          # list of arrays, (N x num_haplotypes x outp_sequence_len)
        target_prob = []          # list of arrays, (N x num_haplotypes)
        mask_dec = []
        indx_seqid_map = {}       # dict, int_id:(seqid, target_seq)
        inpseq_outpseq_map = {}   # dict, int_id:[outp_seq1, outp_seq2, ....]

        seqconfig = self.seqconfig
        mask_generator = SeqMaskGenerator(seqconfig)
        seq_len = seqconfig.seq_len
        # target base and conversion base (i.e. A->G for the ABE base editor,
        # C->T for the CBE base editor)
        tb_nucl, cb_nucl = tb_cb_nucl

        # the output sequence runs from 0 to the end of the editable window
        for gr_name, gr_df in tqdm(proc_df.groupby(by=['seq_id', 'Inp_seq'])):
            Xinp_enc.append(gr_df[[f'Inp_B{i}' for i in range(1, seq_len + 1)]].values[0, :])
            Xinp_dec.append(gr_df[[f'Outp_B{i}' for i in range(1, seq_len + 1)]].values[:, 0:seqconfig.ewindow_end + 1])
            mask_inp_targetbase.append(gr_df[[f'Inp_M{i}' for i in range(1, seq_len + 1)]].values[:, 0:seqconfig.ewindow_end + 1])
            conv = gr_df[[f'conv{tb_nucl}{cb_nucl}_{i}' for i in range(1, seq_len + 1)]].values[:, 0:seqconfig.ewindow_end + 1]
            target_conv.append(conv)
            if outcome_prop_col is not None:
                target_prob.append(gr_df[outcome_prop_col].values)

            # compute mask_dec for this group of haplotypes
            mask_dec.append(mask_generator.create_dec_mask(mask_inp_targetbase[-1]))

            inpseq_id = len(indx_seqid_map)
            indx_seqid_map[inpseq_id] = gr_name
            inpseq_outpseq_map[inpseq_id] = gr_df['Outp_seq'].values.tolist()

        mask_enc = None
        mask_encdec = None

        # tensorize
        print('--- tensorizing ---')
        device_cpu = torch.device('cpu')
        self.Xinp_enc = torch.tensor(Xinp_enc).long().to(device_cpu)
        self.Xinp_enc = self.Xinp_enc.reshape(self.Xinp_enc.shape[0], 1, self.Xinp_enc.shape[1])
        self.Xinp_dec = [torch.from_numpy(arr).long().to(device_cpu) for arr in Xinp_dec]
        self.mask_inp_targetbase = [torch.from_numpy(arr).long().to(device_cpu) for arr in mask_inp_targetbase]
        # one-hot encoding, (N x num_haplotypes x outp_sequence_len x 2)
        self.target_conv_onehot = [torch.nn.functional.one_hot(torch.from_numpy(arr).long().to(device_cpu), num_classes=2)
                                   for arr in target_conv]
        if outcome_prop_col is not None:
            self.target_prob = [torch.from_numpy(arr).float().to(device_cpu) for arr in target_prob]
        else:
            self.target_prob = None
        self.mask_enc = mask_enc
        self.mask_encdec = mask_encdec
        self.mask_dec = [torch.from_numpy(arr).long().to(device_cpu) for arr in mask_dec]
        self.num_samples = len(self.Xinp_enc)  # int, number of sequences
        self.indx_seqid_map = indx_seqid_map
        self.inpseq_outpseq_map = inpseq_outpseq_map
        print('--- end ---')

    def hap_collate(self, batch):
        # pack batches in a list for now; to be used in the dataloader object
        return [item for item in batch]

    def __getitem__(self, indx):
        if self.target_prob is None:
            return_target_prob = None
        else:
            return_target_prob = self.target_prob[indx]
        return (self.Xinp_enc[indx],
                self.Xinp_dec[indx],
                self.mask_enc,
                self.mask_dec[indx],
                self.mask_encdec,
                self.mask_inp_targetbase[indx],
                self.target_conv_onehot[indx],
                return_target_prob,
                indx,
                self.indx_seqid_map[indx],
                self.inpseq_outpseq_map[indx])

    def __len__(self):
        return self.num_samples


class PartitionDataTensor(Dataset):
    def __init__(self, dtensor, partition_ids, dsettype, run_num):
        self.dtensor = dtensor              # instance of :class:`HaplotypeDataTensor`
        self.partition_ids = partition_ids  # list of sequence indices
        self.dsettype = dsettype            # string, dataset type (i.e. train, validation, test)
        self.run_num = run_num              # int, run number
        self.num_samples = len(self.partition_ids)  # int, number of sequences in the partition

    def __getitem__(self, indx):
        target_id = self.partition_ids[indx]
        return self.dtensor[target_id]

    def __len__(self):
        return self.num_samples


def print_data_example(elm):
    (Xinp_enc, Xinp_dec, mask_enc, mask_dec, mask_encdec, mask_targetbase_enc,
     target_conv_onehot, target_prob, indx, seqid) = elm
    print('Xinp_enc:\n', Xinp_enc, 'shape:', Xinp_enc.shape)
    print('Xinp_dec:\n', Xinp_dec, 'shape:', Xinp_dec.shape)
    if mask_enc is not None:
        print('mask_enc:\n', mask_enc, 'shape:', mask_enc.shape)
    print('mask_dec:\n', mask_dec, 'shape:', mask_dec.shape)
    if mask_encdec is not None:
        print('mask_encdec:\n', mask_encdec, 'shape:', mask_encdec.shape)
    print('mask_targetbase_enc:\n', mask_targetbase_enc, 'shape:', mask_targetbase_enc.shape)
    print('target_conv_onehot:\n', target_conv_onehot, 'shape:', target_conv_onehot.shape)
    if target_prob is not None:
        print('target_prob:\n', target_prob, 'shape:', target_prob.shape)
    else:
        print('target_prob:None')
    print('indx:', indx)
    print('seqid:', seqid)


def hap_collate(batch):
    # pack batches in a list for now; to be used in the dataloader object
    return [item for item in batch]
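
# A minimal usage sketch of the classes above, kept as comments because it assumes a
# hypothetical `seqconfig` object (exposing seq_len, ewindow_st and ewindow_end) and a
# preprocessed dataframe `proc_df` with the seq_id/Inp_seq/Outp_seq and
# Inp_B*/Outp_B*/Inp_M*/conv* columns the code expects; the names and column values
# below are assumptions for illustration only, not part of any released API.
#
# from torch.utils.data import DataLoader
#
# dtensor = HaplotypeDataTensor(seqconfig)
# dtensor.generate_tensor_from_df(proc_df, tb_cb_nucl=('A', 'G'), outcome_prop_col='outcome_prop')
# train_part = PartitionDataTensor(dtensor, partition_ids=list(range(len(dtensor))),
#                                  dsettype='train', run_num=0)
# loader = DataLoader(train_part, batch_size=8, shuffle=True, collate_fn=hap_collate)
# for batch in loader:  # each batch is a plain Python list of per-sample tuples
#     first_sample = batch[0]
#     break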
2.421875
2
mmdet/models/anchor_heads/ttfwh_head.py
mrsempress/mmdetection
0
12792150
<reponame>mrsempress/mmdetection
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import normal_init, kaiming_init, constant_init
import math
import numpy as np

from mmdet.ops import ModulatedDeformConvPack, soft_nms
from mmdet.core import multi_apply, force_fp32
from mmdet.models.losses import ct_focal_loss, giou_loss
from mmdet.models.utils import (build_conv_layer, build_norm_layer,
                                bias_init_with_prob, ConvModule)
from ..registry import HEADS
from .anchor_head import AnchorHead


class UpsamplingLayers(nn.Sequential):
    def __init__(self, in_channels, out_channels, norm_cfg=dict(type='BN'), no_upsample=False):
        mdcn = ModulatedDeformConvPack(
            in_channels, out_channels, 3, stride=1,
            padding=1, dilation=1, deformable_groups=1)
        layers = []
        layers.append(mdcn)
        if norm_cfg:
            layers.append(build_norm_layer(norm_cfg, out_channels)[1])
        layers.append(nn.ReLU(inplace=True))
        if not no_upsample:
            layers.append(nn.UpsamplingBilinear2d(scale_factor=2))
        super(UpsamplingLayers, self).__init__(*layers)


class ShortcutConnection(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_sizes, conv_cfg):
        super(ShortcutConnection, self).__init__()
        layers = []
        for i, kernel_size in enumerate(kernel_sizes):
            inc = in_channels if i == 0 else out_channels
            padding = (kernel_size - 1) // 2
            if conv_cfg:
                layers.append(
                    build_conv_layer(conv_cfg, inc, out_channels, kernel_size, padding=padding))
            else:
                layers.append(
                    nn.Conv2d(inc, out_channels, kernel_size, padding=padding))
            if i < len(kernel_sizes) - 1:
                layers.append(nn.ReLU(inplace=True))
        self.layers = nn.Sequential(*layers)

    def forward(self, x):
        return self.layers(x)


@HEADS.register_module
class TTFLevelHead(AnchorHead):

    def __init__(self,
                 inplanes=(64, 128, 256, 512),
                 planes=(256, 128, 64),
                 down_ratio_b1=8,
                 down_ratio_b2=4,
                 hm_head_channels=256,
                 wh_head_channels=(64, 64),
                 hm_head_conv_num=2,
                 wh_head_conv_num=(2, 2),
                 num_classes=81,
                 shortcut_cfg=(1, 2, 3),
                 wh_scale_factor_b1=16.,
                 wh_scale_factor_b2=16.,
                 alpha=0.54,
                 beta=0.54,
                 hm_weight=1.,
                 wh_weight_b1=5.,
                 wh_weight_b2=5.,
                 b1_min_length=32,
                 b2_max_length=64,
                 level_base_area=True,
                 inf_branch=['b1', 'b2'],
                 use_simple_nms=True,
                 focal_loss_beta=4,
                 focal_b2_only=False,
                 shortcut_conv_cfg=None,
                 head_conv_cfg=None,
                 inf_branch_filter=False,
                 max_objs=128,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN')):
        super(AnchorHead, self).__init__()
        assert len(inplanes) == 4 and len(planes) == 3 and len(shortcut_cfg) == 3

        self.inplanes = inplanes
        self.planes = planes
        self.down_ratio_b1 = down_ratio_b1
        self.down_ratio_b2 = down_ratio_b2
        self.hm_head_channels = hm_head_channels
        self.wh_head_channels = wh_head_channels
        self.hm_head_conv_num = hm_head_conv_num
        self.wh_head_conv_num = wh_head_conv_num
        self.num_classes = num_classes
        self.num_fg = num_classes - 1
        self.shortcut_cfg = shortcut_cfg
        self.wh_scale_factor_b1 = wh_scale_factor_b1
        self.wh_scale_factor_b2 = wh_scale_factor_b2
        self.alpha = alpha
        self.beta = beta
        self.max_objs = max_objs
        # heatmap loss weight, shared by both branches
        self.hm_weight_b1 = hm_weight
        self.hm_weight_b2 = hm_weight
        self.wh_weight_b1 = wh_weight_b1
        self.wh_weight_b2 = wh_weight_b2
        self.b1_min_length = b1_min_length
        self.b2_max_length = b2_max_length
        self.level_base_area = level_base_area
        self.inf_branch = inf_branch
        self.use_simple_nms = use_simple_nms
        self.focal_loss_beta = focal_loss_beta
        self.focal_b2_only = focal_b2_only
        self.shortcut_conv_cfg = shortcut_conv_cfg
        self.head_conv_cfg = head_conv_cfg
        self.inf_branch_filter = inf_branch_filter
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.base_loc_b1 = None
        self.base_loc_b2 = None
        self.fp16_enabled = False

        self._init_layers()

    def _init_branch_layers(self, planes):
        wh_layers, wh2_layers, hm_layers = [], [], []

        inp = planes
        for i in range(self.wh_head_conv_num[0]):
            wh_layers.append(
                ConvModule(inp, self.wh_head_channels[0], 3, padding=1, conv_cfg=self.conv_cfg))
            inp = self.wh_head_channels[0]
        if self.head_conv_cfg:
            wh_layers.append(
                build_conv_layer(self.head_conv_cfg, self.wh_head_channels[0], 4,
                                 kernel_size=3, padding=1))
        else:
            wh_layers.append(nn.Conv2d(self.wh_head_channels[0], 4, 3, padding=1))

        inp = planes
        for i in range(self.wh_head_conv_num[1]):
            wh2_layers.append(
                ConvModule(inp, self.wh_head_channels[1], 3, padding=1, conv_cfg=self.conv_cfg))
            inp = self.wh_head_channels[1]
        if self.head_conv_cfg:
            wh2_layers.append(
                build_conv_layer(self.head_conv_cfg, self.wh_head_channels[1], 4,
                                 kernel_size=3, padding=1))
        else:
            wh2_layers.append(nn.Conv2d(self.wh_head_channels[1], 4, 3, padding=1))

        inp = planes
        for i in range(self.hm_head_conv_num):
            hm_layers.append(
                ConvModule(inp, self.hm_head_channels, 3, padding=1, conv_cfg=self.conv_cfg))
            inp = self.hm_head_channels
        if self.head_conv_cfg:
            hm_layers.append(
                build_conv_layer(self.head_conv_cfg, self.hm_head_channels, self.num_fg,
                                 kernel_size=3, padding=1))
        else:
            hm_layers.append(nn.Conv2d(self.hm_head_channels, self.num_fg, 3, padding=1))

        wh_layers = nn.Sequential(*wh_layers)
        wh2_layers = nn.Sequential(*wh2_layers)
        hm_layers = nn.Sequential(*hm_layers)
        return wh_layers, wh2_layers, hm_layers

    def _init_layers(self):
        self.upsample_layers = nn.ModuleList([
            UpsamplingLayers(self.inplanes[-1], self.planes[0], norm_cfg=self.norm_cfg),
            UpsamplingLayers(self.planes[0], self.planes[1], norm_cfg=self.norm_cfg),
            UpsamplingLayers(self.planes[1], self.planes[2], norm_cfg=self.norm_cfg)
        ])

        self.shortcut_layers = nn.ModuleList()
        for (inp, outp, layer_num) in zip(self.inplanes[::-1][1:], self.planes, self.shortcut_cfg):
            assert layer_num > 0, "Shortcut connection must be included."
            self.shortcut_layers.append(
                ShortcutConnection(inp, outp, [3] * layer_num, self.shortcut_conv_cfg))

        self.wh_b1, self.wh_b2, self.hm = self._init_branch_layers(self.planes[-1])

    def init_weights(self):
        for m in self.upsample_layers.modules():
            if isinstance(m, nn.BatchNorm2d):
                constant_init(m, 1)

        for m in self.shortcut_layers.modules():
            if isinstance(m, nn.Conv2d):
                kaiming_init(m)

        bias_cls = bias_init_with_prob(0.01)
        for m in self.hm.modules():
            if isinstance(m, nn.Conv2d):
                normal_init(m, std=0.01)
        normal_init(self.hm[-1], std=0.01, bias=bias_cls)

        for wh in [self.wh_b1, self.wh_b2]:
            for m in wh.modules():
                if isinstance(m, nn.Conv2d):
                    normal_init(m, std=0.001)

        for m in self.modules():
            if isinstance(m, ModulatedDeformConvPack):
                constant_init(m.conv_offset, 0)

    def forward(self, feats):
        """
        Args:
            feats: list(tensor).

        Returns:
            hm: tensor, (batch, 80, h, w).
            wh_b1, wh_b2: tensor, (batch, 4, h, w).
        """
        y, shortcuts = [], []
        x = feats[-1]
        for i, shortcut_layer in enumerate(self.shortcut_layers):
            shortcuts.append(shortcut_layer(feats[-i - 2]))

        for i, upsampling_layer in enumerate(self.upsample_layers):
            x = upsampling_layer(x)
            x = x + shortcuts[i]
            y.append(x)
        y_s4 = y[-1]

        hm = self.hm(y_s4)
        wh_b1 = F.relu(self.wh_b1(y_s4)) * self.wh_scale_factor_b1
        wh_b2 = F.relu(self.wh_b2(y_s4)) * self.wh_scale_factor_b2
        return hm, wh_b1, wh_b2

    def get_bboxes_single(self, pred_hm, pred_wh, down_ratio, topk, idx=0):
        batch, cat, height, width = pred_hm.size()
        pred_hm = pred_hm.detach().sigmoid_()
        wh = pred_wh.detach()

        # use max pooling to keep only the local-maximum scores
        heat = self.simple_nms(pred_hm)

        # (batch, topk)
        scores, inds, clses, ys, xs = self._topk(heat, topk=topk)
        xs = xs.view(batch, topk, 1) * down_ratio
        ys = ys.view(batch, topk, 1) * down_ratio

        wh = wh.permute(0, 2, 3, 1).contiguous()
        wh = wh.view(wh.size(0), -1, wh.size(3))
        inds = inds.unsqueeze(2).expand(inds.size(0), inds.size(1), wh.size(2))
        wh = wh.gather(1, inds)

        wh_filter = wh.new_ones((batch, topk), dtype=torch.bool)
        if self.inf_branch_filter:
            area = (wh[..., 2] + wh[..., 0] + 1) * (wh[..., 3] + wh[..., 1] + 1)
            if idx == 0:
                wh_filter = area >= self.b1_min_length ** 2 / 2
            elif idx == 1:
                wh_filter = area <= self.b2_max_length ** 2 * 2

        wh = wh.view(batch, topk, 4)
        clses = clses.view(batch, topk, 1).float()
        scores = scores.view(batch, topk, 1)

        bboxes = torch.cat([xs - wh[..., [0]], ys - wh[..., [1]],
                            xs + wh[..., [2]], ys + wh[..., [3]]], dim=2)
        return heat, inds, clses, scores, bboxes, xs, ys, wh_filter

    @force_fp32(apply_to=('pred_hm_b1', 'pred_hm_b2', 'pred_wh_b1', 'pred_wh_b2'))
    def get_bboxes(self,
                   pred_hm_b1,
                   pred_hm_b2,
                   pred_wh_b1,
                   pred_wh_b2,
                   img_metas,
                   cfg,
                   rescale=False):
        topk = getattr(cfg, 'max_per_img', 100)
        heat_b1, inds_b1, clses_b1, scores_b1, bboxes_b1, xs_b1, ys_b1, wh_filter_b1 = \
            self.get_bboxes_single(pred_hm_b1, pred_wh_b1, self.down_ratio_b1, topk, idx=0)
        heat_b2, inds_b2, clses_b2, scores_b2, bboxes_b2, xs_b2, ys_b2, wh_filter_b2 = \
            self.get_bboxes_single(pred_hm_b2, pred_wh_b2, self.down_ratio_b2, topk, idx=1)

        result_list = []
        score_thr = getattr(cfg, 'score_thr', 0.01)

        if 'b2' not in self.inf_branch:
            bboxes = bboxes_b1
            scores = scores_b1
            clses = clses_b1
            wh_filter = wh_filter_b1
        elif 'b1' not in self.inf_branch:
            bboxes = bboxes_b2
            scores = scores_b2
            clses = clses_b2
            wh_filter = wh_filter_b2
        else:
            bboxes = torch.cat([bboxes_b1, bboxes_b2], dim=1)
            scores = torch.cat([scores_b1, scores_b2], dim=1)
            clses = torch.cat([clses_b1, clses_b2], dim=1)
            wh_filter = torch.cat([wh_filter_b1, wh_filter_b2], dim=1)

        for batch_i in range(bboxes.shape[0]):
            scores_per_img = scores[batch_i]
            wh_filter_per_img = wh_filter[batch_i]
            scores_keep = (scores_per_img > score_thr).squeeze(-1) & wh_filter_per_img

            scores_per_img = scores_per_img[scores_keep]
            bboxes_per_img = bboxes[batch_i][scores_keep]
            labels_per_img = clses[batch_i][scores_keep].squeeze(-1)
            img_shape = img_metas[batch_i]['pad_shape']
            bboxes_per_img[:, 0::2] = bboxes_per_img[:, 0::2].clamp(min=0, max=img_shape[1] - 1)
            bboxes_per_img[:, 1::2] = bboxes_per_img[:, 1::2].clamp(min=0, max=img_shape[0] - 1)

            if rescale:
                scale_factor = img_metas[batch_i]['scale_factor']
                bboxes_per_img /= bboxes_per_img.new_tensor(scale_factor)

            if self.use_simple_nms:
                bboxes_per_img = torch.cat([bboxes_per_img, scores_per_img], dim=1)
            else:
                labels_int_flatten = labels_per_img.int()
                unique_cls_ids = list(set(list(labels_int_flatten.cpu().numpy())))
                bboxes_per_img_per_cls = bboxes_per_img.new_zeros((0, 5))
                labels_per_img_per_cls = labels_int_flatten.new_zeros((0,))
                for cls_id in unique_cls_ids:
                    cls_id_idx = (labels_int_flatten == cls_id)
                    soft_bboxes, ori_idx = soft_nms(
                        torch.cat((bboxes_per_img[cls_id_idx], scores_per_img[cls_id_idx]), dim=1),
                        iou_thr=0.6)
                    unique_labels = labels_int_flatten[cls_id_idx][ori_idx]
                    bboxes_per_img_per_cls = torch.cat((bboxes_per_img_per_cls, soft_bboxes), dim=0)
                    labels_per_img_per_cls = torch.cat((labels_per_img_per_cls, unique_labels))
                bboxes_per_img = bboxes_per_img_per_cls
                labels_per_img = labels_per_img_per_cls.float()

            labels_per_img = labels_per_img.squeeze(-1)
            result_list.append((bboxes_per_img, labels_per_img))

        return result_list

    def loss_single(self, pred_hm, pred_wh, heatmap, box_target, wh_weight, down_ratio,
                    base_loc_name, hm_weight_factor, wh_weight_factor, focal_loss_beta):
        H, W = pred_hm.shape[2:]
        pred_hm = torch.clamp(pred_hm.sigmoid_(), min=1e-4, max=1 - 1e-4)
        loss_cls = ct_focal_loss(pred_hm, heatmap, beta=focal_loss_beta) * hm_weight_factor

        if getattr(self, base_loc_name) is None or \
                H != getattr(self, base_loc_name).shape[1] or \
                W != getattr(self, base_loc_name).shape[2]:
            base_step = down_ratio
            shifts_x = torch.arange(0, (W - 1) * base_step + 1, base_step,
                                    dtype=torch.float32, device=heatmap.device)
            shifts_y = torch.arange(0, (H - 1) * base_step + 1, base_step,
                                    dtype=torch.float32, device=heatmap.device)
            shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)
            setattr(self, base_loc_name, torch.stack((shift_x, shift_y), dim=0))  # (2, h, w)

        # (batch, h, w, 4)
        pred_boxes = torch.cat((getattr(self, base_loc_name) - pred_wh[:, [0, 1]],
                                getattr(self, base_loc_name) + pred_wh[:, [2, 3]]),
                               dim=1).permute(0, 2, 3, 1)
        boxes = box_target.permute(0, 2, 3, 1)

        mask = wh_weight.view(-1, H, W)
        avg_factor = mask.sum() + 1e-4
        loss_bbox = giou_loss(pred_boxes, boxes, mask, avg_factor=avg_factor) * wh_weight_factor

        return loss_cls, loss_bbox

    @force_fp32(apply_to=('pred_hm_b1', 'pred_hm_b2', 'pred_wh_b1', 'pred_wh_b2'))
    def loss(self,
             pred_hm_b1,
             pred_hm_b2,
             pred_wh_b1,
             pred_wh_b2,
             gt_bboxes,
             gt_labels,
             img_metas,
             cfg,
             gt_bboxes_ignore=None):
        h_b1, h_b2, b_b1, b_b2, r_b1, r_b2 = self.ttf_target(gt_bboxes, gt_labels, img_metas)

        loss_cls_b1, loss_bbox_b1 = self.loss_single(
            pred_hm_b1, pred_wh_b1, h_b1, b_b1, r_b1, self.down_ratio_b1, 'base_loc_b1',
            self.hm_weight_b1, self.wh_weight_b1,
            4 if self.focal_b2_only else self.focal_loss_beta)
        loss_cls_b2, loss_bbox_b2 = self.loss_single(
            pred_hm_b2, pred_wh_b2, h_b2, b_b2, r_b2, self.down_ratio_b2, 'base_loc_b2',
            self.hm_weight_b2, self.wh_weight_b2, self.focal_loss_beta)

        return {'losses/ttfnetv2_loss_hm_b1': loss_cls_b1,
                'losses/ttfnetv2_loss_wh_b1': loss_bbox_b1,
                'losses/ttfnetv2_loss_hm_b2': loss_cls_b2,
                'losses/ttfnetv2_loss_wh_b2': loss_bbox_b2}

    def _topk(self, scores, topk):
        batch, cat, height, width = scores.size()

        # (batch, 80, topk)
        topk_scores, topk_inds = torch.topk(scores.view(batch, cat, -1), topk)

        topk_inds = topk_inds % (height * width)
        topk_ys = (topk_inds / width).int().float()
        topk_xs = (topk_inds % width).int().float()

        # (batch, topk). select topk from the 80 * topk candidates
        topk_score, topk_ind = torch.topk(topk_scores.view(batch, -1), topk)
        topk_clses = (topk_ind / topk).int()

        topk_ind = topk_ind.unsqueeze(2)
        topk_inds = topk_inds.view(batch, -1, 1).gather(1, topk_ind).view(batch, topk)
        topk_ys = topk_ys.view(batch, -1, 1).gather(1, topk_ind).view(batch, topk)
        topk_xs = topk_xs.view(batch, -1, 1).gather(1, topk_ind).view(batch, topk)

        return topk_score, topk_inds, topk_clses, topk_ys, topk_xs

    def gaussian_2d(self, shape, sigma_x=1, sigma_y=1):
        m, n = [(ss - 1.) / 2. for ss in shape]
        y, x = np.ogrid[-m:m + 1, -n:n + 1]

        h = np.exp(-(x * x / (2 * sigma_x * sigma_x) + y * y / (2 * sigma_y * sigma_y)))
        h[h < np.finfo(h.dtype).eps * h.max()] = 0
        return h

    def draw_truncate_gaussian(self, heatmap, center, h_radius, w_radius, k=1):
        h, w = 2 * h_radius + 1, 2 * w_radius + 1
        sigma_x = w / 6
        sigma_y = h / 6
        gaussian = self.gaussian_2d((h, w), sigma_x=sigma_x, sigma_y=sigma_y)
        gaussian = heatmap.new_tensor(gaussian)

        x, y = int(center[0]), int(center[1])
        height, width = heatmap.shape[0:2]

        left, right = min(x, w_radius), min(width - x, w_radius + 1)
        top, bottom = min(y, h_radius), min(height - y, h_radius + 1)

        masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
        masked_gaussian = gaussian[h_radius - top:h_radius + bottom,
                                   w_radius - left:w_radius + right]
        if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0:
            torch.max(masked_heatmap, masked_gaussian * k, out=masked_heatmap)
        return heatmap

    def ttf_target_single_single(self, heatmap, box_target, reg_weight, fake_heatmap,
                                 boxes_area_topk_log, gt_boxes, gt_labels, boxes_ind,
                                 feat_shape, down_ratio):
        output_h, output_w = feat_shape
        feat_gt_boxes = gt_boxes / down_ratio
        feat_gt_boxes[:, [0, 2]] = torch.clamp(feat_gt_boxes[:, [0, 2]], min=0, max=output_w - 1)
        feat_gt_boxes[:, [1, 3]] = torch.clamp(feat_gt_boxes[:, [1, 3]], min=0, max=output_h - 1)
        feat_hs, feat_ws = (feat_gt_boxes[:, 3] - feat_gt_boxes[:, 1],
                            feat_gt_boxes[:, 2] - feat_gt_boxes[:, 0])

        ct_ints = (torch.stack([(gt_boxes[:, 0] + gt_boxes[:, 2]) / 2,
                                (gt_boxes[:, 1] + gt_boxes[:, 3]) / 2],
                               dim=1) / down_ratio).to(torch.int)

        h_radiuses_alpha = (feat_hs / 2. * self.alpha).int()
        w_radiuses_alpha = (feat_ws / 2. * self.alpha).int()
        if self.alpha != self.beta:
            h_radiuses_beta = (feat_hs / 2. * self.beta).int()
            w_radiuses_beta = (feat_ws / 2. * self.beta).int()

        # larger boxes have lower priority than small boxes.
        for k in range(boxes_ind.shape[0]):
            cls_id = gt_labels[k] - 1

            fake_heatmap = fake_heatmap.zero_()
            self.draw_truncate_gaussian(fake_heatmap, ct_ints[k],
                                        h_radiuses_alpha[k].item(),
                                        w_radiuses_alpha[k].item())
            heatmap[cls_id] = torch.max(heatmap[cls_id], fake_heatmap)

            if self.alpha != self.beta:
                fake_heatmap = fake_heatmap.zero_()
                self.draw_truncate_gaussian(fake_heatmap, ct_ints[k],
                                            h_radiuses_beta[k].item(),
                                            w_radiuses_beta[k].item())
            box_target_inds = fake_heatmap > 0
            box_target[:, box_target_inds] = gt_boxes[k][:, None]

            cls_id = 0
            local_heatmap = fake_heatmap[box_target_inds]
            ct_div = local_heatmap.sum()
            local_heatmap *= boxes_area_topk_log[k]
            reg_weight[cls_id, box_target_inds] = local_heatmap / ct_div

        return heatmap, box_target, reg_weight

    def ttf_target_single(self, gt_boxes, gt_labels, feat_shape):
        """
        Args:
            gt_boxes: tensor, tensor <=> img, (num_gt, 4).
            gt_labels: tensor, tensor <=> img, (num_gt,).
            feat_shape: tuple.

        Returns:
            heatmap: tensor, tensor <=> img, (80, h, w).
            box_target: tensor, tensor <=> img, (4, h, w).
            reg_weight: tensor, same as box_target.
        """
        output_h_b1, output_w_b1, output_h_b2, output_w_b2 = feat_shape
        heatmap_channel = self.num_fg

        heatmap_b1 = gt_boxes.new_zeros((heatmap_channel, output_h_b1, output_w_b1))
        fake_heatmap_b1 = gt_boxes.new_zeros((output_h_b1, output_w_b1))
        box_target_b1 = gt_boxes.new_ones((4, output_h_b1, output_w_b1)) * -1
        reg_weight_b1 = gt_boxes.new_zeros((1, output_h_b1, output_w_b1))

        heatmap_b2 = gt_boxes.new_zeros((heatmap_channel, output_h_b2, output_w_b2))
        fake_heatmap_b2 = gt_boxes.new_zeros((output_h_b2, output_w_b2))
        box_target_b2 = gt_boxes.new_ones((4, output_h_b2, output_w_b2)) * -1
        reg_weight_b2 = gt_boxes.new_zeros((1, output_h_b2, output_w_b2))

        boxes_areas_log = self.bbox_areas(gt_boxes).log()
        boxes_area_topk_log, boxes_ind = torch.topk(boxes_areas_log, boxes_areas_log.size(0))

        gt_boxes = gt_boxes[boxes_ind]
        gt_labels = gt_labels[boxes_ind]

        if self.level_base_area:
            gt_b1_idx = boxes_area_topk_log >= math.log(self.b1_min_length ** 2)
            gt_b2_idx = boxes_area_topk_log <= math.log(self.b2_max_length ** 2)
        else:
            gt_b1_idx = gt_boxes.max(-1)[0] >= self.b1_min_length
            gt_b2_idx = gt_boxes.max(-1)[0] <= self.b2_max_length

        heatmap_b1, box_target_b1, reg_weight_b1 = self.ttf_target_single_single(
            heatmap_b1, box_target_b1, reg_weight_b1, fake_heatmap_b1,
            boxes_area_topk_log[gt_b1_idx], gt_boxes[gt_b1_idx], gt_labels[gt_b1_idx],
            boxes_ind[gt_b1_idx], [output_h_b1, output_w_b1], self.down_ratio_b1)
        heatmap_b2, box_target_b2, reg_weight_b2 = self.ttf_target_single_single(
            heatmap_b2, box_target_b2, reg_weight_b2, fake_heatmap_b2,
            boxes_area_topk_log[gt_b2_idx], gt_boxes[gt_b2_idx], gt_labels[gt_b2_idx],
            boxes_ind[gt_b2_idx], [output_h_b2, output_w_b2], self.down_ratio_b2)

        return heatmap_b1, heatmap_b2, box_target_b1, box_target_b2, reg_weight_b1, reg_weight_b2

    def ttf_target(self, gt_boxes, gt_labels, img_metas):
        """
        Args:
            gt_boxes: list(tensor). tensor <=> image, (gt_num, 4).
            gt_labels: list(tensor). tensor <=> image, (gt_num,).
            img_metas: list(dict).

        Returns:
            heatmap: tensor, (batch, 80, h, w).
            box_target: tensor, (batch, 4, h, w).
            reg_weight: tensor, (batch, 1, h, w).
        """
        with torch.no_grad():
            feat_shape = (img_metas[0]['pad_shape'][0] // self.down_ratio_b1,
                          img_metas[0]['pad_shape'][1] // self.down_ratio_b1,
                          img_metas[0]['pad_shape'][0] // self.down_ratio_b2,
                          img_metas[0]['pad_shape'][1] // self.down_ratio_b2)
            h_b1, h_b2, b_b1, b_b2, r_b1, r_b2 = multi_apply(
                self.ttf_target_single, gt_boxes, gt_labels, feat_shape=feat_shape)

            h_b1, h_b2, b_b1, b_b2, r_b1, r_b2 = [
                torch.stack(t, dim=0).detach()
                for t in [h_b1, h_b2, b_b1, b_b2, r_b1, r_b2]
            ]
            return h_b1, h_b2, b_b1, b_b2, r_b1, r_b2

    def simple_nms(self, heat, kernel=3, out_heat=None):
        pad = (kernel - 1) // 2
        hmax = nn.functional.max_pool2d(heat, (kernel, kernel), stride=1, padding=pad)
        keep = (hmax == heat).float()
        out_heat = heat if out_heat is None else out_heat
        return out_heat * keep

    def bbox_areas(self, bboxes, keep_axis=False):
        x_min, y_min, x_max, y_max = bboxes[:, 0], bboxes[:, 1], bboxes[:, 2], bboxes[:, 3]
        areas = (y_max - y_min + 1) * (x_max - x_min + 1)
        if keep_axis:
            return areas[:, None]
        return areas
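
# A standalone sketch of the max-pool peak-keeping trick used by simple_nms() and the
# top-k decoding in _topk() above, run on a random dummy heatmap. It deliberately avoids
# instantiating TTFLevelHead (which needs the compiled ModulatedDeformConvPack op); the
# demo function name and shapes below are assumptions for illustration only.
def _demo_peak_keeping(heat, kernel=3):
    # a location survives only if it equals the maximum of its kernel x kernel neighbourhood
    pad = (kernel - 1) // 2
    hmax = nn.functional.max_pool2d(heat, (kernel, kernel), stride=1, padding=pad)
    return heat * (hmax == heat).float()


if __name__ == '__main__':
    torch.manual_seed(0)
    dummy_hm = torch.rand(1, 80, 16, 16)              # (batch, num_fg, h, w) pseudo heatmap
    peaks = _demo_peak_keeping(dummy_hm)
    scores, inds = torch.topk(peaks.view(1, -1), 10)  # 10 strongest peaks across all classes
    print(scores.shape, inds.shape)                   # -> torch.Size([1, 10]) torch.Size([1, 10])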
1.804688
2