metadata (dict) | text (string, 60–3.49M chars)
---|---|
{
"source": "JorinRBM/booksoup",
"score": 3
} |
#### File: booksoup/booksoup/Conversation.py
```python
from bs4 import BeautifulSoup
from FbTime import FbTime
from Sentiment import Sentiment
from Message import Message
class Conversation:
def __init__(self, path, interval="month"):
with open(path, 'r') as f:
self.__soup = BeautifulSoup(f.read(), "html.parser")
self.messages = []
self.name = self.__soup.find("title").text.replace("Conversation with ", "")
message_headers = self.__soup.find_all("div", class_="message_header")
self.__span_meta = [m.find("span", class_="meta").text for m in message_headers]
self.__fbt = FbTime(self.__span_meta)
for m in self.__soup.find_all("div", class_="message"):
span = m.find("span", class_="meta")
self.messages.append(Message(m.find("span", class_="user").text, self.__fbt.span_meta_to_date(span.text, interval), span.text, m.next_sibling.text))
self.__sent = Sentiment(self.messages, self.__fbt)
self.participants = self.__scrape_participants()
def interaction_freq(self):
return self.__fbt.interaction_freq()
def interaction_timeline(self, name):
return self.__fbt.interaction_timeline(name, self.messages)
def sentiment_timeline(self, name, interval):
return self.__sent.sentiment_timeline(name, interval)
def avg_sentiment(self, name):
return self.__sent.avg_sentiment(name)
def get24HourTime(self, elem):
return self.__fbt.get24HourTime(elem)
# Returns a list of participants in the conversation.
def __scrape_participants(self):
users = []
for user_span in self.__soup.find_all("span", "user"):
user_name = user_span.text
if user_name not in users:
users.append(user_name)
return users
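
# --- A hypothetical usage sketch (not part of the original file). The archive
# --- path below is a placeholder for a Facebook HTML message export.
if __name__ == "__main__":
    conversation = Conversation("messages/alice_smith/message.html", interval="month")
    print(conversation.name)                # thread title, minus the "Conversation with " prefix
    print(conversation.participants)        # unique sender names scraped from the HTML
    print(conversation.interaction_freq())  # per-interval message frequencies from FbTime
    # Average sentiment score for the first participant found in the thread.
    print(conversation.avg_sentiment(conversation.participants[0]))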
``` |
{
"source": "JorinTielen/anom-py",
"score": 2
} |
#### File: anom/adapters/datastore_adapter.py
```python
import logging
from functools import partial
from gcloud_requests import DatastoreRequestsProxy, enter_transaction, exit_transaction
from google.cloud import datastore
from threading import local
from .. import Adapter, Key
from ..adapter import QueryResponse
from ..model import KeyLike
from ..transaction import Transaction, TransactionFailed
_logger = logging.getLogger(__name__)
class _DeferredKey(KeyLike):
def __init__(self, ds_entity):
self.ds_entity = ds_entity
self._value = None
@property
def _anom_key(self):
if self._value is None or self._value.is_partial:
self._value = DatastoreAdapter._convert_key_from_datastore(self.ds_entity.key)
return self._value
def __getattr__(self, name):
return getattr(self._anom_key, name)
def __repr__(self):
return repr(self._anom_key)
class _DatastoreOuterTransaction(Transaction):
def __init__(self, adapter):
self.adapter = adapter
self.ds_transaction = adapter.client.transaction()
def begin(self):
_logger.debug("Beginning transaction...")
self.ds_transaction.begin()
self.adapter.client._push_batch(self.ds_transaction)
enter_transaction()
def commit(self):
try:
_logger.debug("Committing transaction...")
self.ds_transaction.commit()
except Exception as e:
_logger.debug("Transaction failed: %s", e)
raise TransactionFailed("Failed to commit transaction.", cause=e)
def rollback(self):
_logger.debug("Rolling transaction back...")
self.ds_transaction.rollback()
def end(self):
_logger.debug("Ending transaction...")
exit_transaction()
self.adapter.client._pop_batch()
self.adapter._transactions.remove(self)
class _DatastoreInnerTransaction(Transaction):
def __init__(self, parent):
self.parent = parent
def begin(self):
_logger.debug("Beginning inner transaction...")
def commit(self):
_logger.debug("Committing inner transaction...")
def rollback(self):
_logger.debug("Rolling back inner transaction...")
def end(self):
_logger.debug("Ending inner transaction...")
self.adapter._transactions.remove(self)
def __getattr__(self, name):
return getattr(self.parent, name)
class DatastoreAdapter(Adapter):
"""A Google Cloud Datastore adapter based on :mod:`google.cloud.datastore`.
Parameters:
      project(str, optional): The project this Adapter should connect to.
credentials(datastore.Credentials): The OAuth2 Credentials to
use for this client. If not passed, falls back to the default
inferred from the environment.
"""
_state = local()
def __init__(self, *, project=None, credentials=None):
self.project = project
self.credentials = credentials
self.proxy = DatastoreRequestsProxy(credentials=credentials)
self.client = datastore.Client(
credentials=self.credentials,
project=self.project,
_http=self.proxy,
_use_grpc=False,
)
@property
def _transactions(self):
"list[Transaction]: The current stack of Transactions."
transactions = getattr(self._state, "transactions", None)
if transactions is None:
transactions = self._state.transactions = []
return transactions
def delete_multi(self, keys):
self.client.delete_multi(self._convert_key_to_datastore(key) for key in keys)
def get_multi(self, keys):
get_multi = self.client.get_multi
if self.in_transaction:
transaction = self.current_transaction
get_multi = partial(get_multi, transaction=transaction.ds_transaction)
datastore_keys = [self._convert_key_to_datastore(key) for key in keys]
request_keys = set(datastore_keys)
found, missing, deferred = [], [], []
while True:
found.extend(get_multi(request_keys, missing=missing, deferred=deferred))
if not deferred:
break
for entity in found: # pragma: no cover
request_keys.remove(entity.key)
for key in missing: # pragma: no cover
request_keys.remove(key)
results = [None] * len(keys)
for entity in found:
index = datastore_keys.index(entity.key)
results[index] = self._prepare_to_load(entity)
return results
def put_multi(self, requests):
entities = [self._prepare_to_store(*request) for request in requests]
self.client.put_multi(entities)
if self.in_transaction:
return [_DeferredKey(entity) for entity in entities]
return [self._convert_key_from_datastore(entity.key) for entity in entities]
def query(self, query, options):
ancestor = None
if query.ancestor:
ancestor = self._convert_key_to_datastore(query.ancestor)
filters = self._convert_filters_to_datastore(query.filters)
query = self.client.query(
kind=query.kind,
ancestor=ancestor,
namespace=query.namespace,
projection=query.projection,
order=query.orders,
filters=filters,
)
if options.keys_only:
query.keys_only()
result_iterator = query.fetch(
limit=options.batch_size,
offset=options.offset,
start_cursor=options.cursor,
)
entities = []
for entity in result_iterator:
key, data = self._convert_key_from_datastore(entity.key), None
if not options.keys_only:
data = self._prepare_to_load(entity)
entities.append((key, data))
return QueryResponse(entities=entities, cursor=result_iterator.next_page_token)
def transaction(self, propagation):
if propagation == Transaction.Propagation.Independent:
transaction = _DatastoreOuterTransaction(self)
self._transactions.append(transaction)
return transaction
elif propagation == Transaction.Propagation.Nested:
if self._transactions:
transaction = _DatastoreInnerTransaction(self.current_transaction)
else:
transaction = _DatastoreOuterTransaction(self)
self._transactions.append(transaction)
return transaction
else: # pragma: no cover
raise ValueError(f"Invalid propagation option {propagation!r}.")
@property
def in_transaction(self):
return bool(self._transactions)
@property
def current_transaction(self):
return self._transactions[-1]
def _convert_filters_to_datastore(self, filters):
for property_filter in filters:
prop, op, value = property_filter
if isinstance(value, Key):
value = self._convert_key_to_datastore(property_filter.value)
yield prop, op, value
def _convert_key_to_datastore(self, anom_key):
return self.client.key(*anom_key.path, namespace=anom_key.namespace or None)
@staticmethod
def _convert_key_from_datastore(datastore_key):
return Key.from_path(*datastore_key.flat_path, namespace=datastore_key.namespace)
def _prepare_to_store(self, key, unindexed, data):
datastore_key = self._convert_key_to_datastore(key)
entity = datastore.Entity(datastore_key, unindexed)
entity.update({name: self._prepare_to_store_value(value) for name, value in data})
return entity
def _prepare_to_store_value(self, value):
if isinstance(value, Key):
return self._convert_key_to_datastore(value)
elif isinstance(value, (tuple, list)):
return [self._prepare_to_store_value(v) for v in value]
else:
return value
def _prepare_to_load(self, entity):
data = {}
for name, value in entity.items():
if isinstance(value, datastore.Key):
value = self._convert_key_from_datastore(value)
elif isinstance(value, list) and all(isinstance(v, datastore.Key) for v in value):
value = [self._convert_key_from_datastore(v) for v in value]
data[name] = value
return data
```
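For orientation, here is a minimal sketch of constructing this adapter and calling `get_multi` directly. It is not part of the repository: the import path and project id are assumptions, real calls require Google Cloud credentials, and applications would normally install the adapter through anom rather than drive it by hand.
```python
from anom import Key
from anom.adapters import DatastoreAdapter  # import path assumed from the file layout

adapter = DatastoreAdapter(project="my-gcp-project")  # placeholder project id

# Raw entity data comes back in the same order as the input keys;
# keys with no stored entity map to None (see get_multi above).
alice_data, bob_data = adapter.get_multi([Key("Person", 1), Key("Person", 2)])

# Transactions are tracked on a per-thread stack.
print(adapter.in_transaction)  # False outside of a transaction block
```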
#### File: anom-py/anom/model.py
```python
from collections import namedtuple
from threading import RLock
from weakref import WeakValueDictionary
from .adapter import PutRequest, get_adapter
from .namespaces import get_namespace
from .query import PropertyFilter, Query
#: The set of known models. This is used to look up model classes at
#: runtime by their kind.
_known_models = WeakValueDictionary()
_known_models_lock = RLock()
#: Canary value for unset properties.
NotFound = type("NotFound", (object,), {})()
#: Canary value for properties that should be skipped when loading
#: entities from Datastore.
Skip = type("Skip", (object,), {})()
def classname(ob):
"Returns the name of ob's class."
return type(ob).__name__
class KeyLike:
"""Base class for objects that should be treated as if they are
datastore keys (for example, when comparing two objects with one
another).
"""
class Key(KeyLike, namedtuple("Key", ("kind", "id_or_name", "parent", "namespace"))):
"""A Datastore key.
Parameters:
kind(str or model): The Datastore kind this key represents.
id_or_name(int or str): The id or name of this key.
parent(anom.Key, optional): This key's ancestor.
namespace(str, optional): This key's namespace.
Attributes:
kind(str): This key's kind.
id_or_name(int or str or None): This key's integer id or string
name. This is ``None`` for partial keys.
parent(anom.Key or None): This key's ancestor.
namespace(str or None): This key's namespace.
"""
def __new__(cls, kind, id_or_name=None, parent=None, namespace=None):
if isinstance(kind, model):
kind = kind._kind
if parent and parent.is_partial:
raise ValueError("Cannot use partial Keys as parents.")
if parent:
if namespace is not None and namespace != parent.namespace:
raise ValueError(f"Namespace {namespace!r} is different from parent namespace {parent.namespace!r}.")
namespace = parent.namespace
elif namespace is None:
namespace = get_namespace()
return super().__new__(cls, kind, id_or_name, parent, namespace)
@classmethod
def from_path(cls, *path, namespace=None):
"""Build up a Datastore key from a path.
Parameters:
\*path(tuple[str or int]): The path segments.
namespace(str): An optional namespace for the key. This is
applied to each key in the tree.
Returns:
          anom.Key: The Datastore key represented by the given path.
"""
parent = None
for i in range(0, len(path), 2):
parent = cls(*path[i:i + 2], parent=parent, namespace=namespace)
return parent
@property
def path(self):
"tuple: The full Datastore path represented by this key."
prefix = ()
if self.parent:
prefix = self.parent.path
if self.id_or_name:
return prefix + (self.kind, self.id_or_name)
return prefix + (self.kind,)
@property
def is_partial(self):
"bool: ``True`` if this key doesn't have an id yet."
return len(self.path) % 2 != 0
@property
def int_id(self):
"int: This key's numeric id."
id_or_name = self.id_or_name
if id_or_name is not None and isinstance(id_or_name, int):
return id_or_name
return None
@property
def str_id(self):
"str: This key's string id."
id_or_name = self.id_or_name
if id_or_name is not None and isinstance(id_or_name, str):
return id_or_name
return None
def get_model(self):
"""Get the model class for this Key.
Raises:
RuntimeError: If a model isn't registered for the Key's
kind.
Returns:
type: A Model class.
"""
return lookup_model_by_kind(self.kind)
def delete(self):
"""Delete the entity represented by this Key from Datastore.
"""
return delete_multi([self])
def get(self):
"""Get the entity represented by this Key from Datastore.
Returns:
Model: The entity or ``None`` if it does not exist.
"""
return get_multi([self])[0]
def __repr__(self):
return f"Key({self.kind!r}, {self.id_or_name!r}, parent={self.parent!r}, namespace={self.namespace!r})"
def __hash__(self):
return hash(tuple(self))
def __eq__(self, other):
if not isinstance(other, KeyLike):
return False
if self is other:
return True
if other.parent != self.parent or \
other.namespace != self.namespace:
return False
return self.path == other.path
def __ne__(self, other):
return not (self == other)
class Property:
"""Base class for Datastore model properties.
The property lifecycle is as follows:
* :meth:`__init__` is called every time a property is defined on a
model class.
* :meth:`validate` is called every time a value is assigned to a
property on a model instance.
* :meth:`prepare_to_load` is called before a property is assigned
to a model instance that is being loaded from Datastore.
* :meth:`prepare_to_store` is called before a property is
persisted from a model instance to Datastore.
Parameters:
name(str, optional): The name of this property on the Datastore
entity. Defaults to the name of this property on the model.
default(object, optional): The property's default value.
indexed(bool, optional): Whether or not this property should be
indexed. Defaults to ``False``.
indexed_if(callable, optional): Whether or not this property
should be indexed when the callable returns ``True``.
Defaults to ``None``.
optional(bool, optional): Whether or not this property is
optional. Defaults to ``False``. Required-but-empty values
cause models to raise an exception before data is persisted.
repeated(bool, optional): Whether or not this property is
repeated. Defaults to ``False``. Optional repeated
properties default to an empty list.
"""
#: The types of values that may be assigned to this property.
_types = ()
def __init__(self, *, name=None, default=None, indexed=False, indexed_if=None, optional=False, repeated=False):
self.indexed = indexed or bool(indexed_if)
self.indexed_if = indexed_if
self.optional = optional
self.repeated = repeated
self.default = self.validate(default) if default is not None else None
self._name_on_entity = name
self._name_on_model = None
@property
def name_on_entity(self):
"str: The name of this Property inside the Datastore entity."
return self._name_on_entity
@property
def name_on_model(self):
"str: The name of this Property on the Model instance."
return self._name_on_model
@property
def is_none(self):
"PropertyFilter: A filter that checks if this value is None."
if not self.optional:
raise TypeError("Required properties cannot be compared against None.")
return self == None # noqa
def validate(self, value):
"""Validates that `value` can be assigned to this Property.
Parameters:
value: The value to validate.
Raises:
TypeError: If the type of the assigned value is invalid.
Returns:
The value that should be assigned to the entity.
"""
if isinstance(value, self._types):
return value
elif self.optional and value is None:
return [] if self.repeated else None
elif self.repeated and isinstance(value, (tuple, list)) and all(isinstance(x, self._types) for x in value):
return value
else:
raise TypeError(f"Value of type {classname(value)} assigned to {classname(self)} property.")
def prepare_to_load(self, entity, value):
"""Prepare `value` to be loaded into a Model. Called by the
model for each Property, value pair contained in the persisted
data when loading it from an adapter.
Parameters:
entity(Model): The entity to which the value belongs.
value: The value being loaded.
Returns:
The value that should be assigned to the entity.
"""
return value
def prepare_to_store(self, entity, value):
"""Prepare `value` for storage. Called by the Model for each
Property, value pair it contains before handing the data off
to an adapter.
Parameters:
entity(Model): The entity to which the value belongs.
value: The value being stored.
Raises:
RuntimeError: If this property is required but no value was
assigned to it.
Returns:
The value that should be persisted.
"""
if value is None and not self.optional:
raise RuntimeError(f"Property {self.name_on_model} requires a value.")
return value
def __set_name__(self, ob, name):
self._name_on_entity = self.name_on_entity or name
self._name_on_model = name
def __get__(self, ob, obtype):
if ob is None:
return self
value = ob._data.get(self.name_on_model, NotFound)
if value is NotFound:
if self.default is not None:
return self.default
elif self.repeated:
value = ob._data[self.name_on_model] = []
else:
return None
return value
def __set__(self, ob, value):
ob._data[self.name_on_model] = self.validate(value)
def __delete__(self, ob):
del ob._data[self.name_on_model]
def _build_filter(self, op, value):
if not self.indexed:
raise TypeError(f"{self.name_on_model} is not indexed.")
return PropertyFilter(self.name_on_entity, op, self.validate(value))
def __eq__(self, value):
return self._build_filter("=", value)
def __le__(self, value):
return self._build_filter("<=", value)
def __ge__(self, value):
return self._build_filter(">=", value)
def __lt__(self, value):
return self._build_filter("<", value)
def __gt__(self, value):
return self._build_filter(">", value)
def __neg__(self):
return "-" + self.name_on_entity
def __pos__(self):
return self.name_on_entity
class EmbedLike(Property):
"""Base class for properties that Embed other models.
"""
def get_unindexed_properties(self, entity): # pragma: no cover
"""tuple[str]: The set of unindexed properties belonging to
the underlying model and all nested models.
"""
raise NotImplementedError
class _adapter:
def __get__(self, ob, obtype):
return get_adapter()
class model(type):
"""Metaclass of Model classes.
Parameters:
poly(bool, optional): Determines if the model should be
polymorphic or not. Subclasses of polymorphic models are all
stored under the same kind.
Attributes:
_adapter(Adapter): A computed property that returns the adapter
for this model class.
_is_child(bool): Whether or not this is a child model in a
polymorphic hierarchy.
_is_root(bool): Whether or not this is the root model in a
polymorphic hierarchy.
_kind(str): The underlying Datastore kind of this model.
_kinds(list[str]): The list of kinds in this model's hierarchy.
_properties(dict): A dict of all of the properties defined on
this model.
"""
#: The name of the field that holds the flattened class hierarchy
#: on polymodel entities.
_kinds_name = "^k"
def __new__(cls, classname, bases, attrs, poly=False, **kwargs):
attrs["_adapter"] = _adapter()
attrs["_is_child"] = is_child = False
attrs["_is_root"] = poly
attrs["_kind"] = kind = attrs.pop("_kind", classname)
attrs["_kinds"] = kinds = [kind]
# Collect all of the properties defined on this model.
attrs["_properties"] = properties = {}
for name, value in attrs.items():
if isinstance(value, Property):
properties[name] = value
# Collect all of the properties and kinds defined on parents
# of this model.
for base in bases:
if not isinstance(base, model):
continue
# Avoid adding the base Model class to kinds.
if "Model" in globals() and base is Model:
continue
kinds.extend(base._kinds)
if base._is_polymorphic: # Poly bases "infect" everything below them.
attrs["_is_child"] = is_child = True
attrs["_kind"] = base._kind
for name, prop in base._properties.items():
if name not in properties:
properties[name] = prop
clazz = type.__new__(cls, classname, bases, attrs)
# Ensure that a single model maps to a single kind at runtime.
with _known_models_lock:
if kind in _known_models and not is_child:
raise TypeError(f"Multiple models for kind {kind!r}.")
_known_models[kind] = clazz
return clazz
@property
def _is_polymorphic(self):
"bool: True if this child belongs to a polymorphic hierarchy."
return self._is_root or self._is_child
class Model(metaclass=model):
"""Base class for Datastore models.
Attributes:
key(anom.Key): The Datastore Key for this entity. If the entity
was never stored then the Key is going to be partial.
Note:
Hooks are only called when dealing with individual entities via
their keys. They *do not* run when entities are loaded from a
query.
"""
def __init__(self, *, key=None, **properties):
self.key = key or Key(self._kind)
self._data = {}
for name, value in properties.items():
if name not in self._properties:
raise TypeError(f"{classname(self)}() does not take a {name!r} parameter.")
setattr(self, name, value)
def __iter__(self):
for name, prop in self._properties.items():
value = getattr(self, name)
if isinstance(prop, EmbedLike):
yield from prop.prepare_to_store(self, value)
else:
yield prop.name_on_entity, prop.prepare_to_store(self, value)
# Polymorphic models need to keep track of their bases.
if type(self)._is_polymorphic:
yield model._kinds_name, self._kinds
@classmethod
def _load(cls, key, data):
# Polymorphic models need to instantiate leaf classes.
if cls._is_polymorphic and model._kinds_name in data:
name = data[model._kinds_name][0]
cls = lookup_model_by_kind(name)
instance = cls()
instance.key = key
for name, prop in instance._properties.items():
if isinstance(prop, EmbedLike):
instance._data[name] = prop.prepare_to_load(instance, data)
else:
value = prop.prepare_to_load(instance, data.get(name))
if value is not Skip:
instance._data[name] = value
return instance
@property
def unindexed_properties(self):
"tuple[str]: The names of all the unindexed properties on this entity."
properties = ()
for name, prop in self._properties.items():
if isinstance(prop, EmbedLike):
embedded_entity = getattr(self, name, None)
if embedded_entity:
properties += prop.get_unindexed_properties(embedded_entity)
elif not prop.indexed or prop.indexed_if and not prop.indexed_if(self, prop, name):
properties += (prop.name_on_entity,)
return properties
@classmethod
def pre_get_hook(cls, key):
"""A hook that runs before an entity is loaded from Datastore.
Raising an exception here will prevent the entity from being
loaded.
Parameters:
key(anom.Key): The datastore Key of the entity being loaded.
"""
def post_get_hook(self):
"""A hook that runs after an entity has been loaded from
Datastore.
"""
@classmethod
def get(cls, id_or_name, *, parent=None, namespace=None):
"""Get an entity by id.
Parameters:
id_or_name(int or str): The entity's id.
parent(anom.Key, optional): The entity's parent Key.
namespace(str, optional): The entity's namespace.
Returns:
Model: An entity or ``None`` if the entity doesn't exist in
Datastore.
"""
return Key(cls, id_or_name, parent=parent, namespace=namespace).get()
@classmethod
def pre_delete_hook(cls, key):
"""A hook that runs before an entity is deleted. Raising an
exception here will prevent the entity from being deleted.
Parameters:
key(anom.Key): The datastore Key of the entity being deleted.
"""
@classmethod
def post_delete_hook(cls, key):
"""A hook that runs after an entity has been deleted.
Parameters:
key(anom.Key): The datastore Key of the entity being deleted.
"""
def delete(self):
"""Delete this entity from Datastore.
Raises:
RuntimeError: If this entity was never stored (i.e. if its
key is partial).
"""
return delete_multi([self.key])
def pre_put_hook(self):
"""A hook that runs before this entity is persisted. Raising
an exception here will prevent the entity from being persisted.
"""
def post_put_hook(self):
"""A hook that runs after this entity has been persisted.
"""
def put(self):
"""Persist this entity to Datastore.
"""
return put_multi([self])[0]
@classmethod
def query(cls, **options):
"""Return a new query for this Model.
Parameters:
\**options(dict): Parameters to pass to the :class:`Query`
constructor.
Returns:
Query: The new query.
"""
return Query(cls, **options)
def __repr__(self):
constructor = type(self).__name__
properties = ("key",) + tuple(self._properties.keys())
props = ", ".join(f"{name}={getattr(self, name)!r}" for name in properties)
return f"{constructor}({props})"
def __eq__(self, other):
if not isinstance(other, type(self)):
return False
if self is other:
return True
if self.key != other.key:
return False
for name in self._properties:
if getattr(self, name) != getattr(other, name):
return False
return True
def __ne__(self, other):
return not (self == other)
def lookup_model_by_kind(kind):
"""Look up the model instance for a given Datastore kind.
Parameters:
kind(str)
Raises:
RuntimeError: If a model for the given kind has not been
defined.
Returns:
model: The model class.
"""
model = _known_models.get(kind)
if model is None:
raise RuntimeError(f"Model for kind {kind!r} not found.")
return model
def delete_multi(keys):
"""Delete a set of entitites from Datastore by their
respective keys.
Note:
This uses the adapter that is tied to the first model in the list.
If the keys have disparate adapters this function may behave in
unexpected ways.
Warning:
You must pass a **list** and not a generator or some other kind
of iterable to this function as it has to iterate over the list
of keys multiple times.
Parameters:
keys(list[anom.Key]): The list of keys whose entities to delete.
Raises:
RuntimeError: If the given set of keys have models that use
a disparate set of adapters or if any of the keys are
partial.
"""
if not keys:
return
adapter = None
for key in keys:
if key.is_partial:
raise RuntimeError(f"Key {key!r} is partial.")
model = lookup_model_by_kind(key.kind)
if adapter is None:
adapter = model._adapter
model.pre_delete_hook(key)
adapter.delete_multi(keys)
for key in keys:
# Micro-optimization to avoid calling get_model. This is OK
# to do here because we've already proved that a model for
# that kind exists in the previous block.
model = _known_models[key.kind]
model.post_delete_hook(key)
def get_multi(keys):
"""Get a set of entities from Datastore by their respective keys.
Note:
This uses the adapter that is tied to the first model in the
list. If the keys have disparate adapters this function may
behave in unexpected ways.
Warning:
You must pass a **list** and not a generator or some other kind
of iterable to this function as it has to iterate over the list
of keys multiple times.
Parameters:
keys(list[anom.Key]): The list of keys whose entities to get.
Raises:
RuntimeError: If the given set of keys have models that use
a disparate set of adapters or if any of the keys are
partial.
Returns:
list[Model]: Entities that do not exist are going to be None
in the result list. The order of results matches the order
of the input keys.
"""
if not keys:
return []
adapter = None
for key in keys:
if key.is_partial:
raise RuntimeError(f"Key {key!r} is partial.")
model = lookup_model_by_kind(key.kind)
if adapter is None:
adapter = model._adapter
model.pre_get_hook(key)
entities_data, entities = adapter.get_multi(keys), []
for key, entity_data in zip(keys, entities_data):
if entity_data is None:
entities.append(None)
continue
# Micro-optimization to avoid calling get_model. This is OK
# to do here because we've already proved that a model for
# that kind exists in the previous block.
model = _known_models[key.kind]
entity = model._load(key, entity_data)
entities.append(entity)
entity.post_get_hook()
return entities
def put_multi(entities):
"""Persist a set of entities to Datastore.
Note:
This uses the adapter that is tied to the first Entity in the
list. If the entities have disparate adapters this function may
behave in unexpected ways.
Warning:
You must pass a **list** and not a generator or some other kind
of iterable to this function as it has to iterate over the list
of entities multiple times.
Parameters:
entities(list[Model]): The list of entities to persist.
Raises:
RuntimeError: If the given set of models use a disparate set of
adapters.
Returns:
      list[Model]: The list of persisted entities.
"""
if not entities:
return []
adapter, requests = None, []
for entity in entities:
if adapter is None:
adapter = entity._adapter
entity.pre_put_hook()
requests.append(PutRequest(entity.key, entity.unindexed_properties, entity))
keys = adapter.put_multi(requests)
for key, entity in zip(keys, entities):
entity.key = key
entity.post_put_hook()
return entities
```
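A short sketch of the model API defined above, assuming the `props.String` and `props.Integer` property classes used by the blog example and tests later in this row, and an already-configured adapter:
```python
from anom import Model, props

class Person(Model):
    email = props.String(indexed=True)
    age = props.Integer(optional=True)

person = Person(email="someone@example.com", age=30)  # placeholder values
person.put()                      # persists the entity and assigns a complete Key
assert not person.key.is_partial

same_person = Person.get(person.key.id_or_name)
assert same_person == person      # __eq__ compares keys and all properties

person.key.delete()               # Key.delete() issues delete_multi([key])
```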
#### File: anom-py/anom/query.py
```python
from collections import namedtuple
from .namespaces import get_namespace
DEFAULT_BATCH_SIZE = 300
class PropertyFilter(namedtuple("PropertyFilter", ("name", "operator", "value"))):
"""Represents an individual filter on a Property within a Query.
"""
class QueryOptions(dict):
"""Options that determine how data is fetched from the Datastore
on a per Query basis.
Parameters:
batch_size(int, optional): The number of results to fetch per batch.
keys_only(bool, optional): Whether or not the results should be
Keys or Entities.
limit(int, optional): The maximum number of results to return.
offset(int, optional): The number of results to skip.
cursor(str, optional): A url-safe cursor representing where in
the result set the query should start.
"""
def __init__(self, query, **options):
super().__init__(**options)
self.query = query
def replace(self, **options):
"""Update this options object in place.
Parameters:
\**options(QueryOptions)
Returns:
QueryOptions: The updated instance.
"""
self.update(options)
return self
@property
def batch_size(self):
"""int: The number of results to fetch per batch. Clamped to
limit if limit is set and is smaller than the given batch
size.
"""
batch_size = self.get("batch_size", DEFAULT_BATCH_SIZE)
if self.limit is not None:
return min(self.limit, batch_size)
return batch_size
@property
def keys_only(self):
"bool: Whether or not the results should be Keys or Entities."
return self.get("keys_only", False)
@property
def limit(self):
"int: The maximum number of results to return."
return self.get("limit", self.query.limit)
@property
def offset(self):
"int: The number of results to skip."
return self.get("offset", self.query.offset)
@property
def cursor(self):
"bytes: The url-safe cursor for a query."
return self.get("cursor", b"")
@cursor.setter
def cursor(self, value):
self["cursor"] = value
class Resultset:
"""An iterator for datastore query results.
Parameters:
query(Query): The query that was run to create this resultset.
options(QueryOptions): Options that determine how entities are
fetched from Datastore.
"""
def __init__(self, query, options):
self._query = query
self._options = options
self._complete = False
self._entities = self._get_entities()
@property
def cursor(self):
"str: The url-safe cursor for the next batch of results."
return self._options.cursor
@property
def has_more(self):
"bool: Whether or not there are more results."
return not self._complete
def __iter__(self):
return self
def __next__(self):
return next(self._entities)
def _get_batches(self):
from .adapter import get_adapter
remaining = self._options.limit
while True:
adapter = self._query.model._adapter if self._query.model else get_adapter()
entities, self._options.cursor = adapter.query(self._query, self._options)
if remaining is not None:
remaining -= len(entities)
if remaining < 0:
entities = entities[:remaining]
if not entities:
break
# If we received fewer entities than we asked for then we
# can safely say that we've finished iterating. We have
# to do this before yielding, however.
if len(entities) < self._options.batch_size:
self._complete = True
if self._options.keys_only:
yield (key for key, _ in entities)
else:
yield (key.get_model()._load(key, data) for key, data in entities)
if remaining is not None and remaining <= 0:
break
self._complete = True
def _get_entities(self):
for batch in self._get_batches():
yield from batch
class Page:
"""An iterator that represents a single page of entities or keys.
Parameters:
cursor(str): The url-safe cursor for the next page of results.
batch(iterator[Model or anom.Key]): The batch of results backing
this Page.
"""
def __init__(self, cursor, batch):
self._cursor = cursor
self._batch = batch
@property
def cursor(self):
"str: The url-safe cursor for the next page of results."
return self._cursor
def __iter__(self):
return self
def __next__(self):
return next(self._batch)
class Pages:
"""An iterator for :class:`Pages<Page>` of results.
Parameters:
query(Query): The query that was run to create this resultset.
options(QueryOptions): Options that determine how entities are
fetched from Datastore.
"""
def __init__(self, query, page_size, options):
options = QueryOptions(query, **options)
options.update(batch_size=page_size)
self._resultset = Resultset(query, options)
self._pages = self._get_pages()
@property
def has_more(self):
"bool: Whether or not there are more pages."
return self._resultset.has_more
@property
def cursor(self):
"str: The url-safe cursor for the next page of results."
return self._resultset.cursor
def fetch_next_page(self):
"""Fetch the next Page of results.
Returns:
Page: The next page of results.
"""
for page in self:
return page
else:
return Page(self._resultset.cursor, iter(()))
def __iter__(self):
return self
def __next__(self):
return next(self._pages)
def _get_pages(self):
for batch in self._resultset._get_batches():
yield Page(self._resultset.cursor, batch)
class Query(namedtuple("Query", (
"model", "kind", "ancestor", "namespace", "projection", "filters", "orders", "offset", "limit",
))):
"""An immutable Datastore query.
Parameters:
kind(str or model): The Datastore kind to query.
ancestor(anom.Key, optional): The ancestor to which this query should be scoped.
namespace(str, optional): The namespace to which this query should be scoped.
      projection(tuple[str], optional): The tuple of fields to project.
filters(tuple[PropertyFilter], optional): The tuple of filters to apply.
orders(tuple[str], optional): The tuple of sort orders to apply.
offset(int, optional): The number of query results to skip.
limit(int, optional): The maximum number of results to return.
Example:
You can construct queries declaratively::
people_query = Query(Person)
.where(Person.email == "<EMAIL>")
.and_where(Person.enabled.is_true)
.with_limit(10)
Then run them to iterate over all the results::
all_people = people_query.run()
for person in all_people:
print(person)
Or paginate over them::
for page in people_query.paginate(page_size=10):
print("Cursor: ", page.cursor)
for person in page:
print(person)
Or get individual pages of results::
page_1 = people_query.paginate(page_size=10).fetch_next_page()
page_2 = people_query.paginate(page_size=10, cursor=page_1.cursor).fetch_next_page()
"""
def __new__(
cls, kind=None, *, ancestor=None, namespace=None,
projection=(), filters=(), orders=(), offset=0, limit=None,
):
from .model import lookup_model_by_kind
if kind is None:
model = None
elif isinstance(kind, str):
model = lookup_model_by_kind(kind)
else:
model, kind = kind, kind._kind
if namespace is None:
namespace = get_namespace()
return super().__new__(
cls, model=model, kind=kind, ancestor=ancestor, namespace=namespace,
projection=_prepare_projection(projection), filters=tuple(filters), orders=tuple(orders),
offset=offset, limit=limit,
)
def select(self, *projection):
"""Return a new query with its projection replaced.
Parameters:
\*projection(str): The fields to project.
Returns:
Query: The derived Query.
"""
return self._replace(projection=_prepare_projection(projection))
def where(self, *filters):
"""Return a new query, replacing the current set of filters.
Parameters:
\*filters(PropertyFilter): The filters to add.
Returns:
Query: The derived Query.
"""
return self._replace(filters=filters)
def and_where(self, *filters):
"""Return a new query, adding the given filters with the
current query's filters to form an "and".
Parameters:
\*filters(PropertyFilter): The filters to add.
Returns:
Query: The derived Query.
"""
return self._replace(filters=self.filters + filters)
def order_by(self, *orders):
"""Returns a new query containing an additional set of orders.
Parameters:
\*orders(str): The sort orders to add.
Returns:
Query: The derived Query.
"""
return self._replace(orders=self.orders + orders)
def with_ancestor(self, ancestor):
"""Returns a new query with its ancestor updated.
Parameters:
ancestor(anom.Key): The new ancestor.
Returns:
Query: The derived Query.
"""
return self._replace(ancestor=ancestor)
def with_namespace(self, namespace):
"""Returns a new query with its namespace updated.
Parameters:
namespace(str): The new namespace.
Returns:
Query: The derived Query.
"""
return self._replace(namespace=namespace)
def with_offset(self, offset):
"""Returns a new query with its offset updated.
Parameters:
offset(int): The new offset.
Returns:
Query: The derived Query.
"""
return self._replace(offset=offset)
def with_limit(self, limit):
"""Returns a new query with its limit updated.
Parameters:
limit(int): The new limit.
Returns:
Query: The derived Query.
"""
return self._replace(limit=limit)
def count(self, *, page_size=DEFAULT_BATCH_SIZE, **options):
"""Counts the number of entities that match this query.
Note:
Since Datastore doesn't provide a native way to count
entities by query, this method paginates through all the
entities' keys and counts them.
Parameters:
\**options(QueryOptions, optional)
Returns:
int: The number of entities.
"""
entities = 0
options = QueryOptions(self).replace(keys_only=True)
for page in self.paginate(page_size=page_size, **options):
entities += len(list(page))
return entities
def delete(self, *, page_size=DEFAULT_BATCH_SIZE, **options):
"""Deletes all the entities that match this query.
Note:
          Since Datastore doesn't provide a native way to delete
entities by query, this method paginates through all the
entities' keys and issues a single delete_multi call per
page.
Parameters:
\**options(QueryOptions, optional)
Returns:
int: The number of deleted entities.
"""
from .model import delete_multi
deleted = 0
options = QueryOptions(self).replace(keys_only=True)
for page in self.paginate(page_size=page_size, **options):
keys = list(page)
deleted += len(keys)
delete_multi(keys)
return deleted
def get(self, **options):
"""Run this query and get the first result.
Parameters:
\**options(QueryOptions, optional)
Returns:
Model: An entity or None if there were no results.
"""
sub_query = self.with_limit(1)
options = QueryOptions(sub_query).replace(batch_size=1)
for result in sub_query.run(**options):
return result
return None
def run(self, **options):
"""Run this query and return a result iterator.
Parameters:
\**options(QueryOptions, optional)
Returns:
Resultset: An iterator for this query's results.
"""
return Resultset(self._prepare(), QueryOptions(self, **options))
def paginate(self, *, page_size, **options):
"""Run this query and return a page iterator.
Parameters:
page_size(int): The number of entities to fetch per page.
\**options(QueryOptions, optional)
Returns:
Pages: An iterator for this query's pages of results.
"""
return Pages(self._prepare(), page_size, QueryOptions(self, **options))
def _prepare(self):
# Polymorphic children need to be able to query for themselves
# and their subclasses.
if self.model and self.model._is_child:
kind_filter = (self.model._kinds_name, "=", self.model._kinds[0])
return self._replace(filters=(kind_filter,) + self.filters)
return self
def _prepare_projection(projection):
return tuple(f if isinstance(f, str) else f.name_on_entity for f in projection)
```
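Building on the hypothetical `Person` model sketched after `model.py` above, a few of this file's entry points; the email value is a placeholder and an adapter is assumed to be configured:
```python
people = Person.query().where(Person.email == "someone@example.com").with_limit(50)

# Stream results, fetching 25 entities per batch.
for person in people.run(batch_size=25):
    print(person)

# Page through results and count matches (count() paginates over keys only).
pages = people.paginate(page_size=25)
first_page = list(pages.fetch_next_page())
print(len(first_page), "entities on the first page")
print("cursor for the next page:", pages.cursor)
print("total matching entities:", people.count())
```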
#### File: anom-py/anom/transaction.py
```python
import logging
from enum import Enum, auto
from functools import wraps
from .adapter import get_adapter
_logger = logging.getLogger(__name__)
class Transaction: # pragma: no cover
"""Abstract base class for Datastore transactions.
"""
class Propagation(Enum):
"""An enum of the various modes transactions can be run in.
"""
#: Nested transactions should be grouped together into a single
#: transaction.
Nested = auto()
        #: Nested transactions should be run independently of their
#: parent transactions.
Independent = auto()
def begin(self):
"Start this transaction."
raise NotImplementedError
def commit(self):
"Commit this Transaction to Datastore."
raise NotImplementedError
def rollback(self):
"Roll this Transaction back."
raise NotImplementedError
def end(self):
"Clean up this Transaction object."
raise NotImplementedError
class TransactionError(Exception):
"""Base class for Transaction errors.
"""
class TransactionFailed(TransactionError):
"""Raised by Adapters when a Transaction cannot be applied.
Parameters:
message(str): A message.
cause(Exception or None): The exception that caused this
Transaction to fail.
"""
def __init__(self, message, cause=None):
self.message = message
self.cause = cause
def __str__(self): # pragma: no cover
return self.message
class RetriesExceeded(TransactionError):
"""Raised by the transactional decorator when it runs out of
retries while trying to apply a transaction.
Parameters:
cause(TransactionError): The last transaction error that caused
a retry.
"""
def __init__(self, cause):
self.cause = cause
def __str__(self): # pragma: no cover
return str(self.cause)
def transactional(*, adapter=None, retries=3, propagation=Transaction.Propagation.Nested):
"""Decorates functions so that all of their operations (except for
queries) run inside a Datastore transaction.
Parameters:
adapter(Adapter, optional): The Adapter to use when running the
transaction. Defaults to the current adapter.
retries(int, optional): The number of times to retry the
transaction if it couldn't be committed.
propagation(Transaction.Propagation, optional): The propagation
strategy to use. By default, transactions are nested, but you
can force certain transactions to always run independently.
Raises:
      anom.RetriesExceeded: When the decorator runs out of retries
while trying to commit the transaction.
Returns:
callable: The decorated function.
"""
def decorator(fn):
@wraps(fn)
def inner(*args, **kwargs):
nonlocal adapter
adapter = adapter or get_adapter()
attempts, cause = 0, None
while attempts <= retries:
attempts += 1
transaction = adapter.transaction(propagation)
try:
transaction.begin()
res = fn(*args, **kwargs)
transaction.commit()
return res
except TransactionFailed as e:
cause = e
continue
except Exception as e:
transaction.rollback()
raise e
finally:
transaction.end()
raise RetriesExceeded(cause)
return inner
return decorator
```
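A minimal sketch of the decorator above, mirroring the money-transfer test later in this row; `BankAccount`-style models with an integer `balance` property are assumed:
```python
from anom import Transaction, get_multi, put_multi, transactional

@transactional(retries=5)
def transfer_money(source_key, target_key, amount):
    source, target = get_multi([source_key, target_key])
    source.balance -= amount
    target.balance += amount
    put_multi([source, target])

# Nested calls join the surrounding transaction by default; Independent
# propagation forces a call to commit or roll back on its own.
@transactional(propagation=Transaction.Propagation.Independent)
def record_audit_entry(entry):
    entry.put()
```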
#### File: examples/blog/models.py
```python
from anom import Model, props
from markdown import markdown
from passlib.context import CryptContext
from slugify import slugify
# [START password-property]
ctx = CryptContext(schemes=["sha256_crypt"])
class Password(props.String):
def validate(self, value):
return ctx.hash(super().validate(value))
# [END password-property]
# [START user-model]
class User(Model, poly=True):
username = props.String(indexed=True)
password = Password()
created_at = props.DateTime(indexed=True, auto_now_add=True)
updated_at = props.DateTime(indexed=True, auto_now=True)
@classmethod
def login(cls, username, password):
user = User.query().where(User.username == username).get()
if user is None:
return None
if not ctx.verify(password, user.password):
return None
if ctx.needs_update(user.password):
user.password = password
return user.put()
return user
@property
def permissions(self):
return ()
# [END user-model]
# [START editor-and-reader-models]
class Editor(User):
@property
def permissions(self):
return ("create", "read", "edit", "delete")
class Reader(User):
@property
def permissions(self):
return ("read",)
# [END editor-and-reader-models]
# [START post-model]
class Post(Model):
author = props.Key(indexed=True, kind=User)
title = props.String()
def __compute_slug(self):
if self.title is None:
return None
return slugify(self.title)
def __compute_body(self):
if self.body is None:
return None
return markdown(self.body)
slug = props.Computed(__compute_slug)
body = props.Text()
body_markdown = props.Computed(__compute_body)
tags = props.String(indexed=True, repeated=True)
created_at = props.DateTime(indexed=True, auto_now_add=True)
updated_at = props.DateTime(indexed=True, auto_now=True)
# [END post-model]
# [START init-database]
def init_database():
users = list(User.query().run(keys_only=True))
if users:
return
Editor(username="editor", password="<PASSWORD>").put()
Reader(username="viewer", password="<PASSWORD>").put()
init_database()
# [END init-database]
```
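A hypothetical sketch of how these models might be used from a request handler; the credentials and post contents are placeholders:
```python
user = User.login("editor", "correct horse battery staple")  # placeholder credentials
if user is not None and "create" in user.permissions:
    post = Post(
        author=user.key,
        title="Hello, World",
        body="Some *markdown* content.",
        tags=["announcements"],
    )
    # slug and body_markdown are derived from title and body by the
    # Computed properties defined above.
    post.put()
```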
#### File: anom-py/tests/test_embed.py
```python
import pytest
from anom import Model, props
from anom.model import PropertyFilter
class Nested(Model):
y = props.Integer()
z = props.Integer(indexed=True)
class Outer(Model):
x = props.Float(indexed=True)
nested = props.Embed(name="child", kind=Nested)
def test_embed_properties_cannot_have_defaults():
# When I try to make an Embed property with a default
# Then I should get back a type error
with pytest.raises(TypeError):
props.Embed(kind=Nested, default=42)
def test_embed_properties_proxy_their_models(adapter):
# When I access an embedded model's property via an Embed
# Then I should get back that model's property
assert Outer.nested._kind == "Nested"
def test_can_embed_entities_inside_other_entities(adapter):
# Given that I have an entity w/ another entity inside it
outer = Outer(x=1.0, nested=Nested(y=42, z=43))
# When I save that entity
outer.put()
# Then I should be able to retrieve the same data from datastore
assert outer == outer.key.get()
# And only the x and nested.z properties should be indexed
assert outer.unindexed_properties == ("child.y",)
def test_embedded_entities_are_required_by_default(adapter):
# Given that I have an entity w/ an empty embedded value
outer = Outer(x=1.0)
    # When I try to save that entity
# Then I should get back a runtime error
with pytest.raises(RuntimeError):
outer.put()
def test_embedded_entities_validate_the_data_they_are_given(adapter):
# Given that I have an entity w/ an empty embedded value
outer = Outer(x=1.0)
# When I try to assign an int to the nested property
# Then I should get back a type error
with pytest.raises(TypeError):
outer.nested = 24
class OptionalNested(Model):
x = props.Integer()
class OptionalOuter(Model):
nested = props.Embed(kind=OptionalNested, optional=True)
def test_embedded_properties_can_be_optional(adapter):
# Given that I have an outer entity w/o its nested property
outer = OptionalOuter()
# When I save that entity
outer.put()
# Then I should be able to get back the same entity from datastore
assert outer == outer.key.get()
def test_optional_embedded_properties_can_be_assigned_None(adapter):
# Given that I have an outer entity w/ a nested property
outer = OptionalOuter(nested=OptionalNested(x=42))
# When I assign none to that nested property
outer.nested = None
# And save that entity
outer.put()
# Then I should be able to get back the same entity from datastore
assert outer == outer.key.get()
class Point(Model):
x = props.Float(indexed=False)
y = props.Float(indexed=False)
class Place(Model):
name = props.String()
points = props.Embed(kind=Point, optional=True, repeated=True)
def test_optional_repeated_embed_properties_can_be_assigned_none(adapter):
    # Given that I have a place entity w/ an optional repeated embed property
place = Place(
name="New York",
points=[Point(x=40.7128, y=74.0060)]
)
# When I assign none to that nested property
place.points = None
# Then its repeated property should become the empty list
assert place.points == []
# When I save that entity
place.put()
# Then I should be able to get back the same entity from datastore
assert place == place.key.get()
def test_optional_repeated_embed_properties_can_be_created_with_none(adapter):
    # Given that I have a place entity w/ an optional repeated embed property,
# but don't assign it in the constructor
place = Place(
name="New York"
)
# And save that entity
place.put()
# Then I should be able to get back the same entity from datastore
assert place == place.key.get()
def test_optional_repeated_embed_properties_can_be_created_with_empty_list(adapter):
    # Given that I have a place entity w/ an optional repeated embed property,
# but assign it an empty list in the constructor
place = Place(
name="New York",
points=[]
)
# And save that entity with the empty list
place.put()
# Then I should be able to get back the same entity from datastore
assert place == place.key.get()
class Variation(Model):
weight = props.Integer(indexed=True)
class SplitTest(Model):
name = props.String(indexed=True)
slug = props.String(indexed=True)
variations = props.Embed(kind=Variation, repeated=True)
def test_can_embed_lists_of_entities_inside_other_entities(adapter):
# Given that I have a split test with multiple variations
split_test = SplitTest(name="A split test", slug="a-split-test")
split_test.variations = [Variation(weight=20), Variation(weight=80)]
# When I save that split test
split_test.put()
# Then I should be able to retrieve that same data from datastore
assert split_test == split_test.key.get()
# And all the properties should be indexed
assert split_test.unindexed_properties == ()
def test_embedded_entities_validate_the_repeated_data_they_are_given(adapter):
# Given that I have an entity w/ an empty embedded value
split_test = SplitTest(name="A split test", slug="a-split-test")
# When I try to assign a list of ints to the variations property
# Then I should get back a type error
with pytest.raises(TypeError):
split_test.variations = [1, 2]
class DeepD(Model):
x = props.Integer()
class DeepC(Model):
child = props.Embed(kind=DeepD)
class DeepB(Model):
child = props.Embed(kind=DeepC)
class DeepA(Model):
child = props.Embed(kind=DeepB)
def test_can_deeply_embed_entitites_inside_other_entities(adapter):
    # Given that I have a deeply nested entity
e = DeepA(child=DeepB(child=DeepC(child=DeepD(x=42))))
# When I save that entity
e.put()
# Then I should be able to retrieve that same data from datastore
assert e == e.key.get()
# And the deeply nested property should not be indexed
assert e.unindexed_properties == ("child.child.child.x",)
def test_embed_properties_can_generate_filters():
assert (Outer.nested.z == 1) == PropertyFilter("child.z", "=", 1)
assert (SplitTest.variations.weight >= 10) == PropertyFilter("variations.weight", ">=", 10)
with pytest.raises(TypeError):
DeepA.child.child.child.x > 10
def test_embed_properties_can_generate_sort_orders():
assert +Outer.nested.z == "child.z"
assert -Outer.nested.z == "-child.z"
assert +SplitTest.variations.weight == "variations.weight"
assert -SplitTest.variations.weight == "-variations.weight"
assert +DeepA.child.child.child.x == "child.child.child.x"
assert -DeepA.child.child.child.x == "-child.child.child.x"
```
#### File: anom-py/tests/test_transactions.py
```python
import pytest
from anom import Transaction, RetriesExceeded, get_multi, put_multi, transactional
from concurrent.futures import ThreadPoolExecutor
from unittest.mock import patch
from .models import BankAccount, Person
def test_transactions_are_serializable(adapter):
@transactional(retries=128)
def transfer_money(source_account_key, target_account_key, amount):
source_account, target_account = get_multi([source_account_key, target_account_key])
source_account.balance -= amount
target_account.balance += amount
put_multi([source_account, target_account])
source = BankAccount(balance=100).put()
target = BankAccount(balance=0).put()
futures = []
with ThreadPoolExecutor(max_workers=10) as e:
for _ in range(10):
futures.append(e.submit(transfer_money, source.key, target.key, 10))
for future in futures:
future.result()
source, target = get_multi([source.key, target.key])
assert source.balance == 0
assert target.balance == 100
def test_transactions_can_delete_data(person):
@transactional()
def successful(person_key):
person_key.delete()
successful(person.key)
assert person.key.get() is None
def test_transactions_can_be_rolled_back(person):
@transactional()
def failing(person_key):
person = person_key.get()
person.first_name = "Johan"
person.put()
raise RuntimeError("some error")
with pytest.raises(RuntimeError):
first_name = person.first_name
failing(person.key)
person = person.key.get()
assert person.first_name == first_name
def test_transactions_can_be_nested(person):
@transactional()
def successful_inner(person_key):
person = person_key.get()
person.first_name = "Johan"
person.put()
@transactional()
def successful_outer(person_key):
person = person_key.get()
person.first_name = "Iohan"
person.put()
successful_inner(person.key)
successful_outer(person.key)
person = person.key.get()
assert person.first_name == "Johan"
def test_nested_transactions_roll_back_the_outer_transaction(person):
@transactional()
def failing_inner(person_key):
person = person_key.get()
person.first_name = "Johan"
person.put()
raise RuntimeError("some error")
@transactional()
def successful_outer(person_key):
person = person_key.get()
person.first_name = "Iohan"
person.put()
failing_inner(person.key)
with pytest.raises(RuntimeError):
first_name = person.first_name
successful_outer(person.key)
person = person.key.get()
assert person.first_name == first_name
def test_transactions_can_be_independent(person):
@transactional(propagation=Transaction.Propagation.Independent)
def failing_inner(person_key):
person = person_key.get()
person.first_name = "Johan"
person.put()
raise RuntimeError("some error")
@transactional()
def successful_outer(person_key):
person = person_key.get()
person.first_name = "Iohan"
person.put()
with pytest.raises(RuntimeError):
failing_inner(person.key)
successful_outer(person.key)
person = person.key.get()
assert person.first_name == "Iohan"
def test_transactions_can_run_out_of_retries(person):
@transactional()
def failing(person_key):
pass
with patch("google.cloud.datastore.Transaction.commit") as commit_mock:
commit_mock.side_effect = RuntimeError
with pytest.raises(RetriesExceeded):
failing(person.key)
def test_can_get_entity_that_was_stored_in_a_txn(adapter):
@transactional()
def store():
return Person(email="<EMAIL>", first_name="Someone").put()
person = store()
assert person.key.get() == person
def test_deferred_keys_behave_like_normal_keys(adapter):
@transactional()
def store():
return Person(email="<EMAIL>", first_name="Someone").put()
person_1 = store()
person_2 = person_1.key.get()
assert person_1.key == person_2.key
assert person_1.key.path == person_2.key.path
assert str(person_1.key) == str(person_2.key)
``` |
{
"source": "JorisAndrade/helm-charts",
"score": 2
} |
#### File: helm-charts/helpers/release.py
```python
import glob
import os
import subprocess
import yaml
try:
raw_input
except NameError: # Python 3
raw_input = input
os.chdir(os.path.join(os.path.dirname(__file__), '..'))
bucket = 'gs://' + os.environ['GCS_BUCKET']
def run(cmd):
if 'DEBUG' in os.environ:
print(' '.join(cmd))
else:
subprocess.check_call(cmd)
# Cleanup existing releases
for release in glob.glob('*/*.tgz'):
print('Removing: ' + release)
os.remove(release)
for filepath in glob.iglob('*/Chart.yaml'):
chart = os.path.split(os.path.dirname(filepath))[-1]
# Package up the chart
run(['helm', 'package', chart, '--destination', chart])
# Upload it to the GCS bucket if it doesn't exist
source = '{chart}/{chart}-*.tgz'.format(**locals())
destination = '{bucket}/helm/{chart}/'.format(**locals())
run(['gsutil', 'cp', '-n', source, destination])
# Grab the current index so we can merge it with the latest releases
run(['gsutil', 'cp',
bucket + '/index.yaml', 'index.yaml.old'])
# Merge it with the old index to include the older releases
run(['helm', 'repo', 'index',
'--merge', 'index.yaml.old',
'--url', 'https://helm.elastic.co/helm/', '.'])
with open('index.yaml', 'r') as index:
print('=' * 80)
print(index.read())
print('=' * 80)
answer = raw_input('Upload new index.yaml? ("y" or "yes")\n')
if answer in ['y', 'yes']:
run(['gsutil', 'cp',
'index.yaml', bucket + '/index.yaml'])
``` |
{
"source": "JorisCos/asteroid_gan_exps",
"score": 2
} |
#### File: asteroid_gan_exps/data/metricGAN_dataset.py
```python
import numpy as np
import soundfile as sf
import torch
from asteroid.data.librimix_dataset import LibriMix
import random as random
from scipy import signal
class MetricGAN(LibriMix):
def __getitem__(self, idx):
# Get the row in dataframe
row = self.df.iloc[idx]
# Get mixture path
self.mixture_path = row['mixture_path']
sources_list = []
        # If a segment length is set, the start point is chosen randomly
if self.seg_len is not None:
start = random.randint(0, row['length'] - self.seg_len)
stop = start + self.seg_len
else:
start = 0
stop = None
# If task is enh_both then the source is the clean mixture
if 'enh_both' in self.task:
mix_clean_path = self.df_clean.iloc[idx]['mixture_path']
s, _ = sf.read(mix_clean_path, dtype='float32', start=start,
stop=stop)
sources_list.append(s)
else:
# Read sources
for i in range(self.n_src):
source_path = row[f'source_{i + 1}_path']
s, _ = sf.read(source_path, dtype='float32', start=start,
stop=stop)
sources_list.append(s)
# Read the mixture
mixture, _ = sf.read(self.mixture_path, dtype='float32', start=start,
stop=stop)
# Convert to torch tensor
mixture = torch.from_numpy(mixture).unsqueeze(0)
# Stack sources
sources = np.vstack(sources_list)
# Convert sources to tensor
sources = torch.from_numpy(sources)
return mixture, sources
```
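A minimal sketch of how this dataset class might be consumed. The `csv_dir` path is hypothetical, and the constructor arguments (`task`, `sample_rate`, `n_src`, `segment`) are assumed to follow the asteroid `LibriMix` signature that `MetricGAN` inherits:
```python
from torch.utils.data import DataLoader
from asteroid_gan_exps.data.metricGAN_dataset import MetricGAN
# Hypothetical metadata directory and LibriMix-style arguments.
train_set = MetricGAN(csv_dir="data/wav16k/min/train-100",
                      task="enh_single", sample_rate=16000, n_src=1, segment=3)
loader = DataLoader(train_set, batch_size=8, shuffle=True, num_workers=4)
for mixture, sources in loader:
    # mixture: (batch, 1, time), sources: (batch, n_src, time)
    print(mixture.shape, sources.shape)
    break
```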
#### File: asteroid_gan_exps/data/SEGAN_dataset.py
```python
import numpy as np
import soundfile as sf
import torch
from asteroid.data.librimix_dataset import LibriMix
import random as random
from scipy import signal
class SEGAN(LibriMix):
def __getitem__(self, idx):
# Get the row in dataframe
row = self.df.iloc[idx]
# Get mixture path
self.mixture_path = row['mixture_path']
sources_list = []
        # If there is a segment length, the start point is set randomly
if self.seg_len is not None:
start = random.randint(0, row['length'] - self.seg_len)
stop = start + self.seg_len
else:
start = 0
stop = None
# If task is enh_both then the source is the clean mixture
if 'enh_both' in self.task:
mix_clean_path = self.df_clean.iloc[idx]['mixture_path']
s, _ = sf.read(mix_clean_path, dtype='float32', start=start,
stop=stop)
sources_list.append(s)
else:
# Read sources
for i in range(self.n_src):
source_path = row[f'source_{i + 1}_path']
s, _ = sf.read(source_path, dtype='float32', start=start,
stop=stop)
sources_list.append(s)
# Read the mixture
mixture, _ = sf.read(self.mixture_path, dtype='float32', start=start,
stop=stop)
mixture = self.pre_emphasis(mixture).astype('float32')
# Convert to torch tensor
mixture = torch.from_numpy(mixture).unsqueeze(0)
# Stack sources
sources = np.vstack(sources_list)
sources = self.pre_emphasis(sources).astype('float32')
# Convert sources to tensor
sources = torch.from_numpy(sources)
if self.segment is not None and self.segment > 16384:
raise ValueError
if self.segment is None:
return self.slicer(mixture), sources
return mixture, sources
def slicer(self,sources, window=16384):
len_s = len(sources[0, :])
if len_s > window:
nb_slices = int(len_s // window) + 1
sliced = torch.zeros((sources.size()[0], nb_slices * window))
sliced = sliced.reshape((sources.size()[0], nb_slices, window))
for n in range(sources.size(0)):
for j in range(nb_slices - 1):
sliced[n, j, :] = sources[n,j * window: (j + 1) * window]
sliced[n, -1, : len_s - (j + 1) * window] = sources[n,
(j + 1) * window:]
return sliced
return sources.unsqueeze(1)
    def pre_emphasis(self, signal_batch, emph_coeff=0.95) -> np.ndarray:
"""
Pre-emphasis of higher frequencies given a batch of signal.
Args:
signal_batch(np.array): batch of signals, represented as numpy arrays
emph_coeff(float): emphasis coefficient
Returns:
result: pre-emphasized signal batch
"""
return signal.lfilter([1, -emph_coeff], [1], signal_batch)
```
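The `pre_emphasis` method above is a standard first-order high-pass filter. A small, self-contained sketch of its effect and of the matching de-emphasis (the inverse filter that SEGAN-style pipelines typically apply after enhancement; no de-emphasis step is defined in this file):
```python
import numpy as np
from scipy import signal
emph_coeff = 0.95
# Pre-emphasis: y[n] = x[n] - emph_coeff * x[n-1]
x = np.sin(2 * np.pi * 5 * np.linspace(0, 1, 16000)).astype("float32")
y = signal.lfilter([1, -emph_coeff], [1], x)
# De-emphasis is the inverse IIR filter; it recovers the original signal.
x_rec = signal.lfilter([1], [1, -emph_coeff], y)
print(np.allclose(x, x_rec, atol=1e-5))  # True
```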
#### File: egs/MetricGAN/discriminator.py
```python
import torch
import torch.nn as nn
from asteroid.engine.optimizers import make_optimizer
from torch.nn.modules.loss import _Loss
from asteroid.filterbanks import make_enc_dec
from asteroid.filterbanks.transforms import take_mag
from pystoi import stoi
from pb_bss_eval.evaluation.module_pesq import pesq
class Discriminator(nn.Module):
"""Discriminator also mentioned ad D """
def __init__(self, encoder, decoder, negative_slope=0.3):
super().__init__()
self.encoder = encoder
self.decoder = decoder
self.conv = nn.Sequential(
nn.BatchNorm2d(2),
nn.utils.spectral_norm(nn.Conv2d(2, 15, 5, 1)),
nn.LeakyReLU(negative_slope),
nn.utils.spectral_norm(nn.Conv2d(15, 25, 7, 1)),
nn.LeakyReLU(negative_slope),
nn.utils.spectral_norm(nn.Conv2d(25, 40, 9, 1)),
nn.LeakyReLU(negative_slope),
nn.utils.spectral_norm(nn.Conv2d(40, 50, 11, 1)),
nn.LeakyReLU(negative_slope))
self.pool = nn.AdaptiveAvgPool2d(1)
self.linear = nn.Sequential(
nn.utils.spectral_norm(nn.Linear(50, 50)),
nn.LeakyReLU(negative_slope),
nn.utils.spectral_norm(nn.Linear(50, 10)),
nn.LeakyReLU(negative_slope),
nn.utils.spectral_norm(nn.Linear(10, 1)),
)
def forward(self, x, z):
"""
Forward pass of discriminator.
Args:
x: inputs
z: clean
"""
# Encode
x = self.encoder(x)
x = take_mag(x)
x = x.unsqueeze(1)
# Encode
z = self.encoder(z)
z = take_mag(z)
z = z.unsqueeze(1)
x = torch.cat((x, z), dim=1)
x = self.conv(x)
x = self.pool(x).squeeze()
x = self.linear(x)
return x
class DiscriminatorLoss(_Loss):
""" This class implements a generic loss for the discriminator.
However, computation of some metrics can break the code (eg PESQ).
For now, we recommend to use only STOI"""
def __init__(self, metric, rate):
super().__init__()
self.metric = metric
self.rate = rate
def forward(self, noisy, clean, estimates, est_labels, labels):
# Behaves differently if estimates come from the generated data or not
#
if labels:
loss = torch.mean((est_labels - torch.ones_like(est_labels)) ** 2)
else:
loss = torch.mean((est_labels - get_metric(self.metric, noisy,
clean, estimates,
self.rate))**2)
return loss
def get_metric(metric, noisy, clean, estimates, rate):
""" Compute the metric """
noisy_np = noisy.cpu().squeeze(1).data.numpy()
clean_np = clean.cpu().squeeze(1).data.numpy()
estimates_np = estimates.cpu().squeeze(1).data.numpy()
metrics = torch.zeros(noisy.size(0))
if metric == 'stoi':
f = stoi
else:
f = pesq
for i in range(noisy_np.shape[0]):
# print(clean_np[i],estimates_np[i])
m = f(clean_np[i], estimates_np[i], rate)
metrics[i] += m
if metric == 'pesq':
metrics = (metrics + 0.5)/5.0
return metrics.to(noisy.device)
def make_discriminator_and_optimizer(conf):
""" Function to define the model and optimizer for a config dictionary.
Args:
        conf: Dictionary containing the output of hierarchical argparse.
Returns:
model, optimizer.
The main goal of this function is to make reloading for resuming
and evaluation very simple.
"""
# Define building blocks for local model
encoder, decoder = make_enc_dec(**conf['filterbank'])
model = Discriminator(encoder, decoder)
# Define optimizer of this model
optimizer = make_optimizer(model.parameters(), **conf['optim'])
d_loss = DiscriminatorLoss(conf['metric_to_opt']['metric'],
conf['data']['rate'])
return model, optimizer, d_loss
```
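For reference, a small sketch of the least-squares targets that `DiscriminatorLoss` computes: (clean, clean) pairs are pulled toward 1, while (estimate, clean) pairs are pulled toward the (normalised) metric score. All tensor values below are made up:
```python
import torch
# Hypothetical discriminator outputs for a batch of 4 pairs.
est_labels_real = torch.rand(4, 1)   # D(clean, clean)
est_labels_fake = torch.rand(4, 1)   # D(estimate, clean)
metric_scores = torch.tensor([[0.91], [0.87], [0.78], [0.95]])  # e.g. STOI in [0, 1]
# labels=True branch: target is 1 for real pairs.
loss_real = torch.mean((est_labels_real - torch.ones_like(est_labels_real)) ** 2)
# labels=False branch: target is the metric value for enhanced estimates.
loss_fake = torch.mean((est_labels_fake - metric_scores) ** 2)
print(loss_real.item(), loss_fake.item())
```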
#### File: tests/engine/gan_system_test.py
```python
import torch
from torch import nn, optim
from torch.utils import data
from pytorch_lightning import Trainer
from torch.optim.lr_scheduler import ReduceLROnPlateau
from asteroid_gan_exps.engine.gan_system import GanSystem
from torch.nn.modules.loss import _Loss
from asteroid.losses import PITLossWrapper, pairwise_neg_sisdr
class DummyDataset(data.Dataset):
def __init__(self):
self.inp_dim = 10
self.out_dim = 10
def __len__(self):
return 20
def __getitem__(self, idx):
return torch.randn(1, self.inp_dim), torch.randn(1, self.out_dim)
class GeneratorLoss(_Loss):
def forward(self, est_labels):
loss = torch.mean((est_labels - torch.ones_like(est_labels)) ** 2)
return loss
class DiscriminatorLoss(_Loss):
def forward(self, est_labels, labels):
# Behaves differently if estimates come from the generated data or not
if labels:
loss = torch.mean((est_labels - torch.ones_like(est_labels)) ** 2)
else:
loss = torch.mean(est_labels ** 2)
return loss
class Discriminator(nn.Module):
"""D"""
def __init__(self):
super().__init__()
self.model = nn.Sequential(nn.Linear(10, 1), nn.Sigmoid())
def forward(self, x):
"""
Forward pass of discriminator.
Args:
x: batch of estimates
"""
return self.model(x)
class TrainGAN(GanSystem):
def training_step(self, batch, batch_nb, optimizer_idx):
# Get data from data_loader
inputs, targets = batch
# Forward inputs
estimates = self(inputs)
# Train discriminator
if optimizer_idx == 0:
# Compute D loss for targets
est_true_labels = self.discriminator(targets)
true_loss = self.d_loss(est_true_labels, True)
# Compute D loss for self.estimates
est_false_labels = self.discriminator(estimates.detach())
fake_loss = self.d_loss(est_false_labels, False)
# Overall, the loss is the mean of these
d_loss = (true_loss + fake_loss) * 0.5
tqdm_dict = {'d_loss': d_loss}
output = {
'loss': d_loss,
'progress_bar': tqdm_dict,
'log': tqdm_dict
}
return output
# Train generator
if optimizer_idx == 1:
# The generator is supposed to fool the discriminator.
est_labels = self.discriminator(estimates)
adversarial_loss = self.g_loss(est_labels)
tqdm_dict = {'g_loss': adversarial_loss}
output = {
'loss': adversarial_loss,
'progress_bar': tqdm_dict,
'log': tqdm_dict
}
return output
def validation_step(self, batch, batch_nb):
""" Need to overwrite PL validation_step to do validation.
Args:
batch: the object returned by the loader (a list of torch.Tensor
in most cases) but can be something else.
batch_nb (int): The number of the batch in the epoch.
Returns:
dict:
``'val_loss'``: loss
"""
inputs, targets = batch
est_targets = self(inputs)
val_loss = self.validation_loss(est_targets, targets)
return {'val_loss': val_loss}
def validation_epoch_end(self, outputs):
""" How to aggregate outputs of `validation_step` for logging.
Args:
outputs (list[dict]): List of validation losses, each with a
``'val_loss'`` key
Returns:
dict: Average loss
``'val_loss'``: Average loss on `outputs`
``'log'``: Tensorboard logs
``'progress_bar'``: Tensorboard logs
"""
avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
tensorboard_logs = {'val_loss': avg_loss}
return {'val_loss': avg_loss, 'log': tensorboard_logs,
'progress_bar': tensorboard_logs}
def test_system():
discriminator = Discriminator()
generator = nn.Sequential(nn.Linear(10, 10), nn.ReLU())
opt_d = optim.Adam(discriminator.parameters(), lr=1e-3)
opt_g = optim.Adam(generator.parameters(), lr=1e-3)
scheduler_d = ReduceLROnPlateau(optimizer=opt_d, factor=0.5, patience=5)
scheduler_g = ReduceLROnPlateau(optimizer=opt_g, factor=0.5, patience=5)
g_loss = GeneratorLoss()
d_loss = DiscriminatorLoss()
validation_loss = PITLossWrapper(pairwise_neg_sisdr, pit_from='pw_mtx')
dataset = DummyDataset()
loader = data.DataLoader(dataset, batch_size=4, num_workers=4)
gan = TrainGAN(discriminator=discriminator, generator=generator,
opt_d=opt_d, opt_g=opt_g, discriminator_loss=d_loss,
generator_loss=g_loss, validation_loss=validation_loss,
train_loader=loader, val_loader=loader,
scheduler_d=scheduler_d, scheduler_g=scheduler_g)
trainer = Trainer(max_epochs=1, fast_dev_run=True)
trainer.fit(gan)
``` |
{
"source": "JorisCos/scaper",
"score": 2
} |
#### File: scaper/scaper/core.py
```python
try:
import soxbindings as sox
except: # pragma: no cover
import sox # pragma: no cover
import soundfile
import os
import warnings
import jams
from collections import namedtuple
import logging
import tempfile
import numpy as np
import shutil
import csv
from copy import deepcopy
from .scaper_exceptions import ScaperError
from .scaper_warnings import ScaperWarning
from .util import _close_temp_files
from .util import _set_temp_logging_level
from .util import _get_sorted_files
from .util import _validate_folder_path
from .util import _populate_label_list
from .util import _check_random_state
from .util import _sample_trunc_norm
from .util import _sample_uniform
from .util import _sample_choose
from .util import _sample_normal
from .util import _sample_const
from .util import max_polyphony
from .util import polyphony_gini
from .util import is_real_number, is_real_array
from .audio import get_integrated_lufs
from .audio import peak_normalize
from .version import version as scaper_version
SUPPORTED_DIST = {"const": _sample_const,
"choose": _sample_choose,
"uniform": _sample_uniform,
"normal": _sample_normal,
"truncnorm": _sample_trunc_norm}
# Define single event spec as namedtuple
EventSpec = namedtuple(
'EventSpec',
['label', 'source_file', 'source_time', 'event_time', 'event_duration',
'snr', 'role', 'pitch_shift', 'time_stretch'])
'''
Container for storing event specifications, either probabilistic (i.e. using
distribution tuples to specify possible values) or instantiated (i.e. storing
constants directly).
'''
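# An instantiated spec might look like (values are illustrative):
#   EventSpec(label='siren', source_file='foreground/siren/1.wav', source_time=0.0,
#             event_time=2.0, event_duration=3.0, snr=6.0, role='foreground',
#             pitch_shift=None, time_stretch=None)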
def generate_from_jams(jams_infile,
audio_outfile=None,
fg_path=None,
bg_path=None,
jams_outfile=None,
save_isolated_events=False,
isolated_events_path=None,
disable_sox_warnings=True,
txt_path=None,
txt_sep='\t'):
'''
Generate a soundscape based on an existing scaper JAMS file and return as
an audio file, a JAMS annotation, a simplified annotation list, and a
list containing the audio samples of each individual background and
foreground event. If output paths are provided, these objects will also
be saved to disk.
Parameters
----------
jams_infile : str
Path to JAMS file (must be a file previously generated by Scaper).
audio_outfile : str
Path for saving the generated soundscape audio.
fg_path : str or None
Specifies a different path for foreground audio than the one stored in
the input jams file. For the reconstruction to be successful the folder
        and file structure inside this path must be identical to the one that was
used to create the input jams file. If None (default), the fg_path from
the input jams file will be used.
bg_path : str or None
Specifies a different path for background audio than the one stored in
the input jams file. For the reconstruction to be successful the folder
        and file structure inside this path must be identical to the one that was
used to create the input jams file. If None (default), the bg_path from
the input jams file will be used.
jams_outfile : str or None
Path for saving new JAMS file, if None (default) a new JAMS is not
saved. Useful when either fg_path or bg_path is not None, as it saves
a new JAMS files where the source file paths match the new fg_path
and/or bg_path.
save_isolated_events : bool
If True, this will save the isolated event audio in a directory adjacent to the generated soundscape
mixture, or to the path defined by `isolated_events_path`. The audio of the isolated events sum
up to the mixture if reverb is not applied. Isolated events can be found
(by default) at `<audio_outfile parent folder>/<audio_outfile name>_events`.
Isolated event file names follow the pattern: `<role><idx>_<label>`, where idx
is the index of the isolated event in
self.fg_spec or self.bg_spec (this allows events of the same label to be added more than
once to the soundscape without breaking things). Role is "background" or "foreground".
For example: `foreground0_siren.wav` or `background0_park.wav`.
isolated_events_path : str
Path to folder for saving isolated events. If None, defaults to
`<audio_outfile parent folder>/<audio_outfile name>_events`.
disable_sox_warnings : bool
When True (default), warnings from the pysox module are suppressed
unless their level is ``'CRITICAL'``. If you're experiencing issues related
        to audio I/O, setting this parameter to False may help with debugging.
txt_path: str or None
Path for saving a simplified annotation in a space separated format
[onset offset label] where onset and offset are in seconds. Good
for loading labels in e.g. Audacity. If None, does not save txt
annotation to disk.
txt_sep: str
The separator to use when saving a simplified annotation as a text
file (default is tab for compatibility with Audacity label files).
Only relevant if txt_path is not None.
Returns
-------
soundscape_audio : np.ndarray
The audio samples of the generated soundscape. Returns None if
no_audio=True.
soundscape_jam: jams.JAMS
The JAMS object containing the full soundscape annotation.
annotation_list : list
A simplified annotation in a space-separated format
[onset offset label] where onset and offset are in seconds.
event_audio_list: list
A list of np.ndarrays containing the audio samples of every
individual background and foreground sound event. Events are listed
in the same order in which they appear in the jams annotations data
list, and can be matched with:
`for obs, event_audio in zip(ann.data, event_audio_list): ...`.
Raises
------
ScaperError
If jams_infile does not point to a valid JAMS file that was previously
generated by Scaper and contains an annotation of the scaper
namespace.
'''
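    # Illustrative call (paths are hypothetical):
    #   audio, jam, ann_list, event_audio = generate_from_jams(
    #       'soundscape.jams', audio_outfile='soundscape.wav',
    #       save_isolated_events=True)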
soundscape_jam = jams.load(jams_infile)
anns = soundscape_jam.search(namespace='scaper')
if len(anns) == 0:
raise ScaperError(
'JAMS file does not contain any annotation with namespace '
'scaper.')
ann = soundscape_jam.annotations.search(namespace='scaper')[0]
# Update paths
if fg_path is None:
new_fg_path = ann.sandbox.scaper['fg_path']
else:
new_fg_path = os.path.expanduser(fg_path)
# Update source files
for obs in ann.data:
if obs.value['role'] == 'foreground':
sourcefile = obs.value['source_file']
sourcefilename = os.path.basename(sourcefile)
parent = os.path.dirname(sourcefile)
parentname = os.path.basename(parent)
newsourcefile = os.path.join(
new_fg_path, parentname, sourcefilename)
obs.value['source_file'] = newsourcefile # hacky
# Update sandbox
ann.sandbox.scaper['fg_path'] = new_fg_path
if bg_path is None:
new_bg_path = ann.sandbox.scaper['bg_path']
else:
new_bg_path = os.path.expanduser(bg_path)
# Update source files
for obs in ann.data:
if obs.value['role'] == 'background':
sourcefile = obs.value['source_file']
sourcefilename = os.path.basename(sourcefile)
parent = os.path.dirname(sourcefile)
parentname = os.path.basename(parent)
newsourcefile = os.path.join(
new_bg_path, parentname, sourcefilename)
obs.value['source_file'] = newsourcefile # hacky
# Update sandbox
ann.sandbox.scaper['bg_path'] = new_bg_path
# Create scaper object
if 'original_duration' in ann.sandbox.scaper:
duration = ann.sandbox.scaper['original_duration']
else:
duration = ann.sandbox.scaper['duration']
warnings.warn(
"Couldn't find original_duration field in the scaper sandbox, "
"using duration field instead. This can lead to incorrect behavior "
"if generating from a jams file that has been trimmed previously.",
ScaperWarning)
protected_labels = ann.sandbox.scaper['protected_labels']
sc = Scaper(duration, new_fg_path, new_bg_path, protected_labels)
# Set synthesis parameters
if 'sr' in ann.sandbox.scaper: # backwards compatibility
sc.sr = ann.sandbox.scaper['sr']
# sc.forced_protected_labels = ann.sandbox.scaper['forced_protected_labels']
sc.ref_db = ann.sandbox.scaper['ref_db']
sc.n_channels = ann.sandbox.scaper['n_channels']
sc.fade_in_len = ann.sandbox.scaper['fade_in_len']
sc.fade_out_len = ann.sandbox.scaper['fade_out_len']
# Pull generation parameters from annotation
reverb = ann.sandbox.scaper['reverb']
if 'fix_clipping' in ann.sandbox.scaper.keys():
fix_clipping = ann.sandbox.scaper['fix_clipping']
else:
fix_clipping = False
if 'peak_normalization' in ann.sandbox.scaper.keys():
peak_normalization = ann.sandbox.scaper['peak_normalization']
else:
peak_normalization = False
if 'quick_pitch_time' in ann.sandbox.scaper.keys():
quick_pitch_time = ann.sandbox.scaper['quick_pitch_time']
else:
quick_pitch_time = False
# Cast ann.sandbox.scaper to a Sandbox object
ann.sandbox.scaper = jams.Sandbox(**ann.sandbox.scaper)
# Generate audio
soundscape_audio, event_audio_list, scale_factor, ref_db_change = \
sc._generate_audio(audio_outfile,
ann,
reverb=reverb,
fix_clipping=fix_clipping,
peak_normalization=peak_normalization,
quick_pitch_time=quick_pitch_time,
save_isolated_events=save_isolated_events,
isolated_events_path=isolated_events_path,
disable_sox_warnings=disable_sox_warnings)
# TODO: Stick to heavy handed overwriting for now, in the future we
# should consolidate this with what happens inside _instantiate().
ann.sandbox.scaper.reverb = reverb
ann.sandbox.scaper.fix_clipping = fix_clipping
ann.sandbox.scaper.peak_normalization = peak_normalization
ann.sandbox.scaper.quick_pitch_time = quick_pitch_time
ann.sandbox.scaper.save_isolated_events = save_isolated_events
ann.sandbox.scaper.isolated_events_path = isolated_events_path
ann.sandbox.scaper.disable_sox_warnings = disable_sox_warnings
ann.sandbox.scaper.peak_normalization_scale_factor = scale_factor
ann.sandbox.scaper.ref_db_change = ref_db_change
ann.sandbox.scaper.ref_db_generated = sc.ref_db + ref_db_change
# If there are slice (trim) operations, need to perform them!
# Need to add this logic for the isolated events too.
if 'slice' in ann.sandbox.keys():
for sliceop in ann.sandbox['slice']:
# must use temp file in order to save to same file
tmpfiles = []
audio_files = [audio_outfile] + ann.sandbox.scaper.isolated_events_audio_path
with _close_temp_files(tmpfiles):
for audio_file in audio_files:
# Create tmp file
tmpfiles.append(
tempfile.NamedTemporaryFile(suffix='.wav', delete=False))
# Save trimmed result to temp file
tfm = sox.Transformer()
tfm.trim(sliceop['slice_start'], sliceop['slice_end'])
tfm.build(audio_file, tmpfiles[-1].name)
# Copy result back to original file
shutil.copyfile(tmpfiles[-1].name, audio_file)
# Optionally save new jams file
if jams_outfile is not None:
soundscape_jam.save(jams_outfile)
# Create annotation list
annotation_list = []
for obs in ann.data:
if obs.value['role'] == 'foreground':
annotation_list.append(
[obs.time, obs.time + obs.duration, obs.value['label']])
if txt_path is not None:
with open(txt_path, 'w') as csv_file:
writer = csv.writer(csv_file, delimiter=txt_sep)
writer.writerows(annotation_list)
return soundscape_audio, soundscape_jam, annotation_list, event_audio_list
def trim(audio_infile, jams_infile, audio_outfile, jams_outfile, start_time,
end_time, no_audio=False):
'''
Trim an audio file and corresponding Scaper JAMS file and save to disk.
Given an input audio file and corresponding jams file, trim both the audio
and all annotations in the jams file to the time range ``[start_time,
end_time]`` and save the result to ``audio_outfile`` and ``jams_outfile``
respectively. This function uses ``jams.slice()`` for trimming the jams
file while ensuring the start times of the jam's annotations and
observations they contain match the trimmed audio file.
Parameters
----------
audio_infile : str
Path to input audio file
jams_infile : str
Path to input jams file
audio_outfile : str
Path to output trimmed audio file
jams_outfile : str
Path to output trimmed jams file
start_time : float
Start time for trimmed audio/jams
end_time : float
End time for trimmed audio/jams
no_audio : bool
If true, operates on the jams only. Audio input and output paths
don't have to point to valid files.
'''
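    # Illustrative call (paths are hypothetical): keep seconds 2.0-7.0 of a soundscape.
    #   trim('soundscape.wav', 'soundscape.jams',
    #        'soundscape_trimmed.wav', 'soundscape_trimmed.jams', 2.0, 7.0)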
# First trim jams (might raise an error)
jam = jams.load(jams_infile)
jam_sliced = jam.slice(start_time, end_time, strict=False)
# Special work for annotations of the scaper 'scaper' namespace
for ann in jam_sliced.annotations:
if ann.namespace == 'scaper':
# DON'T MODIFY event's value dict! Keeps original instantiated
# values for reconstruction / reproducibility.
# Count number of FG events
n_events = 0
for obs in ann.data:
if obs.value['role'] == 'foreground':
n_events += 1
# Re-compute max polyphony
poly = max_polyphony(ann)
# Re-compute polyphony gini
gini = polyphony_gini(ann)
# Update specs in sandbox
ann.sandbox.scaper['n_events'] = n_events
ann.sandbox.scaper['polyphony_max'] = poly
ann.sandbox.scaper['polyphony_gini'] = gini
ann.sandbox.scaper['duration'] = ann.duration
# Save result to output jams file
jam_sliced.save(jams_outfile)
# Next, trim audio
if not no_audio:
tfm = sox.Transformer()
tfm.trim(start_time, end_time)
if audio_outfile != audio_infile:
tfm.build(audio_infile, audio_outfile)
else:
# must use temp file in order to save to same file
tmpfiles = []
with _close_temp_files(tmpfiles):
# Create tmp file
tmpfiles.append(
tempfile.NamedTemporaryFile(
suffix='.wav', delete=False))
# Save trimmed result to temp file
tfm.build(audio_infile, tmpfiles[-1].name)
# Copy result back to original file
shutil.copyfile(tmpfiles[-1].name, audio_outfile)
def _get_value_from_dist(dist_tuple, random_state):
'''
Sample a value from the provided distribution tuple.
Given a distribution tuple, validate its format/values and then sample
and return a single value from the distribution specified by the tuple.
Parameters
----------
dist_tuple : tuple
Distribution tuple to be validated. See ``Scaper.add_event`` for
details about the expected format for the distribution tuple.
Returns
-------
value
A value from the specified distribution.
See Also
--------
Scaper.add_event : Add a foreground sound event to the foreground
specification.
_validate_distribution : Check whether a tuple specifying a parameter
distribution has a valid format, if not raise an error.
'''
# Make sure it's a valid distribution tuple
_validate_distribution(dist_tuple)
return SUPPORTED_DIST[dist_tuple[0]](*dist_tuple[1:], random_state=random_state)
def _validate_distribution(dist_tuple):
'''
Check whether a tuple specifying a parameter distribution has a valid
format, if not raise an error.
Parameters
----------
dist_tuple : tuple
Tuple specifying a distribution to sample from. See Scaper.add_event
for details about the expected format of the tuple and allowed values.
Raises
------
ScaperError
If the tuple does not have a valid format.
See Also
--------
Scaper.add_event : Add a foreground sound event to the foreground
specification.
'''
# Make sure it's a tuple
if not isinstance(dist_tuple, tuple):
raise ScaperError('Distribution tuple must be of type tuple.')
# Make sure the tuple contains at least 2 items
if len(dist_tuple) < 2:
raise ScaperError('Distribution tuple must be at least of length 2.')
# Make sure the first item is one of the supported distribution names
if dist_tuple[0] not in SUPPORTED_DIST.keys():
raise ScaperError(
"Unsupported distribution name: {:s}".format(dist_tuple[0]))
# If it's a constant distribution, tuple must be of length 2
if dist_tuple[0] == 'const':
if len(dist_tuple) != 2:
raise ScaperError('"const" distribution tuple must be of length 2')
# If it's a choose, tuple must be of length 2 and second item of type list
elif dist_tuple[0] == 'choose':
if len(dist_tuple) != 2 or not isinstance(dist_tuple[1], list):
raise ScaperError(
'The "choose" distribution tuple must be of length 2 where '
'the second item is a list.')
# If it's a uniform distribution, tuple must be of length 3, 2nd item must
# be a real number and 3rd item must be real and greater/equal to the 2nd.
elif dist_tuple[0] == 'uniform':
if (len(dist_tuple) != 3 or
not is_real_number(dist_tuple[1]) or
not is_real_number(dist_tuple[2]) or
dist_tuple[1] > dist_tuple[2]):
raise ScaperError(
'The "uniform" distribution tuple be of length 2, where the '
'2nd item is a real number and the 3rd item is a real number '
'and greater/equal to the 2nd item.')
# If it's a normal distribution, tuple must be of length 3, 2nd item must
# be a real number and 3rd item must be a non-negative real
elif dist_tuple[0] == 'normal':
if (len(dist_tuple) != 3 or
not is_real_number(dist_tuple[1]) or
not is_real_number(dist_tuple[2]) or
dist_tuple[2] < 0):
raise ScaperError(
'The "normal" distribution tuple must be of length 3, where '
'the 2nd item (mean) is a real number and the 3rd item (std '
'dev) is real and non-negative.')
elif dist_tuple[0] == 'truncnorm':
if (len(dist_tuple) != 5 or
not is_real_number(dist_tuple[1]) or
not is_real_number(dist_tuple[2]) or
not is_real_number(dist_tuple[3]) or
not is_real_number(dist_tuple[4]) or
dist_tuple[2] < 0 or
dist_tuple[4] < dist_tuple[3]):
raise ScaperError(
'The "truncnorm" distribution tuple must be of length 5, '
'where the 2nd item (mean) is a real number, the 3rd item '
'(std dev) is real and non-negative, the 4th item (trunc_min) '
                'is a real number and the 5th item (trunc_max) is a real '
'number that is equal to or greater than trunc_min.')
def _ensure_satisfiable_source_time_tuple(source_time, source_duration, event_duration):
'''
Modify a source_time distribution tuple according to the duration of the
source and the duration of the event. This allows you to sample from
anywhere in a source file without knowing the exact duration of every
source file.
Parameters
----------
source_time : tuple
Tuple specifying a distribution to sample from. See Scaper.add_event
for details about the expected format of the tuple and allowed values.
source_duration : float
Duration of the source audio file.
event_duration : float
Duration of the event to be extracted from the source file.
See Also
--------
Scaper.add_event : Add a foreground sound event to the foreground
specification.
'''
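    # Worked example (illustrative): with a 10 s source and a 4 s event the latest
    # valid start time is 6 s, so:
    #   ('uniform', 0, 12) -> ('uniform', 0, 6)
    #   ('uniform', 8, 12) -> ('const', 6)      (bounds collapse to a constant)
    #   ('const', 9)       -> ('const', 6)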
_validate_distribution(source_time)
old_source_time = deepcopy(source_time)
source_time = list(source_time)
# If it's a constant distribution, just make sure it's within bounds.
if source_time[0] == 'const':
if source_time[1] + event_duration > source_duration:
source_time[1] = max(0, source_duration - event_duration)
# If it's a choose, iterate through the list to make sure it's all in bounds.
# Some logic here so we don't add stuff out of bounds more than once.
elif source_time[0] == 'choose':
for i, t in enumerate(source_time[1]):
if t + event_duration > source_duration:
source_time[1][i] = max(0, source_duration - event_duration)
source_time[1] = list(set(source_time[1]))
    # If it's a uniform distribution, the tuple must be of length 3. We change the
    # 3rd item to source_duration - event_duration so that we stay in bounds. If the
    # min is out of bounds, we change it to be source_duration - event_duration.
elif source_time[0] == 'uniform':
if source_time[1] + event_duration > source_duration:
source_time[1] = max(0, source_duration - event_duration)
if source_time[2] + event_duration > source_duration:
source_time[2] = max(0, source_duration - event_duration)
if (source_time[1] == source_time[2]):
# switch to const
source_time = ['const', source_time[1]]
# If it's a normal distribution, we change the mean of the distribution to
# source_duration - event_duration if source_duration - mean < event_duration.
elif source_time[0] == 'normal':
if source_time[1] + event_duration > source_duration:
source_time[1] = max(0, source_duration - event_duration)
# If it's a truncated normal distribution, we change the mean as we did above for a
# normal distribution, and change the max (5th item) to
# source_duration - event_duration if it's bigger. If the min is out of bounds, we
# change it like in the uniform case.
elif source_time[0] == 'truncnorm':
if source_time[1] + event_duration > source_duration:
source_time[1] = max(0, source_duration - event_duration)
if source_time[3] + event_duration > source_duration:
source_time[3] = max(0, source_duration - event_duration)
if source_time[4] + event_duration > source_duration:
source_time[4] = max(0, source_duration - event_duration)
if (source_time[3] == source_time[4]):
# switch to const
source_time = ['const', source_time[1]]
source_time = tuple(source_time)
# check if the source_time changed from the old_source_time to throw a warning.
# it gets set here but the warning happens after the return from this call
warn = (source_time != old_source_time)
return tuple(source_time), warn
def _validate_label(label, allowed_labels):
'''
    Validate that a label tuple is in the right format and that its values
    are valid.
Parameters
----------
label : tuple
Label tuple (see ```Scaper.add_event``` for required format).
allowed_labels : list
List of allowed labels.
Raises
------
ScaperError
If the validation fails.
'''
# Make sure it's a valid distribution tuple
_validate_distribution(label)
# Make sure it's one of the allowed distributions for a label and that the
# label value is one of the allowed labels.
if label[0] == "const":
if not label[1] in allowed_labels:
raise ScaperError(
'Label value must match one of the available labels: '
'{:s}'.format(str(allowed_labels)))
elif label[0] == "choose":
if label[1]: # list is not empty
if not set(label[1]).issubset(set(allowed_labels)):
raise ScaperError(
'Label list provided must be a subset of the available '
'labels: {:s}'.format(str(allowed_labels)))
else:
raise ScaperError(
'Label must be specified using a "const" or "choose" tuple.')
def _validate_source_file(source_file_tuple, label_tuple):
'''
    Validate that a source_file tuple is in the right format and that its
    values are valid.
Parameters
----------
    source_file_tuple : tuple
        Source file tuple (see ```Scaper.add_event``` for required format).
    label_tuple : tuple
        Label tuple (see ```Scaper.add_event``` for required format).
Raises
------
ScaperError
If the validation fails.
'''
# Make sure it's a valid distribution tuple
_validate_distribution(source_file_tuple)
_validate_distribution(label_tuple)
# If source file is specified explicitly
if source_file_tuple[0] == "const":
# 1. the filepath must point to an existing file
if not os.path.isfile(source_file_tuple[1]):
raise ScaperError(
"Source file not found: {:s}".format(source_file_tuple[1]))
# 2. the label must match the file's parent folder name
parent_name = os.path.basename(os.path.dirname(source_file_tuple[1]))
if label_tuple[0] != "const" or label_tuple[1] != parent_name:
raise ScaperError(
"Source file's parent folder name does not match label.")
# Otherwise it must be specified using "choose"
elif source_file_tuple[0] == "choose":
if source_file_tuple[1]: # list is not empty
if not all(os.path.isfile(x) for x in source_file_tuple[1]):
raise ScaperError(
'Source file list must either be empty or all paths in '
'the list must point to valid files.')
else:
raise ScaperError(
'Source file must be specified using a "const" or "choose" tuple.')
def _validate_time(time_tuple):
'''
Validate that a time tuple has the right format and that the
specified distribution cannot result in a negative time.
Parameters
----------
time_tuple : tuple
Time tuple (see ```Scaper.add_event``` for required format).
Raises
------
ScaperError
If the validation fails.
'''
# Make sure it's a valid distribution tuple
_validate_distribution(time_tuple)
# Ensure the values are valid for time
if time_tuple[0] == "const":
if (time_tuple[1] is None or
not is_real_number(time_tuple[1]) or
time_tuple[1] < 0):
raise ScaperError(
'Time must be a real non-negative number.')
elif time_tuple[0] == "choose":
if (not time_tuple[1] or
not is_real_array(time_tuple[1]) or
not all(x is not None for x in time_tuple[1]) or
not all(x >= 0 for x in time_tuple[1])):
raise ScaperError(
'Time list must be a non-empty list of non-negative real '
'numbers.')
elif time_tuple[0] == "uniform":
if time_tuple[1] < 0:
raise ScaperError(
'A "uniform" distribution tuple for time must have '
'min_value >= 0')
elif time_tuple[0] == "normal":
warnings.warn(
'A "normal" distribution tuple for time can result in '
'negative values, in which case the distribution will be '
're-sampled until a positive value is returned: this can result '
'in an infinite loop!',
ScaperWarning)
elif time_tuple[0] == "truncnorm":
if time_tuple[3] < 0:
raise ScaperError(
'A "truncnorm" distirbution tuple for time must specify a non-'
'negative trunc_min value.')
def _validate_duration(duration_tuple):
'''
Validate that a duration tuple has the right format and that the
specified distribution cannot result in a negative or zero value.
Parameters
----------
    duration_tuple : tuple
Duration tuple (see ```Scaper.add_event``` for required format).
Raises
------
ScaperError
If the validation fails.
'''
# Make sure it's a valid distribution tuple
_validate_distribution(duration_tuple)
# Ensure the values are valid for duration
if duration_tuple[0] == "const":
if (not is_real_number(duration_tuple[1]) or
duration_tuple[1] <= 0):
raise ScaperError(
'Duration must be a real number greater than zero.')
elif duration_tuple[0] == "choose":
if (not duration_tuple[1] or
not is_real_array(duration_tuple[1]) or
not all(x > 0 for x in duration_tuple[1])):
raise ScaperError(
'Duration list must be a non-empty list of positive real '
'numbers.')
elif duration_tuple[0] == "uniform":
if duration_tuple[1] <= 0:
raise ScaperError(
'A "uniform" distribution tuple for duration must have '
'min_value > 0')
elif duration_tuple[0] == "normal":
warnings.warn(
'A "normal" distribution tuple for duration can result in '
            'non-positive values, in which case the distribution will be '
're-sampled until a positive value is returned: this can result '
'in an infinite loop!',
ScaperWarning)
elif duration_tuple[0] == "truncnorm":
if duration_tuple[3] <= 0:
raise ScaperError(
'A "truncnorm" distirbution tuple for time must specify a '
'positive trunc_min value.')
def _validate_snr(snr_tuple):
'''
Validate that an snr distribution tuple has the right format.
Parameters
----------
    snr_tuple : tuple
SNR tuple (see ```Scaper.add_event``` for required format).
Raises
------
ScaperError
If the validation fails.
'''
# Make sure it's a valid distribution tuple
_validate_distribution(snr_tuple)
# Ensure the values are valid for SNR
if snr_tuple[0] == "const":
if not is_real_number(snr_tuple[1]):
raise ScaperError(
'SNR must be a real number.')
elif snr_tuple[0] == "choose":
if (not snr_tuple[1] or
not is_real_array(snr_tuple[1])):
raise ScaperError(
'SNR list must be a non-empty list of real numbers.')
# No need to check for "uniform" and "normal" since they must produce a
# real number and technically speaking any real number is a valid SNR.
# TODO: do we want to impose limits on the possible SNR values?
def _validate_pitch_shift(pitch_shift_tuple):
'''
Validate that a pitch_shift distribution tuple has the right format.
Parameters
----------
pitch_shift_tuple : tuple
Pitch shift tuple (see ```Scaper.add_event``` for required format).
Raises
------
ScaperError
If the validation fails.
'''
# If the tuple is none then it's valid
if pitch_shift_tuple is not None:
# Make sure it's a valid distribution tuple
_validate_distribution(pitch_shift_tuple)
# Ensure the values are valid for pitch shift
if pitch_shift_tuple[0] == "const":
if not is_real_number(pitch_shift_tuple[1]):
raise ScaperError(
'Pitch shift must be a real number.')
elif pitch_shift_tuple[0] == "choose":
if (not pitch_shift_tuple[1] or
not is_real_array(pitch_shift_tuple[1])):
raise ScaperError(
'Pitch shift list must be a non-empty list of real '
'numbers.')
# No need to check for "uniform" and "normal" since they must produce a
# real number and technically speaking any real number is a valid pitch
# shift
# TODO: do we want to impose limits on the possible pitch shift values?
def _validate_time_stretch(time_stretch_tuple):
'''
Validate that a time_stretch distribution tuple has the right format.
Parameters
----------
time_stretch_tuple: tuple
Time stretch tuple (see ```Scaper.add_event``` for required format).
Raises
------
ScaperError
If the validation fails.
'''
# if the tuple is none then its valid
if time_stretch_tuple is not None:
# Make sure it's a valid distribution tuple
_validate_distribution(time_stretch_tuple)
# Ensure the values are valid for time stretch
if time_stretch_tuple[0] == "const":
if (not is_real_number(time_stretch_tuple[1]) or
time_stretch_tuple[1] <= 0):
raise ScaperError(
'Time stretch must be a real number greater than zero.')
elif time_stretch_tuple[0] == "choose":
if (not time_stretch_tuple[1] or
not is_real_array(time_stretch_tuple[1]) or
not all(x > 0 for x in time_stretch_tuple[1])):
raise ScaperError(
'Time stretch list must be a non-empty list of positive '
'real numbers.')
elif time_stretch_tuple[0] == "uniform":
if time_stretch_tuple[1] <= 0:
raise ScaperError(
'A "uniform" distribution tuple for time stretch must have '
'min_value > 0')
elif time_stretch_tuple[0] == "normal":
warnings.warn(
'A "normal" distribution tuple for time stretch can result in '
                'non-positive values, in which case the distribution will be '
're-sampled until a positive value is returned: this can '
'result in an infinite loop!',
ScaperWarning)
elif time_stretch_tuple[0] == "truncnorm":
if time_stretch_tuple[3] <= 0:
raise ScaperError(
'A "truncnorm" distirbution tuple for time stretch must '
'specify a positive trunc_min value.')
# TODO: do we want to impose limits on the possible time stretch
# values?
def _validate_event(label, source_file, source_time, event_time,
event_duration, snr, allowed_labels, pitch_shift,
time_stretch):
'''
Check that event parameter values are valid.
Parameters
----------
label : tuple
source_file : tuple
source_time : tuple
event_time : tuple
event_duration : tuple
snr : tuple
allowed_labels : list
List of allowed labels for the event.
pitch_shift : tuple or None
time_stretch: tuple or None
Raises
------
ScaperError :
If any of the input parameters has an invalid format or value.
See Also
--------
Scaper.add_event : Add a foreground sound event to the foreground
specification.
'''
# allowed_labels must be a list. All other parameters will be validated
# individually.
if not isinstance(allowed_labels, list):
raise ScaperError('allowed_labels must be of type list.')
# SOURCE FILE
_validate_source_file(source_file, label)
# LABEL
_validate_label(label, allowed_labels)
# SOURCE TIME
_validate_time(source_time)
# EVENT TIME
_validate_time(event_time)
# EVENT DURATION
_validate_duration(event_duration)
# SNR
_validate_snr(snr)
# Pitch shift
_validate_pitch_shift(pitch_shift)
# Time stretch
_validate_time_stretch(time_stretch)
class Scaper(object):
'''
Create a Scaper object.
Parameters
----------
duration : float
Duration of the soundscape, in seconds.
fg_path : str
Path to foreground folder.
bg_path : str
Path to background folder.
protected_labels : list
Provide a list of protected foreground labels. When a foreground
label is in the protected list it means that when a sound event
matching the label gets added to a soundscape instantiation the
duration of the source audio file cannot be altered, and the
duration value that was provided in the specification will be
ignored. Adding labels to the protected list is useful for sound events
whose semantic validity would be lost if the sound were trimmed
before the sound event ends, for example an animal vocalization
such as a dog bark.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by np.random. Note that if the random state is passed as a
RandomState instance, it is passed by reference, not value. This will lead to
the Scaper object advancing the state of the random state object if you use
it elsewhere.
'''
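    # Illustrative construction (paths are hypothetical):
    #   sc = Scaper(duration=10.0, fg_path='audio/foreground',
    #               bg_path='audio/background', protected_labels=['dog_bark'],
    #               random_state=0)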
def __init__(self, duration, fg_path, bg_path, protected_labels=[],
forced_protected_labels=None, random_state=None):
'''
Create a Scaper object.
Parameters
----------
duration : float
Duration of the soundscape, in seconds.
fg_path : str
Path to foreground folder.
bg_path : str
Path to background folder.
protected_labels : list
Provide a list of protected foreground labels. When a foreground
label is in the protected list it means that when a sound event
matching the label gets added to a soundscape instantiation the
duration of the source audio file cannot be altered, and the
duration value that was provided in the specification will be
ignored. Adding labels to the protected list is useful for sound events
whose semantic validity would be lost if the sound were trimmed
before the sound event ends, for example an animal vocalization
such as a dog bark.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by np.random. Note that if the random state is passed as a
RandomState instance, it is passed by reference, not value. This will lead to
the Scaper object advancing the state of the random state object if you use
it elsewhere.
'''
# Duration must be a positive real number
if np.isrealobj(duration) and duration > 0:
self.duration = duration
else:
raise ScaperError('Duration must be a positive real value')
# Initialize parameters
self.sr = 44100
self.ref_db = -12
self.n_channels = 1
self.fade_in_len = 0.01 # 10 ms
self.fade_out_len = 0.01 # 10 ms
# Start with empty specifications
self.fg_spec = []
self.bg_spec = []
# Start with empty used source_file
self.global_used_source_files = []
# Start with empty used labels
self.global_used_labels = []
# Store initial duration
self.ini_duration = self.duration
# Validate paths and set
expanded_fg_path = os.path.expanduser(fg_path)
expanded_bg_path = os.path.expanduser(bg_path)
_validate_folder_path(expanded_fg_path)
_validate_folder_path(expanded_bg_path)
self.fg_path = expanded_fg_path
self.bg_path = expanded_bg_path
# Populate label lists from folder paths
self.fg_labels = []
self.bg_labels = []
_populate_label_list(self.fg_path, self.fg_labels)
_populate_label_list(self.bg_path, self.bg_labels)
# forced_protected_labels behave as protected_labels but they override
# the soundscape if they are longer.
if forced_protected_labels is None:
self.forced_protected_labels = []
else:
self.forced_protected_labels = forced_protected_labels
# Copy list of protected labels
self.protected_labels = protected_labels + self.forced_protected_labels
# Get random number generator
self.random_state = _check_random_state(random_state)
def reset_fg_event_spec(self):
'''
Resets the foreground event specification to be an empty list as it is when
the Scaper object is initialized in the first place. This allows the same
Scaper object to be used over and over again to generate new soundscapes
with the same underlying settings (e.g. `ref_db`, `num_channels`, and so on.)
See Also
--------
Scaper.reset_bg_event_spec : Same functionality but resets the background
event specification instead of the foreground specification.
'''
self.fg_spec = []
def reset_bg_event_spec(self):
'''
Resets the background event specification to be an empty list as it is when
the Scaper object is initialized in the first place. This allows the same
Scaper object to be used over and over again to generate new soundscapes
with the same underlying settings (e.g. `ref_db`, `num_channels`, and so on.)
See Also
--------
Scaper.reset_fg_event_spec : Same functionality but resets the foreground
event specification instead of the background specification.
'''
self.bg_spec = []
def reset_global_used(self):
'''
Resets the global_used_source_files to be an empty list as it is when
the Scaper object is initialized in the first place. This allows the same
Scaper object to be used over and over again to generate new soundscapes
with the same underlying settings (e.g. `ref_db`, `num_channels`, and so on.)
See Also
--------
        Scaper.reset_fg_event_spec : Resets the foreground event specification
            instead of the list of globally used source files.
'''
self.global_used_source_files = []
def reset_global_used_labels(self):
'''
Resets the global_used_labels to be an empty list as it is when
the Scaper object is initialized in the first place. This allows the same
Scaper object to be used over and over again to generate new soundscapes
with the same underlying settings (e.g. `ref_db`, `num_channels`, and so on.)
See Also
--------
        Scaper.reset_fg_event_spec : Resets the foreground event specification
            instead of the list of globally used labels.
'''
self.global_used_labels = []
def reset_duration(self):
'''
        Resets the soundscape duration to its value from when
the Scaper object is initialized in the first place. This allows the same
Scaper object to be used over and over again to generate new soundscapes
with the same underlying settings (e.g. `ref_db`, `num_channels`, and so on.)
See Also
--------
        Scaper.reset_bg_event_spec : Resets the background event specification
            instead of the soundscape duration.
'''
self.duration = self.ini_duration
def reset_all(self):
'''
        Resets all of the event specifications and bookkeeping lists (and the
        soundscape duration), as they are when
the Scaper object is initialized in the first place. This allows the same
Scaper object to be used over and over again to generate new soundscapes
with the same underlying settings (e.g. `ref_db`, `num_channels`, and so on.)
See Also
--------
        Scaper.reset_fg_event_spec : Resets only the foreground event
            specification instead of all the lists.
'''
self.reset_fg_event_spec()
self.reset_bg_event_spec()
self.reset_global_used()
self.reset_global_used_labels()
self.reset_duration()
def set_random_state(self, random_state):
'''
Allows the user to set the random state after creating the Scaper object.
Parameters
----------
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by np.random.
'''
self.random_state = _check_random_state(random_state)
def add_background(self, label, source_file, source_time):
'''
Add a background recording to the background specification.
The background duration will be equal to the duration of the
soundscape ``Scaper.duration`` specified when initializing the Scaper
object. If the source file is shorter than this duration then it will
be concatenated to itself as many times as necessary to produce the
specified duration when calling ``Scaper.generate``.
Parameters
----------
label : tuple
Specifies the label of the background. See Notes below for the
expected format of this tuple and the allowed values.
NOTE: The label specified by this tuple must match one
of the labels in the Scaper's background label list
``Scaper.bg_labels``. Furthermore, if ``source_file`` is
specified using "const" (see Notes), then ``label`` must also be
specified using "const" and its value (see Notes) must
match the source file's parent folder's name.
source_file : tuple
Specifies the audio file to use as the source. See Notes below for
the expected format of this tuple and the allowed values.
NOTE: If ``source_file`` is specified using "const" (see Notes),
then ``label`` must also be specified using "const" and its
value (see Notes) must match the source file's parent folder's
name.
source_time : tuple
Specifies the desired start time in the source file. See Notes
below for the expected format of this tuple and the allowed values.
NOTE: the source time specified by this tuple should be equal to or
smaller than ``<source file duration> - <soundscape duration>``.
Larger values will be automatically changed to fulfill this
requirement when calling ``Scaper.generate``.
Notes
-----
Each parameter of this function is set by passing a distribution
tuple, whose first item is always the distribution name and subsequent
items are distribution specific. The supported distribution tuples are:
* ``("const", value)`` : a constant, given by ``value``.
* ``("choose", valuelist)`` : choose a value from
``valuelist`` at random (uniformly). The ``label`` and
``source_file`` parameters also support providing an empty
``valuelist`` i.e. ``("choose", [])``, in which case the
value will be chosen at random from all available labels or files
as determined automatically by Scaper by examining the file
structure of ``bg_path`` provided during initialization.
* ``("uniform", min_value, max_value)`` : sample a random
value from a uniform distribution between ``min_value``
and ``max_value``.
* ``("normal", mean, stddev)`` : sample a random value from a
normal distribution defined by its mean ``mean`` and
standard deviation ``stddev``.
IMPORTANT: not all parameters support all distribution tuples. In
particular, ``label`` and ``source_file`` only support ``"const"`` and
``"choose"``, whereas ``source_time`` supports all distribution tuples.
As noted above, only ``label`` and ``source_file`` support providing an
empty ``valuelist`` with ``"choose"``.
'''
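        # Illustrative usage (label value is hypothetical):
        #   sc.add_background(label=('const', 'park'),
        #                     source_file=('choose', []),
        #                     source_time=('uniform', 0, 30))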
# These values are fixed for the background sound
event_time = ("const", 0)
event_duration = ("const", self.duration)
snr = ("const", 0)
role = 'background'
pitch_shift = None
time_stretch = None
# Validate parameter format and values
_validate_event(label, source_file, source_time, event_time,
event_duration, snr, self.bg_labels, None, None)
# Create background sound event
bg_event = EventSpec(label=label,
source_file=source_file,
source_time=source_time,
event_time=event_time,
event_duration=event_duration,
snr=snr,
role=role,
pitch_shift=pitch_shift,
time_stretch=time_stretch)
# Add event to background spec
self.bg_spec.append(bg_event)
def add_event(self, label, source_file, source_time, event_time,
event_duration, snr, pitch_shift, time_stretch):
'''
Add a foreground sound event to the foreground specification.
Parameters
----------
label : tuple
Specifies the label of the sound event. See Notes below for the
expected format of this tuple and the allowed values.
NOTE: The label specified by this tuple must match one
of the labels in the Scaper's foreground label list
``Scaper.fg_labels``. Furthermore, if ``source_file`` is
specified using "const" (see Notes), then ``label`` must also be
specified using "const" and its ``value `` (see Notes) must
match the source file's parent folder's name.
source_file : tuple
Specifies the audio file to use as the source. See Notes below for
the expected format of this tuple and the allowed values.
NOTE: If ``source_file`` is specified using "const" (see Notes),
then ``label`` must also be specified using "const" and its
``value`` (see Notes) must match the source file's parent
folder's name.
source_time : tuple
Specifies the desired start time in the source file. See Notes
below for the expected format of this tuple and the allowed values.
NOTE: the source time specified by this tuple should be equal to or
smaller than ``<source file duration> - event_duration``. Larger
values will be automatically changed to fulfill this requirement
when calling ``Scaper.generate``.
event_time : tuple
Specifies the desired start time of the event in the soundscape.
See Notes below for the expected format of this tuple and the
allowed values.
NOTE: The value specified by this tuple should be equal to or
            smaller than ``<soundscape duration> - event_duration``, and
larger values will be automatically changed to fulfill this
requirement when calling ``Scaper.generate``.
event_duration : tuple
Specifies the desired duration of the event. See Notes below for
the expected format of this tuple and the allowed values.
NOTE: The value specified by this tuple should be equal to or
smaller than the source file's duration, and larger values will be
automatically changed to fulfill this requirement when calling
``Scaper.generate``.
snr : tuple
Specifies the desired signal to noise ratio (SNR) between the event
and the background. See Notes below for the expected format of
this tuple and the allowed values.
pitch_shift : tuple
Specifies the number of semitones to shift the event by. None means
no pitch shift.
time_stretch: tuple
Specifies the time stretch factor (value>1 will make it slower and
            longer, value<1 will make it faster and shorter).
Notes
-----
Each parameter of this function is set by passing a distribution
tuple, whose first item is always the distribution name and subsequent
items are distribution specific. The supported distribution tuples are:
* ``("const", value)`` : a constant, given by ``value``.
* ``("choose", valuelist)`` : choose a value from
``valuelist`` at random (uniformly). The ``label`` and
``source_file`` parameters also support providing an empty
``valuelist`` i.e. ``("choose", [])``, in which case the
value will be chosen at random from all available labels or
source files as determined automatically by Scaper by examining
the file structure of ``fg_path`` provided during
initialization.
* ``("uniform", min_value, max_value)`` : sample a random
value from a uniform distribution between ``min_value``
and ``max_value`` (including ``max_value``).
* ``("normal", mean, stddev)`` : sample a random value from a
normal distribution defined by its mean ``mean`` and
standard deviation ``stddev``.
IMPORTANT: not all parameters support all distribution tuples. In
particular, ``label`` and ``source_file`` only support ``"const"`` and
``"choose"``, whereas the remaining parameters support all distribution
tuples. As noted above, only ``label`` and ``source_file`` support
providing an empty ``valuelist`` with ``"choose"``.
See Also
--------
_validate_event : Check that event parameter values are valid.
Scaper.generate : Generate a soundscape based on the current
specification and save to disk as both an audio file and a JAMS file
describing the soundscape.
'''
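        # Illustrative usage (label value is hypothetical):
        #   sc.add_event(label=('const', 'siren'),
        #                source_file=('choose', []),
        #                source_time=('const', 0),
        #                event_time=('uniform', 0, 8),
        #                event_duration=('truncnorm', 2, 1, 0.5, 4),
        #                snr=('normal', 10, 3),
        #                pitch_shift=('uniform', -2, 2),
        #                time_stretch=None)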
# SAFETY CHECKS
_validate_event(label, source_file, source_time, event_time,
event_duration, snr, self.fg_labels, pitch_shift,
time_stretch)
# Create event
event = EventSpec(label=label,
source_file=source_file,
source_time=source_time,
event_time=event_time,
event_duration=event_duration,
snr=snr,
role='foreground',
pitch_shift=pitch_shift,
time_stretch=time_stretch)
# Add event to foreground specification
self.fg_spec.append(event)
def _instantiate_event(self, event, isbackground=False,
allow_repeated_label=True,
allow_repeated_source=True,
allow_global_repeated_source=True,
used_labels=[],
used_source_files=[],
disable_instantiation_warnings=False):
'''
Instantiate an event specification.
Given an event specification containing distribution tuples,
instantiate the event, i.e. sample values for the label, source_file,
source_time, event_time, event_duration and snr from their respective
distribution tuples, and return the sampled values as a new event
specification.
Parameters
----------
event : EventSpec
Event specification containing distribution tuples.
isbackground : bool
Flag indicating whether the event to instantiate is a background
event or not (False implies it is a foreground event).
allow_repeated_label : bool
When True (default) any label can be used, including a label that
has already been used for another event. When False, only a label
that is not already in ``used_labels`` can be selected.
allow_repeated_source : bool
When True (default) any source file matching the selected label can
be used, including a source file that has already been used for
another event. When False, only a source file that is not already
in ``used_source_files`` can be selected.
allow_global_repeated_source : bool
When True (default) any source file matching the selected label can
be reused across the soundscapes generated by this Scaper object.
When False, only a source file that has not already been used
globally (i.e. is not in ``self.global_used_source_files``) can be
selected.
used_labels : list
List of labels that have already been used in the current soundscape
instantiation. The label selected for instantiating the event will
be appended to this list unless it's already in it.
used_source_files : list
List of full paths to source files that have already been used in
the current soundscape instantiation. The source file selected for
instantiating the event will be appended to this list unless it's
already in it.
disable_instantiation_warnings : bool
When True (default is False), warnings stemming from event
instantiation (primarily about automatic duration adjustments) are
disabled. Not recommended other than for testing purposes.
Returns
-------
instantiated_event : EventSpec
Event specification containing values sampled from the distribution
tuples of the input event specification.
Raises
------
ScaperError
If ``allow_repeated_label``, ``allow_repeated_source`` or
``allow_global_repeated_source`` is False and there is no valid
label or source file left to select.
'''
# set paths and labels depending on whether it's a foreground/background
# event
if isbackground:
file_path = self.bg_path
allowed_labels = self.bg_labels
else:
file_path = self.fg_path
allowed_labels = self.fg_labels
# determine label
if event.label[0] == "choose" and not event.label[1]:
label_tuple = list(event.label)
label_tuple[1] = allowed_labels
label_tuple = tuple(label_tuple)
else:
label_tuple = event.label
label = _get_value_from_dist(label_tuple, self.random_state)
# Make sure we can use this label
if (not allow_repeated_label) and (label in used_labels):
if (len(allowed_labels) == len(used_labels) or
label_tuple[0] == "const"):
raise ScaperError(
"Cannot instantiate event {:s}: all available labels "
"have already been used and "
"allow_repeated_label=False.".format(label))
else:
while label in used_labels:
label = _get_value_from_dist(label_tuple, self.random_state)
# Make sure we can globally use this label
if (not allow_global_repeated_source) and (label in self.global_used_labels):
if (set(allowed_labels).issubset(set(self.global_used_labels)) or
label_tuple[0] == "const"):
raise ScaperError(
"Cannot instantiate event {:s}: all available labels "
"have already globally been used and "
"allow_global_repeated_source=False.".format(label))
else:
while label in self.global_used_labels:
label = _get_value_from_dist(label_tuple, self.random_state)
# Update the used labels list
if label not in used_labels:
used_labels.append(label)
# determine source file
if event.source_file[0] == "choose" and not event.source_file[1]:
source_files = _get_sorted_files(
os.path.join(file_path, label))
source_file_tuple = list(event.source_file)
source_file_tuple[1] = source_files
source_file_tuple = tuple(source_file_tuple)
else:
source_file_tuple = event.source_file
source_file = _get_value_from_dist(source_file_tuple, self.random_state)
# Make sure we can use this source file
if (not allow_repeated_source) and (source_file in used_source_files):
source_files = _get_sorted_files(os.path.join(file_path, label))
if (len(source_files) == len(used_source_files) or
source_file_tuple[0] == "const"):
raise ScaperError(
"Cannot instantiate event {:s}: all available source "
"files have already been used and "
"allow_repeated_source=False.".format(label))
else:
while source_file in used_source_files:
source_file = _get_value_from_dist(source_file_tuple, self.random_state)
# Make sure we can globally use this source file
if (not allow_global_repeated_source) and (source_file in self.global_used_source_files):
source_files = _get_sorted_files(os.path.join(file_path, label))
if (len(set(source_files) - set(self.global_used_source_files)) == 0
or source_file_tuple[0] == "const"):
raise ScaperError(
"Cannot instantiate event {:s}: all available source "
"files have globally already been used and "
"allow_repeated_source=False.".format(label))
else:
source_file_tuple = list(source_file_tuple)
source_file_tuple[1] = list(set(source_files)-set(self.global_used_source_files))
source_file_tuple = tuple(source_file_tuple)
while source_file in self.global_used_source_files:
source_file = _get_value_from_dist(source_file_tuple, self.random_state)
# Update the used source files list
if source_file not in used_source_files:
used_source_files.append(source_file)
# Update the global used source files list
if not allow_global_repeated_source:
self.global_used_source_files.append(source_file)
source_files = _get_sorted_files(os.path.join(file_path, label))
# Update global used labels list
if set(source_files).issubset(set(self.global_used_source_files)):
self.global_used_labels.append(label)
# Get the duration of the source audio file
source_duration = soundfile.info(source_file).duration
# If this is a background event, the event duration is the
# duration of the soundscape.
if isbackground:
event_duration = self.duration
# If the foreground event's label is in the forced_protected list,
# use the source file's duration without modification and change the
# soundscape duration to match the longest such foreground event.
elif label in self.forced_protected_labels:
event_duration = source_duration
if self.duration < source_duration:
self.duration = source_duration
# If the foreground event's label is in the protected list, use the
# source file's duration without modification.
elif label in self.protected_labels:
event_duration = source_duration
else:
# determine event duration
# For background events the duration is fixed to self.duration
# (which must be > 0), but for foreground events it could
# potentially be non-positive, hence the loop.
event_duration = -np.Inf
while event_duration <= 0:
event_duration = _get_value_from_dist(
event.event_duration, self.random_state
)
# Check if chosen event duration is longer than the duration of the
# selected source file, if so adjust the event duration.
if (event_duration > source_duration):
old_duration = event_duration # for warning
event_duration = source_duration
if not disable_instantiation_warnings:
warnings.warn(
"{:s} event duration ({:.2f}) is greater that source "
"duration ({:.2f}), changing to {:.2f}".format(
label, old_duration, source_duration, event_duration),
ScaperWarning)
# Get time stretch value
if event.time_stretch is None:
time_stretch = None
event_duration_stretched = event_duration
else:
time_stretch = -np.Inf
while time_stretch <= 0:
time_stretch = _get_value_from_dist(
event.time_stretch, self.random_state
)
# compute duration after stretching
event_duration_stretched = event_duration * time_stretch
# If the event duration is longer than the soundscape we can trim it
# without losing validity (since the event will end when the soundscape
# ends).
if time_stretch is None:
if event_duration > self.duration:
old_duration = event_duration # for warning
event_duration = self.duration
if not disable_instantiation_warnings:
warnings.warn(
"{:s} event duration ({:.2f}) is greater than the "
"soundscape duration ({:.2f}), changing to "
"{:.2f}".format(
label, old_duration, self.duration, self.duration),
ScaperWarning)
else:
if event_duration_stretched > self.duration:
old_duration = event_duration # for warning
event_duration = self.duration / float(time_stretch)
event_duration_stretched = self.duration
if not disable_instantiation_warnings:
warnings.warn(
"{:s} event duration ({:.2f}) with stretch factor "
"{:.2f} gives {:.2f} which is greater than the "
"soundscape duration ({:.2f}), changing to "
"{:.2f} ({:.2f} after time stretching)".format(
label, old_duration, time_stretch,
old_duration * time_stretch, self.duration,
event_duration, event_duration_stretched),
ScaperWarning)
# Modify event.source_time so that sampling from the source time distribution
# stays within the bounds of the audio file - event_duration. This allows users
# to sample from anywhere in a source file without knowing the exact duration
# of every source file. Only modify if label is not in protected labels.
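# For example (illustrative numbers): with a 10 s source file and a 3 s
# event, a ('uniform', 0, 10) source_time tuple would be narrowed so that
# sampled values never exceed 10 - 3 = 7 s and the event always fits.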
if label not in self.protected_labels:
tuple_still_invalid = False
modified_source_time, warn = _ensure_satisfiable_source_time_tuple(
event.source_time, source_duration, event_duration
)
# determine source time and also check again just in case (for normal dist).
# if it happens again, just use the old method.
source_time = -np.Inf
while source_time < 0:
source_time = _get_value_from_dist(
modified_source_time, self.random_state)
if source_time + event_duration > source_duration:
source_time = max(0, source_duration - event_duration)
warn = True
tuple_still_invalid = True
if warn and not disable_instantiation_warnings:
old_source_time = ', '.join(map(str, event.source_time))
new_source_time = ', '.join(map(str, modified_source_time))
if not tuple_still_invalid:
warnings.warn(
"{:s} source time tuple ({:s}) could not be satisfied given "
"source duration ({:.2f}) and event duration ({:.2f}), "
"source time tuple changed to ({:s})".format(
label, old_source_time, source_duration,
event_duration, new_source_time),
ScaperWarning)
else:
warnings.warn(
"{:s} source time tuple ({:s}) could not be satisfied given "
"source duration ({:.2f}) and event duration ({:.2f}), "
"source time tuple changed to ({:s}) but was still not "
"satisfiable, likely due to using 'normal' distribution with "
"bounds too close to the start or end of the audio file".format(
label, old_source_time, source_duration,
event_duration, new_source_time),
ScaperWarning)
else:
source_time = 0.0
# determine event time
# for background events the event time is fixed to 0, but for
# foreground events it's not.
event_time = -np.Inf
while event_time < 0:
event_time = _get_value_from_dist(
event.event_time, self.random_state
)
# Make sure the selected event time + event duration is not greater
# than the total duration of the soundscape, if it is adjust the event
# time. This means event duration takes precedence over the event
# start time.
if time_stretch is None:
if event_time + event_duration > self.duration:
old_event_time = event_time
event_time = self.duration - event_duration
if not disable_instantiation_warnings:
warnings.warn(
'{:s} event time ({:.2f}) is too great given event '
'duration ({:.2f}) and soundscape duration ({:.2f}), '
'changed to {:.2f}.'.format(
label, old_event_time, event_duration,
self.duration, event_time),
ScaperWarning)
else:
if event_time + event_duration_stretched > self.duration:
old_event_time = event_time
event_time = self.duration - event_duration_stretched
if not disable_instantiation_warnings:
warnings.warn(
'{:s} event time ({:.2f}) is too great given '
'stretched event duration ({:.2f}) and soundscape '
'duration ({:.2f}), changed to {:.2f}.'.format(
label, old_event_time, event_duration_stretched,
self.duration, event_time),
ScaperWarning)
# determine snr
snr = _get_value_from_dist(event.snr, self.random_state)
# get role (which can only take "foreground" or "background" and
# is set internally, not by the user).
role = event.role
# determine pitch_shift
if event.pitch_shift is not None:
pitch_shift = _get_value_from_dist(event.pitch_shift, self.random_state)
else:
pitch_shift = None
# pack up instantiated values in an EventSpec
instantiated_event = EventSpec(label=label,
source_file=source_file,
source_time=source_time,
event_time=event_time,
event_duration=event_duration,
snr=snr,
role=role,
pitch_shift=pitch_shift,
time_stretch=time_stretch)
# Return
return instantiated_event
def _instantiate(self, allow_repeated_label=True,
allow_repeated_source=True,
allow_global_repeated_source=True,
reverb=None,
disable_instantiation_warnings=False):
'''
Instantiate a specific soundscape in JAMS format based on the current
specification.
Any non-deterministic event values (i.e. distribution tuples) will be
sampled randomly based on the distribution parameters.
Parameters
----------
allow_repeated_label : bool
When True (default) the same label can be used more than once
in a soundscape instantiation. When False every label can
only be used once.
allow_repeated_source : bool
When True (default) the same source file can be used more than once
in a soundscape instantiation. When False every source file can
only be used once.
allow_global_repeated_source : bool
When True (default) the same source file can be reused across the
soundscapes generated by this Scaper object. When False every source
file can only be used in one generated soundscape.
reverb : float or None
Has no effect on this function other than being documented in the
instantiated annotation's sandbox. Passed by ``Scaper.generate``.
disable_instantiation_warnings : bool
When True (default is False), warnings stemming from event
instantiation (primarily about automatic duration adjustments) are
disabled. Not recommended other than for testing purposes.
Returns
-------
jam : JAMS object
A JAMS object containing a scaper annotation representing the
instantiated soundscape.
See Also
--------
Scaper.generate
'''
jam = jams.JAMS()
ann = jams.Annotation(namespace='scaper')
# Set annotation duration (might be changed later due to cropping)
ann.duration = self.duration
# INSTANTIATE BACKGROUND AND FOREGROUND EVENTS AND ADD TO ANNOTATION
# NOTE: logic for instantiating bg and fg events is NOT the same.
# Add background sounds
bg_labels = []
bg_source_files = []
for event in self.bg_spec:
value = self._instantiate_event(
event,
isbackground=True,
allow_repeated_label=allow_repeated_label,
allow_repeated_source=allow_repeated_source,
used_labels=bg_labels,
used_source_files=bg_source_files,
disable_instantiation_warnings=disable_instantiation_warnings)
# Note: add_background doesn't allow setting a time_stretch, i.e.
# it's hardcoded to time_stretch=None, so we don't need to check
# if value.time_stretch is not None, since it always will be.
ann.append(time=value.event_time,
duration=value.event_duration,
value=value._asdict(),
confidence=1.0)
# Add foreground events
fg_labels = []
fg_source_files = []
for event in self.fg_spec:
value = self._instantiate_event(
event,
isbackground=False,
allow_repeated_label=allow_repeated_label,
allow_repeated_source=allow_repeated_source,
used_labels=fg_labels,
used_source_files=fg_source_files,
disable_instantiation_warnings=disable_instantiation_warnings)
if value.time_stretch is not None:
event_duration_stretched = (
value.event_duration * value.time_stretch)
else:
event_duration_stretched = value.event_duration
ann.append(time=value.event_time,
duration=event_duration_stretched,
value=value._asdict(),
confidence=1.0)
# Compute max polyphony
poly = max_polyphony(ann)
# Compute the number of foreground events
n_events = len(self.fg_spec)
# Compute gini
gini = polyphony_gini(ann)
# Add specs and other info to sandbox
ann.sandbox.scaper = jams.Sandbox(
duration=self.duration,
original_duration=self.duration,
fg_path=self.fg_path,
bg_path=self.bg_path,
fg_spec=self.fg_spec,
bg_spec=self.bg_spec,
fg_labels=self.fg_labels,
bg_labels=self.bg_labels,
protected_labels=self.protected_labels,
# TODO : add forced_protected_labels to the annotation and edit
# tests accordingly
# forced_protected_labels=self.forced_protected_labels,
sr=self.sr,
ref_db=self.ref_db,
n_channels=self.n_channels,
fade_in_len=self.fade_in_len,
fade_out_len=self.fade_out_len,
n_events=n_events,
polyphony_max=poly,
polyphony_gini=gini,
allow_repeated_label=allow_repeated_label,
allow_repeated_source=allow_repeated_source,
allow_global_repeated_source=allow_global_repeated_source,
reverb=reverb,
scaper_version=scaper_version,
soundscape_audio_path=None,
isolated_events_audio_path=[],
# Initialize missing generate parameters
audio_path=None,
jams_path=None,
fix_clipping=None,
peak_normalization=None,
save_isolated_events=None,
isolated_events_path=None,
disable_sox_warnings=None,
no_audio=None,
txt_path=None,
txt_sep=None,
disable_instantiation_warnings=None,
peak_normalization_scale_factor=None,
ref_db_change=None,
ref_db_generated=None)
# Add annotation to jams
jam.annotations.append(ann)
# Set jam metadata
jam.file_metadata.duration = ann.duration
# Return
return jam
def _generate_audio(self,
audio_path,
ann,
reverb=None,
fix_clipping=False,
peak_normalization=False,
quick_pitch_time=False,
save_isolated_events=False,
isolated_events_path=None,
disable_sox_warnings=True):
'''
Generate audio based on a scaper annotation and save to disk.
Parameters
----------
audio_path : str
Path for saving soundscape audio file.
ann : jams.Annotation
Annotation of the scaper namespace.
reverb : float or None
Amount of reverb to apply to the generated soundscape between 0
(no reverberation) and 1 (maximum reverberation). Use None
(default) to prevent the soundscape from going through the reverb
module at all.
fix_clipping: bool
When True (default=False), checks the soundscape audio for clipping
(abs(sample) > 1). If so, the soundscape waveform is peak normalized,
i.e., scaled such that max(abs(soundscape_audio)) = 1. The audio for
each isolated event is also scaled accordingly. Note: this will change
the actual value of `ref_db` in the generated audio. The scaling
factor that was used is returned.
peak_normalization : bool
When True (default=False), normalize the generated soundscape audio
such that max(abs(soundscape_audio)) = 1. The audio for
each isolated event is also scaled accordingly. Note: this will change
the actual value of `ref_db` in the generated audio. The scaling
factor that was used is returned.
quick_pitch_time : bool
When True (default=False), time stretching and pitch shifting will be
applied with `quick=True`. This is much faster but the resultant
audio is generally of lower audio quality.
save_isolated_events : bool
If True, this will save the isolated foreground events and
backgrounds in a directory adjacent to the generated soundscape
mixture, or to the path defined by `isolated_events_path`. The
audio of the isolated events sum up to the mixture if reverb is not
applied. Isolated events can be found (by default) at
`<audio_outfile parent folder>/<audio_outfile name>_events`.
Isolated event file names follow the pattern: `<role><idx>_<label>`,
where idx is the index of the isolated event in self.fg_spec or
self.bg_spec (this allows events of the same label to be added more
than once to the soundscape without breaking things). Role is
"background" or "foreground". For example: `foreground0_siren.wav`
or `background0_park.wav`.
isolated_events_path : str
Path to folder for saving isolated events. If None, defaults to
`<audio_path parent folder>/<audio_path name>_events`.
disable_sox_warnings : bool
When True (default), warnings from the pysox module are suppressed
unless their level is ``'CRITICAL'``.
Returns
-------
soundscape_audio : np.ndarray
The audio samples of the generated soundscape
event_audio_list: list
A list of np.ndarrays containing the audio samples of every
individual background and foreground sound event. Events are listed
in the same order in which they appear in the jams annotations data
list, and can be matched with:
`for obs, event_audio in zip(ann.data, event_audio_list): ...`.
scale_factor : float
If peak_normalization is True, or fix_clipping is True and the
soundscape audio needs to be scaled to avoid clipping, scale_factor
is the value used to scale the soundscape audio and the audio of the
isolated events. Otherwise will return 1.0.
ref_db_change : float
The change (in dB) to the soundscape audio's ref_db if peak
normalization is applied to fix clipping or because the user
specified it. Otherwise will return 0.
Raises
------
ScaperError
If annotation is not of the scaper namespace.
See Also
--------
Scaper.generate
'''
if ann.namespace != 'scaper':
raise ScaperError(
'Annotation namespace must be scaper, found: {:s}'.format(
ann.namespace))
# disable sox warnings
if disable_sox_warnings:
temp_logging_level = 'CRITICAL' # only critical messages please
else:
temp_logging_level = logging.getLogger().level
# List for storing all generated audio (one array for every event)
soundscape_audio = None
event_audio_list = []
scale_factor = 1.0
ref_db_change = 0
with _set_temp_logging_level(temp_logging_level):
isolated_events_audio_path = []
duration_in_samples = int(self.duration * self.sr)
for i, e in enumerate(ann.data):
if e.value['role'] == 'background':
# Concatenate background if necessary.
source_duration = soundfile.info(e.value['source_file']).duration
ntiles = int(
max(self.duration // source_duration + 1, 1))
# Create transformer
tfm = sox.Transformer()
# Ensure consistent sampling rate and channels
# Need both a convert operation (to do the conversion),
# and set_output_format (to have sox interpret the output
# correctly).
tfm.convert(
samplerate=self.sr,
n_channels=self.n_channels,
bitdepth=None
)
tfm.set_output_format(
rate=self.sr,
channels=self.n_channels
)
# PROCESS BEFORE COMPUTING LUFS
tmpfiles_internal = []
with _close_temp_files(tmpfiles_internal):
# create internal tmpfile
tmpfiles_internal.append(
tempfile.NamedTemporaryFile(
suffix='.wav', delete=False))
# read in background off disk, using start and stop
# to only read the necessary audio
event_sr = soundfile.info(e.value['source_file']).samplerate
start = int(e.value['source_time'] * event_sr)
stop = int((e.value['source_time'] + e.value['event_duration']) * event_sr)
event_audio, event_sr = soundfile.read(
e.value['source_file'], always_2d=True,
start=start, stop=stop)
# tile the background along the appropriate dimensions
event_audio = np.tile(event_audio, (ntiles, 1))
event_audio = event_audio[:stop]
event_audio = tfm.build_array(
input_array=event_audio,
sample_rate_in=event_sr
)
event_audio = event_audio.reshape(-1, self.n_channels)
# NOW compute LUFS
bg_lufs = get_integrated_lufs(event_audio, self.sr)
# Normalize background to reference DB.
gain = self.ref_db - bg_lufs
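# np.exp(gain * ln(10) / 20) below is identical to 10 ** (gain / 20),
# i.e. the usual dB-to-linear-amplitude conversion.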
event_audio = np.exp(gain * np.log(10) / 20) * event_audio
event_audio_list.append(event_audio[:duration_in_samples])
elif e.value['role'] == 'foreground':
# Create transformer
tfm = sox.Transformer()
# Ensure consistent sampling rate and channels
# Need both a convert operation (to do the conversion),
# and set_output_format (to have sox interpret the output
# correctly).
tfm.convert(
samplerate=self.sr,
n_channels=self.n_channels,
bitdepth=None
)
tfm.set_output_format(
rate=self.sr,
channels=self.n_channels
)
# Pitch shift
if e.value['pitch_shift'] is not None:
tfm.pitch(e.value['pitch_shift'], quick=quick_pitch_time)
# Time stretch
if e.value['time_stretch'] is not None:
factor = 1.0 / float(e.value['time_stretch'])
tfm.tempo(factor, audio_type='s', quick=quick_pitch_time)
# PROCESS BEFORE COMPUTING LUFS
tmpfiles_internal = []
with _close_temp_files(tmpfiles_internal):
# create internal tmpfile
tmpfiles_internal.append(
tempfile.NamedTemporaryFile(
suffix='.wav', delete=False))
# synthesize edited foreground sound event,
# doing the trim via soundfile
event_sr = soundfile.info(e.value['source_file']).samplerate
start = int(e.value['source_time'] * event_sr)
stop = int((e.value['source_time'] + e.value['event_duration']) * event_sr)
event_audio, event_sr = soundfile.read(
e.value['source_file'], always_2d=True,
start=start, stop=stop)
event_audio = tfm.build_array(
input_array=event_audio,
sample_rate_in=event_sr
)
event_audio = event_audio.reshape(-1, self.n_channels)
# NOW compute LUFS
fg_lufs = get_integrated_lufs(event_audio, self.sr)
# Normalize to specified SNR with respect to
# background
gain = self.ref_db + e.value['snr'] - fg_lufs
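# Same dB-to-linear conversion as for the background, but the target
# level is ref_db + snr instead of ref_db.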
event_audio = np.exp(gain * np.log(10) / 20) * event_audio
# Apply short fade in and out
# (avoid unnatural sound onsets/offsets)
if self.fade_in_len > 0:
fade_in_samples = int(self.fade_in_len * self.sr)
fade_in_window = np.sin(np.linspace(0, np.pi / 2, fade_in_samples))[..., None]
event_audio[:fade_in_samples] *= fade_in_window
if self.fade_out_len > 0:
fade_out_samples = int(self.fade_out_len * self.sr)
fade_out_window = np.sin(np.linspace(np.pi / 2, 0, fade_out_samples))[..., None]
event_audio[-fade_out_samples:] *= fade_out_window
# Pad with silence before/after event to match the
# soundscape duration
prepad = int(self.sr * e.value['event_time'])
postpad = max(0, duration_in_samples - (event_audio.shape[0] + prepad))
event_audio = np.pad(event_audio, ((prepad, postpad), (0, 0)),
mode='constant', constant_values=(0, 0))
event_audio = event_audio[:duration_in_samples]
event_audio_list.append(event_audio[:duration_in_samples])
else:
raise ScaperError(
'Unsupported event role: {:s}'.format(
e.value['role']))
# Finally combine all the files and optionally apply reverb.
# If there are no events, throw a warning.
if len(event_audio_list) == 0:
warnings.warn(
"No events to synthesize (silent soundscape), no audio "
"generated.", ScaperWarning)
else:
# Sum all events to get soundscape audio
soundscape_audio = sum(event_audio_list)
# Check for clipping and fix [optional]
max_sample = np.max(np.abs(soundscape_audio))
clipping = max_sample > 1
if clipping:
warnings.warn('Soundscape audio is clipping!',
ScaperWarning)
if peak_normalization or (clipping and fix_clipping):
# normalize soundscape audio and scale event audio
soundscape_audio, event_audio_list, scale_factor = \
peak_normalize(soundscape_audio, event_audio_list)
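# Scaling the waveform by scale_factor shifts its level by
# 20 * log10(scale_factor) dB, which is how much the effective ref_db
# changes (negative when scale_factor < 1).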
ref_db_change = 20 * np.log10(scale_factor)
if clipping and fix_clipping:
warnings.warn(
'Peak normalization applied to fix clipping with '
'scale factor = {}. The actual ref_db of the '
'generated soundscape audio will change by '
'approximately {:.2f}dB with respect to the target '
'ref_db of {}.'.format(
scale_factor, ref_db_change, self.ref_db),
ScaperWarning)
if scale_factor < 0.05:
warnings.warn(
'Scale factor for peak normalization is extreme '
'(<0.05), event SNR values in the generated soundscape '
'audio may not perfectly match their specified values.',
ScaperWarning
)
# Optionally apply reverb
# NOTE: must apply AFTER peak normalization: applying reverb
# to a clipping signal with sox and then normalizing doesn't
# work as one would hope.
if reverb is not None:
tfm = sox.Transformer()
tfm.reverb(reverberance=reverb * 100)
soundscape_audio = tfm.build_array(
input_array=soundscape_audio,
sample_rate_in=self.sr,
)
# Reshape to ensure data are 2d
soundscape_audio = soundscape_audio.reshape(-1, self.n_channels)
# Optionally save soundscape audio to disk
if audio_path is not None:
soundfile.write(audio_path, soundscape_audio, self.sr,
subtype='PCM_32')
# Optionally save isolated events to disk
if save_isolated_events:
base, ext = os.path.splitext(audio_path)
if isolated_events_path is None:
event_folder = '{:s}_events'.format(base)
else:
event_folder = isolated_events_path
os.makedirs(event_folder, exist_ok=True)
iso_idx = 0
role_counter = {'background': 0, 'foreground': 0}
for i, e in enumerate(ann.data):
_role_count = role_counter[e.value['role']]
event_audio_path = os.path.join(
event_folder,
'{:s}{:d}_{:s}{:s}'.format(
e.value['role'], _role_count, e.value['label'], ext))
role_counter[e.value['role']] += 1
soundfile.write(event_audio_path, event_audio_list[iso_idx], self.sr, subtype='PCM_32')
isolated_events_audio_path.append(event_audio_path)
iso_idx += 1
# TODO what do we do in this case? for now throw a warning
if reverb is not None:
warnings.warn(
"Reverb is on and save_isolated_events is True. Reverberation "
"is applied to the mixture but not output "
"source files. In this case the sum of the "
"audio of the isolated events will not add up to the "
"mixture", ScaperWarning)
# Document output paths
# TODO: this is redundant with audio_path and isolated_events_path that
# are also stored in ann.sandbox.scaper. We're keeping these here for
# now for backwards compatibility, e.g. with FUSS. Eventually
# we should remove these two lines and consolidate how/where JAMS
# metadata is stored (cf. generate() and generate_from_jams()).
ann.sandbox.scaper.soundscape_audio_path = audio_path
ann.sandbox.scaper.isolated_events_audio_path = isolated_events_audio_path
# Return audio for in-memory processing
return soundscape_audio, event_audio_list, scale_factor, ref_db_change
def generate(self,
audio_path=None,
jams_path=None,
allow_repeated_label=True,
allow_repeated_source=True,
allow_global_repeated_source=True,
reverb=None,
fix_clipping=False,
peak_normalization=False,
quick_pitch_time=False,
save_isolated_events=False,
isolated_events_path=None,
disable_sox_warnings=True,
no_audio=False,
txt_path=None,
txt_sep='\t',
disable_instantiation_warnings=False):
"""
Generate a soundscape based on the current specification and return
the audio samples, a JAMS annotation, a simplified annotation list, and
a list containing the audio samples of each individual background and
foreground event. If output paths are provided, these objects will also
be saved to disk.
Parameters
----------
audio_path : str
Path for saving soundscape audio to disk. If None, does not save
audio to disk.
jams_path : str
Path for saving soundscape jams annotation to disk. If None, does
not save JAMS to disk.
allow_repeated_label : bool
When True (default) the same label can be used more than once
in a soundscape instantiation. When False every label can
only be used once.
allow_repeated_source : bool
When True (default) the same source file can be used more than once
in a soundscape instantiation. When False every source file can
only be used once.
allow_global_repeated_source : bool
When True (default) the same source file can be used more than once
across all soundscape instantiations generated by this Scaper object.
When False every source file can only be used in one generated
soundscape.
reverb : float or None
Amount of reverb to apply to the generated soundscape between 0
(no reverberation) and 1 (maximum reverberation). Use None
(default) to prevent the soundscape from going through the reverb
module at all.
fix_clipping: bool
When True (default=False), checks the soundscape audio for clipping
(abs(sample) > 1). If so, the soundscape waveform is peak normalized,
i.e., scaled such that max(abs(soundscape_audio)) = 1. The audio for
each isolated event is also scaled accordingly. Note: this will change
the actual value of `ref_db` in the generated audio. The updated
`ref_db` value will be stored in the JAMS annotation. The SNR of
foreground events with respect to the background is unaffected except
when extreme scaling is required to prevent clipping.
peak_normalization : bool
When True (default=False), normalize the generated soundscape audio
such that max(abs(soundscape_audio)) = 1. The audio for
each isolated event is also scaled accordingly. Note: this will change
the actual value of `ref_db` in the generated audio. The updated
`ref_db` value will be stored in the JAMS annotation. The SNR of
foreground events with respect to the background is unaffected except
when extreme scaling is required to achieve peak normalization.
quick_pitch_time : bool
When True (default=False), time stretching and pitch shifting will be
applied with `quick=True`. This is much faster but the resultant
audio is generally of lower audio quality.
save_isolated_events : bool
If True, this will save the isolated foreground events and
backgrounds in a directory adjacent to the generated soundscape
mixture, or to the path defined by `isolated_events_path`. The
audio of the isolated events sum up to the mixture if reverb is not
applied. Isolated events can be found (by default) at
`<audio_outfile parent folder>/<audio_outfile name>_events`.
Isolated event file names follow the pattern: `<role><idx>_<label>`,
where idx is the index of the isolated event in self.fg_spec or
self.bg_spec (this allows events of the same label to be added more
than once to the soundscape without breaking things). Role is
"background" or "foreground". For example: `foreground0_siren.wav`
or `background0_park.wav`.
isolated_events_path : str
Path to folder for saving isolated events. If None, defaults to
`<audio_path parent folder>/<audio_path name>_events`. Only relevant
if save_isolated_events=True.
disable_sox_warnings : bool
When True (default), warnings from the pysox module are suppressed
unless their level is ``'CRITICAL'``. If you're experiencing issues related
to audio I/O setting this parameter to False may help with debugging.
no_audio : bool
If True this function will only generate a JAMS file but will not
generate any audio (neither in memory nor saved to disk). Useful for
efficiently generating a large number of soundscape JAMS for
later synthesis via `generate_from_jams()`.
txt_path: str or None
Path for saving a simplified annotation in a tabular format
[onset offset label], where onset and offset are in seconds and
fields are separated by ``txt_sep``. Good
for loading labels in e.g. Audacity. If None, does not save txt
annotation to disk.
txt_sep: str
The separator to use when saving a simplified annotation as a text
file (default is tab for compatibility with Audacity label files).
Only relevant if txt_path is not None.
disable_instantiation_warnings : bool
When True (default is False), warnings stemming from event
instantiation (primarily about automatic duration adjustments) are
disabled. Not recommended other than for testing purposes.
Returns
-------
soundscape_audio : np.ndarray
The audio samples of the generated soundscape. Returns None if
no_audio=True.
soundscape_jam: jams.JAMS
The JAMS object containing the full soundscape annotation.
annotation_list : list
A simplified annotation as a list of [onset, offset, label] entries,
where onset and offset are in seconds.
event_audio_list: list
A list of np.ndarrays containing the audio samples of every
individual background and foreground sound event. Events are listed
in the same order in which they appear in the jams annotations data
list, and can be matched with:
`for obs, event_audio in zip(ann.data, event_audio_list): ...`.
Raises
------
ScaperError
If the reverb parameter is passed an invalid value.
See Also
--------
Scaper.generate_from_jams
Scaper._instantiate
Scaper._generate_audio
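Examples
--------
A minimal, illustrative call; the paths are hypothetical and ``sc`` is
assumed to be a Scaper instance with at least one background and one
foreground event already added:
>>> audio, jam, ann_list, event_audio = sc.generate(
...     audio_path='soundscape.wav',
...     jams_path='soundscape.jams',
...     reverb=0.1,
...     fix_clipping=True)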
"""
# Check parameter validity
if reverb is not None:
if not (0 <= reverb <= 1):
raise ScaperError(
'Invalid value for reverb: must be in range [0, 1] or '
'None.')
# Create specific instance of a soundscape based on the spec
soundscape_jam = self._instantiate(
allow_repeated_label=allow_repeated_label,
allow_repeated_source=allow_repeated_source,
allow_global_repeated_source=allow_global_repeated_source,
reverb=reverb,
disable_instantiation_warnings=disable_instantiation_warnings)
ann = soundscape_jam.annotations.search(namespace='scaper')[0]
soundscape_audio, event_audio_list = None, None
# Generate the audio and save to disk
scale_factor = 1.0
ref_db_change = 0
if not no_audio:
soundscape_audio, event_audio_list, scale_factor, ref_db_change = \
self._generate_audio(audio_path, ann,
reverb=reverb,
save_isolated_events=save_isolated_events,
isolated_events_path=isolated_events_path,
disable_sox_warnings=disable_sox_warnings,
fix_clipping=fix_clipping,
peak_normalization=peak_normalization,
quick_pitch_time=quick_pitch_time)
# TODO: Stick to heavy handed overwriting for now, in the future we
# should consolidate this with what happens inside _instantiate().
ann.sandbox.scaper.audio_path = audio_path
ann.sandbox.scaper.jams_path = jams_path
ann.sandbox.scaper.allow_repeated_label = allow_repeated_label
ann.sandbox.scaper.allow_repeated_source = allow_repeated_source
ann.sandbox.scaper.allow_global_repeated_source = allow_global_repeated_source
ann.sandbox.scaper.reverb = reverb
ann.sandbox.scaper.fix_clipping = fix_clipping
ann.sandbox.scaper.peak_normalization = peak_normalization
ann.sandbox.scaper.quick_pitch_time = quick_pitch_time
ann.sandbox.scaper.save_isolated_events = save_isolated_events
ann.sandbox.scaper.isolated_events_path = isolated_events_path
ann.sandbox.scaper.disable_sox_warnings = disable_sox_warnings
ann.sandbox.scaper.no_audio = no_audio
ann.sandbox.scaper.txt_path = txt_path
ann.sandbox.scaper.txt_sep = txt_sep
ann.sandbox.scaper.disable_instantiation_warnings = disable_instantiation_warnings
ann.sandbox.scaper.peak_normalization_scale_factor = scale_factor
ann.sandbox.scaper.ref_db_change = ref_db_change
ann.sandbox.scaper.ref_db_generated = self.ref_db + ref_db_change
# Save JAMS to disk too
if jams_path is not None:
soundscape_jam.save(jams_path)
# Create annotation list
annotation_list = []
for obs in ann.data:
if obs.value['role'] == 'foreground':
annotation_list.append(
[obs.time, obs.time + obs.duration, obs.value['label']])
if txt_path is not None:
with open(txt_path, 'w') as csv_file:
writer = csv.writer(csv_file, delimiter=txt_sep)
writer.writerows(annotation_list)
# Return
return soundscape_audio, soundscape_jam, annotation_list, event_audio_list
``` |
{
"source": "JorisDeRieck/hass-nhc2",
"score": 2
} |
#### File: custom_components/nhc2/light.py
```python
import logging
from homeassistant.components.light import LightEntity, SUPPORT_BRIGHTNESS, ATTR_BRIGHTNESS
from nhc2_coco import CoCoLight, CoCo
from nhc2_coco.coco_device_class import CoCoDeviceClass
from .const import DOMAIN, KEY_GATEWAY, BRAND, LIGHT
from .helpers import nhc2_entity_processor
KEY_GATEWAY = KEY_GATEWAY
KEY_ENTITY = 'nhc2_lights'
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Load NHC2 lights based on a config entry."""
hass.data.setdefault(KEY_ENTITY, {})[config_entry.entry_id] = []
gateway: CoCo = hass.data[KEY_GATEWAY][config_entry.entry_id]
_LOGGER.debug('Platform is starting')
gateway.get_devices(CoCoDeviceClass.LIGHTS,
nhc2_entity_processor(hass,
config_entry,
async_add_entities,
KEY_ENTITY,
lambda x: NHC2HassLight(x))
)
class NHC2HassLight(LightEntity):
"""Representation of an NHC2 Light."""
def __init__(self, nhc2light: CoCoLight, optimistic=True):
"""Initialize a light."""
self._nhc2light = nhc2light
self._optimistic = optimistic
self._is_on = nhc2light.is_on
if self._nhc2light.support_brightness:
if self._is_on is False:
self._brightness = 0
else:
self._brightness = round(self._nhc2light.brightness * 2.55)
else:
self._brightness = None
nhc2light.on_change = self._on_change
def _on_change(self):
self._is_on = self._nhc2light.is_on
if self._nhc2light.support_brightness:
if self._is_on is False:
self._brightness = 0
else:
self._brightness = round(self._nhc2light.brightness * 2.55)
self.schedule_update_ha_state()
def turn_off(self, **kwargs) -> None:
"""Pass - not in use."""
pass
def turn_on(self, **kwargs) -> None:
"""Pass - not in use."""
pass
async def async_turn_on(self, **kwargs):
"""Instruct the light to turn on."""
self._nhc2light.turn_on()
brightness = kwargs.get(ATTR_BRIGHTNESS)
if self._nhc2light.support_brightness and brightness is not None:
self._nhc2light.set_brightness(round((brightness) / 2.55))
if self._optimistic:
self._is_on = True
self.schedule_update_ha_state()
async def async_turn_off(self, **kwargs):
"""Instruct the light to turn off."""
self._nhc2light.turn_off()
if self._optimistic:
self._is_on = False
self.schedule_update_ha_state()
def nhc2_update(self, nhc2light: CoCoLight):
"""Update the NHC2 light with a new object."""
self._nhc2light = nhc2light
nhc2light.on_change = self._on_change
self.schedule_update_ha_state()
@property
def unique_id(self):
"""Return the lights UUID."""
return self._nhc2light.uuid
@property
def uuid(self):
"""Return the lights UUID."""
return self._nhc2light.uuid
@property
def should_poll(self):
"""Return false, since the light will push state."""
return False
@property
def name(self):
"""Return the lights name."""
return self._nhc2light.name
@property
def available(self):
"""Return true if the light is online."""
return self._nhc2light.online
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._brightness
@property
def is_on(self):
"""Return true if the light is on."""
return self._is_on
@property
def device_info(self):
"""Return the device info."""
return {
'identifiers': {
(DOMAIN, self.unique_id)
},
'name': self.name,
'manufacturer': BRAND,
'model': LIGHT,
'via_hub': (DOMAIN, self._nhc2light.profile_creation_id),
}
@property
def supported_features(self):
"""Return supported features."""
if self._nhc2light.support_brightness:
return SUPPORT_BRIGHTNESS
return 0
``` |
{
"source": "jorisfa/gcpdiag",
"score": 2
} |
#### File: gcpdiag/gcpdiag/caching.py
```python
import atexit
import collections
import contextlib
import functools
import hashlib
import logging
import pickle
import shutil
import tempfile
import threading
from typing import List
import diskcache
from gcpdiag import config
_cache = None
def _clean_cache():
"""Remove all cached items with tag 'tmp'.
We use 'tmp' to store data that should be cached only during a single
execution of the script.
"""
if _cache:
count = _cache.evict('tmp')
count += _cache.expire()
if count:
logging.debug('removed %d items from cache', count)
def _close_cache():
if _cache:
_clean_cache()
_cache.close()
def get_cache() -> diskcache.Cache:
"""Get a Diskcache.Cache object that can be used to cache data."""
global _cache
if not _cache:
_cache = diskcache.Cache(config.CACHE_DIR, tag_index=True)
# Make sure that we remove any data that wasn't cleaned up correctly for
# some reason.
_clean_cache()
# Cleanup the cache at program exit.
atexit.register(_close_cache)
return _cache
deque_tmpdirs: List[str] = []
def _clean_tmp_deque():
for d in deque_tmpdirs:
logging.debug('deleting dequeue tempdir: %s', d)
shutil.rmtree(d, ignore_errors=True)
def get_tmp_deque(prefix='tmp-deque-') -> diskcache.Deque:
"""Get a Diskcache.Deque object useful to temporily store data (like logs).
arguments:
prefix: prefix to be added to the temporary directory (default: tmp-deque)
"""
tempdir = tempfile.mkdtemp(prefix=prefix, dir=config.CACHE_DIR)
if not deque_tmpdirs:
atexit.register(_clean_tmp_deque)
deque_tmpdirs.append(tempdir)
deque = diskcache.Deque(directory=tempdir)
return deque
# Write our own implementation instead of using private function
# functools._make_key, so that there is no breakage if that
# private function changes with a newer Python version.
def _make_key(func, args, kwargs):
h = hashlib.sha256()
func_name = bytes(func.__module__ + '.' + func.__name__ + ':', 'utf-8')
h.update(pickle.dumps(args))
h.update(pickle.dumps(kwargs))
# we don't hash the function name so that it's easier to debug
key = func_name + h.digest()
return key
@contextlib.contextmanager
def _acquire_timeout(lock, timeout, name):
result = lock.acquire(timeout=timeout)
if not result:
raise RuntimeError(f"Couldn't aquire lock for {name}.")
try:
yield
finally:
if result:
lock.release()
def cached_api_call(expire=None, in_memory=False):
"""Caching decorator optimized for API calls.
This is very similar to functools.lru_cache, with the following differences:
- uses diskcache so that the memory footprint doesn't grow uncontrollably (the
API results might be big).
- uses a lock so that if the function is called from two threads
simultaneously, only one API call will be done and the other will wait until
the result is available in the cache.
Parameters:
- expire: number of seconds until the key expires (default: expire when the
process ends)
- in_memory: if true the result will be kept in memory, similarly to
lru_cache (but with the locking).
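Example (illustrative; ``get_something`` is a hypothetical function):
    @cached_api_call(expire=300)
    def get_something(project_id):
        ...  # expensive API call here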
"""
def _cached_api_call_decorator(func):
lockdict = collections.defaultdict(threading.Lock)
if in_memory:
lru_cached_func = functools.lru_cache()(func)
@functools.wraps(func)
def _cached_api_call_wrapper(*args, **kwargs):
key = _make_key(func, args, kwargs)
lock = lockdict[key]
with _acquire_timeout(lock, config.CACHE_LOCK_TIMEOUT, func.__name__):
if in_memory:
return lru_cached_func(*args, **kwargs)
else:
api_cache = get_cache()
# We use 'no data' to be able to cache calls that returned None.
cached_result = api_cache.get(key, default='no data')
if cached_result != 'no data':
logging.debug('returning cached result for %s', func.__name__)
return cached_result
logging.debug('calling function %s (expire=%s, key=%s)',
func.__name__, expire, key)
result = func(*args, **kwargs)
if expire:
api_cache.set(key, result, expire=expire)
else:
api_cache.set(key, result, tag='tmp')
return result
return _cached_api_call_wrapper
# Decorator without parens -> called with function as first parameter
if callable(expire):
func = expire
expire = None
return _cached_api_call_decorator(func)
else:
return _cached_api_call_decorator
```
#### File: lint/gce/warn_2022_001_iap_tcp_forwarding.py
```python
import ipaddress
from gcpdiag import lint, models
from gcpdiag.queries import gce
VERIFY_PORTS = { #
'ssh': 22,
'rdp': 3389
}
IAP_SOURCE_NETWORK = ipaddress.ip_network('35.235.240.0/20')
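# Note: 35.235.240.0/20 is the source IP range documented by Google Cloud
# for IAP TCP forwarding; the rule checks that ingress from this range to
# the SSH/RDP port is allowed.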
def run_rule(context: models.Context, report: lint.LintReportRuleInterface):
instances = gce.get_instances(context).values()
if len(instances) == 0:
report.add_skipped(None, 'No instances found')
else:
for instance in sorted(instances, key=lambda i: i.name):
network = instance.network
port = VERIFY_PORTS['ssh']
if instance.is_windows_machine():
port = VERIFY_PORTS['rdp']
result = network.firewall.check_connectivity_ingress(
src_ip=IAP_SOURCE_NETWORK,
ip_protocol='tcp',
port=port,
target_service_account=instance.service_account,
target_tags=instance.tags)
if result.action == 'deny':
report.add_failed(
instance,
(f'connections from {IAP_SOURCE_NETWORK} to tcp:{port} blocked by '
f'{result.matched_by_str} (instance: {instance.name})'))
else:
report.add_ok(instance)
```
#### File: lint/gke/err_2021_003_kms_key_enabled.py
```python
from gcpdiag import lint, models
from gcpdiag.queries import gke, kms
def run_rule(context: models.Context, report: lint.LintReportRuleInterface):
clusters = gke.get_clusters(context)
if not clusters:
report.add_skipped(None, 'no clusters found')
for _, c in sorted(clusters.items()):
if not c.has_app_layer_enc_enabled():
report.add_skipped(c, 'App-layer secrets encryption isn\'t enabled')
else:
crypto_key = kms.get_crypto_key(c.app_layer_sec_key)
if crypto_key.is_destroyed():
report.add_failed(c, f'Key {crypto_key} is destroyed')
elif not crypto_key.is_enabled():
report.add_failed(c, f'Key {crypto_key} is disabled')
else:
report.add_ok(c)
```
#### File: lint/gke/err_2021_007_gke_sa.py
```python
from gcpdiag import lint, models
from gcpdiag.queries import crm, gce, iam
# defining role
ROLE = 'roles/container.serviceAgent'
# rule: report whether the GKE service agent has the container.serviceAgent role
def run_rule(context: models.Context, report: lint.LintReportRuleInterface):
instances = gce.get_instances(context)
if not instances:
report.add_skipped(None, 'no instances found')
project_ids = {i.project_id for i in instances.values()}
for i in project_ids:
# fetch project number
project = crm.get_project(i)
sa = 'service-{}@container-engine-robot.iam.gserviceaccount.com'.format(
project.number)
# get iam policy
iam_policy = iam.get_project_policy(i)
if iam_policy.has_role_permissions(f'serviceAccount:{sa}', ROLE):
report.add_ok(project)
else:
report.add_failed(project,
reason=f'service account: {sa}\nmissing role: {ROLE}')
```
#### File: lint/gke/warn_2021_005_disk_latency.py
```python
from typing import Any, Dict
from gcpdiag import config, lint, models
from gcpdiag.queries import gke, monitoring
SLO_LATENCY_MS = 100
# SLO: at least 99.5% of minutes are good (7 minutes in a day)
SLO_BAD_MINUTES_RATIO = 0.005
# If we have fewer than this many minutes measured, skip
SLO_VALID_MINUTES_PER_DAY = 12 * 60
_query_results_per_project_id: Dict[str, monitoring.TimeSeriesCollection] = {}
def prefetch_rule(context: models.Context):
# Fetch the metrics for all nodes.
#
# Note: we only group_by instance_id because of performance reasons (it gets
# much slower if you group_by multiple labels)
clusters = gke.get_clusters(context)
if not clusters:
return
within_str = 'within %dd, d\'%s\'' % (config.WITHIN_DAYS,
monitoring.period_aligned_now(60))
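# The MQL query below computes, per GCE instance and per day, how many
# one-minute windows had an average 'sda' disk operation latency above
# SLO_LATENCY_MS (count_true) and how many windows were measured in
# total (count).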
_query_results_per_project_id[context.project_id] = monitoring.query(
context.project_id, f"""
fetch gce_instance
| {{ metric 'compute.googleapis.com/guest/disk/operation_time' ;
metric 'compute.googleapis.com/guest/disk/operation_count' }}
| {within_str}
| filter metric.device_name = 'sda'
| group_by [resource.instance_id], .sum()
| every 1m
| ratio
| value(val() > cast_units({SLO_LATENCY_MS}, "ms"))
| group_by 1d, [ .count_true, .count ]
""")
def run_rule(context: models.Context, report: lint.LintReportRuleInterface):
clusters = gke.get_clusters(context)
if not clusters:
report.add_skipped(None, 'no clusters found')
return
# Organize data per-cluster.
per_cluster_results: Dict[gke.Cluster, Dict[str, Any]] = {}
for ts in _query_results_per_project_id[context.project_id].values():
try:
instance_id = ts['labels']['resource.instance_id']
node = gke.get_node_by_instance_id(context, instance_id)
except KeyError:
continue
cluster_results = per_cluster_results.setdefault(node.nodepool.cluster, {
'bad_instances': [],
'valid': False
})
# Did we miss the SLO on any day?
# note: we don't calculate the SLO for the whole "WITHIN_DAYS" period
# because otherwise you would get different results depending on how that
# period is defined.
total_minutes_bad = 0
total_minutes = 0
slo_missed = 0
for day_value in ts['values']:
total_minutes_bad += day_value[0]
total_minutes += day_value[1]
if day_value[1] > SLO_VALID_MINUTES_PER_DAY:
cluster_results['valid'] = 1
if day_value[0] / day_value[1] > SLO_BAD_MINUTES_RATIO:
slo_missed = 1
if slo_missed:
cluster_results['bad_instances'].append(
(node.instance.name, total_minutes, total_minutes_bad))
# Go over all selected clusters and report results.
for _, c in sorted(clusters.items()):
if c not in per_cluster_results or not per_cluster_results[c]['valid']:
report.add_skipped(c, 'no data')
elif not per_cluster_results[c]['bad_instances']:
report.add_ok(c)
else:
report.add_failed(
c,
f'disk latency >{SLO_LATENCY_MS}ms (1 min. avg., within {config.WITHIN_DAYS} days): \n. '
+ '\n. '.join([
f'{i[0]} ({i[2]} out of {i[1]} minutes bad)'
for i in sorted(per_cluster_results[c]['bad_instances'])
]))
```
#### File: lint/gke/warn_2021_008_gke_istio_incompatible_versions.py
```python
from typing import Dict
from gcpdiag import lint, models
from gcpdiag.queries import gke, monitoring
from gcpdiag.queries.gke import Version
_query_results_per_project_id: Dict[str, monitoring.TimeSeriesCollection] = {}
def prefetch_rule(context: models.Context):
clusters = gke.get_clusters(context)
if not clusters:
return
# Fetch the metrics for all clusters.
_query_results_per_project_id[context.project_id] = \
monitoring.query(
context.project_id, """
fetch k8s_container
| metric 'kubernetes.io/container/uptime'
| filter (metadata.system_labels.container_image =~ '.*pilot.*')
| within 1h
| group_by [resource.project_id,
cluster_name: resource.cluster_name,
location: resource.location,
container_image: metadata.system_labels.container_image]
""")
def run_rule(context: models.Context, report: lint.LintReportRuleInterface):
clusters = gke.get_clusters(context)
if not clusters:
report.add_skipped(None, 'no clusters found')
return
# Only analyze clusters running GKE versions between 1.21 and 1.23
# (exclusive); all other clusters are reported OK right away.
check_clusters = []
for _, c in sorted(clusters.items()):
current_version = c.master_version
if Version('1.21') < current_version < Version('1.23'):
check_clusters.append(c)
else:
report.add_ok(c, f'GKE {c.master_version}')
if len(check_clusters) == 0:
return
# Organize the metrics per-cluster.
per_cluster_results: Dict[tuple, Dict[str, str]] = {}
for ts in _query_results_per_project_id[context.project_id].values():
try:
cluster_key = (ts['labels']['resource.project_id'],
ts['labels']['location'], ts['labels']['cluster_name'])
cluster_values = per_cluster_results.setdefault(cluster_key, {})
cluster_values['container_image'] = ts['labels']['container_image']
except KeyError:
# Ignore metrics that don't have those labels
pass
# Go over the list of reported clusters
for c in check_clusters:
ts_cluster_key = (c.project_id, c.location, c.name)
if ts_cluster_key not in per_cluster_results:
report.add_skipped(c, 'no Istio/ASM reported')
else:
container_image = per_cluster_results[ts_cluster_key]['container_image']
(_, istio_version) = container_image.split(':')
if Version(istio_version) > Version('1.10.2'):
report.add_ok(c, f'Istio/ASM {istio_version}')
else:
report.add_failed(
c,
f'Current GKE version: {c.master_version} (Release channel: '+\
f'{c.release_channel})\nIn-cluster Istio/ASM control plane ' +\
f'version: {istio_version}'
)
```
#### File: lint/gke/warn_2021_009_node_deprecated_image_types.py
```python
from gcpdiag import lint, models
from gcpdiag.queries import gke
def run_rule(context: models.Context, report: lint.LintReportRuleInterface):
clusters = gke.get_clusters(context)
if not clusters:
report.add_skipped(None, 'no clusters found')
for _, cluster in sorted(clusters.items()):
if not cluster.nodepools:
report.add_skipped(None, 'no nodepools found')
continue
for nodepool in cluster.nodepools:
if nodepool.config.image_type.find('CONTAINERD') != -1:
report.add_ok(nodepool)
elif nodepool.config.image_type.find('WINDOWS') != -1:
if nodepool.version < gke.Version('1.21.1'):
report.add_skipped(
nodepool, f'GKE windows node pool {nodepool.version}. '
f'the Docker container runtime is deprecated '
f'only with windows image versions >= 1.21.1')
else:
report.add_failed(
nodepool,
f'nodepool is using the deprecated Docker container runtime '
f'(nodepool version: {nodepool.version}, image type: {nodepool.config.image_type})'
)
else:
if nodepool.version < gke.Version('1.19.0'):
report.add_skipped(
nodepool, f'GKE node pool {nodepool.version}. '
f'the Docker container runtime is deprecated '
f'only with image versions >= 1.19')
else:
report.add_failed(
nodepool,
f'nodepool is using the deprecated Docker container runtime '
f'(nodepool version: {nodepool.version}, image type: {nodepool.config.image_type})'
)
```
#### File: lint/iam/sec_2021_001_sa_permissions.py
```python
from gcpdiag import lint, models
from gcpdiag.queries import crm, iam
ROLE = 'roles/owner'
def prefetch_rule(context: models.Context):
# Make sure that we have the IAM policy in cache.
iam.get_project_policy(context.project_id)
def run_rule(context: models.Context, report: lint.LintReportRuleInterface):
project = crm.get_project(context.project_id)
iam_policy = iam.get_project_policy(context.project_id)
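# The for/else below reports OK only if the loop finishes without
# breaking, i.e. no service account was found holding ROLE.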
for member in sorted(iam_policy.get_members()):
if member.startswith('serviceAccount:'):
if iam_policy.has_role_permissions(member, ROLE):
report.add_failed(project, member + f' has the role {ROLE}')
break
else:
report.add_ok(project)
```
#### File: gcpdiag/queries/apigee.py
```python
from typing import Dict, List, Mapping
import googleapiclient.errors
from gcpdiag import caching, config, models
from gcpdiag.queries import apis
from gcpdiag.utils import GcpApiError
class EnvironmentGroup(models.Resource):
"""Represents an Apigee Environment Group
https://cloud.google.com/apigee/docs/reference/apis/apigee/rest/v1/organizations.envgroups#resource:-environmentgroup
"""
_resource_data: dict
def __init__(self, project_id, resource_data):
super().__init__(project_id=project_id)
self._resource_data = resource_data
self.org_name = project_id
@property
def name(self) -> str:
return self._resource_data['name']
@property
def full_path(self) -> str:
return f'organizations/{self.org_name}/envgroups/{self.name}'
@property
def host_names(self) -> List[str]:
return self._resource_data['hostnames']
@caching.cached_api_call
def get_org(context: models.Context) -> Mapping[str, str]:
"""Get Apigee organizations matching the GCP Project Id"""
org: Dict[str, str] = {}
if not apis.is_enabled(context.project_id, 'apigee'):
return org
apigee_api = apis.get_api('apigee', 'v1', context.project_id)
# Apigee Organization : GCP Project = 1 : 1
query = apigee_api.organizations().list(parent='organizations')
try:
resp = query.execute(num_retries=config.API_RETRIES)
if 'organizations' not in resp:
return org
for resp_o in resp['organizations']:
if 'organization' not in resp_o or 'projectIds' not in resp_o:
raise RuntimeError('missing data in organizations.list response')
if context.project_id == resp_o['projectIds'][0]:
org[context.project_id] = resp_o['organization']
return org
except googleapiclient.errors.HttpError as err:
raise GcpApiError(err) from err
return org
@caching.cached_api_call
def get_envgroups(org_name: str) -> Mapping[str, EnvironmentGroup]:
"""Get Environment group list by organization name, caching the result."""
envgroups: Dict[str, EnvironmentGroup] = {}
apigee_api = apis.get_api('apigee', 'v1')
# Environment groups per organization limit: 85, set pageSize to 85
query = apigee_api.organizations().envgroups().list(
parent=f'organizations/{org_name}', pageSize=85)
try:
resource_data = query.execute(num_retries=config.API_RETRIES)
if 'environmentGroups' not in resource_data:
return envgroups
for envgroup in resource_data['environmentGroups']:
envgroups[envgroup['name']] = EnvironmentGroup(project_id=org_name,
resource_data=envgroup)
except googleapiclient.errors.HttpError as err:
raise GcpApiError(err) from err
return envgroups
@caching.cached_api_call
def get_envgroups_attachments(envgroup_name: str) -> List[str]:
"""Get Environment group attachments by environment group name, caching the result."""
environments: List[str] = []
apigee_api = apis.get_api('apigee', 'v1')
# Environment group attachments per organization limit: 100, set pageSize to 100
query = apigee_api.organizations().envgroups().attachments().list(
parent=envgroup_name, pageSize=100)
try:
resource_data = query.execute(num_retries=config.API_RETRIES)
if 'environmentGroupAttachments' not in resource_data:
return environments
for environmentgroupattachments in resource_data[
'environmentGroupAttachments']:
environments.append(environmentgroupattachments['environment'])
except googleapiclient.errors.HttpError as err:
raise GcpApiError(err) from err
return environments
```
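The three cached helpers above are meant to be chained: `get_org` resolves the Apigee organization for the project, `get_envgroups` lists that organization's environment groups, and `get_envgroups_attachments` expands one group (addressed by its `full_path`) into its attached environments. A hedged sketch of how a caller such as a lint rule might combine them; the wrapper function itself is an illustration, not part of gcpdiag:
```python
from gcpdiag.queries import apigee


def environments_per_group(context):
    """Illustrative walk: organization -> env groups -> attached environments."""
    org = apigee.get_org(context)
    if not org:
        # Apigee API disabled or no organization mapped to this project.
        return {}
    org_name = org[context.project_id]
    result = {}
    for name, envgroup in apigee.get_envgroups(org_name).items():
        # full_path looks like organizations/<org>/envgroups/<name>
        result[name] = apigee.get_envgroups_attachments(envgroup.full_path)
    return result
```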
#### File: gcpdiag/queries/apis_utils_test.py
```python
from unittest import mock
from gcpdiag import config, utils
from gcpdiag.queries import apis_stub, apis_utils
class RequestMock(apis_stub.ApiStub):
"""Mock a googleapiclient.request object."""
def __init__(self, n: int, fail_count: int = 0, fail_status: int = 429):
self.n = n
if fail_count:
self.fail_next(fail_count, fail_status)
def execute(self, num_retries: int = 0):
del num_retries
self._maybe_raise_api_exception()
if self.n == 1:
return {'items': ['a', 'b']}
elif self.n == 2:
return {'items': ['c', 'd']}
elif self.n == 3:
return {'items': ['e']}
def next_function_mock(previous_request, previous_response):
del previous_response
if previous_request.n == 1:
return RequestMock(2)
else:
return None
mock_sleep_slept_time = []
def mock_sleep(sleep_time: float):
mock_sleep_slept_time.append(sleep_time)
@mock.patch('gcpdiag.queries.apis.get_api', new=apis_stub.get_api_stub)
@mock.patch('time.sleep', new=mock_sleep)
class Test:
def test_list_all(self):
results = list(apis_utils.list_all(RequestMock(1), next_function_mock))
assert (results == ['a', 'b', 'c', 'd'])
def test_batch_list_all(self):
api = apis_stub.get_api_stub('compute', 'v1')
results = list(
apis_utils.batch_list_all( #
api=api,
requests=[RequestMock(1), RequestMock(3)],
next_function=next_function_mock,
log_text='testing'))
# batch_list_all will first retrieve all requests (first page), then in a
# second step any further required pages.
assert (results == ['a', 'b', 'e', 'c', 'd'])
def test_batch_execute_all(self):
api = apis_stub.get_api_stub('compute', 'v1')
results = list(
apis_utils.batch_execute_all(
api, [RequestMock(1), RequestMock(3)]))
# requests
assert [x[0].n for x in results] == [1, 3]
# responses
assert [x[1] for x in results] == [{'items': ['a', 'b']}, {'items': ['e']}]
def test_batch_execute_all_unretriable_exception(self):
api = apis_stub.get_api_stub('compute', 'v1')
results = list(
apis_utils.batch_execute_all(
api,
[RequestMock(1, fail_count=1, fail_status=403),
RequestMock(3)]))
assert isinstance(results[0][2], utils.GcpApiError) and \
results[0][2].status == 403
def test_batch_execute_all_too_many_failures(self):
api = apis_stub.get_api_stub('compute', 'v1')
results = list(
apis_utils.batch_execute_all(api, [
RequestMock(1, fail_count=config.API_RETRIES + 1, fail_status=429),
RequestMock(3)
]))
assert isinstance(results[1][2], Exception)
def test_batch_execute_all_retriable_exception(self):
global mock_sleep_slept_time
mock_sleep_slept_time = []
api = apis_stub.get_api_stub('compute', 'v1')
results = list(
apis_utils.batch_execute_all(api, [
RequestMock(1, fail_count=config.API_RETRIES, fail_status=429),
RequestMock(3)
]))
assert len(mock_sleep_slept_time) == config.API_RETRIES
# 20% is random, progression: 1, 1.4, 2.0, 2.7, ... 28.9 (10 retries)
assert 0.8 <= mock_sleep_slept_time[0] <= 1.0
assert 1.1 <= mock_sleep_slept_time[1] <= 1.4
# requests
assert [x[0].n for x in results] == [3, 1]
# responses
assert [x[1] for x in results] == [{'items': ['e']}, {'items': ['a', 'b']}]
def test_batch_execute_batchapi_tempfail(self):
"""Test the batch API producing a retryable failure."""
global mock_sleep_slept_time
mock_sleep_slept_time = []
api = apis_stub.get_api_stub('compute', 'v1')
api.fail_next(1)
results = list(
apis_utils.batch_execute_all(
api, [RequestMock(1), RequestMock(3)]))
assert len(mock_sleep_slept_time) == 1
# requests
assert [x[0].n for x in results] == [1, 3]
# responses
assert [x[1] for x in results] == [{'items': ['a', 'b']}, {'items': ['e']}]
```
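The `RequestMock`/`next_function_mock` pair above encodes the pagination contract that `apis_utils.list_all` is expected to honour: execute a request, consume its `items`, then call the next-page function with the previous request and response until it returns `None`. The real `apis_utils` module is not part of this excerpt, so the snippet below is only a toy re-implementation of that contract, mirroring the first test:
```python
class FakePage:
    """Stands in for a googleapiclient request object with a fixed payload."""

    def __init__(self, items, next_page=None):
        self._items = items
        self.next_page = next_page

    def execute(self, num_retries=0):
        return {'items': self._items}


def next_function(previous_request, previous_response):
    del previous_response  # the toy version only needs the request chain
    return previous_request.next_page


def list_all(request, next_function):
    """Toy pagination loop; not the real gcpdiag implementation."""
    while request is not None:
        response = request.execute()
        yield from response.get('items', [])
        request = next_function(request, response)


pages = FakePage(['a', 'b'], next_page=FakePage(['c', 'd']))
assert list(list_all(pages, next_function)) == ['a', 'b', 'c', 'd']
```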
#### File: gcpdiag/queries/composer.py
```python
import re
from typing import Iterable, List, Tuple
from gcpdiag import caching, config, models
from gcpdiag.lint import get_executor
from gcpdiag.queries import apis, crm
class Environment(models.Resource):
""" Represents Composer environment """
_resource_data: dict
def __init__(self, project_id: str, resource_data: dict):
super().__init__(project_id)
self._resource_data = resource_data
self.region, self.name = self.parse_full_path()
@property
def is_running(self) -> bool:
return self.status == 'RUNNING'
@property
def full_path(self) -> str:
return self._resource_data['name']
@property
def status(self) -> str:
return self._resource_data['state']
@property
def short_path(self) -> str:
return f'{self.project_id}/{self.region}/{self.name}'
@property
def service_account(self) -> str:
sa = self._resource_data['config']['nodeConfig'].get('serviceAccount')
if sa is None:
# serviceAccount is marked as optional in REST API docs
# using a default GCE SA as a fallback
project_nr = crm.get_project(self.project_id).number
sa = f'{project_nr}-<EMAIL>'
return sa
def parse_full_path(self) -> Tuple[str, str]:
match = re.match(r'projects/[^/]*/locations/([^/]*)/environments/([^/]*)',
self.full_path)
if not match:
raise RuntimeError(f'Can\'t parse full_path {self.full_path}')
return match.group(1), match.group(2)
def __str__(self) -> str:
return self.short_path
def is_private_ip(self) -> bool:
return self._resource_data['config']['privateEnvironmentConfig'].get(
'enablePrivateEnvironment', False)
COMPOSER_REGIONS = [
'asia-northeast2', 'us-central1', 'northamerica-northeast1', 'us-west3',
'southamerica-east1', 'us-east1', 'asia-northeast1', 'europe-west1',
'europe-west2', 'asia-northeast3', 'us-west4', 'asia-east2',
'europe-central2', 'europe-west6', 'us-west2', 'australia-southeast1',
'europe-west3', 'asia-south1', 'us-west1', 'us-east4', 'asia-southeast1'
]
def _query_region_envs(region, api, project_id):
query = api.projects().locations().environments().list(
parent=f'projects/{project_id}/locations/{region}')
resp = query.execute(num_retries=config.API_RETRIES)
return resp.get('environments', [])
def _query_regions_envs(regions, api, project_id):
result: List[Environment] = []
executor = get_executor()
for descriptions in executor.map(
lambda r: _query_region_envs(r, api, project_id), regions):
result += descriptions
return result
@caching.cached_api_call
def get_environments(context: models.Context) -> Iterable[Environment]:
if not apis.is_enabled(context.project_id, 'composer'):
return []
api = apis.get_api('composer', 'v1', context.project_id)
return [
Environment(context.project_id, d)
for d in _query_regions_envs(COMPOSER_REGIONS, api, context.project_id)
]
```
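A short usage sketch for the query above: `get_environments` fans out one `environments().list` call per region in `COMPOSER_REGIONS` through the shared executor and wraps each result in an `Environment`. The project id below is a placeholder and real credentials are needed for the call to succeed:
```python
from gcpdiag import models
from gcpdiag.queries import composer

context = models.Context(project_id='my-project-id')  # placeholder project id
for env in composer.get_environments(context):
    # short_path is <project>/<region>/<name>; is_private_ip() reads the
    # privateEnvironmentConfig block of the environment resource.
    print(env.short_path, env.status, env.is_private_ip())
```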
#### File: gcpdiag/queries/crm_stub.py
```python
import json
import re
from gcpdiag.queries import apis_stub
# pylint: disable=unused-argument
class CrmApiStub:
"""Mock object to simulate CRM API calls."""
# example API call:
# crm_api.projects().getIamPolicy(resource=self._project_id).execute()
def __init__(self, mock_state='init', project_id=None):
self.mock_state = mock_state
self.project_id = project_id
def projects(self):
return self
# pylint: disable=invalid-name
def get(self, project_id=None, name=None):
if not project_id and name is not None:
m = re.match(r'projects/(.*)', name)
project_id = m.group(1)
return CrmApiStub('get_project', project_id)
# pylint: disable=invalid-name
def getIamPolicy(self, resource):
return CrmApiStub(mock_state='get_iam_policy', project_id=resource)
def execute(self, num_retries=0):
del num_retries
json_dir = apis_stub.get_json_dir(self.project_id)
if self.mock_state == 'get_iam_policy':
with open(json_dir / 'iam-policy.json', encoding='utf-8') as json_file:
return json.load(json_file)
elif self.mock_state == 'get_project':
with open(json_dir / 'project.json', encoding='utf-8') as json_file:
return json.load(json_file)
else:
raise ValueError("can't call this method here")
```
#### File: gcpdiag/queries/monitoring_stub.py
```python
import json
import re
from gcpdiag.queries import apis_stub
# pylint: disable=unused-argument
class MonitoringApiStub:
"""Mock object to simulate monitoring.googleapis.com calls."""
def projects(self):
return self
# pylint: disable=invalid-name
def timeSeries(self):
return self
def query(self, name, body):
del body
m = re.match(r'projects/([^/]+)', name)
self.project_id = m.group(1)
return self
def query_next(self, previous_request, previous_response):
del previous_request
del previous_response
def execute(self, num_retries=0):
json_dir = apis_stub.get_json_dir(self.project_id)
with open(json_dir / 'monitoring-query.json',
encoding='utf-8') as json_file:
return json.load(json_file)
```
#### File: gcpdiag/queries/network_stub.py
```python
import json
from gcpdiag.queries import apis_stub
# pylint: disable=unused-argument
# pylint: disable=invalid-name
SUBNETWORKS_REGION = 'europe-west4'
class NetworkApiStub:
"""Mock object to simulate compute engine networking api calls.
This object is created by GceApiStub, not used directly in test scripts."""
def __init__(self, mock_state):
self.mock_state = mock_state
def get(self, project, network=None, region=None, subnetwork=None):
if not subnetwork:
self.mock_state = 'get'
self.network = network
else:
self.mock_state = 'get_single_subnetwork'
self.subnetwork = subnetwork
self.project_id = project
return self
def getIamPolicy(self, project, region, resource):
self.mock_state = 'getIamPolicy_subnetwork'
self.project_id = project
self.subnetwork = resource
return self
def getEffectiveFirewalls(self, project, network):
self.mock_state = 'get_effective_firewalls'
self.project_id = project
self.network = network
return self
# pylint: disable=redefined-builtin
def list(self, project, region, filter=None, fields=None):
self.project_id = project
if self.mock_state == 'subnetworks':
return self
elif self.mock_state == 'routers':
return self
else:
raise ValueError(f'cannot call method {self.mock_state} here')
def list_next(self, prev_request, prev_response):
return None
def execute(self, num_retries=0):
json_dir = apis_stub.get_json_dir(self.project_id)
if self.mock_state == 'get':
with open(json_dir / f'compute-network-{self.network}.json',
encoding='utf-8') as json_file:
return json.load(json_file)
elif self.mock_state == 'get_effective_firewalls':
with open(json_dir / f'compute-effective-firewalls-{self.network}.json',
encoding='utf-8') as json_file:
return json.load(json_file)
elif self.mock_state == 'subnetworks':
with open(json_dir / f'compute-subnetworks-{SUBNETWORKS_REGION}.json',
encoding='utf-8') as json_file:
return json.load(json_file)
elif self.mock_state == 'get_single_subnetwork':
with open(json_dir / f'compute-subnetworks-{SUBNETWORKS_REGION}.json',
encoding='utf-8') as json_file:
for subnet in json.load(json_file)['items']:
if subnet['name'] == self.subnetwork:
return subnet
elif self.mock_state == 'getIamPolicy_subnetwork':
with open(json_dir / 'compute-subnetwork-policy.json',
encoding='utf-8') as json_file:
return json.load(json_file)
elif self.mock_state == 'routers':
with open(json_dir / f'compute-routers-{SUBNETWORKS_REGION}.json',
encoding='utf-8') as json_file:
return json.load(json_file)
else:
raise ValueError(f'cannot call method {self.mock_state} here')
```
#### File: gcpdiag/queries/network_test.py
```python
import ipaddress
import re
from unittest import mock
from gcpdiag.queries import apis_stub, network
DUMMY_PROJECT_ID = 'gcpdiag-fw-policy-aaaa'
DUMMY_DEFAULT_NETWORK = 'default'
DUMMY_DEFAULT_SUBNET = 'default'
DUMMY_GKE_PROJECT_ID = 'gcpdiag-gke1-aaaa'
DUMMY_GKE_REGION = 'europe-west4'
DUMMY_GKE_SUBNET = 'gke1-subnet'
DUMMY_SERVICE_ACCOUNT = 'gke1sa@gcpdiag-gke1-aaaa.<EMAIL>'
@mock.patch('gcpdiag.queries.apis.get_api', new=apis_stub.get_api_stub)
class TestNetwork:
"""Test network.Network."""
def test_get_network(self):
net = network.get_network(project_id=DUMMY_PROJECT_ID,
network_name=DUMMY_DEFAULT_NETWORK)
assert net.name == DUMMY_DEFAULT_NETWORK
assert net.full_path == 'projects/gcpdiag-fw-policy-aaaa/global/networks/default'
assert net.short_path == f'{DUMMY_PROJECT_ID}/default'
assert net.self_link == \
f'https://www.googleapis.com/compute/v1/projects/{DUMMY_PROJECT_ID}/global/networks/default'
def test_subnetworks(self):
net = network.get_network(project_id=DUMMY_PROJECT_ID,
network_name=DUMMY_DEFAULT_NETWORK)
expected_subnet_url = (
f'https://www.googleapis.com/compute/v1/projects/{DUMMY_PROJECT_ID}/'
'regions/europe-west4/subnetworks/default')
assert expected_subnet_url in net.subnetworks
assert isinstance(net.subnetworks[expected_subnet_url].ip_network,
ipaddress.IPv4Network)
def test_cluster_subnetwork(self):
subnet = network.get_subnetwork(project_id=DUMMY_GKE_PROJECT_ID,
region=DUMMY_GKE_REGION,
subnetwork_name=DUMMY_GKE_SUBNET)
assert subnet.name == DUMMY_GKE_SUBNET
assert subnet.ip_network == ipaddress.ip_network('192.168.0.0/24')
def test_cluster_subnetwork_iam_policy(self):
policy = network.get_subnetwork_iam_policy(project_id=DUMMY_GKE_PROJECT_ID,
region=DUMMY_GKE_REGION,
subnetwork_name=DUMMY_GKE_SUBNET)
assert policy.has_role_permissions(
f'serviceAccount:{DUMMY_SERVICE_ACCOUNT}', 'roles/compute.networkUser')
assert not policy.has_role_permissions(
f'serviceAccount:{DUMMY_SERVICE_ACCOUNT}', 'roles/compute.networkAdmin')
def test_get_routers(self):
net = network.get_network(project_id=DUMMY_GKE_PROJECT_ID,
network_name=DUMMY_DEFAULT_NETWORK)
sub1 = network.get_subnetwork(project_id=DUMMY_GKE_PROJECT_ID,
region=DUMMY_GKE_REGION,
subnetwork_name=DUMMY_GKE_SUBNET)
sub2 = network.get_subnetwork(project_id=DUMMY_GKE_PROJECT_ID,
region=DUMMY_GKE_REGION,
subnetwork_name=DUMMY_DEFAULT_SUBNET)
router = network.get_router(project_id=DUMMY_GKE_PROJECT_ID,
region=DUMMY_GKE_REGION,
network=net)
assert router.name == 'gke-default-router'
assert router.subnet_has_nat(sub1) is False
assert router.subnet_has_nat(sub2) is True
def test_ingress_deny(self):
net = network.get_network(project_id=DUMMY_PROJECT_ID,
network_name=DUMMY_DEFAULT_NETWORK)
r = net.firewall.check_connectivity_ingress(
src_ip=ipaddress.ip_address('10.0.0.1'), #
ip_protocol='tcp',
port=21)
assert r.action == 'deny'
r = net.firewall.check_connectivity_ingress(
src_ip=ipaddress.ip_network('10.0.0.0/24'), #
ip_protocol='tcp',
port=21)
assert r.action == 'deny'
def test_ingress_deny_2(self):
net = network.get_network(project_id=DUMMY_PROJECT_ID,
network_name=DUMMY_DEFAULT_NETWORK)
r = net.firewall.check_connectivity_ingress(
src_ip=ipaddress.ip_network('10.100.0.16/29'), #
ip_protocol='tcp',
port=1001)
assert r.action == 'deny'
assert r.matched_by_str == 'vpc firewall rule: fw-test-800'
def test_ingress_deny_3(self):
net = network.get_network(project_id=DUMMY_PROJECT_ID,
network_name=DUMMY_DEFAULT_NETWORK)
# a supernet of src_ip for a deny rule should also match
# (because we want to catch when a fw rule partially blocks
# a connection).
r = net.firewall.check_connectivity_ingress(
src_ip=ipaddress.ip_network('10.0.0.0/8'), #
ip_protocol='tcp',
port=1001)
assert r.action == 'deny'
assert r.matched_by_str == 'vpc firewall rule: fw-test-800'
def test_ingress_allow_src_ip(self):
net = network.get_network(project_id=DUMMY_PROJECT_ID,
network_name=DUMMY_DEFAULT_NETWORK)
r = net.firewall.check_connectivity_ingress(
src_ip=ipaddress.ip_network('10.100.0.16/29'), #
ip_protocol='tcp',
port=1006)
assert r.action == 'allow'
assert r.matched_by_str == 'vpc firewall rule: fw-test-900'
def test_ingress_allow_src_ip_subnet(self):
net = network.get_network(project_id=DUMMY_PROJECT_ID,
network_name=DUMMY_DEFAULT_NETWORK)
r = net.firewall.check_connectivity_ingress(
src_ip=ipaddress.ip_network('10.100.0.16/30'), #
ip_protocol='tcp',
port=1006)
assert r.action == 'allow'
assert r.matched_by_str == 'vpc firewall rule: fw-test-900'
def test_ingress_allow_source_tags(self):
net = network.get_network(project_id=DUMMY_PROJECT_ID,
network_name=DUMMY_DEFAULT_NETWORK)
r = net.firewall.check_connectivity_ingress(
src_ip=ipaddress.ip_network('10.200.0.16/29'), #
source_tags=['foo'],
ip_protocol='tcp',
port=1006)
assert r.action == 'allow'
assert r.matched_by_str == 'vpc firewall rule: fw-test-900'
def test_ingress_allow_target_tags(self):
net = network.get_network(project_id=DUMMY_PROJECT_ID,
network_name=DUMMY_DEFAULT_NETWORK)
r = net.firewall.check_connectivity_ingress(
src_ip=ipaddress.ip_address('192.168.1.1'), #
target_tags=['bar'],
ip_protocol='tcp',
port=1234)
assert r.action == 'allow'
assert r.matched_by_str == 'vpc firewall rule: fw-test-903'
def test_ingress_allow_source_sa(self):
net = network.get_network(project_id=DUMMY_PROJECT_ID,
network_name=DUMMY_DEFAULT_NETWORK)
r = net.firewall.check_connectivity_ingress(
src_ip=ipaddress.ip_network('10.200.0.16/29'), #
source_service_account=
'<EMAIL>',
ip_protocol='tcp',
port=4000)
assert r.action == 'allow'
assert r.matched_by_str == 'vpc firewall rule: fw-test-901'
def test_ingress_allow_target_sa(self):
net = network.get_network(project_id=DUMMY_PROJECT_ID,
network_name=DUMMY_DEFAULT_NETWORK)
r = net.firewall.check_connectivity_ingress(
src_ip=ipaddress.ip_network('10.200.0.16/29'), #
target_tags=['foo'],
ip_protocol='tcp',
port=4000)
assert r.action == 'allow'
def test_ingress_parent_policy_allow(self):
net = network.get_network(project_id=DUMMY_PROJECT_ID,
network_name=DUMMY_DEFAULT_NETWORK)
r = net.firewall.check_connectivity_ingress(
src_ip=ipaddress.ip_network('10.101.0.1/32'), #
ip_protocol='tcp',
port=2001)
assert r.action == 'allow'
assert r.matched_by_str == 'policy: parent-folder-policy'
def test_ingress_sub_policy_allow(self):
net = network.get_network(project_id=DUMMY_PROJECT_ID,
network_name=DUMMY_DEFAULT_NETWORK)
r = net.firewall.check_connectivity_ingress(
src_ip=ipaddress.ip_network('10.101.0.1/32'), #
ip_protocol='tcp',
port=2003)
assert r.action == 'allow'
assert r.matched_by_str == 'policy: sub-folder-policy'
def test_ingress_sub_policy_allow_target_sa(self):
net = network.get_network(project_id=DUMMY_PROJECT_ID,
network_name=DUMMY_DEFAULT_NETWORK)
r = net.firewall.check_connectivity_ingress(
src_ip=ipaddress.ip_network('10.102.0.1/32'), #
ip_protocol='tcp',
port=2000,
target_service_account=
'<EMAIL>')
assert r.action == 'allow'
assert r.matched_by_str == 'policy: sub-folder-policy'
def test_ingress_sub_policy_deny_wrong_target_sa(self):
net = network.get_network(project_id=DUMMY_PROJECT_ID,
network_name=DUMMY_DEFAULT_NETWORK)
r = net.firewall.check_connectivity_ingress(
src_ip=ipaddress.ip_network('10.102.0.1/32'), #
ip_protocol='tcp',
port=2000,
target_service_account='<EMAIL>')
assert r.action == 'deny'
def test_get_ingress_rules(self):
net = network.get_network(project_id=DUMMY_GKE_PROJECT_ID,
network_name=DUMMY_DEFAULT_NETWORK)
pattern = re.compile(r'k8s-fw-l7-.*')
rules = net.firewall.get_vpc_ingress_rules(
name_pattern=pattern, target_tags=['gke-gke4-1019cf00-node'])
assert 'k8s-fw-l7--ff9247ffa8ffeb9e' == rules[0].name
assert 'gke-gke4-1019cf00-node' in rules[0].target_tags
assert ipaddress.IPv4Network('192.168.3.11/22') in rules[0].source_ranges
pattern = re.compile(r'default-allow-.*')
rules = net.firewall.get_vpc_ingress_rules(name_pattern=pattern)
assert 'default-allow-rdp' in [r.name for r in rules]
assert 'default-allow-ssh' in [r.name for r in rules]
assert 'default-allow-internal' in [r.name for r in rules]
assert 'default-allow-icmp' in [r.name for r in rules]
rules = net.firewall.get_vpc_ingress_rules(name='gke-gke3-8614055e-ssh')
assert 'gke-gke3-8614055e-ssh' == rules[0].name
assert 'tcp' == rules[0].allowed[0]['IPProtocol']
assert '22' in rules[0].allowed[0]['ports']
rules = net.firewall.get_vpc_ingress_rules(name='not-existing-rule')
assert 'gke-gke3-8614055e-ssh' not in [r.name for r in rules]
```
#### File: gcpdiag/queries/pubsub_test.py
```python
from unittest import mock
from gcpdiag import models
from gcpdiag.queries import apis_stub, pubsub
DUMMY_PROJECT_NAME = 'gcpdiag-pubsub1-aaaa'
DUMMY_TOPIC_NAME = 'projects/gcpdiag-pubsub1-aaaa/topics/gcpdiag-pubsub1topic-aaaa'
DUMMY_SUB_NAME = 'projects/gcpdiag-pubsub1-aaaa/subscriptions/gcpdiag-pubsub1subscription-aaaa'
DUMMY_PERM = 'domain:google.com'
@mock.patch('gcpdiag.queries.apis.get_api', new=apis_stub.get_api_stub)
class TestPubsub:
"""Test Pubsub"""
def test_get_topics(self):
context = models.Context(project_id=DUMMY_PROJECT_NAME)
topics = pubsub.get_topics(context=context)
assert DUMMY_TOPIC_NAME in topics
def test_get_subscription(self):
context = models.Context(project_id=DUMMY_PROJECT_NAME)
subscription = pubsub.get_subscription(context=context)
assert DUMMY_SUB_NAME in subscription
def test_get_topic_iam_policy(self):
policy = pubsub.get_topic_iam_policy(DUMMY_TOPIC_NAME)
assert DUMMY_PERM in policy.get_members()
def test_get_subscription_iam_policy(self):
policy = pubsub.get_subscription_iam_policy(DUMMY_SUB_NAME)
assert DUMMY_PERM in policy.get_members()
``` |
{
"source": "Joris-Fu/utterance_rewriter_transformer",
"score": 2
} |
#### File: Joris-Fu/utterance_rewriter_transformer/model.py
```python
import logging
import tensorflow as tf
from tqdm import tqdm
from data_load import _load_vocab
from modules import get_token_embeddings, ff, positional_encoding, multihead_attention, noam_scheme
from utils import convert_idx_to_token_tensor, split_input
logging.basicConfig(level=logging.INFO)
class Transformer:
def __init__(self, hp):
self.hp = hp
self.token2idx, self.idx2token = _load_vocab(hp.vocab)
self.embeddings = get_token_embeddings(self.hp.vocab_size, self.hp.d_model, zero_pad=True)
def encode(self, xs, training=True, use_turn_embedding=True):
'''
Returns
memory: encoder outputs. (N, T1, d_model)
'''
with tf.variable_scope("encoder", reuse=tf.AUTO_REUSE):
self.x, self.turn_ids,sents1 = xs
# self.x shape:(batch_size,max_len1)
# embedding
enc = tf.nn.embedding_lookup(self.embeddings, self.x) # (N, T1, d_model)
enc *= self.hp.d_model**0.5 # scale
enc += positional_encoding(enc, self.hp.maxlen1+self.hp.maxlen2)
batch_size = tf.shape(enc)[0]
            # TODO add turn encoding: define how turn_ids are passed in (they are carried inside xs)
if use_turn_embedding:
if self.turn_ids is None:
raise ValueError("`turn_ids` must be specified if"
"`use_turn_embedding` is True.")
turn_cnt = tf.to_int32(tf.reduce_max(self.turn_ids))
turn_ids_table = tf.get_variable(
name="turn_embedding",
dtype=tf.float32,
                    shape=(20, self.hp.d_model),  # width, i.e. the embedding size
initializer=tf.contrib.layers.xavier_initializer())
flat_turn_ids = tf.reshape(self.turn_ids, [-1]) # (batch_size*seq_len)
one_hot_ids = tf.one_hot(flat_turn_ids, depth=20) # (batch_size*seq_len,turn_cnt)
turn_embedding = tf.matmul(one_hot_ids, turn_ids_table) # (batch_size*seq_len,embed_size)
turn_embedding = tf.reshape(turn_embedding,
[batch_size, self.hp.maxlen1+self.hp.maxlen2, self.hp.d_model])
enc += turn_embedding
# TODO end
enc = tf.layers.dropout(enc, self.hp.dropout_rate, training=training)
## Blocks
for i in range(self.hp.num_blocks):
with tf.variable_scope("num_blocks_{}".format(i), reuse=tf.AUTO_REUSE):
# self-attention
enc, _ = multihead_attention(queries=enc,
keys=enc,
values=enc,
num_heads=self.hp.num_heads,
dropout_rate=self.hp.dropout_rate,
training=training,
causality=False)
# feed forward
enc_h = ff(enc, num_units=[self.hp.d_ff, self.hp.d_model])
enc_u = ff(enc, num_units=[self.hp.d_ff, self.hp.d_model])
# enc = enc_h/2 + enc_u/2
# print(enc)
                # TODO change this to a concatenation followed by an extra feed-forward layer
enc = tf.layers.dense(tf.concat([enc_h, enc_u], axis=-1), units=self.hp.d_model, activation=tf.sigmoid,
trainable=training, use_bias=False)
self.enc_output = enc
self.enc_output_h = enc_h
self.enc_output_u = enc_u
return self.enc_output_h, self.enc_output_u, sents1
def decode(self, xs, ys, memory_h, memory_u, training=True):
'''
memory: encoder outputs. (N, T1, d_model)
Returns
logits: (N, T2, V). float32.
y: (N, T2). int32
sents2: (N,). string.
'''
self.memory_h = memory_h
self.memory_u = memory_u
with tf.variable_scope("decoder", reuse=tf.AUTO_REUSE):
self.decoder_inputs, y, sents2 = ys
            x, _, _ = xs
# embedding
dec = tf.nn.embedding_lookup(self.embeddings, self.decoder_inputs) # (N, T2, d_model)
dec *= self.hp.d_model ** 0.5 # scale
dec += positional_encoding(dec, self.hp.maxlen2)
before_dec = dec
dec = tf.layers.dropout(dec, self.hp.dropout_rate, training=training)
attn_dists = []
# Blocks
for i in range(self.hp.num_blocks):
with tf.variable_scope("num_blocks_{}".format(i), reuse=tf.AUTO_REUSE):
# Masked self-attention (Note that causality is True at this time)
dec, _ = multihead_attention(queries=dec,
keys=dec,
values=dec,
num_heads=self.hp.num_heads,
dropout_rate=self.hp.dropout_rate,
training=training,
causality=True,
scope="self_attention")
# dec (batch_size, max_len2, embed_size)
# memory_h (batch_size, max_len1, embed_size)
# Vanilla attention
dec_h, attn_dist_h = multihead_attention(queries=dec,
keys=self.memory_h,
values=self.memory_h,
num_heads=self.hp.num_heads,
dropout_rate=self.hp.dropout_rate,
training=training,
causality=False,
scope="vanilla_attention")
dec_u, attn_dist_u = multihead_attention(queries=dec,
keys=self.memory_u,
values=self.memory_u,
num_heads=self.hp.num_heads,
dropout_rate=self.hp.dropout_rate,
training=training,
causality=False,
scope="vanilla_attention")
                    # TODO verify the dimension relationships
# print(attn_dist_u)
# print(attn_dist_h)
# attn_dist = tf.concat([attn_dist_h,attn_dist_u],axis=1) # N * T_q * T_k
attn_dist = tf.layers.dense(tf.concat([attn_dist_h, attn_dist_u], axis=-1), units=self.hp.maxlen1+self.hp.maxlen2,
activation=tf.sigmoid,
trainable=training, use_bias=False)
attn_dists.append(attn_dist)
### Feed Forward
dec = tf.layers.dense(tf.concat([dec_h, dec_u], axis=-1), units=self.hp.d_model,
activation=tf.sigmoid,
trainable=training, use_bias=False)
dec = ff(dec, num_units=[self.hp.d_ff, self.hp.d_model])
# Final linear projection (embedding weights are shared)
# weights = tf.Variable(self.embeddings) # (d_model, vocab_size)
# logits = tf.einsum('ntd,dk->ntk', dec, weights) # (N, T2, vocab_size)
with tf.variable_scope("gen", reuse=tf.AUTO_REUSE):
# tf.concat([before_dec, dec, attn_dists[-1]], axis=-1) shape N * T_q *(2*d_model+T_k)
gens = tf.layers.dense(tf.concat([dec, dec_h, dec_u], axis=-1), units=1, activation=tf.sigmoid,
trainable=training, use_bias=False)
# gens shape N * t_q * 1
# logits = tf.nn.softmax(logits)
# final distribution
self.logits = self._calc_final_dist(x, gens, attn_dists[-1],training=training)
return self.logits, y, sents2
def _calc_final_dist(self, x, gens, attn_dists,training=True):
"""Calculate the final distribution, for the pointer-generator model
Args:
x: encoder input which contain oov number
gens: the generation, choose vocab from article or vocab
attn_dists: The attention distributions. List length max_dec_steps of (batch_size, attn_len) arrays
Returns:
final_dists: The final distributions. List length max_dec_steps of (batch_size, extended_vsize) arrays.
"""
batch_size = tf.shape(x)[0]
with tf.variable_scope('final_distribution', reuse=tf.AUTO_REUSE):
# Multiply vocab dists by p_gen and attention dists by (1-p_gen)
# his_dists, utt_dists = tf.split(attn_dists,[self.hp.maxlen1,self.hp.maxlen2],axis=-1)
his_dists, utt_dists = attn_dists,attn_dists
# print(his_dists)
if training:
his_gens = tf.concat([tf.tile(gens,[1,1,self.hp.maxlen1]),tf.zeros([batch_size,self.hp.maxlen2,self.hp.maxlen2],dtype=tf.float32)],axis=-1)
else:
dec_t = tf.shape(gens)[1]
his_gens = tf.concat([tf.tile(gens,[1,1,self.hp.maxlen1]),tf.zeros([batch_size,dec_t,self.hp.maxlen2],dtype=tf.float32)],axis=-1)
his_dists = his_gens * his_dists
if training:
utt_gens = tf.concat([tf.zeros([batch_size,self.hp.maxlen2,self.hp.maxlen1],dtype=tf.float32),tf.tile(1-gens,[1,1,self.hp.maxlen2])],axis=-1)
else:
dec_t = tf.shape(gens)[1]
utt_gens = tf.concat([tf.zeros([batch_size,dec_t,self.hp.maxlen1],dtype=tf.float32),tf.tile(1-gens,[1,1,self.hp.maxlen2])],axis=-1)
utt_dists = utt_gens * utt_dists
# print(his_dists)
attn_dist_his_projected = self._project_attn_to_vocab(his_dists,x,vocab_size=10600)
attn_dist_utt_projected = self._project_attn_to_vocab(utt_dists,x,vocab_size=10600)
final_dists = attn_dist_his_projected + attn_dist_utt_projected
# shape (batch_size * decode_step * vocab_size)
return final_dists
def _project_attn_to_vocab(self,attn_dist,x,vocab_size=10600):
"""
project attention distribution to vocab distribution
:param attn_dist: attention distribution (batch_size,dec_t,attn_len)
:param x: input list,list of num
:param vocab_size:
:return:
"""
batch_size = tf.shape(attn_dist)[0]
dec_t = tf.shape(attn_dist)[1]
attn_len = tf.shape(attn_dist)[2]
dec = tf.range(0, limit=dec_t) # [dec]
dec = tf.expand_dims(dec, axis=-1) # [dec, 1]
dec = tf.tile(dec, [1, attn_len]) # [dec, atten_len]
dec = tf.expand_dims(dec, axis=0) # [1, dec, atten_len]
dec = tf.tile(dec, [batch_size, 1, 1]) # [batch_size, dec, atten_len]
x = tf.expand_dims(x, axis=1) # [batch_size, 1, atten_len]
x = tf.tile(x, [1, dec_t, 1]) # [batch_size, dec, atten_len]
x = tf.stack([dec, x], axis=3)
attn_dists_projected = tf.map_fn(fn=lambda y: tf.scatter_nd(y[0], y[1], [dec_t, vocab_size]),
elems=(x, attn_dist), dtype=tf.float32)
return attn_dists_projected
def _calc_loss(self, targets, final_dists):
"""
calculate loss
:param targets: reference
:param final_dists: transformer decoder output add by pointer generator
:return: loss
"""
with tf.name_scope('loss'):
dec = tf.shape(targets)[1]
batch_nums = tf.shape(targets)[0]
dec = tf.range(0, limit=dec)
dec = tf.expand_dims(dec, axis=0)
dec = tf.tile(dec, [batch_nums, 1])
indices = tf.stack([dec, targets], axis=2) # [batch_size, dec, 2]
loss = tf.map_fn(fn=lambda x: tf.gather_nd(x[1], x[0]), elems=(indices, final_dists), dtype=tf.float32)
loss = tf.log(0.9) - tf.log(loss)
nonpadding = tf.to_float(tf.not_equal(targets, self.token2idx["<pad>"])) # 0: <pad>
loss = tf.reduce_sum(loss * nonpadding) / (tf.reduce_sum(nonpadding) + 1e-7)
return loss
def train(self, xs, ys):
"""
train model
:param xs: dataset xs
:param ys: dataset ys
:return: loss
train op
global step
tensorflow summary
"""
tower_grads = []
global_step = tf.train.get_or_create_global_step()
global_step_ = global_step * self.hp.gpu_nums
lr = noam_scheme(self.hp.d_model, global_step_, self.hp.warmup_steps)
optimizer = tf.train.AdamOptimizer(lr)
losses = []
xs, ys = split_input(xs, ys, self.hp.gpu_nums)
with tf.variable_scope(tf.get_variable_scope()):
for no in range(self.hp.gpu_nums):
with tf.device("/gpu:%d" % no):
with tf.name_scope("tower_%d" % no):
memory_h, memory_u, sents1 = self.encode(xs[no])
logits, y, sents2 = self.decode(xs[no], ys[no], memory_h, memory_u)
tf.get_variable_scope().reuse_variables()
loss = self._calc_loss(y, logits)
losses.append(loss)
grads = optimizer.compute_gradients(loss)
# print(grads)
tower_grads.append(grads)
with tf.device("/cpu:0"):
grads = self.average_gradients(tower_grads)
train_op = optimizer.apply_gradients(grads, global_step=global_step)
loss = sum(losses) / len(losses)
tf.summary.scalar('lr', lr)
tf.summary.scalar("train_loss", loss)
summaries = tf.summary.merge_all()
return loss, train_op, global_step_, summaries
def average_gradients(self, tower_grads):
"""
average gradients of all gpu gradients
:param tower_grads: list, each element is a gradient of gpu
:return: be averaged gradient
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
grads = []
for _, g in grad_and_vars:
expend_g = tf.expand_dims(g, 0)
grads.append(expend_g)
grad = tf.concat(grads, 0)
grad = tf.reduce_mean(grad, 0)
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
def eval(self, xs, ys):
'''Predicts autoregressively
At inference, input ys is ignored.
Returns
y_hat: (N, T2)
tensorflow summary
'''
# decoder_inputs <s> sentences
decoder_inputs, y, sents2 = ys
# decoder_inputs shape: [batch_size, 1] [[<s>], [<s>], [<s>], [<s>]]
decoder_inputs = tf.ones((tf.shape(xs[0])[0], 1), tf.int32) * self.token2idx["<s>"]
ys = (decoder_inputs, y, sents2)
memory_h, memory_u, sents1 = self.encode(xs, False)
y_hat = None
logging.info("Inference graph is being built. Please be patient.")
for _ in tqdm(range(self.hp.maxlen2)):
logits, y, sents2 = self.decode(xs, ys, memory_h, memory_u, False)
y_hat = tf.to_int32(tf.argmax(logits, axis=-1))
if tf.reduce_sum(y_hat, 1) == self.token2idx["<pad>"]: break
_decoder_inputs = tf.concat((decoder_inputs, y_hat), 1)
ys = (_decoder_inputs, y, sents2)
# monitor a random sample
n = tf.random_uniform((), 0, tf.shape(y_hat)[0]-1, tf.int32)
sent1 = sents1[n]
pred = convert_idx_to_token_tensor(y_hat[n], self.idx2token)
sent2 = sents2[n]
tf.summary.text("sent1", sent1)
tf.summary.text("pred", pred)
tf.summary.text("sent2", sent2)
summaries = tf.summary.merge_all()
return y_hat, summaries
def create_initializer(initializer_range=0.02):
"""Creates a `truncated_normal_initializer` with the given range."""
return tf.truncated_normal_initializer(stddev=initializer_range)
``` |
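The pointer mechanism in `_calc_final_dist`/`_project_attn_to_vocab` scatters attention weights over source positions onto the vocabulary ids found at those positions, summing the weight of repeated tokens (which is what `tf.scatter_nd` does with duplicate indices). The NumPy sketch below reproduces that projection for a single example so the index arithmetic is easier to follow; shapes, ids and values are made up:
```python
import numpy as np


def project_attn_to_vocab(attn_dist, x, vocab_size):
    """attn_dist: (dec_t, attn_len) attention weights.
    x: (attn_len,) vocabulary ids of the source tokens.
    Returns a (dec_t, vocab_size) distribution; weights of repeated ids
    are summed, mirroring scatter_nd in the graph version above."""
    dec_t, _ = attn_dist.shape
    projected = np.zeros((dec_t, vocab_size), dtype=attn_dist.dtype)
    for t in range(dec_t):
        np.add.at(projected[t], x, attn_dist[t])
    return projected


# Toy example: 2 decode steps, 3 source tokens whose ids are 5, 7 and 5 again.
attn = np.array([[0.2, 0.5, 0.3],
                 [0.6, 0.1, 0.3]])
x = np.array([5, 7, 5])
dist = project_attn_to_vocab(attn, x, vocab_size=10)
print(dist[0, 5], dist[0, 7])  # 0.5 (= 0.2 + 0.3) and 0.5
```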
{
"source": "jorisgu/jg_pfr",
"score": 2
} |
#### File: jg_pfr/tools/jg_net_evaluate.py
```python
import _init_paths
#from fast_rcnn.test import test_net
import fast_rcnn.test as frcnnt
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list
from datasets.factory import get_imdb
import caffe
import argparse
import pprint
import time, os, sys
# from lib faster_rcnn.test
from fast_rcnn.bbox_transform import clip_boxes, bbox_transform_inv
from utils.timer import Timer
import numpy as np
import cv2
from fast_rcnn.nms_wrapper import nms
import cPickle
from utils.blob import im_list_to_blob
def parse_args():
"""
Parse input arguments
"""
    parser = argparse.ArgumentParser(description='Evaluate detections on an imdb')
parser.add_argument('--imdb_name', dest='imdb_name',type=str)
parser.add_argument('--output_dir', dest='output_dir', type=str)
parser.add_argument('--input_file_name', dest='input_file_name', type=str)
parser.add_argument('--output_file_name', dest='output_file_name', type=str)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if os.path.exists(os.path.join(args.output_dir, args.input_file_name)):
with open(os.path.join(args.output_dir, args.input_file_name), 'rb') as fid:
all_boxes = cPickle.load(fid)
else:
print('File not found in net evaluate')
imdb = get_imdb(args.imdb_name)
imdb.output_dir = args.output_dir
#print 'Evaluating detections'
imdb.evaluate_detections(all_boxes, os.path.join(args.output_dir,'results'))
```
#### File: jg_pfr/tools/jg_rpn_generate.py
```python
import _init_paths
from fast_rcnn.train import get_training_roidb, train_net
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from datasets.factory import get_imdb
from rpn.generate import imdb_proposals
import argparse
import pprint
import numpy as np
import sys, os
import multiprocessing as mp
import cPickle
import shutil
def parse_args():
"""
Parse input arguments
"""
    parser = argparse.ArgumentParser(description='Generate RPN proposals')
parser.add_argument('--gpu_id', dest='gpu_id', type=int)
parser.add_argument('--path_net_proto', dest='path_net_proto', type=str)
parser.add_argument('--path_net_weights', dest='path_net_weights', type=str)
parser.add_argument('--imdb_name', dest='imdb_name',type=str)
parser.add_argument('--path_cfg', dest='path_cfg',type=str)
parser.add_argument('--output_dir', dest='output_dir', type=str)
parser.add_argument('--output_file_name', dest='output_file_name', type=str)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
def get_roidb(imdb_name, rpn_file=None):
print 'Requiring dataset `{:s}` for training'.format(imdb_name)
imdb = get_imdb(imdb_name)
print 'Loaded dataset `{:s}` for training'.format(imdb.name)
imdb.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD)
print 'Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD)
if rpn_file is not None:
imdb.config['rpn_file'] = rpn_file
roidb = get_training_roidb(imdb)
return roidb, imdb
def _init_caffe(cfg):
"""Initialize pycaffe in a training process.
"""
import caffe
# fix the random seeds (numpy and caffe) for reproducibility
np.random.seed(cfg.RNG_SEED)
caffe.set_random_seed(cfg.RNG_SEED)
# set up caffe
caffe.set_mode_gpu()
caffe.set_device(cfg.GPU_ID)
if __name__ == '__main__':
args = parse_args()
if args.path_cfg is not None:
cfg_from_file(args.path_cfg)
cfg.GPU_ID = args.gpu_id
print 'RPN model: {}'.format(args.path_net_weights)
print('Using config:')
pprint.pprint(cfg)
import caffe
_init_caffe(cfg)
imdb = get_imdb(args.imdb_name)
print 'Loaded dataset `{:s}` for proposal generation'.format(imdb.name)
# Load RPN and configure output directory
rpn_net = caffe.Net(args.path_net_proto, args.path_net_weights, caffe.TEST)
output_dir = get_output_dir(imdb)
print 'Output will be saved to `{:s}`'.format(args.output_dir)
# Generate proposals on the imdb
rpn_proposals = imdb_proposals(rpn_net, imdb)
# Write proposals to disk and send the proposal file path through the
# multiprocessing queue
rpn_proposals_path = os.path.join(args.output_dir, args.output_file_name)
with open(rpn_proposals_path, 'wb') as f:
cPickle.dump(rpn_proposals, f, cPickle.HIGHEST_PROTOCOL)
print 'Wrote RPN proposals to {}'.format(rpn_proposals_path)
``` |
{
"source": "JorisHartog/sobchak",
"score": 3
} |
#### File: sobchak/sobchak/helper.py
```python
import logging
import yaml
def sigmoid(x):
"""sigmoid
A sigmoid-like function.
"""
return x / (1 + abs(x))
def get_object_by_id(objects, identifier):
"""get_object_by_id
Returns the object which belongs to the given ID. Returns None if it wasn't
found.
"""
logging.debug('Searching for %s inside %s', identifier, objects)
for obj in objects:
if obj.id == identifier or obj.name == identifier:
return obj
logging.info('Could not find %s inside %s', identifier, objects)
return None
def parse_config(filename):
"""parse_config
Load a certain YAML-file and return its contents as a dictionary.
"""
try:
with open(filename, 'r') as config:
return yaml.safe_load(config)
except Exception as e:
logging.error('Could not load %s: %s', filename, e)
exit(1)
```
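Note that `sigmoid` here is not the logistic function but `x / (1 + |x|)`, which maps any real score into the open interval (-1, 1) while preserving its sign; presumably this keeps hypervisor scores of very different magnitudes comparable. A quick illustration (the function is repeated inline so the snippet stands alone):
```python
def sigmoid(x):
    # Same definition as in sobchak.helper above.
    return x / (1 + abs(x))

for score in (-50, -1, 0, 1, 50):
    print(score, round(sigmoid(score), 3))
# -50 -0.98, -1 -0.5, 0 0.0, 1 0.5, 50 0.98
```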
#### File: sobchak/sobchak/inventory.py
```python
import logging
from sobchak.helper import get_object_by_id
from sobchak.hypervisor import CustomHypervisor
from sobchak.server import CustomServer
from sobchak.migration import Migration
class Inventory(object):
"""Inventory
An object containing Hypervisor and VM objects which are fetched using the
OpenStack API.
"""
def __init__(self, novaclient, config={}):
self._client = novaclient
self._config = config
self._hypervisors = []
self._vms = []
self._flavors = []
def to_dict(self):
"""to_dict
Returns the inventory as a dictionary.
"""
return {
'common_ratio': self.common_ratio,
'inventory': [h.to_dict() for h in self.hypervisors]
}
def snapshot(self, validate=True):
"""snapshot
Saves a snapshot of the current inventory.
"""
logging.debug('Taking snapshot')
for hypervisor in self.hypervisors:
hypervisor.snapshot(validate)
def use_snapshot(self, index=-1, validate=True):
"""use_snapshot
Reverts to the last snapshot.
"""
logging.debug('Reverting to snapshot')
for hypervisor in self.hypervisors:
hypervisor.use_snapshot(index, validate)
@property
def hypervisors(self):
"""hypervisors
Returns a list of hypervisors as CustomHypervisor objects. If it's the
first time the list is being called, the VM's are attached to their
hypervisors.
"""
if not self._hypervisors:
logging.info('Fetching hypervisor info')
self._hypervisors = [
CustomHypervisor(h, self.common_ratio, self._config)
for h in self._client.hypervisors.list()]
for vm in self.vms:
hypervisor = get_object_by_id(self._hypervisors, vm.hypervisor)
if hypervisor:
hypervisor.add_server(vm, force=True)
else:
logging.warning('Unknown hypervisor for %s (status: %s)',
vm, vm.status)
self.snapshot()
return self._hypervisors
@property
def vms(self):
"""vms
Returns a list of VM's as CustomServer objects.
"""
def _fetch_vms(client, chunksize=1000):
"""_fetch_vms
Fetches and returns a list of all servers using pagination.
"""
vms = []
listmarker = None
while True:
new_vms = client.servers.list(
search_opts={'all_tenants': True},
limit=chunksize,
marker=listmarker)
vms.extend(new_vms)
if len(new_vms) < chunksize:
break
else:
listmarker = vms[-1].id
return vms
if not self._vms:
logging.info('Fetching VM info')
self._vms = [CustomServer(vm, self.flavors)
for vm in _fetch_vms(self._client)
if vm.status != 'SHELVED_OFFLOADED']
return self._vms
@property
def enabled_hypervisors(self):
"""enabled_hypervisors
Returns a list of enabled hypervisors.
"""
return [h for h in self.hypervisors if h.enabled]
@property
def left_divergent(self):
"""left_divergent
Returns the enabled hypervisor which is the most divergent to the left
and has a negative score, so it can help. Returns None if no hypervisors
fit that profile.
"""
candidate_hypervisors = [h for h in self.enabled_hypervisors
if h.score < 0]
if candidate_hypervisors:
return max(candidate_hypervisors, key=lambda h: h.divergence[0])
else:
return None
@property
def right_divergent(self):
"""right_divergent
Returns the enabled hypervisor which is the most divergent to the right
and has a positive score, so it can help. Returns None if no hypervisors
fit that profile.
"""
candidate_hypervisors = [h for h in self.enabled_hypervisors
if h.score > 0]
if candidate_hypervisors:
return max(candidate_hypervisors, key=lambda h: h.divergence[1])
else:
return None
@property
def common_ratio(self):
"""common_ratio
Returns the most common ratio amongst all VMs.
"""
ratios = [vm.ratio for vm in self.vms]
return max(ratios, key=ratios.count)
@property
def flavors(self):
"""flavors
Returns a list of Flavors.
"""
if not self._flavors:
self._flavors = self._client.flavors.list(is_public=None)
return self._flavors
def _validate_migrations(self, migrations):
"""_validate_migrations
Validate a list of migrations on several points:
* No duplicate VMs
* Migrations - in order - are possible after last snapshot
* Same amount of VMs
* Disabled hypervisors are left alone
"""
self.use_snapshot(0)
# Check for duplicate VMs
vm_ids = [vm.id for vm in self.vms]
assert len(vm_ids) == len(set(vm_ids))
number_of_vms = len(self.vms)
# Check for valid migration list
for migration in migrations:
assert migration.source.enabled
assert migration.destination.enabled
assert migration.source.remove_server(migration.server)
assert migration.destination.add_server(migration.server)
# Check for number of VMs
assert number_of_vms == len(self.vms)
# Check for duplicate VMs
vm_ids = [vm.id for vm in self.vms]
assert len(vm_ids) == len(set(vm_ids))
logging.info('Validated migration list')
def _increase_buffer(self, hypervisor, skip_hypervisor_ids=[],
skip_server_ids=[]):
"""_increase_buffer
Returns a migration which will temporarily give a given hypervisor extra
available resources. Does not use the hypervisors given in `skip` as a
buffer.
"""
potential_buffers = [h for h in self.enabled_hypervisors
if h.id not in skip_hypervisor_ids and
h.id != hypervisor.id]
servers = [s for s in hypervisor.servers if s.id not in skip_server_ids]
buffers = reversed(sorted(potential_buffers,
key=lambda h: h.available_vcpus * h.available_ram))
sorted_servers = reversed(sorted(servers, key=lambda s: s.length))
for buff in buffers:
for server in sorted_servers:
if buff.add_server(server):
assert hypervisor.remove_server(server)
return Migration(server, hypervisor, buff)
logging.warning('Could not find available resources to migrate!')
return None
def _try_migration(self, migration):
"""_try_migration
Tries a migration and adds a migration to a buffer hypervisor if needed.
Returns a tuple containing lists of migrations and optional post
migrations.
"""
assert migration.source.remove_server(migration.server)
migrations = []
post_migrations = []
while not migration.destination.add_server(migration.server):
logging.info('Unable to migrate server %s, adding buffer.',
migration.server)
buffer_migration = self._increase_buffer(migration.destination,
skip_hypervisor_ids=[
migration.source.id],
skip_server_ids=[migration.server.id])
if buffer_migration:
migrations.append(buffer_migration)
post_migrations.append(buffer_migration.reverse)
else:
migration.source.add_server(migration.server)
return None
migrations.append(migration)
return (migrations, post_migrations)
def _plan_migrations(self, needed_migrations):
"""_plan_migrations
Takes a list of Migration objects and determines which actual migrations
need to be done to realize this (as some migrations will not be possible
due to insufficient available resources). Returns a list of Migration
objects or an empty list if it's not possible.
"""
migrations = []
skip_servers = []
for migration in needed_migrations:
if migration.server in skip_servers:
skip_servers.remove(migration.server)
continue
new_migrations = self._try_migration(migration)
if not new_migrations:
logging.warning('Could not get enough free resources.')
self.use_snapshot()
return []
new_migration, post_migrations = new_migrations
migrations.extend(new_migration)
for post_migration in post_migrations:
if post_migration.server in [m.server for m in
needed_migrations if m not in migrations]:
skip_servers.append(post_migration.server)
destinations = [m.destination for m in needed_migrations
if m.server == post_migration.server]
assert len(destinations) == 1
post_migration.destination = destinations[0]
needed_migrations.append(post_migration)
return migrations
def _score_with_vm(self, hypervisor, vm):
"""_score_with_vm
Returns the score a hypervisor would have if it hosted a given VM.
"""
if not hypervisor.add_server(vm):
return hypervisor.score
else:
score = hypervisor.score
assert hypervisor.remove_server(vm)
return score
def _mix_hypervisors(self, subject, improvement):
"""_mix_hypervisors
Takes two hypervisors (a `subject` which is to be improved and an
`improvement` which has the divergence which enables the improvement)
and mixes their VMs to improve the overall score.
Returns a list of migrations if the combined score is lowered, otherwise
returns None. Also returns None if the VMs do not fit on the two
hypervisors (e.g. due to bad scheduling).
Note that the list of migrations that is generated does not take
hypervisor resources into account, so shuffling between a third node is
needed when there's not enough free resources to migrate certain VMs.
"""
logging.info('Mixing %s and %s', subject.name, improvement.name)
score_before = abs(subject.score) + abs(improvement.score)
subject_vms = subject.pop()
improvement_vms = improvement.pop()
vms = subject_vms + improvement_vms
while vms:
best_vm = min(vms,
key=lambda vm: abs(self._score_with_vm(subject, vm)))
if not subject.add_server(best_vm):
break
vms.remove(best_vm)
for vm in vms:
if not improvement.add_server(vm):
logging.warning('Could not fit VMs in hypervisors!')
subject.servers = subject_vms
improvement.servers = improvement_vms
return None
score_after = abs(subject.score) + abs(improvement.score)
logging.info('Score from %f to %f', score_before, score_after)
if score_after >= score_before:
subject.servers = subject_vms
improvement.servers = improvement_vms
return None
return [Migration(s, improvement, subject) for s in subject.servers
if s not in subject_vms] + \
[Migration(s, subject, improvement) for s in improvement.servers
if s not in improvement_vms]
def optimize(self, migrations=[], iterations=3):
"""optimize
Generates and returns a list of migrations to improve Hypervisor
resource distribution.
"""
if iterations == 0:
return migrations
for subject in reversed(sorted(self.enabled_hypervisors,
key=lambda h: abs(h.score))):
if subject.score < 0:
improvement = self.right_divergent
else:
improvement = self.left_divergent
if not improvement:
continue
needed_migrations = self._mix_hypervisors(subject, improvement)
self.use_snapshot(validate=False)
if needed_migrations:
migrations.extend(self._plan_migrations(needed_migrations))
# Final optimization; merge successive migrations of the same VM
optimizing = True
while optimizing:
optimizing = False
for i in range(len(migrations) - 1):
if migrations[i].server == migrations[i+1].server:
optimizing = True
migrations = migrations[:i] + \
[Migration(migrations[i].server,
migrations[i].source,
migrations[i+1].destination)] + \
migrations[i+2:]
break
self.snapshot(validate=False)
self._validate_migrations(migrations)
return self.optimize(migrations=migrations,
iterations=iterations-1)
return migrations
```
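A rough end-to-end sketch of how `Inventory` appears to be driven: it wants an authenticated novaclient handle plus an optional config dict, and `optimize()` returns the list of `Migration` objects. The keystone credentials below are placeholders and the wiring is an assumption about the surrounding tooling, not something defined in this module:
```python
from keystoneauth1 import loading, session
from novaclient import client as nova_client

from sobchak.inventory import Inventory

# Placeholder credentials; in practice these come from the environment.
loader = loading.get_plugin_loader('password')
auth = loader.load_from_options(auth_url='https://keystone.example:5000/v3',
                                username='admin',
                                password='secret',
                                project_name='admin',
                                user_domain_name='Default',
                                project_domain_name='Default')
nova = nova_client.Client('2.1', session=session.Session(auth=auth))

inventory = Inventory(nova, config={})
for migration in inventory.optimize():
    print(migration)  # each Migration is printable (see Report.add_migrations below)
```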
#### File: sobchak/sobchak/plot.py
```python
from io import BytesIO
from matplotlib import pyplot, patches
import base64
class Plot(object):
"""Plot
Generates a graph and converts it to a base64-decoded PNG file.
"""
def __init__(self, width, height, title, xlabel, ylabel):
self.width = width
self.height = height
self._png_file = BytesIO()
pyplot.xlim(1.1 * width)
pyplot.ylim(1.1 * height)
pyplot.gca().invert_xaxis()
pyplot.gca().invert_yaxis()
pyplot.title(title)
pyplot.xlabel(xlabel)
pyplot.ylabel(ylabel)
def __del__(self):
"""__del__
Make sure the plot is cleared when this object is destructed.
"""
pyplot.clf()
def add_graph(self, x, y, label):
"""add_graph
Turn two lists representing x and y values into a plot and add it to
the graph.
"""
pyplot.plot(x[:len(y)], y[:len(x)], label=label)
def add_box(self, width, height, label=None, facecolor='none', color='b'):
"""add_box
Add a box with a given width and height of a given color (blue by
default) to the graph.
"""
rect = patches.Rectangle(
(0, 0),
width,
height,
linewidth=1,
edgecolor=color,
label=label,
facecolor=facecolor)
pyplot.gca().add_patch(rect)
@property
def png(self):
"""png
Saves the current plot to the in-memory PNG file and returns the file.
"""
pyplot.legend(loc='lower right')
pyplot.savefig(self._png_file, format='png')
return self._png_file
@property
def base64(self):
"""base64
Returns a base64-decoded string of the graph.
"""
image = self.png.getvalue()
        return base64.encodebytes(image).decode('utf-8')
```
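A small hypothetical usage sketch for `Plot`: draw a capacity box and a usage box, then embed the rendered figure as an inline base64 PNG the same way `Report` does below. The sizes and labels are invented:
```python
from sobchak.plot import Plot

plot = Plot(width=64, height=256,
            title='example-hypervisor',
            xlabel='VCPUs', ylabel='RAM (GB)')
plot.add_box(64, 256, label='capacity', color='b')
plot.add_box(24, 96, label='used', facecolor='lightgrey', color='g')
html_img = '<img src="data:image/png;base64,{}"/>'.format(plot.base64)
```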
#### File: sobchak/sobchak/report.py
```python
import os
import logging
class Report(object):
"""Report
A Report object generates a report in the form of a HTML-page of a list of
hypervisors and information about how certain migrations improve the
resource distribution.
"""
def __init__(self, inventory, template='template.html'):
self._inventory = inventory
self._migration_report = ''
self._template = self._fetch_template(template)
self.title = 'Migration report'
def _fetch_template(self, filename):
"""_fetch_template
Reads a template and returns the contents.
"""
try:
with open(filename, 'r') as template:
return template.read()
except Exception as e:
logging.error('Could not load %s: %s', filename, e)
exit(1)
def add_migrations(self, migrations):
"""add_migrations
Adds the migrations to the report.
"""
def code_block(c):
return '<pre><code>' + c + '</code></pre>'
migration_list = '<br />'.join([str(m) for m in migrations])
self._migration_report = code_block(migration_list)
def save(self, filename='report.html'):
"""save
Save the report as a HTML-file.
"""
with open(filename, 'w+') as f:
f.write(self.page)
print('Report available: {}'.format(os.path.abspath(filename)))
@property
def body(self):
"""body
Returns the HTML body of the report.
"""
def img_tag(i): return \
'<img width="25%" src="data:image/png;base64,{}"/>'.format(i)
body = '<h1>{}</h1>'.format(self.title)
body += '<h2>Hypervisor info</h2>'
for hypervisor in self._inventory.hypervisors:
body += img_tag(hypervisor.plot)
body += '<h2>Migration list</h2>'
body += self._migration_report
return body
@property
def page(self):
"""page
Returns the report as HTML.
"""
variables = {
'title': self.title,
'body': self.body
}
content = self._template
for key, value in variables.items():
content = content.replace('{{'+key+'}}', value)
return content
``` |
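Putting the pieces together, the intended flow appears to be: build an `Inventory`, let it compute migrations, then hand both to `Report`. The wrapper below is illustrative and assumes an already-authenticated novaclient instance (see the sketch after `inventory.py`) plus the repository's `template.html` in the working directory:
```python
from sobchak.inventory import Inventory
from sobchak.report import Report


def write_report(novaclient, config=None):
    """Illustrative wrapper; not part of sobchak itself."""
    inventory = Inventory(novaclient, config or {})
    migrations = inventory.optimize()

    report = Report(inventory, template='template.html')
    report.add_migrations(migrations)
    report.save('report.html')  # prints the absolute path when done
    return migrations
```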
{
"source": "JorisHerbots/aioquic",
"score": 3
} |
#### File: scripts/crawl_interoprunner/crawler.py
```python
import logging, logging.config
import argparse
from json import JSONDecodeError
import os
import requests
import sys
# logging
LOG_CONFIG = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"basic": {
"format": "\033[1m\033[92m[%(name)s]\033[0m %(message)s"
}
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"formatter": "basic",
"stream": "ext://sys.stdout"
}
},
"loggers": {},
"root": {
"level": "INFO",
"handlers": ["console"]
}
}
logging.config.dictConfig(LOG_CONFIG)
logger = logging.getLogger("Crawler")
# Constants
LOGS_URL = "https://interop.seemann.io/logs.json"
RESULTS_URL = "https://interop.seemann.io/{}/result.json" # Requires formatting: run
QLOG_URL = "https://interop.seemann.io/{}/{}_{}/{}/{}/qlog/" # Requires formatting: run, server, client, test, server or client
TXTLOG_URL = "https://interop.seemann.io/{}/{}_{}/{}/{}/log.txt"
# Argparse (flag letters are picked arbitrarily :) )
parser = argparse.ArgumentParser(description="QUIC Interop crawler (https://interop.seemann.io/) [last tested: 2020-4-30]")
parser.add_argument("--server", type=str.lower, default=None, help="Server name (case-insensitive)")
parser.add_argument("--client", type=str.lower, default=None, help="Client name (case-insensitive)")
parser.add_argument("--outdir", type=str, default="./output", help="Output directory [default=./output]")
parser.add_argument("-s", action="store_true", default=False, help="Fetch log.txt instead of the QLOG.")
parser.add_argument("-p", action="store_true", default=False, help="Setting this flag allows for selecting older interop runs [default=latest]")
parser.add_argument("-t", action="store_true", default=False, help="Collect all server interop runs for the provided client, --server is ignored (cannot be used with -u)")
parser.add_argument("-u", action="store_true", default=False, help="Collect all client interop runs for the provided server, --client is ignored (cannot be used with -t)")
parser.add_argument("-v", action="store_false", default=True, help="Disable verbose mode (debugging information)")
args = parser.parse_args()
def select_input():
data = int(input("> ")) - 1
if data < 0:
raise IndexError
return data
def select_interop_run():
try:
# This will not work good if we have >100 runs in the future ;)
logs = requests.get(LOGS_URL).json()
logs_formatted = "\n".join("{}. {}".format(i+1, logs[i]) for i in range(0, len(logs)))
logging.info("Interup run selector flag set. Pick one of the following interop runs:\n" + logs_formatted)
selected_run = select_input()
return logs[selected_run]
except requests.exceptions.RequestException as e:
logger.exception("Could not connect to interop website.", e)
except JSONDecodeError as e:
logger.exception("Output from interop runner was not valid JSON", e)
except ValueError as e:
logger.exception("Input was non integer", e)
except IndexError as e:
logger.warning("Selected undefined interop run [{}].".format(str(selected_run+1)))
return "latest"
def check_selected_implementations():
try:
interop_results = requests.get(RESULTS_URL.format(run)).json()
implementations = interop_results.get('servers')
server_valid = args.server in implementations
client_valid = args.client in implementations
client_implementation_name = args.client
server_implementation_name = args.server
if ((not args.server or not server_valid) and not args.t) or ((not args.client or not client_valid) and not args.u):
implementations_formatted = "\n".join("{}. {}".format(i+1, implementations[i]) for i in range(0, len(implementations)))
logger.info("List of available QUIC implementations for selected run:\n" + implementations_formatted)
if (not args.client or not client_valid) and not args.u:
logger.info("Select a client implementation:" if not args.client else "Invalid client name provided, select a client implementation:")
client_implementation_name = implementations[select_input()]
if (not args.server or not server_valid) and not args.t:
logger.info("Select a server implementation:" if not args.server else "Invalid server name provided, select a server implementation:")
server_implementation_name = implementations[select_input()]
return server_implementation_name, client_implementation_name, implementations
    except requests.exceptions.RequestException:
        logger.exception("Could not connect to interop website.")
    except JSONDecodeError:
        logger.exception("Output from interop runner was not valid JSON.")
    except TypeError:
        logger.exception("Interop website did not return any servers?")
def check_output_dir():
outdir = args.outdir
if not os.path.exists(outdir):
        logger.warning("Given output path [{}] does not exist, do you want to create it?".format(outdir))
create_path = input("y/n? ").strip().lower() == "y"
if create_path:
os.makedirs(outdir)
else:
logger.error("Cannot continue without output directory, halting script.")
sys.exit()
return outdir
def select_interop_test():
try:
# transfer: multiplexing and flow control
# HTTP3 : should not be needed for most, contained in transfer test case
# goodput : downloads a single, large file. Should be better indication of flow control than transfer, maybe?
# multiplexing: stress test with many small files
# zerortt
tests = ["transfer", "http3", "multiplexing", "goodput/1", "zerortt"]
logger.info("What interop test results should be crawled for?\n" + "\n".join("{}. {}".format(i+1, tests[i]) for i in range(0, len(tests))))
selected_test = select_input()
return tests[selected_test]
    except ValueError:
        logger.exception("Input was not an integer.")
    except IndexError:
        logger.warning("Selected an undefined interop test. Cannot continue script.")
sys.exit()
def crawl(run, server, client, implementations, interop_test, outdir):
clients_to_crawl = implementations if args.u else [client]
servers_to_crawl = implementations if args.t else [server]
perspectives = ["server", "client"]
custom_headers = {"accept": "application/json"}
for s in servers_to_crawl:
for c in clients_to_crawl:
for perspective in perspectives:
if args.s:
try:
log_url = TXTLOG_URL.format(run, s, c, interop_test, perspective)
logger.debug("Fetching {}".format(log_url))
response = requests.get(log_url, headers=custom_headers)
response.raise_for_status()
out_path = os.path.join(outdir, "test-{}_server-{}_client-{}_perspective-{}.txt".format(interop_test.replace("/", "-"), s, c, perspective))
with open(out_path, "wb") as fp:
for chunk in response.iter_content(1024):
fp.write(chunk)
logger.info("LOG for test [{}] between server [{}] and client [{}] saved to [{}].".format(interop_test, s, c, out_path))
except (TypeError, JSONDecodeError, requests.HTTPError, ValueError):
logger.warning("No LOG results found for test [{}] between server [{}] and client [{}].".format(interop_test, s, c))
else:
try:
qlog_url = QLOG_URL.format(run, s, c, interop_test, perspective)
response = requests.get(qlog_url, headers=custom_headers)
response.raise_for_status()
directory_listing = response.json()
for item in directory_listing:
if ".qlog" in item.get("name", []):
qlog_url = qlog_url + item.get("name")
logger.debug("Fetching {}".format(qlog_url))
qlog = requests.get(qlog_url, headers=custom_headers, stream=True)
out_path = os.path.join(outdir, "test-{}_server-{}_client-{}_perspective-{}.qlog".format(interop_test.replace("/", "-"), s, c, perspective))
with open(out_path, "wb") as fp:
for chunk in qlog.iter_content(1024):
fp.write(chunk)
logger.info("QLOG for test [{}] between server [{}] and client [{}] saved to [{}].".format(interop_test, s, c, out_path))
break
except (TypeError, JSONDecodeError, requests.HTTPError, ValueError):
logger.warning("No QLOG results found for test [{}] between server [{}] and client [{}].".format(interop_test, s, c))
if __name__ == "__main__":
if args.v:
logger.setLevel(logging.DEBUG)
if args.t and args.u:
logger.error("Cannot fetch all server and client runs if none were set. Args -u and -t cannot be combined.")
sys.exit()
run = "latest"
if args.p:
run = select_interop_run()
logger.info("Collecting information for run [{}].".format(run))
server, client, implementations = check_selected_implementations()
if args.u:
logger.info("Collecting all interop runs for server [{}]".format(server))
elif args.t:
logger.info("Collecting all interop runs for client [{}]".format(client))
else:
logger.info("Collecting interop runs between server [{}] and client [{}]".format(server, client))
outdir = check_output_dir()
logger.info("Output directory set to [{}]".format(outdir))
interop_test = select_interop_test()
logger.info("Results from interop test [{}] will be collected.".format(interop_test))
logger.info("Starting crawl...")
crawl(run, server, client, implementations, interop_test, outdir)
logger.info("Finished crawling, results can now be found in [{}]!".format(outdir))
``` |
{
"source": "JorisHerbots/iwevent_monitor",
"score": 3
} |
#### File: iwevent_monitor/iwevent_monitor/monitor.py
```python
import subprocess
import threading
import enum
class IweventNotInstalledException(Exception):
"""Exception thrown when iwevent is not installed on the host OS"""
pass
class UnsupportedEvent(Exception):
"""Exception thrown when trying to register a method to an unsupported iwevent"""
pass
class UncleanShutdownException(Exception):
"""Raised when the IweventMonitor.stop() method cannot cleanly halt the monitor thread"""
pass
class Iwevents(enum.Enum):
"""Currently supported list of events from iwevent"""
ASSOCIATION_NEW = 0
ASSOCIATION_LOST = 1
@classmethod
def check_value_existence(cls, enum_value):
values = [item.value for item in cls]
if enum_value not in values:
raise ValueError("Unknown value [{}]".format(enum_value))
class IweventMonitor:
"""iwevent monitor tool
Enables event driven code based on occurrences from wireless network interfaces
    Upon creation, the monitoring process starts automatically in a separate thread.
    Methods are (dynamically) added to the monitor tool through the supported decorators/method:
    association_new_event()
    association_lost_event()
    register_method_for_event(Iwevents value, method)
    An IweventMonitor object needs to be cleaned up with its builtin stop() method.
:param use_threading: Use threads for running event methods
:param daemonized_threads: Spawn threads as daemons
"""
def __init__(self, use_threading=True, daemonized_threads=False):
        self.__check_iwevent_presence()
        self.iwevent_process = None
        self.__use_threading = use_threading
        self.__daemonized_threads = daemonized_threads
        self.__threads = []
        self.connected_methods = {}
        for event in list(Iwevents):
            self.connected_methods[event.value] = []
        # Start the parser thread only after all attributes exist; starting it earlier
        # races with __iwevent_parser, which reads these attributes immediately.
        self.monitor_thread = threading.Thread(target=self.__iwevent_parser)
        self.monitor_thread.start()
def __check_iwevent_presence(self):
process = subprocess.run(['which', 'iwevent'], stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)
        if process.returncode != 0:
raise IweventNotInstalledException()
def __start_method(self, method):
if not self.__use_threading:
return method()
else:
t = threading.Thread(target=method, daemon=self.__daemonized_threads)
            if not self.__daemonized_threads:
                # Keep track of non-daemon threads so stop() can join them later.
                self.__threads.append(t)
t.start()
def __process_single_event(self, data):
data = data.lower()
if "new access point/cell" not in data:
return
if len(self.connected_methods[Iwevents.ASSOCIATION_NEW.value]) > 0 and "not-associated" not in data:
for method in self.connected_methods[Iwevents.ASSOCIATION_NEW.value]:
self.__start_method(method)
if len(self.connected_methods[Iwevents.ASSOCIATION_LOST.value]) > 0 and "not-associated" in data:
for method in self.connected_methods[Iwevents.ASSOCIATION_LOST.value]:
self.__start_method(method)
def __iwevent_parser(self):
self.iwevent_process = subprocess.Popen(['iwevent'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
while True:
line = self.iwevent_process.stdout.readline()
            if line.decode("utf-8") == "" or self.iwevent_process.poll() is not None:
break
self.__process_single_event(line.decode("utf-8"))
self.iwevent_process.wait()
def stop(self):
"""Stop the monitor
:raises: UncleanShutdownException when the monitor thread cannot be killed.
"""
if self.iwevent_process:
try:
self.iwevent_process.kill()
except ProcessLookupError:
pass # Silently ignore this as the process is simply dead already
if self.monitor_thread:
self.monitor_thread.join(timeout=5)
if self.monitor_thread.is_alive(): # 5sec timeout
raise UncleanShutdownException("Could not stop iwevent monitor thread.")
if not self.__daemonized_threads:
for t in self.__threads:
t.join()
def association_new_event(self):
"""Decorator for new association events"""
def decorator(f):
self.register_method_for_event(Iwevents.ASSOCIATION_NEW, f)
return f
return decorator
def association_lost_event(self):
"""Decorator for lost association events"""
def decorator(f):
self.register_method_for_event(Iwevents.ASSOCIATION_LOST, f)
return f
return decorator
def register_method_for_event(self, event, method):
"""Register a method for a given event
IweventMonitor will execute all linked methods upon receiving the corresponding event
:param event: Iwevents enum value (it is advised to use Iwevents directly
:param method: Method to call
:raises: UnsupportedEvent whenever a wrong event type is given
"""
        if isinstance(event, Iwevents):
            event = event.value
        else:
            try:
                Iwevents.check_value_existence(event)
            except ValueError:
                raise UnsupportedEvent("Event [{}] unknown.".format(event))
        self.connected_methods[event].append(method)
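if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): requires iwevent on the host and a
    # wireless interface; the handler names below are illustrative only.
    monitor = IweventMonitor(use_threading=True, daemonized_threads=False)
    @monitor.association_new_event()
    def on_connect():
        print("Associated with an access point")
    @monitor.association_lost_event()
    def on_disconnect():
        print("Association lost")
    try:
        input("Monitoring wireless events, press enter to stop...\n")
    finally:
        monitor.stop()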
``` |
{
"source": "JorisHerbots/niip_iot_zombie_apocalypse",
"score": 3
} |
#### File: niip_iot_zombie_apocalypse/network_core/usms.py
```python
__usms_chars = [None]
__usms_chars += [chr(i) for i in range(ord("a"), ord("z") + 1)]
__usms_chars += [chr(i) for i in range(ord("0"), ord("9") + 1)]
__usms_chars += list(",?;.:/\\()[]!&|@#'\"%*-_+=<> ")
if len(__usms_chars) > 2**6:
raise RuntimeError("USMS system dictionary contains more characters than its 6bit encoding system can support!")
class UsmsCharacterOutOfRange(Exception):
pass
def print_usms_table():
"""Pretty print all possible characters in our 6bit USMS alphabet"""
print("+-----+------+")
print("| DEC | CHAR |")
print("+=====+======+")
for i in range(1, len(__usms_chars)):
print("| {}{} | {} |".format("" if i > 9 else " ", i, __usms_chars[i]))
print("+-----+------+")
def bytes_to_ascii(bytestring):
"""Decode a 6-bit USMS bytestring to ASCII string representation
:param bytestring: 6bit encoded USMS bytestring (with end-padding)
:return: ASCII string
"""
pattern = [(2, 3), (4, 15), (6, 63)] # (bits to shift, rest bit pattern)
pattern_index = 0
ascii_output = []
rest_bits = 0
for byte in bytestring:
six_bit_int_rep = (byte >> pattern[pattern_index][0]) | (rest_bits << (8 - pattern[pattern_index][0]))
rest_bits = byte & pattern[pattern_index][1]
if six_bit_int_rep not in range(0, len(__usms_chars)):
raise UsmsCharacterOutOfRange("Unknown character index [{}]".format(str(six_bit_int_rep)))
if __usms_chars[six_bit_int_rep] is not None:
ascii_output.append(__usms_chars[six_bit_int_rep])
if pattern_index == 2 and __usms_chars[rest_bits] is not None:
if rest_bits not in range(0, len(__usms_chars)):
raise UsmsCharacterOutOfRange("Unknown character index [{}]".format(str(rest_bits)))
ascii_output.append(__usms_chars[rest_bits])
pattern_index = (pattern_index + 1) % 3
return "".join(ascii_output)
def ascii_to_bytes(asciistring):
"""Encode an ASCII string to a 6bit encoded USMS bytestring with padding
:param asciistring: ASCII string
:return: 6bit encoded USMS bytestring
"""
    byte_output = []
    pattern = [(2, 3, 4), (4, 15, 2), (6, 0, 0)]  # (bits to shift, rest bit pattern, next char shift)
    pattern_index = 0
    i = 0
    while i < len(asciistring):
        int_rep = __usms_chars.index(asciistring[i]) << pattern[pattern_index][0] & 255
        if pattern_index < 2:
            next_int_rest = ((__usms_chars.index(asciistring[i + 1]) >> pattern[pattern_index][2]) if (i + 1) < len(asciistring) else 0) & pattern[pattern_index][1]
            int_rep |= next_int_rest
        else:
            # Every third byte packs the last 2 bits of the current character together with the
            # full 6 bits of the next character, so that character is consumed here and skipped.
            if (i + 1) < len(asciistring):
                int_rep |= __usms_chars.index(asciistring[i + 1])
            i += 1
        byte_output.append(int_rep)
        pattern_index = (pattern_index + 1) % 3
        i += 1
    return bytes(byte_output)
if __name__ == "__main__":
print_usms_table()
print(ascii_to_bytes("abcdefghijklmnopqrstuvwxyz"))
print(bytes_to_ascii(b'\x04 \xc0\x10Q\x80\x1c\x82@(\xb3\x004\xe3\xc0A\x14\x80ME@Yv\x00e\xa0'))
print(ascii_to_bytes("eeeeee"))
print(bytes_to_ascii(b'\x14Q@\x14Q@'))
```
#### File: niip_iot_zombie_apocalypse/sensor_core/sleep.py
```python
import machine
import pycom
import utime
from exceptions import Exceptions
class Sleep:
@property
def wakeReason(self):
return machine.wake_reason()[0]
@property
def wakePins(self):
return machine.wake_reason()[1]
@property
def powerOnWake(self):
return self.wakeReason == machine.PWRON_WAKE
@property
def pinWake(self):
return self.wakeReason == machine.PIN_WAKE
@property
def RTCWake(self):
return self.wakeReason == machine.RTC_WAKE
@property
def ULPWake(self):
return self.wakeReason == machine.ULP_WAKE
@property
def isSleepWake(self):
return self.pinWake or self.RTCWake or self.ULPWake
@property
def activeTime(self):
return self.__activeTime + utime.ticks_diff(utime.ticks_ms(), self.__activityStart)
@property
def inactiveTime(self):
return self.__inactiveTime
ACTIVE_TIME_KEY = 'activeTime'
INACTIVE_TIME_KEY = 'inactiveTime'
SLEEP_TIME_KEY = 'sleepTime'
def __init__(self):
self.__activityStart = utime.ticks_ms()
self.__initPersistentVariable(Sleep.ACTIVE_TIME_KEY)
self.__initPersistentVariable(Sleep.INACTIVE_TIME_KEY)
if not self.powerOnWake:
sleptTime = pycom.nvs_get(Sleep.SLEEP_TIME_KEY) - machine.remaining_sleep_time()
pycom.nvs_set(Sleep.INACTIVE_TIME_KEY, pycom.nvs_get(Sleep.INACTIVE_TIME_KEY) + sleptTime)
self.__activeTime = pycom.nvs_get(Sleep.ACTIVE_TIME_KEY)
self.__inactiveTime = pycom.nvs_get(Sleep.INACTIVE_TIME_KEY)
self.__wakeUpPins = []
def __initPersistentVariable(self, key, value=0):
        if pycom.nvs_get(key) is None:
pycom.nvs_set(key, value)
def addWakeUpPin(self, pin):
# P2, P3, P4, P6, P8 to P10 and P13 to P23
if isinstance(pin, list):
self.__wakeUpPins.extend(pin)
else:
self.__wakeUpPins.append(pin)
try:
machine.pin_sleep_wakeup(self.__wakeUpPins, mode=machine.WAKEUP_ANY_HIGH, enable_pull=True)
except Exception as e:
Exceptions.error(Exception('Sleep not available: ' + str(e)))
def resetTimers(self):
pycom.nvs_set(Sleep.ACTIVE_TIME_KEY, 0)
pycom.nvs_set(Sleep.INACTIVE_TIME_KEY, 0)
def sleep(self, milliseconds=0):
if milliseconds == 0:
milliseconds = 604800000 # 1 week
pycom.nvs_set(Sleep.SLEEP_TIME_KEY, milliseconds)
        pycom.nvs_set(Sleep.ACTIVE_TIME_KEY, self.activeTime)  # activeTime already includes the time since wake-up
try:
machine.deepsleep(milliseconds)
except Exception as e:
Exceptions.error(Exception('Deepsleep not available: ' + str(e)))
def delay(self, milliseconds):
utime.sleep_ms(milliseconds)
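# Illustrative usage sketch (assumption: running on a Pycom board where the machine/pycom modules
# exist); the pin names and sleep duration are placeholders:
#   sleep_manager = Sleep()
#   sleep_manager.addWakeUpPin(['P13', 'P14'])   # wake up when one of these pins goes high
#   if sleep_manager.pinWake:
#       pass                                     # react to the pin that woke the device
#   sleep_manager.sleep(60 * 1000)               # deep sleep for at most one minute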
```
#### File: niip_iot_zombie_apocalypse/utilities/volatileconfiguration.py
```python
import json
import os
class ConfigurationNotValid(Exception):
pass
class VolatileConfiguration:
"""Volatile Configuration
Provides volatile global configuration options to all libraries and code
This class can be used in a static or instantiated manner. Creating an instance will link
all set configs to the instance itself. Utilising the class directly (static) will result
in the usage of the global volatile configuration.
"""
_configuration = {}
    def __init__(self, config=None):
"""Constructor
:param config: configuration to initialise the class instance with, defaults to None
:type config: dict, optional
:raises TypeError: When the given config is not a dictionary
"""
if config:
if not isinstance(config, dict):
raise TypeError("config parameter needs to be a dictionary | Given {}".format(config))
            self._configuration = config.copy() # Shallow copy, do we need a deep copy??
@classmethod
def get_full_configuration(cls):
"""Retrieve the full configuration dictionary (copy)
:return: copy of the dictionary
:rtype: dict
"""
return cls._configuration.copy()
@classmethod
def load_configuration_from_datastore(cls, name):
"""Load a configuration from file
        :param name: Configuration name
:type name: str
:raises ConfigurationNotValid: When the config contents are not valid
:raises ConfigurationNotValid: When the config does not exist
"""
try:
with open("/flash/datastore/{}.json".format(name), "r") as fp:
cls._configuration.update(json.load(fp))
except ValueError:
raise ConfigurationNotValid("Configuration is not a valid JSON")
except: # IOError does not exist in µpython?
raise ConfigurationNotValid("Configuration file is non existing? [{}]".format(name))
@classmethod
def save_configuration_to_datastore(cls, name):
to_save_data = {}
for key in cls._configuration:
if cls._configuration[key]["can_be_saved"]:
to_save_data[key] = cls._configuration[key]
if not to_save_data:
return
try:
with open("/flash/datastore/{}.json".format(name), "w") as fp:
json.dump(to_save_data, fp)
except ValueError as e:
raise ConfigurationNotValid("Configuration could not be serialized to JSON | [{}]".format(str(e)))
except:
raise ConfigurationNotValid("Could not create configuration datastore object? [{}]".format(name))
@staticmethod
def clean_configuration_from_datastore(name):
try:
os.remove("/flash/datastore/{}.json".format(name))
except OSError:
raise ConfigurationNotValid("Datastore config [{}] non existant.".format(name))
@classmethod
def set(cls, key, value, can_be_saved=True, overwrite=True):
"""Set a configuration item
All keys are by default lowercase
:param str key:
:param value:
:param bool can_be_saved: Only keys marked with this get saved to the datastore upon request
:param bool overwrite: Will overwrite the key if it already exists
:raises TypeError: When the key is not a string
"""
if not isinstance(key, str):
raise TypeError("Key has to be of 'str' type | Given [{}]".format(type(key)))
        key = key.lower()
        if (key in cls._configuration and overwrite) or key not in cls._configuration:
            cls._configuration[key] = {"value": value, "can_be_saved": can_be_saved}
@classmethod
def get(cls, key, default=None):
"""Retrieve a configuration item
All keys are by default lowercase
:param str key:
:param default: A default value to return; default None
:raises TypeError: When the key is not a string
"""
if not isinstance(key, str):
raise TypeError("Key has to be of 'str' type | Given [{}]".format(type(key)))
        key = key.lower()
        return cls._configuration[key]["value"] if key in cls._configuration else default
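# Illustrative usage sketch (assumption: a /flash/datastore directory exists, as on the Pycom flash
# filesystem this module targets); the key and datastore names are placeholders:
#   VolatileConfiguration.set("device_name", "node_1")                      # persisted on save
#   VolatileConfiguration.set("session_token", "abc", can_be_saved=False)   # volatile only
#   print(VolatileConfiguration.get("device_name", default="unknown"))
#   VolatileConfiguration.save_configuration_to_datastore("main")
#   VolatileConfiguration.load_configuration_from_datastore("main")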
``` |
{
"source": "jorism1993/nvidia_amd_gpu_comparison",
"score": 2
} |
#### File: jorism1993/nvidia_amd_gpu_comparison/options.py
```python
import os
import argparse
import torch
import time
import random
def get_options():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# Dataset options
    parser.add_argument('--num_images_train', default=int(1e5), type=int, help='Number of images per dataset')
# Basic training options
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--epochs', type=int, default=10, help='Number of epochs during training')
parser.add_argument('--skip_batches', type=int, default=5, help='Number of batches to skip for logging')
parser.add_argument('--skip_epoch', type=int, default=1, help='Number of epochs to skip for logging')
# Model and image options
parser.add_argument('--model_type', type=str, default='resnet101', help='Type of model to use')
parser.add_argument('--img_size', type=int, default=299, help='Input image size')
# Optimizer, learning rate (scheduler) and early stopping options
parser.add_argument('--eps', type=float, default=1e-8, help='Epsilon for Adam optimizer')
parser.add_argument('--lr', type=float, default=0.0002)
parser.add_argument('--weight_decay', type=float, default=1e-5, help='Weight decay for gradient updates')
# Cuda options
parser.add_argument('--no_cuda', action='store_true', help='Use this to train without cuda enabled')
parser.add_argument('--cuda_devices', nargs='+', default=[0])
parser.add_argument('--mixed_precision', action='store_true', help='Use mixed precision training')
# Logging options
parser.add_argument('--run_name', default='run', help='Name to identify the run')
parser.add_argument('--output_dir', default='outputs', help='Directory to write output files to')
# Misc
parser.add_argument('--seed', type=int, default=random.randint(0, 99999), help='Random seed to use')
parser.add_argument('--num_workers', type=int, default=12, help='Number of workers for dataloader')
opts = parser.parse_args()
opts.use_cuda = torch.cuda.is_available() and not opts.no_cuda
opts.cuda_devices = list(sorted([int(i) for i in opts.cuda_devices]))
opts.device = f"cuda:{opts.cuda_devices[0]}" if opts.use_cuda else "cpu"
if not opts.device == 'cpu':
torch.cuda.set_device(opts.device)
opts.run_name = "{}_{}".format(opts.run_name, time.strftime("%Y%m%dT%H%M%S"))
opts.output_dir = os.path.join(opts.output_dir, opts.model_type, opts.run_name)
os.makedirs(opts.output_dir, exist_ok=True)
return opts
```
#### File: jorism1993/nvidia_amd_gpu_comparison/utils.py
```python
import torch
import torchvision.models as tv_models
from efficientnet_pytorch import EfficientNet
def move_to(var, device):
if var is None:
return None
elif isinstance(var, dict):
return {k: move_to(v, device) for k, v in var.items()}
elif isinstance(var, list):
return [move_to(k, device) for k in var]
    elif isinstance(var, tuple):
        return tuple(move_to(k, device) for k in var)
return var.to(device)
def get_model(opts):
if 'resnet' in opts.model_type or 'resnext' in opts.model_type:
model_func = getattr(tv_models, opts.model_type)
model = model_func()
elif opts.model_type == 'inception_v3':
model = tv_models.inception_v3(aux_logits=False)
elif 'efficientnet' in opts.model_type:
model = EfficientNet.from_pretrained(opts.model_type, image_size=opts.img_size)
else:
raise NotImplementedError('Invalid model type')
return model
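# Illustrative usage sketch (assumption: options.py from this repo is importable as `options` and
# get_options() has resolved the device):
#   from options import get_options
#   opts = get_options()
#   model = get_model(opts).to(opts.device)
#   images = torch.rand(opts.batch_size, 3, opts.img_size, opts.img_size)
#   images = move_to(images, opts.device)
#   output = model(images)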
``` |
{
"source": "JorisMeertBambrugge/trix",
"score": 3
} |
#### File: JorisMeertBambrugge/trix/stocks5_app.py
```python
import yfinance as yf
import pandas as pd
import random
import numpy as np
import os
import sys
from bokeh.plotting import figure,curdoc
from bokeh.io import show,reset_output,output_file
from bokeh.models import Column,Row,ColumnDataSource,LinearAxis, Range1d, Band,Div,Quad,TextInput,DatePicker
from bokeh.palettes import Spectral11
from bokeh.models import HoverTool
from datetime import datetime,date
###############################################################################
#####################HELP FUNCTIONS############################################
###############################################################################
def crossing(a,b):
""" returns the crossing indexes of two list-like series """
    if isinstance(a,(list,pd.Series)):
        a = np.array(a)
    if isinstance(b,(list,pd.Series)):
        b = np.array(b)
crossingIndexes=np.where(np.diff(np.signbit(a-b)))[0]
return crossingIndexes+1
# =============================================================================
# a = [-2, -1, 0, 1, 2,1,0,-1]
# b = pd.Series(data=[5,4,3,2,1,1,1,1])
# c=a-b
# print(c)
# crossingIndexes = crossing(a,b)
# print(crossingIndexes)
# =============================================================================
#a function that scrapes the dividend history from yahoo
def get_dividend(name,start='2000-01-01'):
ticker1_obj = yf.Ticker(name)
dividendList = list(ticker1_obj.dividends[start:])
dividendDateList = list(ticker1_obj.dividends[start:].index)
return {'date':dividendDateList,'dividend':dividendList}
# =============================================================================
# dividendDict=get_dividend('TUB.BR',start)#scrape the dividend data from the yahoo website
# print(dividendDict)
# =============================================================================
def fill_missing_range(df, field, range_from, range_to, range_step=1, fill_with=0):
"""Function to transform a dataframe with missing rows because one column should be a stepwise range"""
return df\
.merge(how='right', on=field,
right = pd.DataFrame({field:np.arange(range_from, range_to, range_step)}))\
.sort_values(by=field).reset_index().fillna(fill_with).drop(['index'], axis=1)
def createDivPlot(dividendDict,data,startDate):
dividendDF=pd.DataFrame(dividendDict)
dividendDF['year']=dividendDF['date'].dt.year+1#create a year column
dividendDF["yearDiv"] = dividendDF.groupby(["year"])["dividend"].transform(sum)#sum by year
dividendDF['SP']=[data.loc[date]["Close"] for date in dividendDF['date']]
dividendDF['divPercent']=[100*div/tub for div,tub in zip(dividendDF['dividend'],dividendDF["SP"])]
dividendDF=dividendDF[['date','year','yearDiv','divPercent']]#keep only what matters
dividendDF.columns=['date','year','dividend','divPercent']#rename
dividendDF = dividendDF.drop_duplicates(subset=['year'], keep='first')#drop duplicates
dividendDF=fill_missing_range(dividendDF, 'year', datetime.today().year, startDate.year, range_step=-1, fill_with=0)#add a row with zero for each year where there was no dividend given
dividendDF['date']=pd.to_datetime(dividendDF['year'].astype(str)+"-01-01",format="%Y-%m-%d",errors='raise')
if dividendDict['dividend']!=[]:
dividendSource = ColumnDataSource(data=dividendDict)
dividendDFSource = ColumnDataSource(data=dividendDF)
hover = HoverTool(tooltips=[("date","@date{%m/%d/%Y}"),("dividend","@dividend")],formatters={'@date': 'datetime'})
tools=['pan','box_zoom','wheel_zoom',hover,'reset']
divPlot=figure(width=1200,height=400,title='Historical dividend - from Yahoo Finance',x_axis_type='datetime',y_axis_label='Dividend',
y_range=(0,1.05*max(max(dividendDF['divPercent']),max(dividendDF['dividend']))),tools=tools)
divPlot.scatter(x='date',y='dividend',line_color="red",fill_color='red',size=10,alpha=0.8,name='dividend',source=dividendSource,legend_label='Dividend coupons')
divPlot.step(x='date',y='dividend',line_color="green",line_width=3,alpha=0.5,source=dividendDFSource,legend_label='Total dividend/year')
divPlot.step(x='date',y='divPercent',line_color="blue",line_width=3,alpha=0.5,source=dividendDFSource,legend_label='Dividend%')
divPlot.legend.location = "top_left"
return divPlot
else:
print(f'no dividend since {startDate.year}!')
divPlot=Div(text="no dividend since 2000! - Yahoo Finance")
return divPlot
def createBoxPlot(Filter,yAxisFilter,source,title='Boxplot',width=1400):
df=pd.DataFrame(source.data)
# generate the category list
catsColumn=list(source.data[Filter])
cats =sorted(set(catsColumn))
#get the x-axis for the dots and create jitter effect
x_axis_value=[0.5]#bokeh plots categories on x-axis like this: 0.5,1.5,2.5,..
for x in range (1,len(cats)):
x_axis_value.append(x_axis_value[-1]+1)#make a list of the different category x-axis values
x_axis=[]
for x in catsColumn:
index=cats.index(x)
x_axis.append(x_axis_value[index]+random.uniform(-0.3,0.3))#make a jitter around the x-axis value of the catergory for each datapoint
source.add(x_axis,'categorical_x_axis_value')#add a column to the datasource with the Jitter values
# find the quartiles and IQR for each category
groups = df.groupby(Filter)
q1 = groups.quantile(q=0.25)
q2 = groups.quantile(q=0.5)
q3 = groups.quantile(q=0.75)
iqr = q3 - q1
upper = q3 + 1.5*iqr
lower = q1 - 1.5*iqr
TOOLS="pan,wheel_zoom,lasso_select,reset,save"
p = figure(tools=TOOLS, title=title, x_range=cats,width=width)
# if no outliers, shrink lengths of stems to be no longer than the minimums or maximums
qmin = groups.quantile(q=0.00)
qmax = groups.quantile(q=1.00)
upperStem = [min([x,y]) for (x,y) in zip(list(qmax.loc[:,yAxisFilter]),upper[yAxisFilter])]
lowerStem = [max([x,y]) for (x,y) in zip(list(qmin.loc[:,yAxisFilter]),lower[yAxisFilter])]
# stems
p.segment(cats, upperStem, cats, q3[yAxisFilter], line_color="black")
p.segment(cats, lowerStem, cats, q1[yAxisFilter], line_color="black")
    #create the boxes
def createColorList(number=11):#create a color list for each category
colorList=[]
for x in range(0,number):
colorList.append(Spectral11[x%11])
return colorList
colorList=createColorList(number=len(cats))
p.vbar(x=cats, width=0.7, bottom=q2[yAxisFilter], top=q3[yAxisFilter], line_color="black",color=colorList)
p.vbar(cats, 0.7, q1[yAxisFilter], q2[yAxisFilter], line_color="black",color=colorList)
#add data points
#p.circle(source=source,x=Filter, y=yAxisFilter,size=5,color='black',alpha=0.3)
p.circle(source=source,x='categorical_x_axis_value', y=yAxisFilter,size=5,line_color='black',fill_alpha=0)#with Jitter and via source
# whiskers (almost-0 height rects simpler than segments)
whiskerHeight=(max(qmax[yAxisFilter])-min(qmin[yAxisFilter]))/1000
p.rect(x=cats, y=lowerStem, width=0.2, height=whiskerHeight, line_color="black",fill_color="black")
p.rect(x=cats, y=upperStem, width=0.2, height=whiskerHeight, line_color="black",fill_color="black")
return p
#get the index of the next value in a list equal to the value at startIndex
def findNextIndexOf(startIndex,timeList):
    for offset, time in enumerate(timeList[startIndex+1:]):
        if timeList[startIndex]==time:
            #print(str(timeList[startIndex])+' is found at index '+str(startIndex+1+offset))
            return startIndex+1+offset
    return False
#find the previous index in timeList with value equal to the startIndex. StartIndex needs to be larger than 0
def findPreviousIndexOf(startIndex,timeList):
if startIndex<len(timeList):
for i in range(startIndex-1,-1,-1):
if timeList[i]==timeList[startIndex]:
break
else:
i=False
else:
i=False
return(i)
#calculate the relative difference compared to the week average for a chronological list of values and a list of weekdays with monday=0,tuesday=1,...
def getTimeVariability(timeList,values,intervalSize):
averageList=[1]*(intervalSize-1)#skip the first
for i in range(intervalSize,len(values)-intervalSize+2):
beforeStartIndex=findPreviousIndexOf(i,timeList)
afterEndIndex=findNextIndexOf(i,timeList)
intervalListBefore=values[beforeStartIndex:i-1]
intervalListAfter=values[i:afterEndIndex-1]
intervalListBefore=values[i-intervalSize:i-1]
avg=(sum(intervalListBefore)+sum(intervalListAfter))/(len(intervalListAfter)+len(intervalListBefore))
#print('the value at index '+str(i-1)+' is '+str(values[i-1]))
averageList.append(values[i-1]/avg)
for i in range(len(values)-intervalSize+1,len(values)):#skipt the last
averageList.append(1)
return averageList
values=[3,4,5,6,7,8,9,10,9,8]
timeList=[3,4,0,1,2,3,4,0,1,2]
intervalSize=5
#print(getTimeVariability(timeList,values,intervalSize))
#calculate a trailing average of the previous sizeNumber values for each position in the list
def getAverage(valuesList,sizeNumber):
averageList=[valuesList[0]]
for i in range(1,len(valuesList)):
sizeList=valuesList[max(0,i-sizeNumber):min(len(valuesList),i)]
averageList.append(sum(sizeList)/max(len(sizeList),1))
return averageList
#apply a certain buy/sell strategy on historical data
# meDirect transactie tarieven per beurs https://www.medirect.be/nl-be/tarieven-en-kosten/transactietarieven
def strategy(buySellList,sharePrice,trafficTax=0.0035,tafficCost=0.001):
"""
    Function that returns the % of profit for a given buy-sell strategy
buySellList = list of tuple indexes with (buyIndex,sellIndex)
sharePrice = list of sharePrice over time
"""
profitPercent=100
buyValue=1
for buySell in buySellList:
buyValue=round(sharePrice[buySell[0]],2)*(1+trafficTax)*(1+tafficCost)
sellValue=round(sharePrice[buySell[1]],2)*(1-trafficTax)*(1-tafficCost)
profitPercent=profitPercent*sellValue/buyValue
return round(profitPercent-100,2)
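# Quick numeric check of the cost model above (illustrative): with sharePrice=[100, 110] and a single
# (buy, sell) pair (0, 1), the return is 100*(110*0.9965*0.999)/(100*1.0035*1.001)-100, so
# strategy([(0, 1)], [100, 110]) returns 9.01: a 10% price move nets about 9.01% after tax and broker costs.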
def findBuy(i,trix,EMA_on_Trix,EMA,sharePrice):
if trix[i]>EMA_on_Trix[i]:
for index in range(i,len(sharePrice)):
if trix[index]>0:
#print("trix>0")
#print(trix[index],EMA_on_Trix[index],EMA[index],sharePrice[index])
return None
elif trix[index]<0 and sharePrice[index]>EMA[index]:
#print(trix[index],EMA_on_Trix[index],EMA[index],sharePrice[index])
return index
return None
else:
return None
def findSell(i,trix,EMA_on_Trix):
for j in range(i,len(trix)):
if trix[j]<0 and trix[j]<EMA_on_Trix[j]:
return j
return None
###############################################################################
#####################BODY OF THE CODE##########################################
###############################################################################
def createView(symbol,startDate,EMA_days=200,Trix_EMA_days=39,EMA_on_Trix_days=9):
start=startDate.strftime("%Y-%m-%d") #e.g. '2017-01-01'
data=yf.download(symbol,start=start)
#print(data.keys())
#get the x-axis values: datetime
timeList=data.index.values
data['date']=timeList
data[f"EMA_{EMA_days}"]=data['Close'].ewm(span=EMA_days, adjust=False).mean()#add an exponential moving average
#calculate the Tripe Exponential Average, Trix see https://www.investopedia.com/terms/t/trix.asp
data['ema1']=data['Close'].ewm(span=Trix_EMA_days, adjust=False).mean()
data['ema2']=data['ema1'].ewm(span=Trix_EMA_days, adjust=False).mean()
data['ema3']=data['ema2'].ewm(span=Trix_EMA_days, adjust=False).mean()
data['ema3_yesterday']=data['ema3'].shift(1)
data['trix']=100*(data['ema3']-data['ema3_yesterday'])/data['ema3_yesterday']
#data['trix']=3*data['ema1']-3*data['ema2']+data['ema3']#calculate the trix, see https://en.wikipedia.org/wiki/Triple_exponential_moving_average
data['EMA_on_Trix']=data['trix'].ewm(span=EMA_on_Trix_days, adjust=False).mean()
data['zero']=0
crossIndexes=crossing(data['trix'],data['EMA_on_Trix'])#get the indexes when the trix and the ema(trix) cross
crossIndexes=[i for i in crossIndexes if i>EMA_days]#remove the indexes with data before the full EMA can be taken
#posCrossing=[i for i in crossIndexes if data['trix'][i]>data['EMA_on_Trix'][i] and data['trix'][i]<0 and data['Close'][i]>data[f"EMA_{EMA_days}"][i] ]
buySellList=[]
for i in crossIndexes[1:]:
#print("cross at ",timeList[i])
buy=findBuy(i,data['trix'],data['EMA_on_Trix'],data[f"EMA_{EMA_days}"],data['Close'])
#print("buy",buy)
if buy != None:
sell=findSell(buy,data['trix'],data['EMA_on_Trix'])
#print("sell",sell)
if sell != None:
buySellList.append((buy,sell))
else:
buySellList.append((buy,-1))
print(buySellList)
buySellList=list(dict.fromkeys(buySellList))
trixResult=strategy(buySellList,data['Close'],trafficTax=0.0035,tafficCost=0.001)
buyHoldResult=strategy([(0,-1)],data['Close'],trafficTax=0.0035,tafficCost=0.001)
resultDiv=Div(text=f"""
<p style="color:red;">Excluding dividends, these strategies would have resulted in a yield of:<br>
<b>Trix: {trixResult}%.<br>
Buy and hold: {buyHoldResult}%.<br></b></p>
Assumptions: a tax rate of 0.35% per transaction and a broker fee for 0.1% per transaction.
""")
################### PLOT THE STOCK PRICE WITH BUY AND SELL SIGNALS#########
yRangeMax=1.05*max(data['Close'])
stock_value=figure(height=350,width=1200,x_axis_type='datetime',title =f"{symbol} value (source=Yahoo finance)",
y_range=[0.95*min(data['Close']),yRangeMax])
stockSource=ColumnDataSource(data)
stock_value.line(source=stockSource,x='date',y='Close',color='black')#q line with the stock price
stock_value.line(source=stockSource,x='date',y=f"EMA_{EMA_days}",color='blue',legend_label=f"{EMA_days} days Exponential Moving Average")#200 days average
for buySell in buySellList:
stock_value.line(x=[timeList[buySell[0]],timeList[buySell[0]]],y=[0,yRangeMax],color='green')
if buySell[1] !=-1:
stock_value.line(x=[timeList[buySell[1]],timeList[buySell[1]]],y=[0,yRangeMax],color='red')
band = Quad(left=timeList[buySell[0]], right=timeList[buySell[1]], top=yRangeMax, bottom=0, fill_color="green",fill_alpha=0.1,line_width=0)
stock_value.add_glyph(band)
stock_value.legend.location = "bottom_left"
stock_value.legend.click_policy="hide"
################### A PLOT WITH THE TRIX AND ITS SIGNAL###################
signalPlot=figure(height=300,width=1200,x_axis_type='datetime',tools=['pan','box_zoom','wheel_zoom','reset'],y_range=(-0.3,0.4),x_range=stock_value.x_range,
title =f"{EMA_on_Trix_days} days EMA on Trix with {Trix_EMA_days} days")
signalPlot.line(timeList,data['trix'],color='blue',legend_label=f"{Trix_EMA_days} days Trix")#signal
signalPlot.line(timeList,data['EMA_on_Trix'],color='violet',legend_label=f"{EMA_on_Trix_days} days EMA on Trix")#signal
signalPlot.line(x=[timeList[0],timeList[-1]],y=[0,0],color='black',line_dash='dashed')#signal
for buySell in buySellList:
signalPlot.line(x=[timeList[buySell[0]],timeList[buySell[0]]],y=[-1,1],color='green')
if buySell[1] !=-1:
signalPlot.line(x=[timeList[buySell[1]],timeList[buySell[1]]],y=[-1,1],color='red')
band = Quad(left=timeList[buySell[0]], right=timeList[buySell[1]], top=1, bottom=-1, fill_color="green",fill_alpha=0.1,line_width=0)
stock_value.add_glyph(band)
############################ trading volume versus time###################
stock_volume=figure(height=300,width=1200,x_axis_type='datetime',x_range=stock_value.x_range,
title =f"{symbol} trading volume (source=Yahoo finance)")
stock_volume.vbar(x=timeList, top=data['Volume'], bottom=0, width=50000000, fill_color="#b3de69")
#######################DIVIDEND EVENTS#####################################
dividendDict=get_dividend(symbol,start=startDate)#scrape the dividend data from the yahoo website
if dividendDict['date']==[]:
dividendPlot=Div(text=f'<br>In this period, {symbol} did not pay any dividend.<br><br>',width=1200)
else:
dividendPlot=createDivPlot(dividendDict,data,startDate=startDate)
############## fluctuation depending on day of the week####################
dates = pd.DatetimeIndex(timeList) #convert to datetime format
weekdays = dates.weekday.values#get the weekdays (0=monday, 1=tuesday,...)
values=list(data['Open'])#get the values in a list
relToWeekAvg=getTimeVariability(timeList=list(weekdays),values=values,intervalSize=5)
weekdaysStrings=[]
for i in weekdays:
if i==0:
weekdaysStrings.append('1_Monday')
elif i==1:
weekdaysStrings.append('2_Tuesday')
elif i==2:
weekdaysStrings.append('3_Wednesday')
elif i==3:
weekdaysStrings.append('4_Thursday')
elif i==4:
weekdaysStrings.append('5_Friday')
elif i==5:
weekdaysStrings.append('6_Saturday')
elif i==6:
weekdaysStrings.append('7_Sunday')
sourceDays=ColumnDataSource({'ratio to week average':relToWeekAvg,'day of the week':weekdaysStrings})
weekdayBoxPlot=createBoxPlot(Filter='day of the week',yAxisFilter='ratio to week average',source=sourceDays,title='Variability depending on the day of the week',width=1200)
weekdayBoxPlot.y_range=Range1d(0.9,1.1)
################# fluctuation depending on month of the year ##############
months = dates.month.values#get the weekdays (0=monday, 1=tuesday,...)
values=list(data['Open'])#get the values in a list
relToYearAvg=getTimeVariability(timeList=list(months),values=values,intervalSize=12)
monthStrings=[]
for i in months:
if i==1:
monthStrings.append('01_Jan')
elif i==2:
monthStrings.append('02_Feb')
elif i==3:
monthStrings.append('03_Mar')
elif i==4:
monthStrings.append('04_Apr')
elif i==5:
monthStrings.append('05_May')
elif i==6:
monthStrings.append('06_Jun')
elif i==7:
monthStrings.append('07_Jul')
elif i==8:
monthStrings.append('08_Aug')
elif i==9:
monthStrings.append('09_Sep')
elif i==10:
monthStrings.append('10_Oct')
elif i==11:
monthStrings.append('11_Nov')
elif i==12:
monthStrings.append('12_Dec')
sourceMonth=ColumnDataSource({'ratio to year average':relToYearAvg,'month':monthStrings})
print(sourceMonth.data)
monthBoxPlot=createBoxPlot(Filter='month',yAxisFilter='ratio to year average',source=sourceMonth,title='Variability depending on the month of the year',width=1200)
monthBoxPlot.y_range=Range1d(0.8,1.2)
# =============================================================================
# ############## fluctuation depending on day of the month #################
# days = dates.day.values#get the weekdays (0=monday, 1=tuesday,...)
# values=list(data['Open'])#get the values in a list
# relToMonthAvg=getTimeVariability(timeList=list(days),values=values,intervalSize=27)#getWeekAverage(days,values,start=1)
# daysStrings=[str(i) if i>9 else '0'+str(i) for i in days]
#
# sourceMonth=ColumnDataSource({'ratio to month average':relToMonthAvg,'day':daysStrings})
# dayBoxPlot=createBoxPlot(Filter='day',yAxisFilter='ratio to month average',source=sourceMonth,title='Variability depending on the day of the month',width=1200)
#
# =============================================================================
################## PUT ALL TOGHETER ######################################
layout=Column(resultDiv,stock_value,signalPlot,stock_volume,dividendPlot,monthBoxPlot,weekdayBoxPlot)
return layout
###############################################################################
###################DATABASE####################################################
###############################################################################
#default settings
ticker='ABI.BR'
EMA_days='55'
Trix_EMA='39'
EMA_on_Trix='9'
startDate=date(2018,1,1)
####################VARIABLE INPUT BOXES######################################
def ticker_update(attr, old, new):
global ticker
ticker=new
updateCalback()
tickerInput=TextInput(value="ABI.BR", title="Yahoo Ticker Symbol:",width=200)
tickerInput.on_change("value", ticker_update)
def EMA_update(attr, old, new):
global EMA_days
try:
EMA_days=str(int(new))
except Exception as e:
print("exception: "+str(e)+" , in "+os.path.basename(__file__)+" on line "+str(sys.exc_info()[2].tb_lineno))
EMA_days='55'
EMA.value=EMA_days
updateCalback()
EMA=TextInput(value="55", title="Exponential moving average (days):",width=200)
EMA.on_change("value", EMA_update)
def Trix_EMA_update(attr, old, new):
global Trix_EMA
try:
Trix_EMA=str(int(new))
except Exception as e:
print("exception: "+str(e)+" , in "+os.path.basename(__file__)+" on line "+str(sys.exc_info()[2].tb_lineno))
Trix_EMA='39'
Trix_EMA_input.value=Trix_EMA
updateCalback()
Trix_EMA_input=TextInput(value="39", title="EMA in the Trix equation (days):",width=200)
Trix_EMA_input.on_change("value", Trix_EMA_update)
def EMA_on_Trix_update(attr, old, new):
global EMA_on_Trix
try:
EMA_on_Trix=str(int(new))
except Exception as e:
print("exception: "+str(e)+" , in "+os.path.basename(__file__)+" on line "+str(sys.exc_info()[2].tb_lineno))
EMA_on_Trix='9'
EMA_on_Trix_input.value=EMA_on_Trix
updateCalback()
EMA_on_Trix_input=TextInput(value="9", title="EMA applied on Trix (days):",width=200)
EMA_on_Trix_input.on_change("value", EMA_on_Trix_update)
datePicker=DatePicker(title='Select data start date:',value=startDate,min_date=date(2003,1,1),max_date=date.today(),width=200)
def dateChange(attr,old,new):
global startDate
print(new)
startDate=datetime.strptime(new,"%Y-%m-%d")
updateCalback()
datePicker.on_change('value',dateChange)
inputRow=Row(tickerInput,EMA,Trix_EMA_input,EMA_on_Trix_input,datePicker)
######EXPLANATION DIV#########################################################
infoDiv=Div(text="""
<h2>Stock autocorrelation analysis via Trix</h2>
This project is a data science and data visualization demo with python (bokeh), by <a href="mailto:<EMAIL>"><NAME></a><br>
This replicates the stock trading strategy as described by <b><NAME></b> in edition #3 of 2021 of "<NAME>" on page 71 (<a href="https://vfb.be/onlinemagazines" target="_blank">VFB</a>).<br>
The strategy consists of calculating a <a href="https://www.investopedia.com/terms/t/trix.asp" target="_blank">Triple Exponential Average</a> of 39 days, and a 9 day Exponential Average as a signal on that Trix. In addition to the Trix and its signal, Mr. Gins applies either a 200 or 55 day Exponential Moving Average on the stock price itself.<br>
The BUY strategy: Trix < 0 & Trix crosses EMA(Trix) upwards & SP > EMA(SP)<br>
The SELL strategy: Trix < 0 & Trix < EMA(Trix)<br>
<b>This result is compared with a buy-and-hold strategy. </b><br>
The raw stock price data is pulled from the Yahoo Finance API and the dividend data is scraped from Yahoo Finance.
""",width=1200,height=230)
animationDiv=Div(text="""<div class="loader">
<style scoped>
.loader {
width: 100px;
height: 100px;
background-color: red;
position: relative;
animation-name: example;
animation-duration: 4s;
animation-iteration-count: infinite;
}
@keyframes example {
0% {background-color:red; left:0px; top:0px;}
25% {background-color:yellow; left:1000px; top:0px;}
50% {background-color:blue; left:1000px; top:200px;}
75% {background-color:green; left:0px; top:200px;}
100% {background-color:red; left:0px; top:0px;}
}
</style></div>
""",width=1000,height=200)
def updateVisuals():
layout.children[-1]=createView(ticker, startDate=startDate,EMA_days=int(EMA_days),Trix_EMA_days=int(Trix_EMA),EMA_on_Trix_days=int(EMA_on_Trix))
def updateCalback():
layout.children[-1]=animationDiv
curdoc().add_next_tick_callback(updateVisuals)
######All togheter
layout=Column(infoDiv,inputRow,animationDiv)
def start():
graphs=createView(ticker, startDate=startDate,EMA_days=int(EMA_days),Trix_EMA_days=int(Trix_EMA),EMA_on_Trix_days=int(EMA_on_Trix))
layout.children[-1]=graphs
doc=curdoc()
doc.add_timeout_callback(start,500)#wait 500 ms before executing start
doc.add_root(layout)
doc.title="Trix"
show(layout)
``` |
{
"source": "Jorispilot/carddavclient",
"score": 3
} |
#### File: carddavclient/carddavclient/cmdline.py
```python
import argparse
import logging
from pathlib import Path
from io import StringIO
from sys import stdout
from .config import config
from .addressbook import CardDavAddressBook
__all__ = ["add_args", "process"]
def process(parser):
logger = logging.getLogger("CardDavClient")
args = parser.parse_args()
config_file = Path(args.config)
## Dump config, and exit.
if args.command == "dump-config":
dump_config(config_file, config)
return
## Read config, or create a new one.
if config_file.exists():
config.read(str(config_file))
logger.debug("Config file read: " + str(config_file))
config.check()
else:
logger.info("No config file found at " + str(config_file))
        do_dump = input("Should I dump one? [y/N] ").strip().lower().startswith("y")
        if do_dump:
dump_config(config_file, config)
return
##
if args.command == "get":
command_get(args, config)
##
if args.command == "info":
book = CardDavAddressBook(config)
book.start()
book.info(stdout)
##
if args.command == "mv":
command_mv(args, config)
##
if args.command == "put":
command_put(args, config)
##
if args.command == "rm":
command_rm(args, config)
##
if args.command == "print-config":
with StringIO() as buffer:
config.write(buffer)
buffer.seek(0)
print(buffer.read())
return
def add_args(parser):
parser.add_argument(
"--config", type=str, default="config", metavar="FILE",
help="Configuration file. (Default: config)")
subparsers = parser.add_subparsers(
dest="command", metavar="COMMAND", help="Command to execute")
subparser_dump = subparsers.add_parser(
'dump-config', help="Dump a default config file.")
subparser_get = subparsers.add_parser(
"get", help="Download vcards.")
subparser_get.add_argument("-a","--all",action="store_true",
help="Download ALL vcards.")
subparser_get.add_argument("-f","--force",action="store_true",
help="Force download. (Default: False)",
default=False)
subparser_get.add_argument("names",nargs="*",
help="List of vcard identifiers.")
subparser_info = subparsers.add_parser(
"info", help="Server information.")
subparser_mv = subparsers.add_parser(
"mv", help="Move vcards.")
subparser_mv.add_argument("orig", help="Vcard identifier.")
subparser_mv.add_argument("dest", help="Vcard identifier.")
subparser_print = subparsers.add_parser(
"print-config", help="Print config.")
subparser_put = subparsers.add_parser(
"put", help="Upload vcards.")
subparser_put.add_argument("-a","--all",action="store_true",
help="Upload ALL vcards.")
subparser_put.add_argument("-f","--force",action="store_true",
help="Force upload. (Default: False)",
default=False)
subparser_put.add_argument("names",nargs="*",
help="List of vcard identifiers.")
subparser_rm = subparsers.add_parser(
"rm", help="Remove vcards.")
subparser_rm.add_argument("-k","--keep-cache",action="store_true",
help="Keep cached vcards. (Default: False)",
default=False)
subparser_rm.add_argument("names",nargs="*",
help="List of vcards identifiers.")
def command_get(args, config):
book = CardDavAddressBook(config)
book.start()
if args.all:
get_list = book.propfind
else:
get_list = args.names
book.get(get_list, force=args.force)
def command_mv(args, config):
book = CardDavAddressBook(config)
book.start()
book.move(args.orig, args.dest)
def command_put(args, config):
book = CardDavAddressBook(config)
book.start()
if args.all:
put_list = book.cache
else:
put_list = args.names
book.put(put_list, force=args.force)
def command_rm(args, config):
book = CardDavAddressBook(config)
book.start()
book.delete(args.names, keep_cache=args.keep_cache)
def dump_config(config_file, config):
do_overwrite = False
if config_file.exists():
do_overwrite = input("The config file exists, really overwrite it? [y/N]")
        do_overwrite = do_overwrite.strip().lower().startswith("y")
else:
do_overwrite = True
if not config_file.exists():
config_file = Path("config")
if do_overwrite:
with config_file.open("w") as file:
config.write(file)
def read_config(config_file, config, logger):
if config_file.exists():
config.read(str(config_file))
logger.debug("Config file read: " + str(config_file))
else:
logger.info("No config file found at " + str(config_file))
``` |
{
"source": "Jorispilot/pycard",
"score": 3
} |
#### File: pycard/pycard/cmdline.py
```python
import argparse
import fileinput
from itertools import chain
from pathlib import Path
from .pycard import PyCard
__all__ = ["add_args", "process"]
def add_args(parser):
subparsers = parser.add_subparsers(
dest="command", metavar="COMMAND", help="Command to execute")
subparser_show = subparsers.add_parser(
'show', help="Show the content of files given as arguments.")
subparser_show.add_argument("filenames", metavar="filename",nargs="+",
help="Files containing vcards.")
def process(parser):
args = parser.parse_args()
if args.command == "show":
show(args)
def show(args):
filenames = fileinput.input(args.filenames)
for pyCard in PyCard.from_stream(filenames):
print(pyCard.format())
```
#### File: pycard/pycard/__main__.py
```python
import argparse
from .cmdline import add_args, process
def main():
parser = argparse.ArgumentParser()
add_args(parser)
process(parser)
if __name__ == "__main__":
main()
```
#### File: pycard/param/base.py
```python
params = dict()
class Param(object):
def __init__(self, name, value=None):
self._name = name
self._value = value
params[self.format()] = self
@property
def name(self):
return self._name
def format(self):
string = self.name
if self.value is not None:
string += "=" + self.value
return string
@property
def value(self):
return self._value
class UnknownParam(Param):
def __init__(self, name, value=None):
self._name = name
self._value = value
## Do not append the parameter to the list.
```
#### File: pycard/param/encodings.py
```python
import base64
import quopri
from .base import Param
__all__ = ["Base64Param", "QuotedPrintableParam"]
class EncodingParam(Param):
def __init__(self, value, decoder, encoder):
super().__init__("ENCODING", value)
self._decoder = decoder
self._encoder = encoder
def decode(self, text):
return self._decoder(text)
def encode(self, text):
return self._encoder(text)
Base64Param = EncodingParam(
"BASE64",
base64.standard_b64decode,
base64.standard_b64encode)
QuotedPrintableParam = EncodingParam(
"QUOTED-PRINTABLE",
quopri.decodestring,
quopri.encodestring)
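# Illustrative round trip for the codecs above (they operate on bytes, like the underlying
# base64/quopri helpers):
#   Base64Param.encode(b"hello")      # -> b'aGVsbG8='
#   Base64Param.decode(b'aGVsbG8=')   # -> b'hello'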
```
#### File: pycard/prop/common.py
```python
from ..param import from_string as param_from_string
from .base import ListProp, StringProp, props
__all__ = ["BeginProp", "CategoriesProp", "EndProp", "UnknownProp"]
class BeginProp(StringProp):
def __init__(self, value="VCARD", params=None, groups=None):
super().__init__(value, params, groups)
@property
def authorized_params(self):
return []
name = "BEGIN"
class CategoriesProp(ListProp):
name = "CATEGORIES"
sep = ","
class EndProp(StringProp):
def __init__(self, value="VCARD", params=None, groups=None):
super().__init__(value, params, groups)
name = "END"
class UnknownProp(StringProp):
"""A generic Vcard property for non-handled properties.
Any parameters are authorized.
"""
def __init__(self, name, value=None, params=None, groups=None):
self.name = name
super().__init__(value, params, groups)
authorized_params = None
@classmethod
def _check_params(cls, params):
## Allow any parameters, expecially unknown ones.
pass
@classmethod
def from_tuple(cls, tpl):
groups, name, params, value = tpl
params = [param_from_string(p) for p in params]
value = cls.from_tuple_value(value)
return cls(name, value, params, groups)
```
#### File: pycard/prop/name.py
```python
from collections import namedtuple
from ..tools.tools import *
from .base import StringProp, TupleProp
__all__ = ["FormattedName", "Name"]
NameParts = namedtuple("NameParts", ["familly_name", "given_name",
"additional_names",
"name_prefix",
"name_suffix"])
class FormattedName(StringProp):
name = "FN"
class Name(TupleProp):
def __init__(self, value=None, params=None, groups=None):
"""The first argument must be a tuple consisting of five elements:
(familly_name, given_name, additional_names, name_prefix,
name_suffix)
An empty string serves to indicate an empty element.
"""
value = NameParts(*value)
super().__init__(value, params, groups)
name = "N"
sep = ";"
```
#### File: pycard/tools/lexer.py
```python
import re
from .base import *
__all__ = ["escape_value", "logical_lines", "split_line", "split_list", "split_param"]
def escape_value(value):
    return value.replace(",", "\\,").replace(";", "\\;").replace(":", "\\:")
def logical_lines(stream):
"""Split the text stream agument to a sequence of logical (unfolded)
lines.
"""
lastline = next(stream)
for line in stream:
if re.match(ws, line):
## Continuation line.
lastline = lastline.rstrip(CRLF) + line.lstrip(SPACE + HTAB)
else:
## New logical line.
yield lastline
lastline = line
yield lastline
def split_line(line):
"""Split a logical line into the tuple:
name, [param0, param1, ...], value
"""
## To avoid matching escaped separators, use: "(?!\\\\):"
    ## but there should be no need to use it.
groups_name_params, value_crlf = re.split(":", line, 1)
groups_name_params = re.split(";", groups_name_params)
groups_name, params = groups_name_params[0], groups_name_params[1:]
    groups_name = re.split(r"\.", groups_name)
groups, name = groups_name[:-1], groups_name[-1]
value = value_crlf.rstrip(CRLF)
return groups, name, params, value
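# Worked example (illustrative): for the logical line 'item1.TEL;TYPE=CELL:+1-555-0100\r\n',
# split_line returns (['item1'], 'TEL', ['TYPE=CELL'], '+1-555-0100') -- the group prefix,
# the property name, its raw parameters and the value with the trailing CRLF stripped.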
def split_list(value, sep=","):
value = re.split("(?<!\\\\)" + re.escape(sep), value)
return [val.replace("\\" + sep, sep) for val in value]
def split_param(param):
return tuple(param.split("=", 1))
``` |
{
"source": "joris-pries/DutchDraw",
"score": 2
} |
#### File: DutchDraw/DutchDraw/DutchDraw.py
```python
import math
from functools import wraps
import numpy as np
from scipy.stats import hypergeom
from tqdm import tqdm
import time
import sys
__all__ = ['select_all_names_except', 'baseline_functions', 'baseline_functions_given_theta',
'measure_score', 'measure_dictionary', 'optimized_baseline_statistics',
'round_if_close', 'select_names', 'baseline', 'classifier']
# %%
measure_dictionary = {
'TP': ['TP'],
'TN': ['TN'],
'FP': ['FP'],
'FN': ['FN'],
'TPR': ['TPR'],
'TNR': ['TNR'],
'FPR': ['FPR'],
'FNR': ['FNR'],
'PPV': ['PPV'],
'NPV': ['NPV'],
'FDR': ['FDR'],
'FOR': ['FOR'],
'ACC': ['ACC', 'ACCURACY'],
'BACC': ['BACC', 'BALANCED ACCURACY'],
'FBETA': ['FBETA', 'FSCORE', 'F', 'F BETA', 'F BETA SCORE', 'FBETA SCORE'],
'MCC': ['MCC', 'MATTHEW', 'MATTHEWS CORRELATION COEFFICIENT'],
'J': ['BM', 'BOOKMAKER INFORMEDNESS', 'INFORMEDNESS',
'YOUDEN’S J STATISTIC', 'J'],
'MK': ['MARKEDNESS', 'MK'],
'KAPPA': ['COHEN', '<NAME>', 'KAPPA'],
'FM': ['GMEAN1', 'G MEAN 1', 'G1', 'FOWLKES-MALLOWS',
'FOWLKES MALLOWS', 'FOWLKES', 'MALLOWS', 'FM'],
'G2': ['GMEAN2', 'G MEAN 2', 'G2'],
'TS': ['THREAT SCORE', 'CRITICAL SUCCES INDEX', 'TS', 'CSI']
}
def select_names(name_keys):
"""
This function creates a list of names using the name_keys as keys for the name dictionary.
"""
return sum([measure_dictionary[key_name] for key_name in name_keys], [])
def select_all_names_except(name_keys):
"""
This function creates a list of all names, except the names with name_keys
as key in the name dictionary.
"""
return sum([list_names for key_name, list_names in measure_dictionary.items()
if key_name not in name_keys], [])
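# Illustrative examples of the two helpers above:
#   select_names(['ACC', 'TPR'])  # -> ['ACC', 'ACCURACY', 'TPR']
#   select_all_names_except(['TP', 'TN', 'FP', 'FN'])  # -> every alias except the four base counts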
def measure_score(y_true, y_pred, measure, beta=1):
"""
To determine the performance of a predictive model a measure is used.
This function determines the measure for the given input labels.
Args:
--------
y_true (list or numpy.ndarray): 1-dimensional boolean list/numpy.ndarray containing the true labels.
y_pred (list or numpy.ndarray): 1-dimensional boolean list/numpy.ndarray containing the predicted labels.
measure (string): Measure name, see `select_all_names_except([''])` for possible measure names.
beta (float): Default is 1. Parameter for the F-beta score.
Returns:
--------
float: The score of the given measure evaluated with the predicted and true labels.
Raises:
--------
ValueError
If `measure` is not in `select_all_names_except([''])`.
ValueError
If `y_true` or `y_pred` does not only contain zeros and ones.
See also:
--------
select_all_names_except
Example:
--------
>>> import random
>>> random.seed(123) # To ensure similar outputs
>>> y_pred = random.choices((0, 1), k=10000, weights=(0.9, 0.1))
>>> y_true = random.choices((0, 1), k=10000, weights=(0.9, 0.1))
>>> print('Markedness: {:06.4f}'.format(measure_score(y_true, y_pred, measure='MK'))) # Measuring markedness (MK)
Markedness: 0.0061
>>> print('F2 Score: {:06.4f}'.format(measure_score(y_true, y_pred, measure='FBETA', beta=2))) # Measuring FBETA for beta = 2
F2 Score: 0.1053
"""
measure = measure.upper()
# convert np.array to list
if isinstance(y_true, np.ndarray):
y_true = y_true.tolist()
if isinstance(y_pred, np.ndarray):
y_pred = y_pred.tolist()
if measure not in select_all_names_except(['']):
raise ValueError("This measure name is not recognized.")
if np.unique(np.array(y_true)) not in np.array([0, 1]):
raise ValueError("y_true should only contain zeros and ones.")
if np.unique(np.array(y_pred)) not in np.array([0, 1]):
raise ValueError("y_pred should only contain zeros and ones.")
P = np.int64(sum(y_true))
M = np.int64(len(y_true))
N = np.int64(M - P)
P_predicted = sum(y_pred)
TP = np.dot(y_true, y_pred)
FP = P_predicted - TP
FN = P - TP
TN = N - FP
if measure in measure_dictionary['TP']:
return TP
if measure in measure_dictionary['TN']:
return TN
if measure in measure_dictionary['FP']:
return FP
if measure in measure_dictionary['FN']:
return FN
if measure in measure_dictionary['TPR']:
return TP / P
if measure in measure_dictionary['TNR']:
return TN / N
if measure in measure_dictionary['FPR']:
return FP / N
if measure in measure_dictionary['FNR']:
return FN / P
if measure in measure_dictionary['PPV']:
return TP / (TP + FP)
if measure in measure_dictionary['NPV']:
return TN / (TN + FN)
if measure in measure_dictionary['FDR']:
return FP / (TP + FP)
if measure in measure_dictionary['FOR']:
return FN / (TN + FN)
if measure in measure_dictionary['ACC']:
return (TP + TN) / M
if measure in measure_dictionary['BACC']:
TPR = TP / P
TNR = TN / N
return (TPR + TNR) / 2
if measure in measure_dictionary['FBETA']:
beta_squared = beta ** 2
return (1 + beta_squared) * TP / (((1 + beta_squared) * TP) + (beta_squared * FN) + FP)
if measure in measure_dictionary['MCC']:
return (TP * TN - FP * FN)/(math.sqrt((TP + FP) * (TN + FN) * P * N))
if measure in measure_dictionary['J']:
TPR = TP / P
TNR = TN / N
return TPR + TNR - 1
if measure in measure_dictionary['MK']:
PPV = TP / (TP + FP)
NPV = TN / (TN + FN)
return PPV + NPV - 1
if measure in measure_dictionary['KAPPA']:
P_o = (TP + TN) / M
P_yes = ((TP + FP) / M) * (P / M)
P_no = ((TN + FN) / M) * (N / M)
P_e = P_yes + P_no
return (P_o - P_e) / (1 - P_e)
if measure in measure_dictionary['FM']:
TPR = TP / P
PPV = TP / (TP + FP)
return math.sqrt(TPR * PPV)
if measure in measure_dictionary['G2']:
TPR = TP / P
TNR = TN / N
return math.sqrt(TPR * TNR)
if measure in measure_dictionary['TS']:
return TP / (TP + FN + FP)
def optimized_baseline_statistics(y_true, measure, beta=1, M_known = True, P_known = True):
"""
This function determines the optimal `theta` that maximizes or minimizes
the measure on the `y_true`. It also determines the corresponding extreme value.
Args:
--------
y_true (list or numpy.ndarray): 1-dimensional boolean list/numpy.ndarray containing the true labels.
measure (string): Measure name, see `select_all_names_except([''])` for possible measure names.
beta (float): Default is 1. Parameter for the F-beta score.
M_known (bool): True if knowledge of the number of samples can be used in determining optimality.
P_known (bool): True if knowledge of the number of positive labels can be used in determining optimality.
Returns:
--------
dict: Containing `Max Expected Value`, `Argmax Expected Value`, `Min Expected Value` and `Argmin Expected Value`.
- `Max Expected Value` (float): Maximum of the expected values for all `theta`.
- `Argmax Expected Value` (list): List of all `theta_star` values that maximize the expected value.
- `Min Expected Value` (float): Minimum of the expected values for all `theta`.
- `Argmin Expected Value` (list): List of all `theta_star` values that minimize the expected value.
Raises:
--------
ValueError
If the combination of M_known, P_known and measure leads to no known statistics.
ValueError
If `measure` is not in `select_all_names_except([''])`.
ValueError
If `y_true` does not only contain zeros and ones.
See also:
--------
select_all_names_except
baseline_functions
Example:
--------
>>> import random
>>> random.seed(123) # To ensure similar outputs
>>> y_true = random.choices((0, 1), k=10000, weights=(0.9, 0.1))
>>> optimal_baseline = optimized_baseline_statistics(y_true, measure='FBETA', beta=1)
>>> print('Max Expected Value: {:06.4f}'.format(optimal_baseline['Max Expected Value']))
Max Expected Value: 0.1805
>>> print('Argmax Expected Value: {:06.4f}'.format(optimal_baseline['Argmax Expected Value']))
Argmax Expected Value: 1.0000
>>> print('Min Expected Value: {:06.4f}'.format(optimal_baseline['Min Expected Value']))
Min Expected Value: 0.0000
>>> print('Argmin Expected Value: {:06.4f}'.format(optimal_baseline['Argmin Expected Value']))
Argmin Expected Value: 0.0000
"""
measure = measure.upper()
if return_baseline_information(measure, M_known, P_known) == False:
raise ValueError("No known statistics in this case.")
# convert np.array to list
if isinstance(y_true, np.ndarray):
y_true = y_true.tolist()
if measure not in select_all_names_except(['']):
raise ValueError("This measure name is not recognized.")
if np.unique(np.array(y_true)) not in np.array([0, 1]):
raise ValueError("y_true should only contain zeros and ones.")
P = sum(y_true)
M = len(y_true)
N = M - P
return_statistics = {}
if measure in measure_dictionary['TP']:
return_statistics['Max Expected Value'] = P
return_statistics['Argmax Expected Value'] = [1]
return_statistics['Min Expected Value'] = 0
return_statistics['Argmin Expected Value'] = [0]
if measure in measure_dictionary['TN']:
return_statistics['Max Expected Value'] = N
return_statistics['Argmax Expected Value'] = [0]
return_statistics['Min Expected Value'] = 0
return_statistics['Argmin Expected Value'] = [1]
if measure in measure_dictionary['FN']:
return_statistics['Max Expected Value'] = P
return_statistics['Argmax Expected Value'] = [0]
return_statistics['Min Expected Value'] = 0
return_statistics['Argmin Expected Value'] = [1]
if measure in measure_dictionary['FP']:
return_statistics['Max Expected Value'] = N
return_statistics['Argmax Expected Value'] = [1]
return_statistics['Min Expected Value'] = 0
return_statistics['Argmin Expected Value'] = [0]
if measure in measure_dictionary['TPR']:
return_statistics['Max Expected Value'] = 1
return_statistics['Argmax Expected Value'] = [1]
return_statistics['Min Expected Value'] = 0
return_statistics['Argmin Expected Value'] = [0]
if measure in measure_dictionary['TNR']:
return_statistics['Max Expected Value'] = 1
return_statistics['Argmax Expected Value'] = [0]
return_statistics['Min Expected Value'] = 0
return_statistics['Argmin Expected Value'] = [1]
if measure in measure_dictionary['FNR']:
return_statistics['Max Expected Value'] = 1
return_statistics['Argmax Expected Value'] = [0]
return_statistics['Min Expected Value'] = 0
return_statistics['Argmin Expected Value'] = [1]
if measure in measure_dictionary['FPR']:
return_statistics['Max Expected Value'] = 1
return_statistics['Argmax Expected Value'] = [1]
return_statistics['Min Expected Value'] = 0
return_statistics['Argmin Expected Value'] = [0]
if measure in measure_dictionary['PPV']:
return_statistics['Max Expected Value'] = P/M
return_statistics['Argmax Expected Value'] = [
i/M for i in range(1, M + 1)]
return_statistics['Min Expected Value'] = P/M
return_statistics['Argmin Expected Value'] = [
i/M for i in range(1, M + 1)]
if measure in measure_dictionary['NPV']:
return_statistics['Max Expected Value'] = N/M
return_statistics['Argmax Expected Value'] = [i/M for i in range(0, M)]
return_statistics['Min Expected Value'] = N/M
return_statistics['Argmin Expected Value'] = [i/M for i in range(0, M)]
if measure in measure_dictionary['FDR']:
return_statistics['Max Expected Value'] = N/M
return_statistics['Argmax Expected Value'] = [
i/M for i in range(1, M + 1)]
return_statistics['Min Expected Value'] = N/M
return_statistics['Argmin Expected Value'] = [
i/M for i in range(1, M + 1)]
if measure in measure_dictionary['FOR']:
return_statistics['Max Expected Value'] = P/M
return_statistics['Argmax Expected Value'] = [i/M for i in range(0, M)]
return_statistics['Min Expected Value'] = P/M
return_statistics['Argmin Expected Value'] = [i/M for i in range(0, M)]
if measure in measure_dictionary['FBETA']:
beta_squared = beta ** 2
return_statistics['Max Expected Value'] = (
1 + beta_squared) * P / (beta_squared * P + M)
return_statistics['Argmax Expected Value'] = [1]
return_statistics['Min Expected Value'] = (1 + beta_squared) * P / (M * (beta_squared * P + 1))
return_statistics['Argmin Expected Value'] = [1/M]
if measure in measure_dictionary['J']:
return_statistics['Max Expected Value'] = 0
return_statistics['Argmax Expected Value'] = [
i/M for i in range(0, M + 1)]
return_statistics['Min Expected Value'] = 0
return_statistics['Argmin Expected Value'] = [
i/M for i in range(0, M + 1)]
if measure in measure_dictionary['MK']:
return_statistics['Max Expected Value'] = 0
return_statistics['Argmax Expected Value'] = [i/M for i in range(1, M)]
return_statistics['Min Expected Value'] = 0
return_statistics['Argmin Expected Value'] = [i/M for i in range(1, M)]
if measure in measure_dictionary['ACC']:
return_statistics['Max Expected Value'] = max((N/M, P/M))
return_statistics['Min Expected Value'] = min((N/M, P/M))
if P == N:
return_statistics['Argmax Expected Value'] = [i/M for i in range(0, M+1)]
return_statistics['Argmin Expected Value'] = [i/M for i in range(0, M+1)]
else:
return_statistics['Argmax Expected Value'] = [int((P >= N))]
return_statistics['Argmin Expected Value'] = [int((P < N))]
if measure in measure_dictionary['BACC']:
return_statistics['Max Expected Value'] = 0.5
return_statistics['Argmax Expected Value'] = [i/M for i in range(0, M+1)]
return_statistics['Min Expected Value'] = 0.5
return_statistics['Argmin Expected Value'] = [i/M for i in range(0, M+1)]
if measure in measure_dictionary['MCC']:
return_statistics['Max Expected Value'] = 0
return_statistics['Argmax Expected Value'] = [i/M for i in range(1, M)]
return_statistics['Min Expected Value'] = 0
return_statistics['Argmin Expected Value'] = [i/M for i in range(1, M)]
if measure in measure_dictionary['KAPPA']:
return_statistics['Max Expected Value'] = 0
return_statistics['Argmax Expected Value'] = [
i/M for i in range(0, M + 1)]
return_statistics['Min Expected Value'] = 0
return_statistics['Argmin Expected Value'] = [
i/M for i in range(0, M + 1)]
if measure in measure_dictionary['FM']:
return_statistics['Max Expected Value'] = math.sqrt(P / M)
return_statistics['Argmax Expected Value'] = [1]
return_statistics['Min Expected Value'] = math.sqrt(P) / M
return_statistics['Argmin Expected Value'] = [1/M]
if measure in measure_dictionary['G2']:
return_statistics['Min Expected Value'] = 0
return_statistics['Argmin Expected Value'] = [0, 1]
result = [np.nan] * (M + 1)
time_to_exc = round(0.000175452 * M ** 1.8841 -0.0512485)
print("Press Control + C to stop the code")
if time_to_exc < 60:
print("Estimated time to execute is: " + str(time_to_exc) + " seconds." )
else:
time_to_exc = round(time_to_exc / 60)
if time_to_exc < 60:
print("Estimated time to execute is: " + str(time_to_exc) + " minutes." )
time_to_exc = round(time_to_exc / 60)
else:
time_to_exc_hour = round(time_to_exc / 60)
print("Estimated time to execute is: " + str(time_to_exc_hour) + " hours." )
time.sleep(2)
try:
for i in tqdm(range(0, M + 1)):
theta = i / M
            rounded_m_theta = round(M * theta)
TP_rv = hypergeom(M=M, n=P, N=rounded_m_theta)
result[i] = sum([(math.sqrt(k * (N - rounded_m_theta + k) / (P * N))) * TP_rv.pmf(k)
if TP_rv.pmf(k) > 0 else 0 for k in range(int(max(0, rounded_m_theta - N)),
int(min((P + 1, rounded_m_theta + 1))))])
except KeyboardInterrupt:
print("\nThe code is stopped.")
print("This means that the max expected value could not be calculated.")
print("You only get the min and argmin.")
return_statistics['Max Expected Value'] = np.nan
return_statistics['Argmax Expected Value'] = [np.nan]
return return_statistics
return_statistics['Max Expected Value'] = np.nanmax(result)
return_statistics['Argmax Expected Value'] = [
i/M for i, j in enumerate(result) if j == return_statistics['Max Expected Value']]
if measure in measure_dictionary['TS']:
return_statistics['Max Expected Value'] = P / M
return_statistics['Argmax Expected Value'] = [1]
return_statistics['Min Expected Value'] = 0
return_statistics['Argmin Expected Value'] = [0]
return return_statistics
def round_if_close(x):
"""
This function is used to round x if it is close. This is useful for the pmf of the hypergeometric distribution.
"""
if math.isclose(x, round(x), abs_tol=0.000001):
return round(x)
return x
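# Example (illustrative): round_if_close(2.9999999) returns 3, while round_if_close(2.5) is
# returned unchanged because 2.5 is not within the 1e-6 tolerance of round(2.5).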
def add_check_theta_generator(measure):
"""
This is a decorator to add a ValueError to a function if theta is not in the proper interval.
"""
include_0 = True
include_1 = True
measure = measure.upper()
# Should 0 be included
if measure in select_names(['PPV', 'FDR', 'MCC', 'MK', 'FM']):
include_0 = False
# Should 1 be included
if measure in select_names(['NPV', 'FOR', 'MCC', 'MK']):
include_1 = False
def add_check_theta(func):
@wraps(func)
def inner(theta, *args, **kwargs):
if (theta > 1 or theta < 0) or (theta == 0 and not include_0) or (theta == 1 and not include_1):
raise ValueError('Theta must be in the interval ' + include_0 * '[' + (
not include_0) * '(' + '0,1' + include_1 * ']' + (not include_1) * ')')
return func(theta, *args, **kwargs)
return inner
return add_check_theta
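# Example (illustrative): for 'PPV' the decorated functions reject theta == 0, so the allowed
# interval becomes (0,1]; for 'MCC' both endpoints are rejected, giving (0,1).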
expectation_docstring = """
Expectation function of measure.
Args:
--------
theta (float): Parameter for the shuffle baseline.
Returns:
--------
float: The expectation of the measure given `theta`.
"""
pmf_docstring = """
Probability mass function of measure.
Args:
--------
y (float): measure score
theta (float): Parameter for the shuffle baseline.
Returns:
--------
float: The probability that the measure is `y` using the shuffle approach.
"""
variance_docstring = """
Variance function of measure.
Args:
--------
theta (float): Parameter for the shuffle baseline.
Returns:
--------
float: The variance of the measure given `theta`.
"""
fast_expectation_docstring = """
Fast expectation function of measure.
Args:
--------
theta (float): Parameter for the shuffle baseline.
Returns:
--------
float: The fast expectation of the measure given `theta`.
"""
domain_docstring = """
Domain function of measure. All scores with non-zero probability.
Args:
--------
theta (float): Parameter for the shuffle baseline.
Returns:
--------
list: List of all scores with non-zero probability.
"""
def add_docstring(docstring):
"""
This function is used to set a docstring of a function
"""
def _add_docstring(func):
func.__doc__ = docstring
return func
return _add_docstring
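# Design note (added for clarity): baseline_functions below relies on the fact that most measures
# are affine in TP, i.e. measure = a * TP + b, where the strings `a` and `b` are evaluated with
# eval() and may depend on rounded_m_theta and theta_star. Since TP is hypergeometrically
# distributed under the Dutch Draw, the pmf, expectation and variance of the measure follow
# directly from this affine transformation; only 'G2' and 'TS' need a measure-specific treatment.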
def baseline_functions(y_true, measure, beta=1, M_known = True, P_known = True):
"""
This function returns a dictionary of functions that can be used to determine
statistics (such as expectation and variance) for all possible values of `theta`.
Args:
--------
y_true (list or numpy.ndarray): 1-dimensional boolean list/numpy.ndarray containing the true labels.
measure (string): Measure name, see `select_all_names_except([''])` for possible measure names.
beta (float): Default is 1. Parameter for the F-beta score.
M_known (bool): True if knowledge of the number of samples can be used in determining optimality.
P_known (bool): True if knowledge of the number of positive labels can be used in determining optimality.
Returns:
--------
dict: Containing `Distribution`, `Domain`, `(Fast) Expectation Function` and `Variance Function`.
- `Distribution` (function): Pmf of the measure, given by: `pmf_Y(y, theta)`, where `y` is a measure score and `theta` is the parameter of the shuffle baseline.
- `Domain` (function): Function that returns attainable measure scores with argument `theta`.
- `(Fast) Expectation Function` (function): Expectation function of the baseline with `theta` as argument. If `Fast Expectation Function` is returned, there exists a theoretical expectation that can be used for fast computation.
- `Variance Function` (function): Variance function for all values of `theta`.
Raises:
--------
ValueError
If the combination of M_known, P_known and measure leads to no known statistics.
ValueError
If `measure` is not in `select_all_names_except([''])`.
ValueError
If `y_true` does not only contain zeros and ones.
See also:
--------
select_all_names_except
select_names
round_if_close
Example:
--------
>>> import random
>>> random.seed(123) # To ensure similar outputs
>>> y_true = random.choices((0, 1), k=10000, weights=(0.9, 0.1))
>>> baseline = baseline_functions(y_true, 'MK')
>>> print(baseline.keys())
dict_keys(['Distribution', 'Domain', 'Fast Expectation Function', 'Variance Function', 'Expectation Function'])
"""
measure = measure.upper()
# convert np.array to list
if isinstance(y_true, np.ndarray):
y_true = y_true.tolist()
if measure not in select_all_names_except(['']):
raise ValueError("This measure name is not recognized.")
if np.unique(np.array(y_true)) not in np.array([0, 1]):
raise ValueError("y_true should only contain zeros and ones.")
P = sum(y_true)
M = len(y_true)
N = M - P
# Used to return all functions
return_functions = {}
# Used to generate pmf functions
def generate_hypergeometric_distribution(a, b):
@add_docstring(pmf_docstring)
@add_check_theta_generator(measure)
def pmf_Y(y, theta):
            theta_star = round(theta * M) / M
            rounded_m_theta = round(theta * M)
            TP_rv = hypergeom(M=M, n=P, N=rounded_m_theta)
            # Use round_if_close function, because of small computation errors in python;
            # a and b are expression strings (as in the other generators), so evaluate them here
            return TP_rv.pmf(round_if_close((y - eval(b)) / eval(a)))
return pmf_Y
# Used to generate variance functions
def generate_variance_function(a):
@add_docstring(variance_docstring)
@add_check_theta_generator(measure)
def variance_function(theta):
theta_star = round(theta * M) / M
rounded_m_theta = round(theta * M)
var_tp = (theta_star * (1 - theta_star) * P * N) / (M - 1)
return (eval(a) ** 2) * var_tp
return variance_function
# Used to generate expectation functions
def generate_expectation_function(a, b):
@add_docstring(expectation_docstring)
@add_check_theta_generator(measure)
def expectation_function(theta):
theta_star = round(theta * M) / M
rounded_m_theta = round(theta * M)
mean_tp = theta_star * P
return eval(a) * mean_tp + eval(b)
return expectation_function
# Used to generate fast expectation functions. The expectation string is used to alter the function.
def generate_fast_expectation_function(expectation_string):
@add_docstring(fast_expectation_docstring)
@add_check_theta_generator(measure)
def fast_expectation_function(theta):
theta_star = round(theta * M) / M
return eval(expectation_string)
return fast_expectation_function
# Used to generate domain functions
def generate_domain_function(a, b):
@add_docstring(domain_docstring)
@add_check_theta_generator(measure)
def domain_function(theta):
theta_star = round(theta * M) / M
rounded_m_theta = round(theta * M)
return [(eval(a) * x) + eval(b) for x in range(int(max(0, rounded_m_theta - N)), int(min((P + 1, rounded_m_theta + 1))))]
return domain_function
# Used to generate domain function for TS and G2.
def generate_domain_function_given_x(given_x_function):
@add_check_theta_generator(measure)
def domain_function(theta):
rounded_m_theta = round(theta * M)
return np.unique([given_x_function(x, theta) for x in range(int(max(0, rounded_m_theta - N)), int(min((P + 1, rounded_m_theta + 1))))])
return domain_function
if measure in measure_dictionary['TP']:
a = '1'
b = '0'
expectation_string = 'theta_star * ' + str(P)
if measure in measure_dictionary['TN']:
a = '1'
b = str(N) + ' - rounded_m_theta'
expectation_string = '(1 - theta_star) * ' + str(N)
if measure in measure_dictionary['FP']:
a = '-1'
b = 'rounded_m_theta'
expectation_string = 'theta_star * ' + str(N)
if measure in measure_dictionary['FN']:
a = '-1'
b = str(P)
expectation_string = '(1 - theta_star) * ' + str(P)
if measure in measure_dictionary['TPR']:
a = '1 / ' + str(P)
b = '0'
expectation_string = 'theta_star'
if measure in measure_dictionary['TNR']:
a = '1 / ' + str(N)
b = '(' + str(N) + ' - rounded_m_theta) / ' + str(N)
expectation_string = '1 - theta_star'
if measure in measure_dictionary['FPR']:
a = '-1 / ' + str(N)
b = 'rounded_m_theta / ' + str(N)
expectation_string = 'theta_star'
if measure in measure_dictionary['FNR']:
a = '-1 / ' + str(P)
b = '1'
expectation_string = '1 - theta_star'
if measure in measure_dictionary['PPV']:
a = '1 / rounded_m_theta'
b = '0'
expectation_string = str(P) + ' / ' + str(M)
if measure in measure_dictionary['NPV']:
a = '1 / (' + str(M) + ' - rounded_m_theta)'
b = '(' + str(N) + ' - rounded_m_theta) / (' + \
str(M) + ' - rounded_m_theta)'
expectation_string = str(N) + ' / ' + str(M)
if measure in measure_dictionary['FDR']:
a = '-1 / rounded_m_theta'
b = '1'
expectation_string = str(N) + ' / ' + str(M)
if measure in measure_dictionary['FOR']:
a = '-1 / (' + str(M) + ' - rounded_m_theta)'
b = '1 - ((' + str(N) + ' - rounded_m_theta) / (' + \
str(M) + ' - rounded_m_theta))'
expectation_string = str(P) + ' / ' + str(M)
if measure in measure_dictionary['ACC']:
a = '2 / ' + str(M)
b = '(' + str(N) + ' - rounded_m_theta) / ' + str(M)
expectation_string = '((1 - theta_star) * ' + str(N) + \
' + (theta_star * ' + str(P) + ')) / ' + str(M)
if measure in measure_dictionary['BACC']:
a = '(1 / (2 * ' + str(P) + ')) + (1 / (2 * ' + str(N) + '))'
b = '(' + str(N) + ' - rounded_m_theta) / (2 * ' + str(N) + ')'
expectation_string = '1 / 2'
if measure in measure_dictionary['FBETA']:
a = '(1 + (' + str(beta) + ' ** 2)) / ((' + str(beta) + \
' ** 2) * ' + str(P) + ' + ' + str(M) + ' * theta_star)'
b = '0'
        expectation_string = '((1 + (' + str(beta) + ' ** 2)) * theta_star * ' + str(
            P) + ') / ((' + str(beta) + ' ** 2) * ' + str(P) + ' + ' + str(M) + ' * theta_star)'
if measure in measure_dictionary['MCC']:
a = '1 / (math.sqrt(theta_star * (1 - theta_star) * ' + \
str(P) + ' * ' + str(N) + '))'
b = '- theta_star * ' + \
str(P) + ' / (math.sqrt(theta_star * (1 - theta_star) * ' + \
str(P) + ' * ' + str(N) + '))'
expectation_string = '0'
if measure in measure_dictionary['J']:
a = '(1 / ' + str(P) + ') + (1 / ' + str(N) + ')'
b = '- rounded_m_theta / ' + str(N)
expectation_string = '0'
if measure in measure_dictionary['MK']:
a = '(1 / rounded_m_theta) + (1 / (' + str(M) + ' - rounded_m_theta))'
b = '-' + str(P) + ' / (' + str(M) + ' - rounded_m_theta)'
expectation_string = '0'
if measure in measure_dictionary['KAPPA']:
a = '2 / ((1 - theta_star) * ' + str(P) + \
' + theta_star * ' + str(N) + ')'
b = '- 2 * theta_star * ' + \
str(P) + ' / ((1 - theta_star) * ' + \
str(P) + ' + theta_star * ' + str(N) + ')'
expectation_string = '0'
if measure in measure_dictionary['FM']:
a = '1 / (math.sqrt(' + str(P) + ' * rounded_m_theta))'
b = '0'
expectation_string = 'math.sqrt(theta_star * ' + \
str(P) + ' / ' + str(M) + ')'
if measure in measure_dictionary['G2']:
@add_docstring(pmf_docstring)
@add_check_theta_generator(measure)
def pmf_Y(y, theta):
TP_rv = hypergeom(M=M, n=P, N=round(theta * M))
rounded_m_theta = round(theta * M)
help_constant = math.sqrt(
(rounded_m_theta ** 2) - 2 * rounded_m_theta * N + (N ** 2) + 4 * P * N * (y ** 2))
value_1 = (1/2) * ((- help_constant) + rounded_m_theta - N)
value_2 = (1/2) * (help_constant + rounded_m_theta - N)
return TP_rv.pmf(round_if_close(value_1)) + TP_rv.pmf(round_if_close(value_2))
def given_x_function(x, theta):
rounded_m_theta = round(theta * M)
return math.sqrt((x / P) * ((N - rounded_m_theta + x) / N))
@add_docstring(expectation_docstring)
@add_check_theta_generator(measure)
def expectation_function(theta):
rounded_m_theta = round(theta * M)
TP_rv = hypergeom(M=M, n=P, N=round(theta * M))
return sum([TP_rv.pmf(x) * given_x_function(x, theta) for x in range(int(max(0, rounded_m_theta - N)), int(min((P + 1, rounded_m_theta + 1))))])
@add_docstring(variance_docstring)
@add_check_theta_generator(measure)
def variance_function(theta):
rounded_m_theta = round(theta * M)
TP_rv = hypergeom(M=M, n=P, N=round(theta * M))
return sum([TP_rv.pmf(x) * (given_x_function(x, theta) ** 2) for x in range(int(max(0, rounded_m_theta - N)), int(min((P + 1, rounded_m_theta + 1))))])
if measure in measure_dictionary['TS']:
@add_docstring(pmf_docstring)
@add_check_theta_generator(measure)
def pmf_Y(y, theta):
TP_rv = hypergeom(M=M, n=P, N=round(theta * M))
rounded_m_theta = round(theta * M)
return TP_rv.pmf(round_if_close((y * (P + rounded_m_theta)) / (1 + y)))
def given_x_function(x, theta):
rounded_m_theta = round(theta * M)
if P + rounded_m_theta - x == 0:
return 0
return x / (P + rounded_m_theta - x)
@add_docstring(expectation_docstring)
@add_check_theta_generator(measure)
def expectation_function(theta):
rounded_m_theta = round(theta * M)
TP_rv = hypergeom(M=M, n=P, N=round(theta * M))
return sum([TP_rv.pmf(x) * given_x_function(x, theta) for x in range(int(max(0, rounded_m_theta - N)), int(min((P + 1, rounded_m_theta + 1))))])
@add_docstring(variance_docstring)
@add_check_theta_generator(measure)
def variance_function(theta):
rounded_m_theta = round(theta * M)
TP_rv = hypergeom(M=M, n=P, N=round(theta * M))
return sum([TP_rv.pmf(x) * (given_x_function(x, theta) ** 2) for x in range(int(max(0, rounded_m_theta - N)), int(min((P + 1, rounded_m_theta + 1))))])
if measure in select_names(['G2', 'TS']):
return_functions['Distribution'] = pmf_Y
return_functions['Expectation Function'] = expectation_function
return_functions['Variance Function'] = variance_function
return_functions['Domain'] = generate_domain_function_given_x(
given_x_function)
if measure in select_all_names_except(['G2', 'TS']):
return_functions['Distribution'] = generate_hypergeometric_distribution(
a, b)
return_functions['Domain'] = generate_domain_function(a, b)
return_functions['Fast Expectation Function'] = generate_fast_expectation_function(
expectation_string)
return_functions['Variance Function'] = generate_variance_function(a)
return_functions['Expectation Function'] = generate_expectation_function(
a, b)
return return_functions
def baseline_functions_given_theta(theta, y_true, measure, beta=1, M_known = True, P_known = True):
"""
This function determines the mean and variance of the baseline for a given `theta` using `baseline_functions`.
Args:
--------
theta (float): Parameter for the shuffle baseline.
y_true (list or numpy.ndarray): 1-dimensional boolean list/numpy.ndarray containing the true labels.
measure (string): Measure name, see `select_all_names_except([''])` for possible measure names.
beta (float): Default is 1. Parameter for the F-beta score.
M_known (bool): True if knowledge of the number of samples can be used in determining optimality.
P_known (bool): True if knowledge of the number of positive labels can be used in determining optimality.
Returns:
--------
dict: Containing `Mean` and `Variance`
- `Mean` (float): Expected baseline given `theta`.
- `Variance` (float): Variance baseline given `theta`.
Raises:
--------
ValueError
If the combination of M_known, P_known and measure leads to no known statistics.
See also:
--------
baseline_functions
Example:
--------
>>> import random
>>> random.seed(123) # To ensure similar outputs
>>> y_true = random.choices((0, 1), k=10000, weights=(0.9, 0.1))
>>> baseline = baseline_functions_given_theta(theta= 0.9, y_true=y_true, measure='FBETA', beta=1)
>>> print('Mean: {:06.4f} and Variance: {:06.4f}'.format(baseline['Mean'], baseline['Variance']))
Mean: 0.1805 and Variance: 0.0000
"""
baseline = baseline_functions(y_true=y_true,
measure=measure, beta=beta)
return {'Mean': baseline['Expectation Function'](theta), 'Variance': baseline['Variance Function'](theta)}
# %%
def return_baseline_information(measure = '', M_known = True, P_known = True):
if measure in select_names(['ACC']) and (P_known == False or M_known == False):
return False
if measure in select_names(['FM', 'FBETA']) and M_known == False and P_known == False:
return False
else:
return True
def baseline(y_true, measure= '', theta = 'optimal', M_known = True, P_known = True, beta = 1):
"""
Statistics/information about the Dutch Draw baseline, combining the functions: optimized_baseline_statistics, baseline_functions, baseline_functions_given_theta.
Args:
--------
y_true (list or numpy.ndarray): 1-dimensional boolean list/numpy.ndarray containing the true labels.
measure (string): Measure name, see `select_all_names_except([''])` for possible measure names.
theta (float or string):
- 'optimal' (default): statistics of the optimal baseline are returned. (See `optimized_baseline_statistics`).
- 'all': functions of the baseline are returned for all theta. (See `baseline_functions`).
- float: statistics of the baseline for this given `theta`. (See `baseline_functions_given_theta`).
M_known (bool): True if knowledge of the number of samples can be used in determining optimality.
P_known (bool): True if knowledge of the number of positive labels can be used in determining optimality.
beta (float): Default is 1. Parameter for the F-beta score.
Returns:
--------
Dependent on theta. See `optimized_baseline_statistics`, `baseline_functions` and `baseline_functions_given_theta`.
Raises:
--------
ValueError
If `M_known` is False and `P_known` is True
See also:
--------
optimized_baseline_statistics
baseline_functions
baseline_functions_given_theta
Example:
--------
>>> import random
>>> random.seed(123) # To ensure similar outputs
>>> y_true = random.choices((0, 1), k=1000, weights=(0.9, 0.1))
>>> stats = baseline(y_true, measure = 'ACC', theta = 'optimal')
>>> print(stats)
{'Max Expected Value': 0.888, 'Min Expected Value': 0.112, 'Argmax Expected Value': [0], 'Argmin Expected Value': [1]}
>>> stats = baseline(y_true, measure = 'FBETA', theta = 0.2)
>>> print(stats)
{'Mean': 0.1435897435897436, 'Variance': 0.0006545401417196289}
>>> stats = baseline(y_true, measure = 'TS', theta = 'all')
>>> print(stats["Expectation Function"](0.5)) #Function depends on theta, here 0.5.
0.10080806593812942
"""
if M_known == False and P_known == True:
raise ValueError("This case has not been investigated. If M is unknown, P must also be unknown.")
    if theta == 'optimal':
        return optimized_baseline_statistics(y_true, measure, beta, M_known = M_known, P_known = P_known)
    elif theta == 'all':
        return baseline_functions(y_true, measure, beta, M_known = M_known, P_known = P_known)
    else:
        return baseline_functions_given_theta(theta, y_true, measure, beta, M_known = M_known, P_known = P_known)
def generate_y_true(M, P):
return [1] * P + [0] * (M - P)
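# Example (illustrative): generate_y_true(5, 2) returns [1, 1, 0, 0, 0].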
def classifier(y_true=None, theta='max', measure='', beta = 1,
M_known = True, P_known = True, E_P_x_E_N = None):
"""
This function gives the outcome of the Dutch Draw classifier given some parameters
Args:
--------
y_true (list or numpy.ndarray): 1-dimensional boolean list/numpy.ndarray containing the true labels.
        theta (float or string): Parameter for the shuffle baseline. Can be a float between 0 and 1,
            or "min"/"max" to use the optimal theta.
measure (string): Measure name, see `select_all_names_except([''])` for possible measure names.
beta (float): Default is 1. Parameter for the F-beta score.
M_known (bool): True if knowledge of the number of samples can be used in determining optimality.
P_known (bool): True if knowledge of the number of positive labels can be used in determining optimality.
        E_P_x_E_N (string): If P is unknown, this parameter can still encode prior knowledge about P.
            The x indicates whether the expected number of positives is larger than (>), smaller than (<) or
            equal to (=) the expected number of negatives. Set it to None if this is unknown.
Returns:
--------
        y_pred (list): 1-dimensional boolean list containing the predicted labels of the Dutch Draw classifier.
Raises:
--------
    ValueError
        If `y_true` is not a list consisting of zeros and ones.
    ValueError
        If `theta` is not a float between zero and one, or "max" or "min".
    ValueError
        If `measure` is not recognized.
    ValueError
        If `M_known` is False and `P_known` is True.
    ValueError
        If `beta` is negative.
    ValueError
        If `E_P_x_E_N` is not None, "<", "=" or ">".
See also:
--------
optimized_baseline_statistics
Example:
--------
>>> import random
>>> random.seed(123) # To ensure similar outputs
>>> y_true = random.choices((0, 1), k=1000, weights=(0.9, 0.1))
>>> y_pred = classifier(y_true=y_true, theta = "max", measure='ACC',
P_known = False, E_P_x_E_N = ">")
>>> print("Length y_pred:", len(y_pred), ", number of positives:", np.sum(y_pred))
Length y_pred: 1000 , number of positives: 1000
>>> y_pred = classifier(y_true=y_true, theta = "min", measure='TS')
>>> print("Length y_pred:", len(y_pred), ", number of positives:", np.sum(y_pred))
Length y_pred: 1000 , number of positives: 0
"""
if y_true is None :
raise ValueError("y_true must be given")
if isinstance(y_true, np.ndarray):
y_true = y_true.tolist()
if np.unique(np.array(y_true)) not in np.array([0, 1]):
raise ValueError("y_true should only contain zeros and ones.")
if isinstance(theta, float):
if theta < 0 or theta > 1:
raise ValueError("theta must be between 0 and 1.")
else:
        if theta not in ["min", "max"]:
            raise ValueError("theta must be a float, 'min' or 'max'.")
if measure not in select_all_names_except(['']):
raise ValueError("This measure name is not recognized.")
if M_known == False and P_known == True:
raise ValueError("This case has not been investigated. If M is unknown, P must also be unknown.")
if beta < 0:
raise ValueError("beta must be positive or 0.")
    if E_P_x_E_N not in [None, "<", "=", ">"]:
        raise ValueError("E_P_x_E_N must be None, '<', '=' or '>'.")
M = len(y_true)
if isinstance(theta, float):
        return [1] * round(M * theta) + [0] * (M - round(M * theta))
if measure == "FM" or measure == "FBETA":
if not M_known and not P_known:
if theta == "max":
return [1] * M
if theta == "min":
return [1] + [0] * (M - 1)
if measure == "ACC":
if not M_known and not P_known:
if theta == "max":
y_pred = []
while len(y_pred) < M:
y_pred.append(0)
y_pred.append(1)
return y_pred[:M]
if theta == "min":
return [1] * M
if M_known and not P_known:
if theta == "max":
                if E_P_x_E_N is None:
y_pred = [1] * math.ceil(M * 0.5) + [0] * math.ceil(M * 0.5)
return y_pred[:M]
if E_P_x_E_N in ["<","="]:
return [0] * M
if E_P_x_E_N == ">":
return [1] * M
if theta == "min":
if E_P_x_E_N in [None,">"]:
return [0] * M
if E_P_x_E_N in ["<","="]:
return [1] * M
if theta == "max":
t = optimized_baseline_statistics(y_true, measure, beta)["Argmax Expected Value"][0]
if theta == "min":
t = optimized_baseline_statistics(y_true, measure, beta)["Argmin Expected Value"][0]
    return [1] * round(M * t) + [0] * (M - round(M * t))
#%%
``` |
{
"source": "joris-pries/Official_Dependency_Function",
"score": 3
} |
#### File: bp_feature_importance/DeepKnockoffs/machine.py
```python
import os
import sys
import math
import torch
import numpy as np
import pandas as pd
import torch.nn as nn
import torch.optim as optim
from DeepKnockoffs.mmd import mix_rbf_mmd2_loss
import warnings
warnings.filterwarnings('ignore')  # np.warnings was removed in newer NumPy versions
def covariance_diff_biased(X, Xk, SigmaHat, Mask, scale=1.0):
""" Second-order loss function, as described in deep knockoffs manuscript
:param X: input data
:param Xk: generated knockoffs
:param SigmaHat: target covariance matrix
:param Mask: masking the diagonal of Cov(X,Xk)
:param scale: scaling the loss function
:return: second-order loss function
"""
# Center X,Xk
mX = X - torch.mean(X,0,keepdim=True)
mXk = Xk - torch.mean(Xk,0,keepdim=True)
# Compute covariance matrices
SXkXk = torch.mm(torch.t(mXk),mXk)/mXk.shape[0]
SXXk = torch.mm(torch.t(mX),mXk)/mXk.shape[0]
# Compute loss
T = (SigmaHat-SXkXk).pow(2).sum() / scale
T += (Mask*(SigmaHat-SXXk)).pow(2).sum() / scale
return T
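# Note (added for clarity): the loss above equals
#   (||SigmaHat - Cov(Xk,Xk)||_F^2 + ||Mask * (SigmaHat - Cov(X,Xk))||_F^2) / scale,
# so the knockoffs are pushed to reproduce the target covariance while the masked
# cross-covariance between X and Xk matches SigmaHat off the diagonal.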
def create_checkpoint_name(pars):
""" Defines the filename of the network
:param pars: training hyper-parameters
:return: filename composed of the hyper-parameters
"""
checkpoint_name = 'net'
for key, value in pars.items():
checkpoint_name += '_' + key
if key == 'alphas':
for i in range(len(pars['alphas'])):
checkpoint_name += '_' + str(pars['alphas'][i])
else:
checkpoint_name += '_' + str(value)
return checkpoint_name
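# Example (illustrative, hypothetical hyper-parameters):
#   create_checkpoint_name({'p': 30, 'lr': 0.01}) returns 'net_p_30_lr_0.01'.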
def save_checkpoint(state, filename):
    """ Saves the most recently updated network to filename and stores the previous
    machine in the filename + '_prev.pth.tar' file
    :param state: training state of the machine
    :param filename: filename to save the current machine
"""
# keep the previous model
if os.path.isfile(filename):
os.rename(filename, filename + '_prev.pth.tar')
# save new model
torch.save(state, filename)
def gen_batches(n_samples, batch_size, n_reps):
    """ Divide the sample indices into random batches.
    :param n_samples: number of samples in the data
    :param batch_size: size of each batch
    :param n_reps: number of random permutations of the data (epoch length)
    :return: list of index batches
    """
batches = []
for rep_id in range(n_reps):
idx = np.random.permutation(n_samples)
for i in range(0, math.floor(n_samples/batch_size)*batch_size, batch_size):
window = np.arange(i,i+batch_size)
new_batch = idx[window]
batches += [new_batch]
return(batches)
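# Example (illustrative): gen_batches(10, 4, 1) returns 2 disjoint batches of 4 random indices
# each; the 2 remaining samples of that permutation are simply dropped.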
class Net(nn.Module):
""" Deep knockoff network
"""
def __init__(self, p, dim_h, family="continuous"):
""" Constructor
:param p: dimensions of data
:param dim_h: width of the network (~6 layers are fixed)
:param family: data type, either "continuous" or "binary"
"""
super(Net, self).__init__()
self.p = p
self.dim_h = dim_h
if (family=="continuous"):
self.main = nn.Sequential(
nn.Linear(2*self.p, self.dim_h, bias=False),
nn.BatchNorm1d(self.dim_h),
nn.PReLU(),
nn.Linear(self.dim_h, self.dim_h, bias=False),
nn.BatchNorm1d(self.dim_h),
nn.PReLU(),
nn.Linear(self.dim_h, self.dim_h, bias=False),
nn.BatchNorm1d(self.dim_h),
nn.PReLU(),
nn.Linear(self.dim_h, self.dim_h, bias=False),
nn.BatchNorm1d(self.dim_h),
nn.PReLU(),
nn.Linear(self.dim_h, self.dim_h, bias=False),
nn.BatchNorm1d(self.dim_h),
nn.PReLU(),
nn.Linear(self.dim_h, self.dim_h, bias=False),
nn.BatchNorm1d(self.dim_h),
nn.PReLU(),
nn.Linear(self.dim_h, self.p),
)
elif (family=="binary"):
self.main = nn.Sequential(
nn.Linear(2*self.p, self.dim_h, bias=False),
nn.BatchNorm1d(self.dim_h, eps=1e-02),
nn.PReLU(),
nn.Linear(self.dim_h, self.dim_h, bias=False),
nn.BatchNorm1d(self.dim_h, eps=1e-02),
nn.PReLU(),
nn.Linear(self.dim_h, self.dim_h, bias=False),
nn.BatchNorm1d(self.dim_h, eps=1e-02),
nn.PReLU(),
nn.Linear(self.dim_h, self.dim_h, bias=False),
nn.BatchNorm1d(self.dim_h, eps=1e-02),
nn.PReLU(),
nn.Linear(self.dim_h, self.dim_h, bias=False),
nn.BatchNorm1d(self.dim_h, eps=1e-02),
nn.PReLU(),
nn.Linear(self.dim_h, self.dim_h, bias=False),
nn.BatchNorm1d(self.dim_h, eps=1e-02),
nn.PReLU(),
nn.Linear(self.dim_h, self.p),
nn.Sigmoid(),
nn.BatchNorm1d(self.p, eps=1e-02),
)
else:
            sys.exit("Error: unknown family")
def forward(self, x, noise):
""" Sample knockoff copies of the data
:param x: input data
:param noise: random noise seed
:returns the constructed knockoffs
"""
x_cat = torch.cat((x,noise),1)
x_cat[:,0::2] = x
x_cat[:,1::2] = noise
return self.main(x_cat)
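# Note (added for clarity): forward() interleaves data and noise column-wise, so the first layer
# sees the 2p-dimensional vector [x_1, z_1, x_2, z_2, ...] for each sample and the network maps
# it back to a p-dimensional knockoff copy.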
def norm(X, p=2):
if(p==np.inf):
return(torch.max(torch.abs(X)))
else:
return(torch.norm(X,p))
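# Example (illustrative): norm(X) returns the 2-norm of the flattened tensor (the Frobenius norm
# for a matrix), while norm(X, np.inf) returns the largest absolute entry.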
class KnockoffMachine:
""" Deep Knockoff machine
"""
def __init__(self, pars, checkpoint_name=None, logs_name=None):
""" Constructor
:param pars: dictionary containing the following keys
'family': data type, either "continuous" or "binary"
'p': dimensions of data
'epochs': number of training epochs
'epoch_length': number of iterations over the full data per epoch
'batch_size': batch size
'test_size': size of test set
'lr': learning rate for main training loop
'lr_milestones': when to decrease learning rate, unused when equals to number of epochs
'dim_h': width of the network
'target_corr': target correlation between variables and knockoffs
'LAMBDA': penalty encouraging second-order knockoffs
'DELTA': decorrelation penalty hyper-parameter
'GAMMA': penalty for MMD distance
'alphas': kernel widths for the MMD measure (uniform weights)
:param checkpoint_name: location to save the machine
:param logs_name: location to save the logfile
"""
# architecture parameters
self.p = pars['p']
self.dim_h = pars['dim_h']
self.family = pars['family']
# optimization parameters
self.epochs = pars['epochs']
self.epoch_length = pars['epoch_length']
self.batch_size = pars['batch_size']
self.test_size = pars['test_size']
self.lr = pars['lr']
self.lr_milestones = pars['lr_milestones']
# loss function parameters
self.alphas = pars['alphas']
self.target_corr = torch.from_numpy(pars['target_corr']).float()
self.DELTA = pars['DELTA']
self.GAMMA = pars['GAMMA']
self.LAMBDA = pars['LAMBDA']
# noise seed
self.noise_std = 1.0
self.dim_noise = self.p
# higher-order discrepency function
self.matching_loss = mix_rbf_mmd2_loss
self.matching_param = self.alphas
# Normalize learning rate to avoid numerical issues
        self.lr = self.lr / np.max([self.DELTA, self.GAMMA, self.LAMBDA, 1.0])
self.pars = pars
if checkpoint_name == None:
self.checkpoint_name = None
self.best_checkpoint_name = None
else:
self.checkpoint_name = checkpoint_name + "_checkpoint.pth.tar"
self.best_checkpoint_name = checkpoint_name + "_best.pth.tar"
if logs_name == None:
self.logs_name = None
else:
self.logs_name = logs_name
self.resume_epoch = 0
# init the network
self.net = Net(self.p, self.dim_h, family=self.family)
def compute_diagnostics(self, X, Xk, noise, test=False):
""" Evaluates the different components of the loss function
:param X: input data
:param Xk: knockoffs of X
:param noise: allocated tensor that is used to sample the noise seed
:param test: compute the components of the loss on train (False) or test (True)
:return diagnostics: a dictionary containing the following keys:
'Mean' : distance between the means of X and Xk
'Corr-Diag': correlation between X and Xk
                       'Corr-Full': ||Cov(X,X) - Cov(Xk,Xk)||_F / ||Cov(X,X)||_F
                       'Corr-Swap': ||M*(Cov(Xk,X) - Cov(X,X))||_F / ||Cov(X,X)||_F
where M is a mask that excludes the diagonal
'Loss': the value of the loss function
'MMD-Full': discrepancy between (X',Xk') and (Xk'',X'')
'MMD-Swap': discrepancy between (X',Xk') and (X'',Xk'')_swap(s)
"""
# Initialize dictionary of diagnostics
diagnostics = dict()
if test:
diagnostics["Data"] = "test"
else:
diagnostics["Data"] = "train"
##############################
# Second-order moments
##############################
# Difference in means
D_mean = X.mean(0) - Xk.mean(0)
D_mean = (D_mean*D_mean).mean()
diagnostics["Mean"] = D_mean.data.cpu().item()
# Center and scale X, Xk
mX = X - torch.mean(X,0,keepdim=True)
mXk = Xk - torch.mean(Xk,0,keepdim=True)
scaleX = (mX*mX).mean(0,keepdim=True)
scaleXk = (mXk*mXk).mean(0,keepdim=True)
# Correlation between X and Xk
scaleX[scaleX==0] = 1.0 # Prevent division by 0
scaleXk[scaleXk==0] = 1.0 # Prevent division by 0
mXs = mX / torch.sqrt(scaleX)
mXks = mXk / torch.sqrt(scaleXk)
corr = (mXs*mXks).mean()
diagnostics["Corr-Diag"] = corr.data.cpu().item()
# Cov(Xk,Xk)
Sigma = torch.mm(torch.t(mXs),mXs)/mXs.shape[0]
Sigma_ko = torch.mm(torch.t(mXks),mXks)/mXk.shape[0]
DK_2 = norm(Sigma_ko-Sigma) / norm(Sigma)
diagnostics["Corr-Full"] = DK_2.data.cpu().item()
# Cov(Xk,X) excluding the diagonal elements
SigIntra_est = torch.mm(torch.t(mXks),mXs)/mXk.shape[0]
DS_2 = norm(self.Mask*(SigIntra_est-Sigma)) / norm(Sigma)
diagnostics["Corr-Swap"] = DS_2.data.cpu().item()
##############################
# Loss function
##############################
_, loss_display, mmd_full, mmd_swap = self.loss(X[:noise.shape[0]], Xk[:noise.shape[0]], test=True)
diagnostics["Loss"] = loss_display.data.cpu().item()
diagnostics["MMD-Full"] = mmd_full.data.cpu().item()
diagnostics["MMD-Swap"] = mmd_swap.data.cpu().item()
# Return dictionary of diagnostics
return diagnostics
def loss(self, X, Xk, test=False):
""" Evaluates the loss function
:param X: input data
:param Xk: knockoffs of X
:param test: evaluate the MMD, regardless the value of GAMMA
:return loss: the value of the effective loss function
loss_display: a copy of the loss variable that will be used for display
mmd_full: discrepancy between (X',Xk') and (Xk'',X'')
mmd_swap: discrepancy between (X',Xk') and (X'',Xk'')_swap(s)
"""
# Divide the observations into two disjoint batches
n = int(X.shape[0]/2)
X1,Xk1 = X[:n], Xk[:n]
X2,Xk2 = X[n:(2*n)], Xk[n:(2*n)]
# Joint variables
Z1 = torch.cat((X1,Xk1),1)
Z2 = torch.cat((Xk2,X2),1)
Z3 = torch.cat((X2,Xk2),1).clone()
swap_inds = np.where(np.random.binomial(1,0.5,size=self.p))[0]
Z3[:,swap_inds] = Xk2[:,swap_inds]
Z3[:,swap_inds+self.p] = X2[:,swap_inds]
# Compute the discrepancy between (X,Xk) and (Xk,X)
mmd_full = 0.0
# Compute the discrepancy between (X,Xk) and (X,Xk)_s
mmd_swap = 0.0
if(self.GAMMA>0 or test):
# Evaluate the MMD by following section 4.3 in
# Li et al. "Generative Moment Matching Networks". Link to
# the manuscript -- https://arxiv.org/pdf/1502.02761.pdf
mmd_full = self.matching_loss(Z1, Z2, self.matching_param)
mmd_swap = self.matching_loss(Z1, Z3, self.matching_param)
# Match first two moments
loss_moments = 0.0
if self.LAMBDA>0:
# First moment
D_mean = X.mean(0) - Xk.mean(0)
loss_1m = D_mean.pow(2).sum()
# Second moments
loss_2m = covariance_diff_biased(X, Xk, self.SigmaHat, self.Mask, scale=self.Sigma_norm)
# Combine moments
loss_moments = loss_1m + loss_2m
# Penalize correlations between variables and knockoffs
loss_corr = 0.0
if self.DELTA>0:
# Center X and Xk
mX = X - torch.mean(X,0,keepdim=True)
mXk = Xk - torch.mean(Xk,0,keepdim=True)
# Correlation between X and Xk
eps = 1e-3
scaleX = mX.pow(2).mean(0,keepdim=True)
scaleXk = mXk.pow(2).mean(0,keepdim=True)
mXs = mX / (eps+torch.sqrt(scaleX))
mXks = mXk / (eps+torch.sqrt(scaleXk))
corr_XXk = (mXs*mXks).mean(0)
loss_corr = (corr_XXk-self.target_corr).pow(2).mean()
# Combine the loss functions
loss = self.GAMMA*mmd_full + self.GAMMA*mmd_swap + self.LAMBDA*loss_moments + self.DELTA*loss_corr
loss_display = loss
return loss, loss_display, mmd_full, mmd_swap
def train(self, X_in, resume = False):
""" Fit the machine to the training data
:param X_in: input data
:param resume: proceed the training by loading the last checkpoint
"""
# Divide data into training/test set
X = torch.from_numpy(X_in[self.test_size:]).float()
if(self.test_size>0):
X_test = torch.from_numpy(X_in[:self.test_size]).float()
else:
X_test = torch.zeros(0, self.p)
# used to compute statistics and diagnostics
self.SigmaHat = np.cov(X,rowvar=False)
self.SigmaHat = torch.from_numpy(self.SigmaHat).float()
self.Mask = torch.ones(self.p, self.p) - torch.eye(self.p)
# allocate a matrix for the noise realization
noise = torch.zeros(self.batch_size,self.dim_noise)
noise_test = torch.zeros(X_test.shape[0],self.dim_noise)
use_cuda = torch.cuda.is_available()
if resume == True: # load the last checkpoint
self.load(self.checkpoint_name)
self.net.train()
else: # start learning from scratch
self.net.train()
# Define the optimization method
self.net_optim = optim.SGD(self.net.parameters(), lr = self.lr, momentum=0.9)
# Define the scheduler
self.net_sched = optim.lr_scheduler.MultiStepLR(self.net_optim, gamma=0.1,
milestones=self.lr_milestones)
# bandwidth parameters of the Gaussian kernel
self.matching_param = self.alphas
# move data to GPU if available
if use_cuda:
self.SigmaHat = self.SigmaHat.cuda()
self.Mask = self.Mask.cuda()
self.net = self.net.cuda()
X = X.cuda()
X_test = X_test.cuda()
noise = noise.cuda()
noise_test = noise_test.cuda()
self.target_corr = self.target_corr.cuda()
Xk = 0*X
self.Sigma_norm = self.SigmaHat.pow(2).sum()
self.Sigma_norm_cross = (self.Mask*self.SigmaHat).pow(2).sum()
# Store diagnostics
diagnostics = pd.DataFrame()
losses_test = []
# main training loop
for epoch in range(self.resume_epoch, self.epochs):
# prepare for training phase
self.net.train()
# update the learning rate scheduler
self.net_sched.step()
# divide the data into batches
batches = gen_batches(X.size(0), self.batch_size, self.epoch_length)
losses = []
losses_dist_swap = []
losses_dist_full = []
for batch in batches:
# Extract data for this batch
X_batch = X[batch,:]
self.net_optim.zero_grad()
# Run the network
Xk_batch = self.net(X_batch, self.noise_std*noise.normal_())
# Compute the loss function
loss, loss_display, mmd_full, mmd_swap = self.loss(X_batch, Xk_batch)
# Compute the gradient
loss.backward()
# Take a gradient step
self.net_optim.step()
# Save history
losses.append(loss_display.data.cpu().item())
if self.GAMMA>0:
losses_dist_swap.append(mmd_swap.data.cpu().item())
losses_dist_full.append(mmd_full.data.cpu().item())
# Save the knockoffs
Xk[batch, :] = Xk_batch.data
##############################
# Compute diagnostics
##############################
# Prepare for testing phase
self.net.eval()
# Evaluate the diagnostics on the training data, the following
# function recomputes the loss on the training data
diagnostics_train = self.compute_diagnostics(X, Xk, noise, test=False)
diagnostics_train["Loss"] = np.mean(losses)
            if(self.GAMMA>0):
diagnostics_train["MMD-Full"] = np.mean(losses_dist_full)
diagnostics_train["MMD-Swap"] = np.mean(losses_dist_swap)
diagnostics_train["Epoch"] = epoch
diagnostics = diagnostics.append(diagnostics_train, ignore_index=True)
# Evaluate the diagnostics on the test data if available
if(self.test_size>0):
Xk_test = self.net(X_test, self.noise_std*noise_test.normal_())
diagnostics_test = self.compute_diagnostics(X_test, Xk_test, noise_test, test=True)
else:
diagnostics_test = {key:np.nan for key in diagnostics_train.keys()}
diagnostics_test["Epoch"] = epoch
diagnostics = diagnostics.append(diagnostics_test, ignore_index=True)
# If the test loss is at a minimum, save the machine to
# the location pointed by best_checkpoint_name
losses_test.append(diagnostics_test["Loss"])
if((self.test_size>0) and (diagnostics_test["Loss"] == np.min(losses_test)) and \
(self.best_checkpoint_name is not None)):
best_machine = True
save_checkpoint({
'epochs': epoch+1,
'pars' : self.pars,
'state_dict': self.net.state_dict(),
'optimizer' : self.net_optim.state_dict(),
'scheduler' : self.net_sched.state_dict(),
}, self.best_checkpoint_name)
else:
best_machine = False
##############################
# Print progress
##############################
if(self.test_size>0):
print("[%4d/%4d], Loss: (%.4f, %.4f)" %
(epoch + 1, self.epochs, diagnostics_train["Loss"], diagnostics_test["Loss"]), end=", ")
print("MMD: (%.4f,%.4f)" %
(diagnostics_train["MMD-Full"]+diagnostics_train["MMD-Swap"],
diagnostics_test["MMD-Full"]+diagnostics_test["MMD-Swap"]), end=", ")
print("Cov: (%.3f,%.3f)" %
(diagnostics_train["Corr-Full"]+diagnostics_train["Corr-Swap"],
diagnostics_test["Corr-Full"]+diagnostics_test["Corr-Swap"]), end=", ")
print("Decorr: (%.3f,%.3f)" %
(diagnostics_train["Corr-Diag"], diagnostics_test["Corr-Diag"]), end="")
if best_machine:
print(" *", end="")
else:
print("[%4d/%4d], Loss: %.4f" %
(epoch + 1, self.epochs, diagnostics_train["Loss"]), end=", ")
print("MMD: %.4f" %
(diagnostics_train["MMD-Full"] + diagnostics_train["MMD-Swap"]), end=", ")
print("Cov: %.3f" %
(diagnostics_train["Corr-Full"] + diagnostics_train["Corr-Swap"]), end=", ")
print("Decorr: %.3f" %
(diagnostics_train["Corr-Diag"]), end="")
print("")
sys.stdout.flush()
# Save diagnostics to logfile
if self.logs_name is not None:
diagnostics.to_csv(self.logs_name, sep=" ", index=False)
# Save the current machine to location checkpoint_name
if self.checkpoint_name is not None:
save_checkpoint({
'epochs': epoch+1,
'pars' : self.pars,
'state_dict': self.net.state_dict(),
'optimizer' : self.net_optim.state_dict(),
'scheduler' : self.net_sched.state_dict(),
}, self.checkpoint_name)
def load(self, checkpoint_name):
""" Load a machine from a stored checkpoint
:param checkpoint_name: checkpoint name of a trained machine
"""
filename = checkpoint_name + "_checkpoint.pth.tar"
flag = 1
if os.path.isfile(filename):
print("=> loading checkpoint '{}'".format(filename))
sys.stdout.flush()
try:
checkpoint = torch.load(filename, map_location='cpu')
except:
print("error loading saved model, trying the previous version")
sys.stdout.flush()
flag = 0
if flag == 0:
try:
checkpoint = torch.load(filename + '_prev.pth.tar', map_location='cpu')
flag = 1
except:
print("error loading prev model, starting from scratch")
sys.stdout.flush()
flag = 0
else:
print("=> no checkpoint found at '{}'".format(filename))
sys.stdout.flush()
flag = 0
if flag == 1:
self.net.load_state_dict(checkpoint['state_dict'])
if torch.cuda.is_available():
self.net = self.net.cuda()
self.net_optim = optim.SGD(self.net.parameters(), lr = self.lr, momentum=0.9)
self.net_optim.load_state_dict(checkpoint['optimizer'])
self.net_sched = optim.lr_scheduler.MultiStepLR(self.net_optim, gamma=0.1,
milestones=self.lr_milestones)
self.resume_epoch = checkpoint['epochs']
print("=> loaded checkpoint '{}' (epoch {})"
.format(filename, checkpoint['epochs']))
sys.stdout.flush()
else:
self.net.train()
self.net_optim = optim.SGD(self.net.parameters(), lr = self.lr, momentum=0.9)
self.net_sched = optim.lr_scheduler.MultiStepLR(self.net_optim, gamma=0.1,
milestones=self.lr_milestones)
self.resume_epoch = 0
def generate(self, X_in):
""" Generate knockoff copies
:param X_in: data samples
:return Xk: knockoff copy per each sample in X
"""
X = torch.from_numpy(X_in).float()
self.net = self.net.cpu()
self.net.eval()
# Run the network in evaluation mode
Xk = self.net(X, self.noise_std*torch.randn(X.size(0),self.dim_noise))
Xk = Xk.data.cpu().numpy()
return Xk
``` |
{
"source": "JorisRoels/mri-inflammation-prediction",
"score": 2
} |
#### File: mri-inflammation-prediction/test/deep-inflammation-classifier.py
```python
import argparse
import pytorch_lightning as pl
from torch.utils.data import DataLoader
from neuralnets.util.io import print_frm
from neuralnets.util.tools import set_seed
from neuralnets.util.augmentation import *
from pytorch_lightning.callbacks import ModelCheckpoint
from data.datasets import SPARCCDataset
from models.sparcc_cnn import DeepInflammation_CNN
from util.constants import *
from train.sparcc_base import get_checkpoint_location
factor = {INFLAMMATION_MODULE: 64, DEEP_INFLAMMATION_MODULE: 12, SPARCC_MODULE: 1, JOINT: 1}
def _test_module(net, test_data, args):
checkpoint_callback = ModelCheckpoint(save_top_k=5, verbose=True, monitor='val/roc-auc', mode='max')
test_data.mode = DEEP_INFLAMMATION_MODULE
trainer = pl.Trainer(max_epochs=args.epochs, gpus=args.gpus, accelerator=args.accelerator,
default_root_dir=args.log_dir, flush_logs_every_n_steps=args.log_freq,
log_every_n_steps=args.log_freq, callbacks=[checkpoint_callback],
progress_bar_refresh_rate=args.log_refresh_rate, num_sanity_val_steps=0, deterministic=True)
test_loader = DataLoader(test_data, batch_size=factor[DEEP_INFLAMMATION_MODULE]*args.test_batch_size,
num_workers=args.num_workers, pin_memory=True)
trainer.test(net, test_loader)
return trainer
if __name__ == '__main__':
# parse all the arguments
parser = argparse.ArgumentParser()
parser.add_argument("--data-dir", help="Path to the directory that contains a preprocessed dataset", type=str,
required=True)
parser.add_argument("--si-joint-model", help="Path to the SI joint detection checkpoint", type=str, required=True)
parser.add_argument("--model-checkpoint-illium", help="Path to the illium U-Net checkpoint", type=str,
required=True)
parser.add_argument("--model-checkpoint-sacrum", help="Path to the sacrum U-Net checkpoint", type=str,
required=True)
parser.add_argument("--repetitions", help="Number of repetitions", type=int, default=1)
parser.add_argument("--folds", help="Number of folds (overrides repetitions parameter if provided)", type=int,
default=None)
parser.add_argument("--filter-domain", help="Select a specific domain to filter out", type=str, default=None)
# network parameters
parser.add_argument("--train_val_test_split", help="Train/validation/test split", type=str, default=None)
parser.add_argument("--checkpoint", help="Path to pretrained inflammation model checkpoints top directory "
"(or path to the checkpoint if train_val_test_split is set)",
type=str, required=True)
parser.add_argument("--backbone", help="Backbone feature extractor of the inflammation model", type=str,
default='ResNet18')
parser.add_argument("--omit_t1_input", help="Boolean flag that omits usage of T1 slices", action='store_true',
default=False)
    parser.add_argument("--omit_t2_input", help="Boolean flag that omits usage of T2 slices", action='store_true',
default=False)
parser.add_argument("--omit_weighting", help="Boolean flag that specifies ROI masking", action='store_true',
default=False)
# optimization parameters
parser.add_argument("--epochs", help="Number of training epochs", type=int, default=400)
parser.add_argument("--lr", help="Learning rate for the optimization", type=float, default=1e-3)
# compute parameters
parser.add_argument("--train_batch_size", help="Batch size during training", type=int, default=1)
parser.add_argument("--test_batch_size", help="Batch size during testing", type=int, default=1)
parser.add_argument("--num_workers", help="Amount of workers", type=int, default=12)
parser.add_argument("--gpus", help="Devices available for computing", type=str, default='0')
parser.add_argument("--accelerator", help="Acceleration engine for computations", type=str, default='dp')
# logging parameters
parser.add_argument("--log_dir", help="Logging directory", type=str, default='logs')
parser.add_argument("--log_freq", help="Frequency to log results", type=int, default=50)
parser.add_argument("--log_refresh_rate", help="Refresh rate for logging", type=int, default=1)
parser.add_argument("--seed", help="Seed for reproducibility", type=int, default=0)
args = parser.parse_args()
if args.train_val_test_split is not None:
args.train_val_test_split = [float(item) for item in args.train_val_test_split.split(',')]
metrics = []
if args.folds is not None:
reps = args.folds
range_split = ((0, 1), (0, 1))
else:
reps = args.repetitions
f = None
split = args.train_val_test_split
range_split = ((0, split[1]), (0, split[1]), (split[1], 1))
for i in range(reps):
rep_str = 'fold' if args.folds is not None else 'repetition'
print_frm('')
print_frm('Start processing %s %d/%d ...' % (rep_str, i+1, reps))
print_frm('')
"""
Fix seed (in case of cross validation), or increment if repetitive training
"""
if args.folds is not None:
set_seed(args.seed)
f = i
else:
args.seed = args.seed + 1
set_seed(args.seed)
"""
Load the data
"""
print_frm('Loading data')
if range_split[1][1] > 0:
val = SPARCCDataset(args.data_dir, args.si_joint_model, args.model_checkpoint_illium,
args.model_checkpoint_sacrum, range_split=range_split[1], folds=args.folds, f=i,
train=False, seed=args.seed, mode=DEEP_INFLAMMATION_MODULE, use_t1_input=not args.omit_t1_input,
use_t2_input=not args.omit_t2_input, apply_weighting=not args.omit_weighting,
filter_domain=args.filter_domain)
else:
val = None
if args.folds is None:
test = SPARCCDataset(args.data_dir, args.si_joint_model, args.model_checkpoint_illium,
args.model_checkpoint_sacrum, range_split=range_split[2], seed=args.seed,
mode=DEEP_INFLAMMATION_MODULE, use_t1_input=not args.omit_t1_input,
use_t2_input=not args.omit_t2_input, apply_weighting=not args.omit_weighting,
filter_domain=args.filter_domain)
"""
Build the network
"""
print_frm('Building the network')
if val is not None:
weights = val.score_weights[0]
else:
weights = test.score_weights[0]
net = DeepInflammation_CNN(backbone=args.backbone, lr=args.lr, use_t1_input=not args.omit_t1_input,
use_t2_input=not args.omit_t2_input, weights=weights)
## load networks checkpoint ##
ckpt_i_file = get_checkpoint_location(args.checkpoint, f) if f is not None else args.checkpoint
net.load_state_dict(torch.load(ckpt_i_file)['state_dict'])
print_frm('Balancing weights for loss function: %s' % (weights))
"""
Testing the inflammation network
"""
print_frm('Testing network')
trainer = _test_module(net, val if args.folds is not None else test, args)
metrics.append([float(trainer.logged_metrics['test/' + m].cpu()) for m in METRICS])
"""
Report final performance results
"""
metrics = np.asarray(metrics)
metrics_avg = np.mean(metrics, axis=0)
print_frm('Final performance report:')
print_frm('=========================')
for i, m in enumerate(METRICS):
print_frm(' %s: %f' % (m, metrics_avg[i]))
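# Hedged command-line sketch (paths are placeholders, not from the repository):
#
#   python test/deep-inflammation-classifier.py \
#       --data-dir /path/to/preprocessed-data \
#       --si-joint-model /path/to/si-joint.ckpt \
#       --model-checkpoint-illium /path/to/illium-unet.ckpt \
#       --model-checkpoint-sacrum /path/to/sacrum-unet.ckpt \
#       --checkpoint /path/to/inflammation-checkpoints \
#       --folds 5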
```
#### File: mri-inflammation-prediction/test/efficientdet.py
```python
import argparse
import numpy as np
import torch
import torch.nn.parallel
from contextlib import suppress
import cv2
import matplotlib.pyplot as plt
import os
from effdet import create_model
from timm.utils import setup_default_logging
from timm.models.layers import set_layer_config
from neuralnets.util.tools import normalize
from util.constants import *
from util.tools import load
has_apex = False
try:
from apex import amp
has_apex = True
except ImportError:
pass
has_native_amp = False
try:
if getattr(torch.cuda.amp, 'autocast') is not None:
has_native_amp = True
except AttributeError:
pass
torch.backends.cudnn.benchmark = True
def add_bool_arg(parser, name, default=False, help=''): # FIXME move to utils
dest_name = name.replace('-', '_')
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument('--' + name, dest=dest_name, action='store_true', help=help)
group.add_argument('--no-' + name, dest=dest_name, action='store_false', help=help)
parser.set_defaults(**{dest_name: default})
parser = argparse.ArgumentParser(description='PyTorch ImageNet Validation')
parser.add_argument("--data-dir", help="Path to the directory that contains a preprocessed dataset", type=str, required=True)
parser.add_argument('--model', default='tf_efficientdet_d0_mri', type=str, metavar='MODEL',
                    help='Name of model to train (default: "tf_efficientdet_d0_mri")')
add_bool_arg(parser, 'redundant-bias', default=None,
help='override model config for redundant bias layers')
add_bool_arg(parser, 'soft-nms', default=None, help='override model config for soft-nms')
parser.add_argument('--num-classes', type=int, default=1, metavar='N',
help='Override num_classes in model config if set. For fine-tuning from pretrained.')
parser.add_argument('-j', '--workers', default=0, type=int, metavar='N',
                    help='number of data loading workers (default: 0)')
parser.add_argument('-b', '--batch-size', default=4, type=int,
metavar='N', help='mini-batch size (default: 4)')
parser.add_argument('--img-size', default=None, type=int,
metavar='N', help='Input image dimension, uses model default if empty')
parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
help='Override mean pixel value of dataset')
parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
                    help='Override std deviation of dataset')
parser.add_argument('--interpolation', default='bilinear', type=str, metavar='NAME',
help='Image resize interpolation type (overrides model)')
parser.add_argument('--fill-color', default=None, type=str, metavar='NAME',
help='Image augmentation fill (background) color ("mean" or int)')
parser.add_argument('--log-freq', default=10, type=int,
metavar='N', help='batch logging frequency (default: 10)')
parser.add_argument('--model-checkpoint', required=True, default='', type=str, metavar='PATH',
help='path to checkpoint to test')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--num-gpu', type=int, default=1,
help='Number of GPUS to use')
parser.add_argument('--no-prefetcher', action='store_true', default=False,
help='disable fast prefetcher')
parser.add_argument('--pin-mem', action='store_true', default=True,
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--use-ema', dest='use_ema', action='store_true',
help='use ema version of weights if present')
parser.add_argument('--amp', action='store_true', default=True,
help='Use AMP mixed precision. Defaults to Apex, fallback to native Torch AMP.')
parser.add_argument('--apex-amp', action='store_true', default=False,
help='Use NVIDIA Apex AMP mixed precision')
parser.add_argument('--native-amp', action='store_true', default=False,
help='Use Native Torch AMP mixed precision')
parser.add_argument('--torchscript', dest='torchscript', action='store_true',
help='convert model torchscript for inference')
parser.add_argument('--results', default='./results.json', type=str, metavar='FILENAME',
help='JSON filename for evaluation results')
parser.add_argument('--out_dir', default='results', type=str, help='destination directory of the resulting detections')
classes = ['joint', 'joint-left', 'joint-right']
font = cv2.FONT_HERSHEY_SIMPLEX
fontScale = 0.75
colors_pred = [(0, 0, 0), (0, 0, 255), (0, 0, 255)]
colors_target = [(0, 0, 0), (0, 255, 0), (0, 255, 0)]
thickness = 2
def draw_bbox(img, bbox_pred, bbox_target=None):
def _draw_box(img, box, color, text=None, font=None, fontScale=None, thickness=None):
x, y, x_, y_ = box[:4].astype(int)
img = cv2.rectangle(img, (x, y), (x_, y_), color, thickness)
if text is not None:
textsize = cv2.getTextSize(text, font, fontScale, thickness)[0]
textX = int((x + x_) / 2 - textsize[0] / 2)
textY = int(y - textsize[1] / 2)
img = cv2.putText(img, text, (textX, textY), font, fontScale, color, thickness, cv2.LINE_AA)
return img
for i in range(bbox_pred.shape[0]):
b = int(bbox_pred[i, -1])
img = _draw_box(img, bbox_pred[i], colors_pred[b], thickness=thickness)
if bbox_target is not None:
# img = _draw_box(img, bbox_target[i], colors_target[b], text=classes[b], font=font, fontScale=fontScale,
# thickness=thickness)
img = _draw_box(img, bbox_target[i], colors_target[b], thickness=thickness)
# plt.imshow(img)
# plt.show()
return img
def validate(args):
setup_default_logging()
args.checkpoint = args.model_checkpoint
if args.amp:
if has_apex:
args.apex_amp = True
elif has_native_amp:
args.native_amp = True
assert not args.apex_amp or not args.native_amp, "Only one AMP mode should be set."
args.pretrained = args.pretrained or not args.checkpoint # might as well try to validate something
args.prefetcher = not args.no_prefetcher
# create model
with set_layer_config(scriptable=args.torchscript):
bench = create_model(
args.model,
bench_task='predict',
num_classes=args.num_classes,
pretrained=args.pretrained,
redundant_bias=args.redundant_bias,
soft_nms=args.soft_nms,
checkpoint_path=args.checkpoint,
checkpoint_ema=args.use_ema,
)
param_count = sum([m.numel() for m in bench.parameters()])
print('Model %s created, param count: %d' % (args.model, param_count))
bench = bench.cuda()
amp_autocast = suppress
if args.apex_amp:
bench = amp.initialize(bench, opt_level='O1')
print('Using NVIDIA APEX AMP. Validating in mixed precision.')
elif args.native_amp:
amp_autocast = torch.cuda.amp.autocast
print('Using native Torch AMP. Validating in mixed precision.')
else:
print('AMP not enabled. Validating in float32.')
if args.num_gpu > 1:
bench = torch.nn.DataParallel(bench, device_ids=list(range(args.num_gpu)))
t1_data = load(os.path.join(args.data_dir, T1_PP_FILE))
bench.eval()
with torch.no_grad():
i = np.random.randint(len(t1_data))
s = np.random.randint(len(t1_data[i]))
x = t1_data[i][s]
clahe = cv2.createCLAHE(clipLimit=T1_CLIPLIMIT)
x = normalize(x.astype(float), type='minmax')
x = clahe.apply((x * (2 ** 16 - 1)).astype('uint16')) / (2 ** 16 - 1)
x = (x - X_MU) / X_STD
input = torch.from_numpy(np.repeat(x[np.newaxis, np.newaxis, ...], 3, axis=1)).cuda()
with amp_autocast():
output = bench(input.float())[0].cpu().numpy()
pred = output[:2, :]
img = np.zeros((3, x.shape[0], x.shape[1]))
for c in range(3):
img[c] = x
img_ = img.transpose(1, 2, 0)
m = img_.min()
M = img_.max()
img_ = ((img_ - m) / (M - m) * 255).astype('uint8').copy()
img_ = draw_bbox(img_, pred)
plt.imshow(img_)
plt.axis('off')
plt.show()
# cv2.imwrite(os.path.join(args.out_dir, '%d.jpg' % i), img_)
def main():
args = parser.parse_args()
validate(args)
if __name__ == '__main__':
main()
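# Hedged command-line sketch (paths are placeholders, not from the repository):
#
#   python test/efficientdet.py \
#       --data-dir /path/to/preprocessed-data \
#       --model-checkpoint /path/to/tf_efficientdet_d0_mri.pth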
``` |
{
"source": "jorisroovers/gusto",
"score": 2
} |
#### File: gusto/gusto/controllers.py
```python
from .models import GustoModel
class GustoController:
def __init__(self, request, model: GustoModel = None) -> None:
self.request = request
self.db = self.request.app.state.db_session
self.model = model
@staticmethod
def as_dict(object):
if not object:
return None
if isinstance(object, GustoModel):
return object.as_dict()
return [o.as_dict() for o in object ]
def list(self):
return self.as_dict(self.db.query(self.model).all())
def filter(self, *args):
return self.as_dict(self.db.query(self.model).filter(*args))
def first(self, *args):
return self.as_dict(self.db.query(self.model).filter(*args).first())
def get(self, *args):
return self.as_dict(self.db.query(self.model).get(*args))
def create(self, obj):
self.db.add(obj)
self.db.commit()
def delete(self, filter):
self.db.query(self.model).filter(filter).delete()
self.db.commit()
def update(self, filter, update):
self.db.query(self.model).filter(filter).update(update)
self.db.commit()
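# Hedged usage sketch (the `Recipe` model exists elsewhere in this repository, but the
# request/route wiring below is illustrative, not taken from this file):
#
#   from .models import Recipe
#
#   def list_recipes(request):
#       return GustoController(request, model=Recipe).list()   # list of plain dicts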
```
#### File: gusto/gusto/mealplan.py
```python
import copy
import csv
import logging
import random
import uuid
from os import name
import arrow
from rich.console import Console
from collections import Counter
LOG = logging.getLogger("gusto.mealplan")
console = Console()
from . import models
from .controllers import GustoController
from .constraints import Constraint, TagConstraint, CONSTRAINTS
class Importer:
def __init__(self, db_session) -> None:
self.db = db_session
def import_from_csv(self, filename) -> None:
LOG.debug("Reading from %s", filename)
existing_recipes = dict({(recipe.name, recipe) for recipe in self.db.query(models.Recipe).all()})
existing_tags = dict({(tag.name, tag) for tag in self.db.query(models.Tag).all()})
counters = Counter({"records":0, "tags":0, "recipes":0, "meals":0})
with open(filename) as csv_file:
records = csv.DictReader(csv_file)
for record in records:
counters['records']+=1
# Import tags
                # Note: because we always import tags that we find, there might be cases where we import a tag
                # but don't end up importing the recipe (because it's a duplicate or some error occurs); that's
                # probably good enough for now
parsed_tags = [l.strip() for l in record['Tags'].split(",") if l.strip() != ""]
canonical_tags = []
for tag in parsed_tags:
canonical_tag = existing_tags.get(tag.lower(), False)
if not canonical_tag:
canonical_tag = models.Tag(name = tag.lower(), display_name=tag)
self.db.add(canonical_tag)
existing_tags[canonical_tag.name] = canonical_tag
counters['tags']+=1
canonical_tags.append(canonical_tag)
self.db.commit()
# Import recipes
recipe = existing_recipes.get(record['Name'], False)
if not recipe:
recipe = models.Recipe(name=record['Name'], description="", comments=record['Comments'],
url=record['URL'], tags=",".join([t.name for t in canonical_tags]))
self.db.add(recipe)
existing_recipes[recipe.name] = recipe
self.db.commit()
counters['recipes']+=1
# Import meals
if record['Date'] != '':
mealplan_date = arrow.get(record['Date'], "MMM D, YYYY").date()
existing_meal = self.db.query(models.Meal).filter(models.Meal.date==mealplan_date).first()
# If there's already a meal in the database for the date,
# don't overwrite (this would error out anyways because of unique constraint)
if not existing_meal:
meal = models.Meal(recipe_id=recipe.id, date=mealplan_date)
self.db.add(meal)
self.db.commit()
counters['meals']+=1
self.db.commit()
LOG.info(f"Read {counters['records']} records from {filename}.")
LOG.info(f"Inserted {counters['recipes']} recipes, {counters['meals']} meals, {counters['tags']} tags")
class Meal:
def __init__(self, recipe: dict, date, constraint) -> None:
self.recipe = recipe
self.date = date
self.constraint = constraint
def for_json(self) -> dict:
return {"recipe": self.recipe, "date": self.date.for_json(), "constraint": self.constraint.for_json() }
def __str__(self) -> str:
return self.recipe
class MealPlan:
def __init__(self) -> None:
self.meals = []
def for_json(self) -> dict:
return {"meals": [meal.for_json() for meal in self.meals] }
def export_to_csv(self, filename: str):
LOG.info(f"Exporting to [yellow]{filename}[/]")
with open(filename, 'w') as export_file:
fieldnames = ["Done", "Weekd", "Date", "Name", "Tags", "Comments", "URL", "Score"]
exporter = csv.DictWriter(export_file, fieldnames=fieldnames, extrasaction="ignore")
exporter.writeheader()
for meal in self.meals:
meal.recipe.update({
'Date': meal.date.format('MMM D, YYYY'),
'Done':"No",
'Weekd': ""
})
exporter.writerow(meal.recipe)
WEEK_CONSTRAINTS = [
CONSTRAINTS['veggie-day'], CONSTRAINTS['fish-day'], CONSTRAINTS['asian-day'], CONSTRAINTS['steak-day'], CONSTRAINTS['pasta-day'], CONSTRAINTS['free-day'],
]
class MealPlanGenerator:
def __init__(self, recipes) -> None:
self.recipe_pool = { r['Name']: r for r in recipes.recipes}
def generate_mealplan(self, start_date, num_weeks: int) -> MealPlan:
meals = []
day_offset = 0
for _ in range(num_weeks):
# Add some randomness to when we eat what, but ensure Friday is "Zondigen"
day_constraints = copy.copy(WEEK_CONSTRAINTS)
random.shuffle(day_constraints)
day_constraints.insert(4, Constraint("Vettige Vrijdag", lambda r: "Zondigen" in r['parsed-tags']))
for constraint in day_constraints:
recipe = random.choice(self.generate_recipe_set(constraint))
meal = Meal(recipe, start_date.shift(days=day_offset), constraint)
self.recipe_pool.pop(meal.recipe['Name'])
meals.append(meal)
day_offset += 1
mealplan = MealPlan()
mealplan.meals = meals
return mealplan
def generate_recipe_set(self, constraint: Constraint) -> list:
return [ r for r in self.recipe_pool.values() if constraint.match(r) ]
def regenerate_meal(self, meal) -> Meal:
recipe = random.choice([ r for r in self.recipe_pool.values() if meal.constraint.match(r) ])
        # Add the original meal recipe back to the pool, so we can use it again
        self.recipe_pool[meal.recipe['Name']] = meal.recipe
        meal.recipe = recipe
return meal
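# Hedged usage sketch (assumes a `recipes` wrapper exposing a `.recipes` list of dicts with
# 'Name' and 'parsed-tags' keys, as implied by the constructor and the constraints above):
#
#   import arrow
#   generator = MealPlanGenerator(recipes)
#   plan = generator.generate_mealplan(arrow.utcnow(), num_weeks=2)
#   plan.export_to_csv("mealplan.csv")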
``` |
{
"source": "jorisroovers/linux-playground",
"score": 2
} |
#### File: linux-playground/dbus/dbus_server.py
```python
from gi.repository import GLib
import dbus
import dbus.service
from dbus.mainloop.glib import DBusGMainLoop
class MyDBusService(dbus.service.Object):
def __init__(self):
        # IMPORTANT: you can only use dbus.SystemBus() here. dbus.SessionBus() is only supported when you have an
        # actual X11/desktop environment with sessions.
bus_name = dbus.service.BusName('org.me.test', bus=dbus.SystemBus())
dbus.service.Object.__init__(self, bus_name, '/org/me/test')
# Interface and Method
@dbus.service.method('org.me.test1')
def bus_message_1(self):
return "System Bus 1 method call return"
# Method with arguments
@dbus.service.method('org.me.test2')
def bus_message_2(self, string1, string2):
return string1 + " " + string2
DBusGMainLoop(set_as_default=True)
dbus_service = MyDBusService()
try:
GLib.MainLoop().run()
except KeyboardInterrupt:
print("\nThe MainLoop will close...")
GLib.MainLoop().quit()
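# Hedged client-side sketch (run from a separate process; the bus name, object path and
# interface name match the service above, everything else is illustrative):
#
#   import dbus
#   bus = dbus.SystemBus()
#   proxy = bus.get_object('org.me.test', '/org/me/test')
#   print(proxy.bus_message_2("Hello", "DBus", dbus_interface='org.me.test2'))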
``` |
{
"source": "jorisroovers/python-playground",
"score": 2
} |
#### File: python-playground/C-extensions/helloworld.py
```python
import greeter
def main():
greeter.greet('World2')
if __name__ == "__main__":
main()
``` |
{
"source": "jorisroovers/roofcam",
"score": 3
} |
#### File: roofcam/cli/tools.py
```python
import click
import json
import os
@click.group()
def cli():
pass
@cli.command("compactdb")
@click.option('-d', '--dir', required=True, help="Directory containing target.json and images",
type=click.Path(exists=True, resolve_path=True, file_okay=False, readable=True))
def compact_db(dir):
""" Compacts the target.json db file by removing non-existing file entries from it """
target_file = os.path.join(dir, "target.json")
if not os.path.exists(target_file):
click.echo("FATAL: No file '{0}'".format(target_file))
exit(1)
print "Compacting db", dir, "..."
with open(target_file) as json_data:
db = json.load(json_data)
for filename in db.keys():
full_path = os.path.join(dir, filename)
if not os.path.exists(full_path):
print "Removing {}".format(filename)
del db[filename]
with open(target_file, 'w') as f:
json.dump(db, f)
print "DONE"
if __name__ == "__main__":
cli()
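# Hedged command-line sketch (the directory path is a placeholder):
#
#   python tools.py compactdb --dir /path/to/captures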
``` |
{
"source": "jorisroovers/systems-playground",
"score": 2
} |
#### File: grpc/backend/book_store_pb2_grpc.py
```python
import grpc
import book_store_pb2 as book__store__pb2
class BookStoreStub(object):
"""(Method definitions not shown)
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetBook = channel.unary_unary(
'/BookStore/GetBook',
request_serializer=book__store__pb2.BookReference.SerializeToString,
response_deserializer=book__store__pb2.Book.FromString,
)
class BookStoreServicer(object):
"""(Method definitions not shown)
"""
def GetBook(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_BookStoreServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetBook': grpc.unary_unary_rpc_method_handler(
servicer.GetBook,
request_deserializer=book__store__pb2.BookReference.FromString,
response_serializer=book__store__pb2.Book.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'BookStore', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
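# Hedged usage sketch (assumes a concrete servicer subclass implementing GetBook and a local
# server on port 50051; the BookReference field name `id` is an assumption, check the .proto):
#
#   from concurrent import futures
#   server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
#   add_BookStoreServicer_to_server(MyBookStoreServicer(), server)
#   server.add_insecure_port('[::]:50051')
#   server.start()
#
#   channel = grpc.insecure_channel('localhost:50051')
#   stub = BookStoreStub(channel)
#   book = stub.GetBook(book__store__pb2.BookReference(id=1))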
``` |
{
"source": "jorisslob/RoguelikeFantasyWorldSimulator",
"score": 3
} |
#### File: jorisslob/RoguelikeFantasyWorldSimulator/gameobject.py
```python
class GameObject:
# This is a generic object: the player, a monster, an item, the stairs...
# It's always represented by a character on screen
def __init__(self, x, y, char, name, color, blocks=False):
self.x = x
self.y = y
self.char = char
self.name = name
self.color = color
self.blocks = blocks
def move(self, dx, dy, the_map, objects):
# move by the given amount
if not is_blocked(self.x + dx, self.y + dy, the_map, objects):
self.x += dx
self.y += dy
def draw(self, visible_tiles, console):
# only show if it's visible to the player
if (self.x, self.y) in visible_tiles:
# draw the character that represents this object at its position
console.draw_char(self.x, self.y, self.char, self.color, bg=None)
def clear(self, console):
# erase the character that represents this object
console.draw_char(self.x, self.y, ' ', self.color, bg=None)
def is_blocked(x, y, the_map, objects):
# first test the map tile
if the_map[x][y].blocked:
return True
# now check for any blocking objects
for obj in objects:
if obj.blocks and obj.x == x and obj.y == y:
return True
return False
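# Hedged usage sketch (assumes the `my_map` and `objects` structures built in main.py):
#
#   npc = GameObject(10, 10, 'o', 'orc', (0, 255, 0), blocks=True)
#   objects.append(npc)
#   npc.move(1, 0, my_map, objects)   # only moves when the destination tile is not blocked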
```
#### File: jorisslob/RoguelikeFantasyWorldSimulator/main.py
```python
from controls import handle_keys
from gameobject import GameObject
from themap import make_map
import colors
import config
import tdl
def is_visible_tile(x, y, the_map):
if x >= config.MAP_WIDTH or x < 0:
return False
elif y >= config.MAP_HEIGHT or y < 0:
return False
elif the_map[x][y].blocked == True:
return False
elif the_map[x][y].block_sight == True:
return False
else:
return True
def render_all(fov_recompute, visible_tiles):
def visibility(x, y):
return is_visible_tile(x, y, my_map)
if fov_recompute:
fov_recompute = False
visible_tiles = tdl.map.quickFOV(player.x, player.y,
visibility,
fov=config.FOV_ALGO,
radius=config.TORCH_RADIUS,
lightWalls=config.FOV_LIGHT_WALLS)
# go through all tiles, and set their background color according to the FOV
for y in range(config.MAP_HEIGHT):
for x in range(config.MAP_WIDTH):
visible = (x, y) in visible_tiles
wall = my_map[x][y].block_sight
if not visible:
# it's not visible right now, the player can only see it if it's explored
if my_map[x][y].explored:
if wall:
con.draw_char(x, y, None, fg=None,
bg=colors.dark_blue)
else:
con.draw_char(x, y, None, fg=None,
bg=colors.desaturated_blue)
else:
# it's visible
if wall:
con.draw_char(x, y, None, fg=None,
bg=colors.desaturated_amber)
else:
con.draw_char(x, y, None, fg=None,
bg=colors.brass)
# since it's visible, explore it
my_map[x][y].explored = True
# draw all objects in the list
for obj in objects:
obj.draw(visible_tiles, con)
# blit the contents of "con" to the root console and present it
    root.blit(con, 0, 0, config.SCREEN_WIDTH, config.SCREEN_HEIGHT, 0, 0)
    return fov_recompute, visible_tiles
##############################
# Initialization & Main Loop #
##############################
tdl.set_font('arial10x10.png', greyscale=True, altLayout=True)
root = tdl.init(config.SCREEN_WIDTH, config.SCREEN_HEIGHT,
title="Roguelike Fantasy World Simulator", fullscreen=False)
tdl.setFPS(config.LIMIT_FPS)
con = tdl.init(config.SCREEN_WIDTH, config.SCREEN_HEIGHT)
# create object representing the player
player = GameObject(config.SCREEN_WIDTH//2,
config.SCREEN_HEIGHT//2, '@', 'player', colors.white, blocks=True)
# the list of objects, will be larger later
objects = [player]
# generate map (at this point it's not drawn to the screen), and extend the list of objects
(my_map, new_objects) = make_map(player)
objects.extend(new_objects)
visible_tiles = None
fov_recompute = True
while not tdl.event.is_window_closed():
    # render the screen and keep the updated FOV state for the next iteration
    (fov_recompute, visible_tiles) = render_all(fov_recompute, visible_tiles)
tdl.flush()
# erase all objects at their old locations, before they move
for obj in objects:
obj.clear(con)
# handle keys and exit game if needed
(exit_game, fov_recompute) = handle_keys(
fov_recompute, player, my_map, objects)
if exit_game:
break
``` |
{
"source": "Joristiebosch/deshima-sensitivity",
"score": 3
} |
#### File: deshima-sensitivity/deshima_sensitivity/instruments.py
```python
from typing import List, Union, Tuple
# dependent packages
import numpy as np
from .physics import c, e, h, rad_trans
# type aliases
ArrayLike = Union[np.ndarray, List[float], List[int], float, int]
# constants
Delta_Al = 188.0 * 10 ** -6 * e # gap energy of Al
eta_pb = 0.4 # Pair breaking efficiency
eta_Al_ohmic_850 = 0.9975 # Ohmic loss of an Al surface at 850 GHz.
# Shitov+, ISSTT2008. https://www.nrao.edu/meetings/isstt/papers/2008/2008263266.pdf
# main functions
def D2HPBW(F: ArrayLike) -> ArrayLike:
"""Get half-power beam width of DESHIMA 2.0 at given frequency (frequencies).
Parameters
----------
F
Frequency. Units: Hz.
Returns
-------
hpbw
Half-power beam width. Units: radian.
"""
return 29.0 * 240.0 / (F / 1e9) * np.pi / 180.0 / 60.0 / 60.0
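# Quick sanity check (approximate): at F = 350 GHz the expression above gives
# 29 * 240 / 350 ≈ 19.9 arcsec ≈ 9.6e-5 rad for the half-power beam width.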
def eta_mb_ruze(F: ArrayLike, LFlimit: float, sigma: float) -> ArrayLike:
"""Get main-beam efficiency by Ruze's equation.
Parameters
----------
F
Frequency. Units: Hz.
LFlimit
Main-beam efficiency at 0 Hz.
sigma
Surface error. Units: m.
Returns
-------
eta_mb
Main-beam efficiency. Units: None.
"""
return LFlimit * np.exp(-((4.0 * np.pi * sigma * F / c) ** 2.0))
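# Worked example (approximate): with LFlimit = 0.805, sigma = 37e-6 m and F = 350 GHz,
# (4 * pi * sigma * F / c)**2 ≈ 0.29, so eta_mb ≈ 0.805 * exp(-0.29) ≈ 0.60.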
def photon_NEP_kid(F: ArrayLike, Pkid: ArrayLike, W_F: ArrayLike) -> ArrayLike:
"""NEP of the KID, with respect to the absorbed power.
Parameters
-----------
F
Frequency of the signal responsible for loading. Units: Hz.
Pkid
Power absorbed by the KID. Units: W.
W_F
Detection bandwidth, with respect to the power that sets the loading. Units: Hz.
Returns
-------
NEP_kid
Noise-equivalent power of the KID.
Notes
-----
Pkid/(W_F * h * F) gives the occupation number.
"""
# photon_term = 2 * Pkid * (h*F + Pkid/W_F)
poisson_term = 2 * Pkid * h * F
bunching_term = 2 * Pkid * Pkid / W_F
r_term = 4 * Delta_Al * Pkid / eta_pb
return np.sqrt(poisson_term + bunching_term + r_term)
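# Note: the Poisson and bunching terms above combine to 2 * Pkid * h * F * (1 + n_occ),
# where n_occ = Pkid / (W_F * h * F) is the photon occupation number mentioned in the docstring.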
def window_trans(
F: ArrayLike,
psd_in: ArrayLike,
psd_cabin: ArrayLike,
psd_co: ArrayLike,
thickness: ArrayLike = 8.0e-3,
tandelta: float = 4.805e-4,
tan2delta: float = 1.0e-8,
neffHDPE: float = 1.52,
window_AR: bool = True,
) -> Tuple[ArrayLike, ArrayLike]:
"""Calculates the window transmission.
Parameters
----------
F
Frequency. Units: Hz.
psd_in
PSD of the incoming signal. Units : W / Hz.
psd_cabin
Johnson-Nyquist PSD of telescope cabin temperature. Units : W / Hz.
psd_co
Johnson-Nyquist PSD of cold-optics temperature. Units : W / Hz.
thickness
Thickness of the HDPE window. Units: m.
tandelta
Values from Stephen. "# 2.893e-8 %% tan delta, measured Biorat.
I use 1e-8 as this fits the tail of the data better".
tan2delta
Values from Stephen. "# 2.893e-8 %% tan delta, measured Biorat.
I use 1e-8 as this fits the tail of the data better".
neffHDPE
Refractive index of HDPE. Set to 1 to remove reflections. Units : None.
window_AR
        Whether the window has an anti-reflection (AR) coating (True) or not (False).
Returns
-------
psd_after_2nd_refl
PSD looking into the window from the cold optics.
eta_window
Transmission of the window. Units: None.
"""
    # Parameters to calculate the window transmission (HDPE), data from Stephen
    # reflection: ((1-neffHDPE)/(1+neffHDPE))^2. Set to 0 for AR-coated windows.
if window_AR:
HDPErefl = 0.0
else:
HDPErefl = ((1 - neffHDPE) / (1 + neffHDPE)) ** 2
eta_HDPE = np.exp(
-thickness
* 2
* np.pi
* neffHDPE
* (tandelta * F / c + tan2delta * (F / c) ** 2)
)
# most of the reflected power sees the cold.
psd_after_1st_refl = rad_trans(psd_in, psd_co, 1.0 - HDPErefl)
psd_before_2nd_refl = rad_trans(psd_after_1st_refl, psd_cabin, eta_HDPE)
# the reflected power sees the cold.
psd_after_2nd_refl = rad_trans(psd_before_2nd_refl, psd_co, 1.0 - HDPErefl)
eta_window = (1.0 - HDPErefl) ** 2 * eta_HDPE
return psd_after_2nd_refl, eta_window
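# Note: with window_AR=True the reflection term HDPErefl is zero, so eta_window reduces to
# the pure dielectric-loss transmission eta_HDPE.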
```
#### File: deshima-sensitivity/deshima_sensitivity/plotting.py
```python
__all__ = ["MDLF_simple", "MS_simple", "PlotD2HPBW"]
# standard library
from typing import List, Union
# dependent packages
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from jupyter_io import savetable_in_notebook
from jupyter_io.output import HTML
from .instruments import D2HPBW, eta_mb_ruze
from .simulator import spectrometer_sensitivity
# type aliases
ArrayLike = Union[np.ndarray, List[float], List[int], float, int]
# main functions
def MDLF_simple(
F: ArrayLike,
pwv: float = 0.5,
EL: float = 60.0,
snr: float = 5.0,
obs_hours: float = 8.0,
) -> HTML:
"""Plot minimum detectable line flux (MDLF) of DESHIMA 2.0 on ASTE.
Parameters
----------
F
Frequency. Units: GHz.
pwv
Precipitable water vapor. Units: mm.
EL
Elevation angle. Units: degrees.
snr
Target S/N of the detection.
obs_hours
Total hours of observation including ON-OFF and calibration overhead.
Returns
-------
html
HTML object for download link (to be used in Jupyter notebook).
"""
# Main beam efficiency of ASTE (0.9 is from EM, ruze is from ASTE)
eta_mb = eta_mb_ruze(F=F, LFlimit=0.805, sigma=37e-6) * 0.9
D2goal_input = {
"F": F,
"pwv": pwv,
"EL": EL,
"snr": snr,
"obs_hours": obs_hours,
"eta_mb": eta_mb,
"theta_maj": D2HPBW(F), # Half power beam width (major axis)
"theta_min": D2HPBW(F), # Half power beam width (minor axis)
"on_source_fraction": 0.4 * 0.9, # ON-OFF 40%, calibration overhead of 10%
}
D2goal = spectrometer_sensitivity(**D2goal_input)
D2baseline_input = {
"F": F,
"pwv": pwv,
"EL": EL,
"snr": snr,
"obs_hours": obs_hours,
"eta_mb": eta_mb,
"theta_maj": D2HPBW(F), # Half power beam width (major axis)
"theta_min": D2HPBW(F), # Half power beam width (minor axis)
"on_source_fraction": 0.3 * 0.8, # Goal 0.4*0.9
"eta_circuit": 0.32 * 0.5, # eta_inst Goal 16%, Baseline 8%
"eta_IBF": 0.4, # Goal 0.6
"KID_excess_noise_factor": 1.2, # Goal 1.1
}
D2baseline = spectrometer_sensitivity(**D2baseline_input)
# Plotting
fig, ax = plt.subplots(1, 1, figsize=(12, 6))
ax.plot(
D2baseline["F"] / 1e9,
D2baseline["MDLF"],
"--",
linewidth=1,
color="b",
alpha=1,
label="Baseline",
)
ax.plot(
D2goal["F"] / 1e9, D2goal["MDLF"], linewidth=1, color="b", alpha=1, label="Goal"
)
ax.fill_between(
D2baseline["F"] / 1e9, D2baseline["MDLF"], D2goal["MDLF"], color="b", alpha=0.2
)
ax.set_xlabel("Frequency (GHz)")
ax.set_ylabel(r"Minimum Detectable Line Flux ($\mathrm{W\, m^{-2}}$)")
ax.set_yscale("log")
ax.set_xlim(200, 460)
ax.set_ylim([10 ** -20, 10 ** -17])
ax.tick_params(direction="in", which="both")
ax.grid(True)
ax.set_title(
f"R = {int(D2goal['R'][0])}, "
f"snr = {int(D2goal['snr'][0])}, "
f"t_obs = {D2goal['obs_hours'][0]} h (incl. overhead), "
f"PWV = {D2goal['PWV'][0]} mm, "
f"EL = {int(D2goal['EL'][0])} deg",
fontsize=12,
)
ax.legend()
fig.tight_layout()
# Create download link
df_download = D2goal[["F", "MDLF"]]
df_download = df_download.rename(columns={"MDLF": "MDLF (goal)"})
df_download = df_download.join(D2baseline[["MDLF"]])
df_download = df_download.rename(columns={"MDLF": "MDLF (baseline)"})
return savetable_in_notebook(df_download, "MDLF.csv")
def MS_simple(F: ArrayLike, pwv: float = 0.5, EL: float = 60.0) -> HTML:
"""Plot mapping speed of DESHIMA 2.0 on ASTE.
Parameters
----------
F
Frequency. Units: GHz.
pwv
Precipitable water vapor. Units: mm.
EL
Elevation angle. Units: degrees.
Returns
-------
html
HTML object for download link (to be used in Jupyter notebook).
"""
# Main beam efficiency of ASTE (0.9 is from EM, ruze is from ASTE)
eta_mb = eta_mb_ruze(F=F, LFlimit=0.805, sigma=37e-6) * 0.9
D2goal_input = {
"F": F,
"pwv": pwv,
"EL": EL,
"eta_mb": eta_mb,
"on_off": False,
"theta_maj": D2HPBW(F), # Half power beam width (major axis)
"theta_min": D2HPBW(F), # Half power beam width (minor axis)
}
D2goal = spectrometer_sensitivity(**D2goal_input)
D2baseline_input = {
"F": F,
"pwv": pwv,
"EL": EL,
"eta_mb": eta_mb,
"on_off": False,
"theta_maj": D2HPBW(F), # Half power beam width (major axis)
"theta_min": D2HPBW(F), # Half power beam width (minor axis)
"eta_circuit": 0.32 * 0.5, # eta_inst Goal 16%, Baseline 8%
"eta_IBF": 0.4, # Goal 0.6
"KID_excess_noise_factor": 1.2, # Goal 1.1
}
D2baseline = spectrometer_sensitivity(**D2baseline_input)
# Plotting
fig, ax = plt.subplots(1, 1, figsize=(12, 6))
ax.plot(
D2baseline["F"] / 1e9,
D2baseline["MS"],
"--",
linewidth=1,
color="b",
alpha=1,
label="Baseline",
)
ax.plot(
D2goal["F"] / 1e9, D2goal["MS"], linewidth=1, color="b", alpha=1, label="Goal"
)
ax.fill_between(
D2baseline["F"] / 1e9, D2baseline["MS"], D2goal["MS"], color="b", alpha=0.2
)
ax.set_xlabel("Frequency (GHz)")
ax.set_ylabel(r"Mapping Speed ($\mathrm{arcmin^2\, mJy^{-2}\, h^{-1}}$)")
ax.set_yscale("log")
ax.set_xlim(200, 460)
ax.set_ylim([10 ** -5, 10 ** -2])
ax.tick_params(direction="in", which="both")
ax.grid(True)
ax.set_title(
f"R = {int(D2goal['R'][0])}, "
f"PWV = {D2goal['PWV'][0]} mm, "
f"EL = {int(D2goal['EL'][0])} deg",
fontsize=12,
)
ax.legend()
fig.tight_layout()
# Create download link
df_download = D2goal[["F", "MS"]]
df_download = df_download.rename(columns={"MS": "MS (goal)"})
df_download = df_download.join(D2baseline[["MS"]])
df_download = df_download.rename(columns={"MS": "MS (baseline)"})
return savetable_in_notebook(df_download, "MS.csv")
def PlotD2HPBW() -> HTML:
"""Plot half power beam width of DESHIMA 2.0 on ASTE.
Returns
-------
html
HTML object for download link (to be used in Jupyter notebook).
"""
F = np.logspace(np.log10(220), np.log10(440), 349) * 1e9
fig, ax = plt.subplots(1, 1, figsize=(12, 6))
ax.plot(
F / 1e9,
D2HPBW(F) * 180 * 60 * 60 / np.pi,
linewidth=1,
color="b",
alpha=1,
label="HPBW",
)
ax.set_xlabel("Frequency (GHz)")
ax.set_ylabel("HPBW (arcsec)")
ax.set_yscale("linear")
ax.set_xlim(200, 460)
ax.tick_params(direction="in", which="both")
ax.grid(True)
ax.legend()
fig.tight_layout()
# Create download link
df_download = pd.DataFrame(data=F, columns=["F"])
df_download["HPBW"] = D2HPBW(F) * 180 * 60 * 60 / np.pi
return savetable_in_notebook(df_download, "HPBW.csv")
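# Hedged notebook usage sketch (the frequency grid mirrors PlotD2HPBW above; the exact grid
# used elsewhere in the package is an assumption):
#
#   F = np.logspace(np.log10(220), np.log10(440), 349) * 1e9
#   MDLF_simple(F, pwv=0.5, EL=60.0, snr=5.0, obs_hours=8.0)
#   MS_simple(F, pwv=0.5, EL=60.0)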
``` |
{
"source": "joristork/milovision",
"score": 2
} |
#### File: milovision/admin_modules/argparse.py
```python
__author__ = "<NAME>"
from optparse import OptionParser
from pydc1394.cmdline import add_common_options
def run():
""" parses command line args; adds to options defined in pydc/cmdline.py """
usage = "usage: %prog [options] file"
parser = OptionParser(usage)
add_common_options(parser)
parser.add_option("-v", "--verbosity", dest="verbosity",
help="set stdout verbosity (0: critical, 1: error, 2: warning, 3: info, 4: debug)",
type="int")
parser.add_option("-n", "--modules", dest="nr_modules", default=1,
help="set number of pipeline stages to run (1: edge detection; 2: ellipse fitting; 3: pose-1; 4: identify markers; 5: pose-2; 6: register data), default is all",
type="int")
parser.add_option("-s", "--simulate", dest="simulate",
help="set simulation mode (-2: linear generated markers; -1: random generated markers; 0<:preset marker configurations by index nr)",
type="int")
parser.add_option("-w", "--windows", dest="windows",
help="set image display (0: off; 1: on [default])",
type="int")
parser.add_option("-d", "--disk", dest="disk",
help="load marker poses from disk (0: off [default]; 1: on)",
type="int")
parser.add_option("-t", "--simtime", dest="simtime",
help="number of seconds to run simulation (default: 60)",
type="int")
(options, args) = parser.parse_args()
    if options.verbosity is None:
        options.verbosity = 2
    if options.simulate is None:
        options.simulate = 0
return options, args
```
#### File: milovision/admin_modules/loginit.py
```python
__author__ = "<NAME>"
import logging
def run(verbosity):
logging_levels = {0: logging.CRITICAL,
1: logging.ERROR,
2: logging.WARNING,
3: logging.INFO,
4: logging.DEBUG}
logging.basicConfig(format='%(asctime)s %(levelname)-7s %(name)-14s %(message)s',
level=4,
filename='log',
filemode='w'
)
console = logging.StreamHandler()
console.setLevel(logging_levels[int(verbosity)])
formatter = logging.Formatter(fmt='%(asctime)s %(levelname)-7s %(name)-14s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
return logging.getLogger('main')
```
#### File: joristork/milovision/main.py
```python
import sys
import os
import logging
import cv2
import signal
from pydc1394 import DC1394Library, camera
from pydc1394.cmdline import add_common_options, handle_common_options
import matplotlib.pyplot as plt
import numpy as np
import multiprocessing
# milovision libraries
from pipeline import Pipeline
from admin_modules import loginit
from admin_modules import argparse
from output import Printer
pipeline = None
def signal_handler(signal, frame):
""" enables clean shutdown with ctrl-c """
process_id = multiprocessing.current_process().name
if process_id == 'child':
return
    logger = logging.getLogger('signal_handler')
logger.info('ctrl-c received.')
logger.info('telling pipeline to shutdown')
global pipeline
pipeline.shutdown()
def main():
"""
Parses arguments; initialises logger; initialises camera driver if
necessary; loads single image from disk if necessary; and runs desired parts
of pipeline, or loads output from previous execution for printout.
"""
options, args = argparse.run()
loginit.run(options.verbosity)
logger = logging.getLogger('main')
logger.info(' '.join(sys.argv[1:]))
if options.simulate == 0:
options.simulate = None
l = DC1394Library()
elif options.simulate > 0:
options.simulate -= 1
elif options.simtime is None:
options.simtime = 36000
global pipeline
pipeline = Pipeline(options)
if options.disk:
logger.info('using poses from disk')
pipe = Pipeline()
pipe.options = options
printer = Printer(pipe=pipe)
printer.final()
logger.info('done. exiting')
sys.exit(0)
if args:
try:
image = cv2.imread('images/'+args[0], cv2.CV_LOAD_IMAGE_GRAYSCALE)
pipeline.set_image(image)
logger.info('opening image file %s from disk' % args[0])
except IOError:
logger.error('image file not found: %s' % args[0])
exit(1)
elif options.simulate is not None:
logger.info('running in simulation mode')
else:
try:
fwcam = handle_common_options(options, l)
pipeline.set_fwcam(fwcam)
logger.info('init. pydc1394 camera object')
logger.info('camera: %s' % fwcam.model)
logger.info('mode: %s' % fwcam.mode)
logger.info('framerate: %d' % fwcam.framerate.val)
except:
logger.error('unable to open camera capture')
exit(1)
pipeline.run()
if __name__ == '__main__':
signal.signal(signal.SIGINT, signal_handler)
main()
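# Hedged command-line sketch (flags follow admin_modules/argparse.py; the image filename is a
# placeholder and is resolved relative to the images/ directory):
#
#   python main.py -v 3 -n 3 -s 1 -w 1 -t 120     # simulated markers, three pipeline stages
#   python main.py -v 3 -n 2 marker.png           # single image loaded from disk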
```
#### File: milovision/output/pipeline_output.py
```python
import logging
import time
import numpy as np
class Pipeline_Output(object):
"""
Holds all relevant pose estimation and performance data emerging from the
pipeline.
"""
def __init__(self, sim = False):
"""
Sets simulator, camera, marker and timestamp attributes. Records time at
which each image is received (secs since Epoch).
"""
        self.logger = logging.getLogger('Pipeline_Output')
        self.start_time = time.time()
self.sim = sim
self.cam = None
self.markers = []
self.est_markers = []
self.end_time = None
def set(self, sim = False, cam = None, markers = None, estimates = None):
"""
sets:
sim: (boolean) flag to indicate whether this was a simulation
cam: (GL_Camera_Vals or Real_Camera_Vals) camera's key parameters
markers: (Marker or GL_Marker) fiducial marker object
est_markers: (Marker) pipeline pose estimation values
"""
if sim:
self.sim = sim
if cam:
self.cam = cam
if markers:
self.markers = markers
        if estimates:
self.est_markers = estimates
def complete(self, failed = False):
""" records time at which all values have been filled in """
self.end_time = time.time()
def time(self):
""" returns time from instantiation to completion, in seconds """
return self.end_time - self.start_time
def reset_markers_and_time(self):
""" prepares output for next pipeline loop """
self.start_time = time.time()
self.markers = []
self.est_markers = []
def get_est_Cs_flat_mm(self):
"""
Returns estimated centres in a nx3 flat array of 3d vectors (useful for
single marker). Note: posea always generates two estimates per
est_marker.
"""
eCs = np.zeros((len(self.est_markers)*2,3))
for i, m in enumerate(self.est_markers): # any nr of markers
for j, e in enumerate(m.get_C_mm()): # two centre estimates
for k, c in enumerate(e): # three coordinates
eCs[i*2+j,k] = c
return eCs
def get_est_Ns_flat_mm(self):
"""
Returns estimated normals in a nx3 flat array of 3d vectors (useful for
single marker). Note: posea always generates two estimates per
est_marker.
"""
enrms = np.zeros((len(self.est_markers)*2,3))
for i, m in enumerate(self.est_markers): # any nr of markers
for j, n in enumerate(m.get_N_mm()): # two normal estimates
for k, c in enumerate(n): # three coordinates
enrms[i*2+j,k] = c
return enrms
def get_data(self, stub= False, match= False, get= None):
"""
This function is designed to facilitate the retrieval of data to produce
        plots as in the printer module, by returning the data as a dict whose keys are
the names of the required classes of data, such as 'est. Cs' for
estimated centres.
        The array of vectors corresponding to each key is in chronological order
by virtue of the same order of the outputs array.
The function checks whether the combination of parameters is
contradictory and returns None if the data in the current output object
is incomplete for the given data request.
"get" specifies the required data classes as a list of ID strings.
If "match" is set, the actual values are duplicated so that the arrays
of actual values are as long as the arrays of estimates and each actual
value has a corresonding estimated value at the same array index in the
appropriate array.
If "stub" is set, the dict is returned with empty lists as values.
NB: this function does not take multiple marker scenes into account.
"""
if stub:
stub = {}
for key in get:
stub[key] = []
return stub
data = {}
nr_eCs, nr_eNs, nr_Cs, nr_Ns = 0, 0, 0, 0
recognised = 0
if match and ('recognition' in get):
self.logger.error('tried to retrieve recognition in matched mode')
return None
eCs = self.get_est_Cs_flat_mm()
nr_eCs = len(eCs)
C = self.markers[0].get_C_mm()
nr_Cs = len(C)
eNs = self.get_est_Ns_flat_mm()
nr_eNs = len(eNs)
N = self.markers[0].get_N_mm()
nr_Ns = len(N)
if 'est. Cs' in get:
if nr_eCs:
data['est. Cs'] = eCs
elif match:
return None
else:
data['est. Cs'] = []
if 'actual Cs' in get:
if match and nr_eCs and nr_Cs:
data['actual Cs'] = np.tile(C, nr_eCs).reshape(nr_eCs, 3)
elif match:
return None
elif nr_Cs:
data['actual Cs'] = np.tile(C, nr_Cs).reshape(nr_Cs, 3)
else:
data['actual Cs'] = []
if 'est. Ns' in get:
if nr_eNs:
data['est. Ns'] = eNs
elif match:
return None
else:
data['est. Ns'] = []
if 'actual Ns' in get:
if match and nr_eNs and nr_Ns:
data['actual Ns'] = np.tile(N, nr_eNs).reshape(nr_eNs, 3)
elif match:
return None
elif nr_Ns:
data['actual Ns'] = np.tile(N, nr_Ns).reshape(nr_Ns, 3)
else:
data['actual Ns'] = []
if 'recognition' in get:
if (nr_eCs and nr_Cs):
data['recognition'] = np.ones((1,1))
elif nr_Cs:
data['recognition'] = np.zeros((1,1))
elif nr_eCs + nr_Cs + nr_eNs + nr_Ns == 0:
return None
return data
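# Hedged usage sketch (keys follow the get_data docstring above; `output` is a filled-in
# Pipeline_Output instance):
#
#   data = output.get_data(match=True, get=['est. Cs', 'actual Cs'])
#   if data is not None:
#       errors = np.linalg.norm(data['est. Cs'] - data['actual Cs'], axis=1)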
```
#### File: joristork/milovision/pipeline.py
```python
import cv2
import numpy as np
import sys
import time
import copy
import multiprocessing
import logging
import Image
# milovision libraries
from pipeline_modules import ContourFinder
from pipeline_modules import EllipseFitter
from pipeline_modules import PoseEstimatorA
from simulator import GL_Simulator
from output import Pipeline_Output, Printer
from camera_values import Camera_Vals
from marker import Marker
class Pipeline(object):
"""
The Pipeline class contains the main application loop and instantiates and
runs the required image processing modules as well as the optional
simulator.
"""
def __init__(self, options = None):
""" sets logger, verbosity, and other execution variables """
self.logger = logging.getLogger('Pipeline')
self.options = options
self.logger.info('initialised')
self.windows = []
self.modules = []
self.loops = 0
self.single_img = False
self.fwcam = None
self.processes = []
self.outputs = []
self.new_output = False
self.start = time.time()
self.ellipses = None
self.already_shutting_down = False
def set_fwcam(self, fwcam):
""" sets the pipeline's pydc1394 camera object """
self.fwcam = fwcam
def set_image(self, image):
""" loads a single image from disk """
self.orig = np.copy(image)
self.canvas = np.copy(self.orig)
self.single_img = True
def stop(self):
"""
Sets the stop variables that trigger pipeline shutdown. Sends a stop
message to the simulator if present, and waits for the simulator process
to halt.
"""
self.end = time.time()
self.running = False
self.logger.info('stopping pipeline')
if self.options.simulate is not None:
self.logger.info('waiting for simulator')
for process in self.processes:
self.q2sim.put('stop')
process.join()
self.logger.info('simulator complete')
def cleanup(self):
""" closes any OpenCV windows and/or camera to enable a clean exit """
for window in self.windows:
cv2.destroyWindow(window) # opencv bug: only closes windows at exit
if hasattr(self, 'fwcam'):
if self.fwcam:
self.fwcam.stop()
self.logger.info('cleanup completed')
def shutdown(self):
""" main exit routine with logging tasks; runs printer if required """
self.stop()
if self.already_shutting_down:
self.logger.error('multiple shutdown attempts')
sys.exit(0)
self.already_shutting_down = True
duration = self.end - self.start
if self.modules:
self.logger.info('processed %d images' % self.loops)
self.logger.info('avg rate: %f fps' % (self.loops / duration))
else:
self.logger.info('loops: %d' % self.loops)
self.logger.info('avg rate: %f loops /sec' % (self.loops / duration))
avcts = 0.0
avels = 0.0
avmels = 0.0
if self.modules:
if (len(self.modules) > 0) and self.loops > 0:
avcts = self.modules[0].nr_conts / self.loops
self.modules[0].logger.info('avg contours /img: %f' % avcts)
if (len(self.modules) > 1) and self.loops > 0:
avels = self.modules[1].nr_ellipses / (self.loops * 1.0)
avmels = self.modules[1].nr_candidates / (self.loops * 1.0)
self.modules[1].logger.info('pre-filter ellipses /img: %f' % avels)
self.modules[1].logger.info('post-filter ellipses /img: %f' % avmels)
if len(self.modules) > 2:
msg = 'used lopt1 %d times' % self.modules[2].nrlopt1
self.modules[2].logger.info(msg)
msg = 'used lopt2 %d times' % self.modules[2].nrlopt2
self.modules[2].logger.info(msg)
msg = 'used lopt3 %d times' % self.modules[2].nrlopt3
self.modules[2].logger.info(msg)
self.cleanup()
printer = Printer(pipe = self)
printer.final(outputs = self.outputs)
self.logger.info('shutdown completed')
sys.exit(0)
def run(self):
"""
Main application function. Starts image stream from real or simulated
camera (or loads a single image); initialises any pipeline modules; and
then enters the main pipeline processing loop. Once in the loop the
pipeline runs until a shutdown flag is set. The message queue from the
simulator, if there is one, is checked on each loop iteration for image
data and synchronisation messages (such as a shutdown message).
"""
self.running = True
if self.fwcam and not self.single_img:
self.fwcam.start(interactive = True)
time.sleep(1)
self.orig = np.copy(np.asarray(self.fwcam.current_image))
self.canv = np.copy(self.orig)
self.init_output = Pipeline_Output(sim=False)
self.init_output.cam = Camera_Vals(camera_id = 'chameleon1')
self.init_output.markers.append(Marker(cam=self.init_output.cam))
elif self.options.simulate is not None:
self.q2sim = multiprocessing.Queue()
self.q2pipe = multiprocessing.Queue()
queues = self.q2sim, self.q2pipe
args = queues, self.options
process = multiprocessing.Process(name='child', target=GL_Simulator, args=args)
self.processes.append(process)
process.start()
self.init_output = Pipeline_Output(sim = True)
self.q2sim.put(copy.deepcopy(self.init_output))
incoming = self.q2pipe.get()
if 'stop' in incoming:
self.shutdown()
elif 'simulglob' in incoming:
_, self.orig, output = incoming
self.outputs.append(copy.deepcopy(output))
self.init_output.cam = self.outputs[0].cam
m = []
if self.options.nr_modules == 0:
self.logger.info('running an empty pipeline')
else:
if self.options.nr_modules >=1:
self.modules.append(ContourFinder(pipeline = self))
if self.options.nr_modules >=2:
self.modules.append(EllipseFitter(pipeline = self))
if self.options.nr_modules >=3:
self.modules.append(PoseEstimatorA(pipeline = self))
self.logger.info('running with %d modules' % self.options.nr_modules)
if self.options.windows:
for module in self.modules:
if not (module.__class__.__name__ == 'PoseEstimatorA'):
self.windows.append(module.__class__.__name__)
if self.single_img:
for module in self.modules:
module.run()
if self.options.windows:
cv2.imshow(module.__class__.__name__, self.canv)
cv2.waitKey(2)
time.sleep(5)
self.shutdown()
while self.running:
if self.fwcam:
self.orig = np.copy(np.asarray(self.fwcam.current_image))
if self.options.windows:
cv2.imshow("original", self.orig)
elif (self.options.simulate is not None) and self.outputs[-1].end_time:
incoming = self.q2pipe.get()
if 'stop' in incoming:
self.running = False
continue
elif 'simulglob' in incoming:
_, self.orig, output = incoming
self.outputs.append(copy.deepcopy(output))
self.new_output = True
else:
self.logger.error('unknown in queue: \'%s\''% incoming)
self.shutdown()
self.canv = np.copy(self.orig)
for module in self.modules:
module.run()
                classname = module.__class__.__name__
                if self.options.windows and classname != 'PoseEstimatorA':
                    cv2.imshow(classname, self.canv)
self.loops += 1
self.outputs[-1].complete()
if self.ellipses:
self.ellipses = None
if self.options.windows:
cv2.waitKey(2)
if time.time() - self.start >= self.options.simtime:
self.running = False
self.shutdown()
```
#### File: ui/wx/LiveImageDisplay.py
```python
import wx
# OpenGL Stuff
from wx import glcanvas
from OpenGL.GL import *
from OpenGL.GLU import *
import time
# Numpy array is our picture
from numpy import empty
__all__ = [ "NewImageEvent", "LiveImageDisplay", "LiveImageDisplayPanel" ]
###########################################################################
# NEW IMAGE EVENT #
###########################################################################
EVT_NEW_IMAGE_ID = wx.NewId()
def EVT_NEW_IMAGE(win, func):
"""
    Define new Image event. This Event is sent by the acquisition threads
    or the function that sends new images to display. The receiver then
    displays the image and updates its display accordingly.
"""
win.Connect(-1, -1, EVT_NEW_IMAGE_ID, func)
class NewImageEvent(wx.PyEvent):
"""
    Simple event to carry arbitrary result data. In our case we expect a
numpy array with picture data inside.
"""
def __init__(self, data):
"""Init Result Event."""
wx.PyEvent.__init__(self)
self.SetEventType(EVT_NEW_IMAGE_ID)
self.data = data
###########################################################################
# class LiveImageDisplayPanel #
###########################################################################
class LiveImageDisplayPanel(glcanvas.GLCanvas):
imageUpdateEvent = wx.NewEventType()
def __init__(self,parent, shape, dtype, zoom = 1.0, defcapture_dir = "."):
"""
        This function implements a Panel which can display a live video stream
to the user. It also implements saving the current image to a file by
keyboard stroke.
        The Display is implemented using OpenGL; it defines a GLCanvas with
the same coordinate frame as the shown image (0,0 is therefore the top
left border). The user can also register points and lines to be
displayed in the display while drawing
parent - parent of this panel
shape - numpy shape tuple of data to display
dtype - numpy data type of image data to display
zoom - how much should the image be resized
defcapture_dir - Directory to save captured images to
"""
wx.glcanvas.GLCanvas.__init__(self,parent,-1)
# Initialize Variables
self._caputure_dir = defcapture_dir
self._fps = 0
self._gldrawmode = GL_LUMINANCE if len(shape) == 2 else GL_RGB
if dtype[-1] in ['1','8']:
self._glinternal = GL_UNSIGNED_BYTE
elif dtype[-2:] == '16' or dtype[-1] == '2':
self._glinternal = GL_UNSIGNED_SHORT
else:
raise RuntimeError, "Unknown datatype!"
self._gl_initialized=False
# For dot drawing
self._quadric = None
self._dots = []
# Empty array to make sure we always have something to draw
self._arr = empty( shape, dtype )
# Initialisations for FPS calculation
self._ltime = time.time()
self._drawn_frames = 0
self._totframes = 0
# Inform WX of our desired size
self.SetSize((shape[1]*zoom,shape[0]*zoom))
self.SetSizeHints(-1,-1,maxW=shape[1]*zoom,maxH=shape[0]*zoom)
# Bind
self.Bind(wx.EVT_PAINT, self.OnPaint)
self.Bind(wx.EVT_SIZE, self.OnResize)
self.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)
def InitGL( self ):
"""
This function initalizes OpenGL according to what we need
in this context
"""
glEnable(GL_TEXTURE_2D); # Enable Texture Mapping
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_NEAREST); # Set Texture Max Filter
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_NEAREST); # Set Texture Min Filter
# Determine the texture size (which must be 2**x)
texdim_w, texdim_h = 32,32
while texdim_w < self._arr.shape[1]:
texdim_w *= 2
while texdim_h < self._arr.shape[0]:
texdim_h *= 2
self._texture_coords = (float(self._arr.shape[1])/texdim_w,float(self._arr.shape[0])/texdim_h)
# Generate our Texture
glTexImage2D(GL_TEXTURE_2D, 0, self._gldrawmode, texdim_w, texdim_h, 0, self._gldrawmode, self._glinternal, None)
# Set our viewport
w,h = self.GetSize()
glViewport(0, 0, w, h)
glClear( GL_COLOR_BUFFER_BIT );
# Set our Projection to Orthographic and the coordinate system
# like the picture
glMatrixMode( GL_PROJECTION );
glLoadIdentity();
glOrtho(0.0, self._arr.shape[1], self._arr.shape[0], 0.0, -1.0, 1.0);
glMatrixMode( GL_MODELVIEW );
glLoadIdentity();
self._quadric = gluNewQuadric()
self._gl_initialized = True
def __del__( self ):
del self._arr
if self._quadric:
gluDeleteQuadric( self._quadric )
self._quadric = None
def OnKeyDown( self, event ):
# If our Parent has a KeyDown Function written,
# he might want to overwrite this our default behaviour
try:
if self.GetParent().OnKeyDown(event,self._arr):
return True
except AttributeError, e:
pass
if chr( event.GetUniChar() ) in ['F', 'f']:
print self.get_fps()
# elif chr( event.GetUniChar() ) == ' ':
# Sl.save_image_with_number(
# self._arr, "image", "jpg",self._caputure_dir)
def OnEraseBackground( self, event ):
pass # Do nothing, to avoid flashing on MSW
def OnResize( self, event ):
# Reset our Viewpoint.
size = self.GetClientSize()
if self.GetContext():
self.SetCurrent()
glViewport(0, 0, size.width, size.height)
event.Skip()
def OnPaint( self, event ):
"This function draws our GLContext with the image"
dc = wx.PaintDC( self )
self.SetCurrent()
if not self._gl_initialized:
self.InitGL()
# Remake the Texture from the new image data
glTexSubImage2D (GL_TEXTURE_2D, 0, 0, 0, self._arr.shape[1], self._arr.shape[0], self._gldrawmode, self._glinternal, self._arr);
glColor3f( 1.,1.,1. )
# Draw the imageplane
x,y = self._texture_coords
glBegin(GL_QUADS)
glTexCoord2f(0.0,0.); glVertex3f( 0.,0., -.5 )
glTexCoord2f(x,0.); glVertex3f( self._arr.shape[1],0., -.5 )
glTexCoord2f(x,y); glVertex3f( self._arr.shape[1],self._arr.shape[0], -.5 )
glTexCoord2f(0.,y); glVertex3f( 0.,self._arr.shape[0], -.5 )
glEnd()
# Draw the dots
glDisable(GL_TEXTURE_2D); # Disable Texture Mapping
for idx,(pos,radius,color) in enumerate(self._dots):
x,y = pos
glTranslate( x,y,0 )
glColor3fv( color )
gluDisk( self._quadric, 0, radius, 25, 1 )
glTranslate( -x,-y,0 )
# print "Done with dots!"
glEnable(GL_TEXTURE_2D); # Enable Texture Mapping
self.SwapBuffers()
# Calculate the FPS
ctime = time.time()
dtime = ctime-self._ltime
if dtime > 1:
fps= self._drawn_frames/dtime
self._ltime = ctime
self._drawn_frames = 0
self._fps = fps
# print "\r%.2f fps" % (fps),
self._drawn_frames += 1
self._totframes += 1
def get_fps( self ):
return self._fps
class LiveImageDisplay(wx.Frame):
def __init__(self,parent,id,title, shape, dtype,
zoom = 1.0, pos = wx.DefaultPosition, style = wx.DEFAULT_FRAME_STYLE):
"""
This is the parent frame for a LiveImageDisplayPanel.
It is not necessary, but if the Panel is used alone in one frame
it is quite handy.
"""
wx.Frame.__init__(self, parent, id, title, pos, wx.Size(200, 150),
style = wx.DEFAULT_FRAME_STYLE & ~ (wx.RESIZE_BORDER | wx.RESIZE_BOX | wx.MAXIMIZE_BOX))
self._ldp = LiveImageDisplayPanel( self, shape, dtype, zoom )
sizer = wx.BoxSizer( wx.HORIZONTAL )
sizer.Add( self._ldp, 1, wx.EXPAND)
self.SetSizer( sizer )
sizer.Fit(self)
self.SetAutoLayout( True )
self.Fit()
self.get_fps = self._ldp.get_fps
# Connect, so we can send this event to this livedisplay
EVT_NEW_IMAGE(self, self.OnNewImage)
def __del__( self ):
try:
del self._ldp
except AttributeError: # already deleted, also ok
pass
def OnNewImage( self, event ):
self.SetNewImage(event.data)
def SetNewImage( self, img ):
"""
Note: This function must not be called from another
thread; use wx.PostEvent(win, NewImageEvent(img)) for that
"""
self._ldp._arr = img
self._ldp.Refresh()
def ResetDots( self ):
self._ldp._dots = []
def AddDot( self, pos, radius, color ):
self._ldp._dots.append(
(pos,radius,color)
)
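# Minimal usage sketch (illustrative assumption, not part of the original module);
# NewImageEvent/EVT_NEW_IMAGE are the event helpers defined earlier in this file.
if __name__ == '__main__':
    import numpy
    app = wx.App(False)
    frame = LiveImageDisplay(None, -1, "Live view", (480, 640), numpy.uint8)
    frame.Show()
    frame.SetNewImage(numpy.zeros((480, 640), dtype=numpy.uint8))  # main-thread update only
    # from a worker thread, post an event instead:
    # wx.PostEvent(frame, NewImageEvent(numpy.zeros((480, 640), dtype=numpy.uint8)))
    app.MainLoop()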
``` |
{
"source": "JorisTruong/homeaccountant",
"score": 2
} |
#### File: homeaccountant/log/utils.py
```python
import copy
import enum
import logging
import logging.handlers
class LogLevel(enum.IntEnum):
NOSET = 0x0
TRACE = 0x10
DEBUG = 0x20
INFO = 0x30
WARNING = 0x40
ERROR = 0x50
CRITICAL = 0x60
class ColoredConsoleHandler(logging.StreamHandler):
def emit(self, record):
myrecord = copy.copy(record)
levelno = myrecord.levelno
if levelno >= LogLevel.CRITICAL:
color = '\x1b[35m'
elif levelno >= LogLevel.ERROR:
color = '\x1b[31m'
elif levelno >= LogLevel.WARNING:
color = '\x1b[33m'
elif levelno >= LogLevel.INFO:
color = '\x1b[32m'
elif levelno >= LogLevel.DEBUG:
color = '\x1b[36m'
elif levelno >= LogLevel.TRACE:
color = '\x1b[34m'
else:
color = '\x1b[0m'
myrecord.msg = color + str(myrecord.msg) + '\x1b[0m'
super().emit(myrecord)
class ColoredRotatingFileHandler(logging.handlers.RotatingFileHandler):
def emit(self, record):
myrecord = copy.copy(record)
levelno = myrecord.levelno
if levelno >= LogLevel.CRITICAL:
color = '\x1b[35m'
elif levelno >= LogLevel.ERROR:
color = '\x1b[31m'
elif levelno >= LogLevel.WARNING:
color = '\x1b[33m'
elif levelno >= LogLevel.INFO:
color = '\x1b[32m'
elif levelno >= LogLevel.DEBUG:
color = '\x1b[36m'
elif levelno >= LogLevel.TRACE:
color = '\x1b[34m'
else:
color = '\x1b[0m'
myrecord.msg = color + str(myrecord.msg) + '\x1b[0m'
super().emit(myrecord)
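# Usage sketch (illustrative, not part of the original module): wire the colored
# handlers into a logger. TRACE/NOSET above are custom numeric levels, so TRACE
# is registered by name before use; the colors noted below follow the emit() mapping.
if __name__ == '__main__':
    logging.addLevelName(LogLevel.TRACE, 'TRACE')
    example_logger = logging.getLogger('homeaccountant.example')
    example_logger.setLevel(LogLevel.TRACE)
    example_logger.addHandler(ColoredConsoleHandler())
    example_logger.log(LogLevel.TRACE, 'trace message, rendered in blue')
    example_logger.log(LogLevel.INFO, 'info message, rendered in green')
    example_logger.log(LogLevel.ERROR, 'error message, rendered in red')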
``` |
{
"source": "jorisvandenbossche/cartoframes",
"score": 2
} |
#### File: dataset/registry/dataframe_dataset.py
```python
from __future__ import absolute_import
import pandas as pd
from carto.exceptions import CartoException, CartoRateLimitException
from tqdm import tqdm
from ....utils.columns import DataframeColumnsInfo, _first_value
from ....utils.geom_utils import (compute_geodataframe, decode_geometry,
save_index_as_column)
from ....utils.utils import is_geojson, load_geojson, map_geom_type, encode_row, PG_NULL
from .base_dataset import BaseDataset
# avoid _lock issue: https://github.com/tqdm/tqdm/issues/457
tqdm(disable=True, total=0) # initialise internal lock
class DataFrameDataset(BaseDataset):
def __init__(self, data, credentials=None, schema=None):
super(DataFrameDataset, self).__init__()
self._df = data
@staticmethod
def can_work_with(data, credentials):
return isinstance(data, pd.DataFrame) or is_geojson(data)
@classmethod
def create(cls, data, credentials=None, schema=None):
if is_geojson(data):
data = load_geojson(data)
save_index_as_column(data)
return cls(data)
@property
def dataframe(self):
"""Dataset DataFrame"""
return self._df
def get_geodataframe(self):
"""Converts DataFrame into GeoDataFrame if possible"""
gdf = compute_geodataframe(self)
if not gdf.empty:
self._df = gdf
return self._df
def download(self, limit, decode_geom, retry_times):
self._is_ready_for_dowload_validation()
def upload(self, if_exists, with_lnglat):
self._is_ready_for_upload_validation()
self._rename_index_for_upload()
dataframe_columns_info = DataframeColumnsInfo(self._df, with_lnglat)
if if_exists == BaseDataset.IF_EXISTS_REPLACE or not self.exists():
self._create_table(dataframe_columns_info.columns)
elif if_exists == BaseDataset.IF_EXISTS_FAIL:
raise self._already_exists_error()
self._copyfrom(dataframe_columns_info, with_lnglat)
def delete(self):
raise ValueError('Method not allowed in DataFrameDataset. You should use a TableDataset: `Dataset(my_table)`')
def compute_geom_type(self):
"""Compute the geometry type from the data"""
return self._get_geom_type()
def get_column_names(self, exclude=None):
"""Get column names"""
columns = list(self.dataframe.columns)
if self.dataframe.index.name is not None and self.dataframe.index.name not in columns:
columns.append(self.dataframe.index.name)
if exclude and isinstance(exclude, list):
columns = list(set(columns) - set(exclude))
return columns
def get_num_rows(self):
"""Get the number of rows in the dataframe"""
return len(self._df.index)
def _copyfrom(self, dataframe_columns_info, with_lnglat):
query = """
COPY {table_name}({columns}) FROM stdin WITH (FORMAT csv, DELIMITER '|', NULL '{null}');
""".format(
table_name=self._table_name, null=PG_NULL,
columns=','.join(c.database for c in dataframe_columns_info.columns)).strip()
data = _rows(self._df, dataframe_columns_info, with_lnglat)
self._context.upload(query, data)
def _create_table(self, columns):
query = '''BEGIN; {drop}; {create}; {cartodbfy}; COMMIT;'''.format(
drop=self._drop_table_query(),
create=self._create_table_query(columns),
cartodbfy=self._cartodbfy_query())
try:
self._context.execute_long_running_query(query)
except CartoRateLimitException as err:
raise err
except CartoException as err:
raise CartoException('Cannot create table: {}.'.format(err))
def _create_table_query(self, columns):
cols = ['{column} {type}'.format(column=c.database, type=c.database_type) for c in columns]
return '''CREATE TABLE {table_name} ({cols})'''.format(
table_name=self._table_name,
cols=', '.join(cols))
def _get_geom_type(self):
"""Compute geom type of the local dataframe"""
if not self._df.empty and 'geometry' in self._df and len(self._df.geometry) > 0:
geometry = _first_value(self._df.geometry)
if geometry and geometry.geom_type:
return map_geom_type(geometry.geom_type)
return None
def _rename_index_for_upload(self):
if self._df.index.name != 'cartodb_id':
if 'cartodb_id' not in self._df:
if _is_valid_index_for_cartodb_id(self._df.index):
# rename an integer unnamed index to cartodb_id
self._df.index.rename('cartodb_id', inplace=True)
else:
if self._df.index.name is None:
# replace an unnamed index by a cartodb_id column
self._df.set_index('cartodb_id', inplace=True)
def _is_valid_index_for_cartodb_id(index):
return index.name is None and index.nlevels == 1 and index.dtype == 'int' and index.is_unique
def _rows(df, dataframe_columns_info, with_lnglat):
for index, _ in df.iterrows():
row_data = []
for c in dataframe_columns_info.columns:
col = c.dataframe
if col not in df.columns:
if df.index.name and col == df.index.name:
val = index
else: # we could have filtered columns in the df. See DataframeColumnsInfo
continue
else:
val = df.at[index, col]
if dataframe_columns_info.geom_column and col == dataframe_columns_info.geom_column:
geom = decode_geometry(val, dataframe_columns_info.enc_type)
if geom:
val = 'SRID=4326;{}'.format(geom.wkt)
else:
val = ''
row_data.append(encode_row(val))
if with_lnglat:
lng_val = df.at[index, with_lnglat[0]]
lat_val = df.at[index, with_lnglat[1]]
if lng_val is not None and lat_val is not None:
val = 'SRID=4326;POINT ({lng} {lat})'.format(lng=lng_val, lat=lat_val)
else:
val = ''
row_data.append(encode_row(val))
csv_row = b'|'.join(row_data)
csv_row += b'\n'
yield csv_row
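# Illustrative note (assumption, not in the original module): for a dataframe
# with columns ['name', 'the_geom'] and a single row ('Madrid', 'POINT (0 0)'),
# the generator above yields one pipe-delimited CSV line ready for the
# COPY ... FROM stdin statement built in _copyfrom, e.g.
#     b'Madrid|SRID=4326;POINT (0 0)\n'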
```
#### File: observatory/catalog/category.py
```python
from __future__ import absolute_import
from .entity import CatalogEntity
from .repository.constants import CATEGORY_FILTER
from .repository.category_repo import get_category_repo
from .repository.dataset_repo import get_dataset_repo
from .repository.geography_repo import get_geography_repo
class Category(CatalogEntity):
entity_repo = get_category_repo()
@property
def datasets(self):
return get_dataset_repo().get_all({CATEGORY_FILTER: self.id})
@property
def geographies(self):
return get_geography_repo().get_all({CATEGORY_FILTER: self.id})
@property
def name(self):
return self.data['name']
```
#### File: observatory/catalog/entity.py
```python
import pandas as pd
from warnings import warn
from google.api_core.exceptions import NotFound
from carto.exceptions import CartoException
from ...clients.bigquery_client import BigQueryClient
from ....auth import Credentials, defaults
try:
from abc import ABC
except ImportError:
from abc import ABCMeta
ABC = ABCMeta('ABC', (object,), {'__slots__': ()})
_WORKING_PROJECT = 'carto-do-customers'
class CatalogEntity(ABC):
id_field = 'id'
entity_repo = None
export_excluded_fields = ['summary_json']
def __init__(self, data):
self.data = data
@property
def id(self):
return self.data[self.id_field]
@property
def slug(self):
try:
return self.data['slug']
except KeyError:
return None
@classmethod
def get(cls, id_):
return cls.entity_repo.get_by_id(id_)
@classmethod
def get_all(cls, filters=None):
return cls.entity_repo.get_all(filters)
@classmethod
def get_list(cls, id_list):
return cls.entity_repo.get_by_id_list(id_list)
def to_series(self):
return pd.Series(self.data)
def to_dict(self):
return {key: value for key, value in self.data.items() if key not in self.export_excluded_fields}
def __eq__(self, other):
return self.data == other.data
def __ne__(self, other):
return not self == other
def __str__(self):
return '{classname}({data})'.format(classname=self.__class__.__name__, data=self.data.__str__())
def __repr__(self):
return "<{classname}('{entity_id}')>".format(classname=self.__class__.__name__, entity_id=self._get_print_id())
def _get_print_id(self):
if 'slug' in self.data.keys():
return self.data['slug']
return self.id
def _download(self, credentials=None):
credentials = self._get_credentials(credentials)
user_dataset = credentials.get_do_user_dataset()
bq_client = _get_bigquery_client(_WORKING_PROJECT, credentials)
project, dataset, table = self.id.split('.')
view = 'view_{}_{}'.format(dataset.replace('-', '_'), table)
try:
file_path = bq_client.download_to_file(_WORKING_PROJECT, user_dataset, view)
except NotFound:
raise CartoException('You have not purchased the dataset `{}` yet'.format(self.id))
warn('Data saved: {}.'.format(file_path))
warn("To read it you can do: `pandas.read_csv('{}')`.".format(file_path))
return file_path
def _get_credentials(self, credentials=None):
_credentials = credentials or defaults.get_default_credentials()
if not isinstance(_credentials, Credentials):
raise ValueError('`credentials` must be a Credentials class instance')
return _credentials
def _get_bigquery_client(project, credentials):
return BigQueryClient(project, credentials)
def is_slug_value(id_value):
return len(id_value.split('.')) == 1
class CatalogList(list):
def __init__(self, data):
super(CatalogList, self).__init__(data)
def get(self, item_id):
return next(iter(filter(lambda item: item.id == item_id or item.slug == item_id, self)), None)
def to_dataframe(self):
return pd.DataFrame([item.data for item in self])
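# Usage sketch (illustrative, with hypothetical ids; not part of the original
# module): a CatalogList behaves like a list with id/slug lookup and DataFrame export.
if __name__ == '__main__':
    entities = CatalogList([
        CatalogEntity({'id': 'carto-do.project.dataset_a', 'slug': 'dataset_a'}),
        CatalogEntity({'id': 'carto-do.project.dataset_b', 'slug': 'dataset_b'}),
    ])
    print(entities.get('dataset_a'))      # lookup by slug (or by full id)
    print(entities.to_dataframe().shape)  # (2, 2)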
```
#### File: observatory/enrichment/enrichment.py
```python
from .enrichment_service import EnrichmentService, prepare_variables, get_variable_aggregations, \
AGGREGATION_DEFAULT, AGGREGATION_NONE
class Enrichment(EnrichmentService):
def __init__(self, credentials=None):
"""
Dataset enrichment with `Data Observatory <https://carto.com/platform/location-data-streams/>` data
Args:
credentials (:py:class:`Credentials <cartoframes.auth.Credentials>`, optional):
credentials of user account. If not provided,
a default credentials (if set with :py:meth:`set_default_credentials
<cartoframes.auth.set_default_credentials>`) will attempted to be
used.
"""
super(Enrichment, self).__init__(credentials)
def enrich_points(self, data, variables, geom_column='geometry', filters={}):
"""Enrich your dataset with columns from our data, intersecting your points with our
geographies. Extra columns such as area and population will be provided with the aim of normalizing
these columns.
Args:
data (:py:class:`Dataset <cartoframes.data.Dataset>`, DataFrame, GeoDataFrame):
a Dataset, DataFrame or GeoDataFrame object to be enriched.
variables (:py:class:`Variable <cartoframes.data.observatory.Catalog>`, CatalogList, list, str):
variable(s), discovered through Catalog, for enriching the `data` argument.
geom_column (str): string indicating the 4326 geometry column in `data`.
filters (list, optional): list of `<cartoframes.data.observatory> VariableFilter` to filter rows from
the enrichment data. Example: [VariableFilter(variable1, "= 'a string'")]
Returns:
A DataFrame as the provided one, but with the variables to enrich appended to it.
Note that if the geometry of the `data` you provide intersects with more than one geometry
in the enrichment dataset, the number of rows of the returned DataFrame could be different
than the `data` argument number of rows.
Examples:
Enrich a points dataset with Catalog classes:
.. code::
import pandas
from cartoframes.data.observatory import Enrichment, Catalog
from cartoframes.auth import set_default_credentials
set_default_credentials()
df = pandas.read_csv('...')
catalog = Catalog()
variables = catalog.country('usa').category('demographics').datasets[0].variables
enrichment = Enrichment()
dataset_enrich = enrichment.enrich_points(df, variables)
Enrich a points dataset with several Variables using their ids:
.. code::
import pandas
from cartoframes.data.observatory import Enrichment, Catalog
from cartoframes.auth import set_default_credentials
set_default_credentials()
df = pandas.read_csv('...')
catalog = Catalog()
all_variables = catalog.country('usa').category('demographics').datasets[0].variables
variable1 = all_variables[0]
variable2 = all_variables[1]
variables = [
variable1.id,
variable2.id
]
enrichment = Enrichment()
dataset_enrich = enrichment.enrich_points(df, variables)
Enrich a points dataset with filters:
.. code::
import pandas
from cartoframes.data.observatory import Enrichment, Catalog, VariableFilter
from cartoframes.auth import set_default_credentials
set_default_credentials()
df = pandas.read_csv('...')
catalog = Catalog()
variable = catalog.country('usa').category('demographics').datasets[0].variables[0]
filter = VariableFilter(variable, '=', '2019-09-01')
enrichment = Enrichment()
dataset_enrich = enrichment.enrich_points(df, variables=[variable], filters=[filter])
"""
variables = prepare_variables(variables)
data_copy = self._prepare_data(data, geom_column)
temp_table_name = self._get_temp_table_name()
self._upload_dataframe(temp_table_name, data_copy, geom_column)
queries = self._get_points_enrichment_sql(temp_table_name, geom_column, variables, filters)
return self._execute_enrichment(queries, data_copy, geom_column)
AGGREGATION_DEFAULT = AGGREGATION_DEFAULT
"""Use default aggregation method for polygons enrichment. More info in :py:attr:`Enrichment.enrich_polygons`"""
AGGREGATION_NONE = AGGREGATION_NONE
"""Do not aggregate data in polygons enrichment. More info in :py:attr:`Enrichment.enrich_polygons`"""
def enrich_polygons(self, data, variables, geom_column='geometry', filters=[], aggregation=AGGREGATION_DEFAULT):
"""Enrich your dataset with columns from our data, intersecting your polygons with our geographies.
When a polygon intersects with multiple geographies of our dataset, the proportional part of the
intersection will be used to interpolate the quantity of the polygon value intersected, aggregating them
with the operator provided by the `aggregation` argument.
Args:
data (Dataset, DataFrame, GeoDataFrame): a Dataset, DataFrame or GeoDataFrame object to be enriched.
variables (list): list of `<cartoframes.data.observatory> Variable` entities discovered through Catalog to
enrich your data. To refer to a Variable, you can use a `<cartoframes.data.observatory> Variable`
instance, the Variable `id` property or the Variable `slug` property. Please, take a look at the
examples.
geom_column (str): string indicating the 4326 geometry column in `data`.
filters (list, optional): list of `<cartoframes.data.observatory> VariableFilter` to filter rows from
the enrichment data. Example: [VariableFilter(variable1, "= 'a string'")]
aggregation (str, list, optional): set the data aggregation. Your polygons can intersect with one or
more polygons from the DO. With this method you can select how to aggregate the variables data from the
intersected polygons. Options are:
- :py:attr:`Enrichment.AGGREGATION_DEFAULT` (default): Every `<cartoframes.data.observatory> Variable`
has an aggregation method in the Variable `agg_method` property and it will be used to aggregate the
data. In case it is not defined, `array_agg` function will be used.
- :py:attr:`Enrichment.AGGREGATION_NONE`: use this option to do the aggregation locally by yourself.
You will receive an array with all the data from each intersected polygon.
- list of `<cartoframes.data.observatory> VariableAggregation`: if you want to overwrite some default
aggregation methods from your selected variables, you can do it using a list of
`<cartoframes.data.observatory> VariableAggregation`. Example: [VariableAggregation(variable, 'SUM')]
- str: if you want to overwrite every default aggregation method, you can pass a string with the
aggregation method to use.
Returns:
A DataFrame as the provided one but with the variables to enrich appended to it
Note that if the geometry of the `data` you provide intersects with more than one geometry
in the enrichment dataset, the number of rows of the returned DataFrame could be different
than the `data` argument number of rows.
Examples:
Enrich a polygons dataset with one Variable:
.. code::
import pandas
from cartoframes.data.observatory import Enrichment, Catalog
from cartoframes.auth import set_default_credentials, Credentials
set_default_credentials()
df = pandas.read_csv('...')
catalog = Catalog()
variable = catalog.country('usa').category('demographics').datasets[0].variables[0]
variables = [variable]
enrichment = Enrichment()
dataset_enrich = enrichment.enrich_polygons(df, variables)
Enrich a polygons dataset with all Variables from a Catalog Dataset:
.. code::
import pandas
from cartoframes.data.observatory import Enrichment, Catalog
from cartoframes.auth import set_default_credentials, Credentials
set_default_credentials()
df = pandas.read_csv('...')
catalog = Catalog()
variables = catalog.country('usa').category('demographics').datasets[0].variables
enrichment = Enrichment()
dataset_enrich = enrichment.enrich_polygons(df, variables)
Enrich a polygons dataset with several Variables using their ids:
.. code::
import pandas
from cartoframes.data.observatory import Enrichment, Catalog
from cartoframes.auth import set_default_credentials, Credentials
set_default_credentials()
df = pandas.read_csv('...')
catalog = Catalog()
all_variables = catalog.country('usa').category('demographics').datasets[0].variables
variable1 = all_variables[0]
variable2 = all_variables[1]
variables = [
variable1.id,
variable2.id
]
enrichment = Enrichment()
dataset_enrich = enrichment.enrich_polygons(df, variables)
Enrich a polygons dataset with filters:
.. code::
import pandas
from cartoframes.data.observatory import Enrichment, Catalog, VariableFilter
from cartoframes.auth import set_default_credentials, Credentials
set_default_credentials()
df = pandas.read_csv('...')
catalog = Catalog()
variable = catalog.country('usa').category('demographics').datasets[0].variables[0]
filter = VariableFilter(variable, '=', '2019-09-01')
enrichment = Enrichment()
dataset_enrich = enrichment.enrich_polygons(df, variables=[variable], filters=[filter])
Enrich a polygons dataset overwriting some of the variables aggregation methods:
.. code::
import pandas
from cartoframes.data.observatory import Enrichment, Catalog, VariableAggregation
from cartoframes.auth import set_default_credentials, Credentials
set_default_credentials()
df = pandas.read_csv('...')
catalog = Catalog()
all_variables = catalog.country('usa').category('demographics').datasets[0].variables
variable1 = all_variables[0]  # variable1.agg_method is 'AVG' but you want 'SUM'
variable2 = all_variables[1]  # variable2.agg_method is 'AVG' and it is what you want
variable3 = all_variables[2]  # variable3.agg_method is 'SUM' but you want 'AVG'
variables = [variable1, variable2, variable3]
aggregations = [
VariableAggregation(variable1, 'SUM'),
VariableAggregation(variable3, 'AVG')
]
enrichment = Enrichment()
dataset_enrich = enrichment.enrich_polygons(df, variables, aggregation=aggregations)
"""
variables = prepare_variables(variables)
data_copy = self._prepare_data(data, geom_column)
temp_table_name = self._get_temp_table_name()
self._upload_dataframe(temp_table_name, data_copy, geom_column)
queries = self._get_polygon_enrichment_sql(
temp_table_name, geom_column, variables, filters, aggregation
)
return self._execute_enrichment(queries, data_copy, geom_column)
def _get_points_enrichment_sql(self, temp_table_name, geom_column, variables, filters):
tables_metadata = self._get_tables_metadata(variables).items()
return [self._build_points_query(table, metadata, temp_table_name, geom_column, filters)
for table, metadata in tables_metadata]
def _build_points_query(self, table, metadata, temp_table_name, geom_column, filters):
variables = ['enrichment_table.{}'.format(variable.column_name) for variable in metadata['variables']]
enrichment_dataset = metadata['dataset']
enrichment_geo_table = metadata['geo_table']
data_table = '{project}.{user_dataset}.{temp_table_name}'.format(
project=self.working_project,
user_dataset=self.user_dataset,
temp_table_name=temp_table_name
)
return '''
SELECT data_table.{enrichment_id}, {variables},
ST_Area(enrichment_geo_table.geom) AS {table}_area
FROM `{enrichment_dataset}` enrichment_table
JOIN `{enrichment_geo_table}` enrichment_geo_table
ON enrichment_table.geoid = enrichment_geo_table.geoid
JOIN `{data_table}` data_table
ON ST_Within(data_table.{geom_column}, enrichment_geo_table.geom)
{where};
'''.format(
variables=', '.join(variables),
geom_column=geom_column,
enrichment_dataset=enrichment_dataset,
enrichment_geo_table=enrichment_geo_table,
enrichment_id=self.enrichment_id,
where=self._build_where_clausule(filters),
data_table=data_table,
table=table
)
def _get_polygon_enrichment_sql(self, temp_table_name, geom_column, variables, filters, aggregation):
variable_aggregations = get_variable_aggregations(variables, aggregation)
tables_metadata = self._get_tables_metadata(variable_aggregations).items()
return [self._build_polygons_query(table, metadata, temp_table_name, geom_column, filters, aggregation)
for table, metadata in tables_metadata]
def _build_polygons_query(self, table, metadata, temp_table_name, geom_column, filters, aggregation):
variable_aggregations = metadata['variables']
enrichment_dataset = metadata['dataset']
enrichment_geo_table = metadata['geo_table']
data_table = '{project}.{user_dataset}.{temp_table_name}'.format(
project=self.working_project,
user_dataset=self.user_dataset,
temp_table_name=temp_table_name
)
if aggregation == AGGREGATION_NONE:
grouper = ''
variables = self._build_polygons_query_variables_without_aggregation(variable_aggregations, geom_column)
else:
grouper = 'group by data_table.{enrichment_id}'.format(enrichment_id=self.enrichment_id)
variables = self._build_polygons_query_variables_with_aggregation(variable_aggregations, geom_column)
return '''
SELECT data_table.{enrichment_id}, {variables}
FROM `{enrichment_dataset}` enrichment_table
JOIN `{enrichment_geo_table}` enrichment_geo_table
ON enrichment_table.geoid = enrichment_geo_table.geoid
JOIN `{data_table}` data_table
ON ST_Intersects(data_table.{geom_column}, enrichment_geo_table.geom)
{where}
{grouper};
'''.format(
geom_column=geom_column,
enrichment_dataset=enrichment_dataset,
enrichment_geo_table=enrichment_geo_table,
enrichment_id=self.enrichment_id,
where=self._build_where_clausule(filters),
data_table=data_table,
grouper=grouper or '',
variables=variables
)
def _build_polygons_query_variables_with_aggregation(self, variable_aggregations, geom_column):
return ', '.join(["""
{operator}(enrichment_table.{variable} *
(ST_Area(ST_Intersection(enrichment_geo_table.geom, data_table.{geom_column}))
/ ST_area(data_table.{geom_column}))) AS {variable}
""".format(
variable=variable_aggregation.variable.column_name,
geom_column=geom_column,
operator=variable_aggregation.aggregation) for variable_aggregation in variable_aggregations])
def _build_polygons_query_variables_without_aggregation(self, variable_aggregations, geom_column):
variables = ['enrichment_table.{}'.format(variable_aggregation.variable.column_name)
for variable_aggregation in variable_aggregations]
return """
{variables},
ST_Area(ST_Intersection(enrichment_geo_table.geom, data_table.{geom_column})) /
ST_area(data_table.{geom_column}) AS measures_proportion
""".format(
variables=', '.join(variables),
geom_column=geom_column)
def _build_where_clausule(self, filters):
where = ''
if len(filters) > 0:
where_clausules = ["enrichment_table.{} {}".format(f.variable.column_name, f.query) for f in filters]
where = 'WHERE {}'.format(' AND '.join(where_clausules))
return where
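# Illustrative note (assumption, with hypothetical column names; not part of the
# original module): given two VariableFilter objects whose variables map to the
# columns total_pop and median_age, with queries "> 1000" and "= 35", the clause
# built above would read:
#     WHERE enrichment_table.total_pop > 1000 AND enrichment_table.median_age = 35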
```
#### File: data/services/isolines.py
```python
from __future__ import absolute_import
import pandas as pd
from ...utils.geom_utils import geodataframe_from_dataframe
from ...data import Dataset
from .service import Service
QUOTA_SERVICE = 'isolines'
class Isolines(Service):
"""Time and distance Isoline services using CARTO dataservices.
"""
def __init__(self, credentials=None):
super(Isolines, self).__init__(credentials, quota_service=QUOTA_SERVICE)
def isochrones(self, source, ranges, **args):
"""isochrone areas
This method computes areas delimited by isochrone lines (lines of constant travel time) based upon public roads.
Args:
source (Dataset, Dataframe): containing the source points for the isochrones:
travel routes from the source points are computed to determine areas within
specified travel times.
ranges (list): travel time values in seconds; for each range value and source point a result polygon
will be produced enclosing the area within range of the source.
exclusive (bool, optional): when False (the default), inclusive range areas are generated, each one
containing the areas for smaller time values (so the area is reachable from the source
within the given time). When True, areas are exclusive, each one corresponding to time values
between the immediately smaller range value (or zero) and the area range value.
table_name (str, optional): the resulting areas will be saved in a new
CARTO table with this name.
if_exists (str, optional): Behavior for creating new datasets, only applicable
if table_name isn't None;
Options are 'fail', 'replace', or 'append'. Defaults to 'fail'.
dry_run (bool, optional): no actual computation will be performed,
and metadata will be returned including the required quota.
mode (str, optional): defines the travel mode: ``'car'`` (the default) or ``'walk'``.
is_destination (bool, optional): indicates that the source points are to be taken as
destinations for the routes used to compute the area, rather than origins.
mode_type (str, optional): type of routes computed: ``'shortest'`` (default) or ``'fastests'``.
mode_traffic (str, optional): use traffic data to compute routes: ``'disabled'`` (default) or ``'enabled'``.
resolution (float, optional): level of detail of the polygons in meters per pixel.
Higher resolution may increase the response time of the service.
maxpoints (int, optional): Allows limiting the number of points in the returned polygons.
Increasing maxpoints may increase the response time of the service.
quality (int, optional): Allows you to reduce the quality of the polygons in favor of the response time.
Admitted values: 1/2/3.
with_lnglat (tuple, optional): Two columns that have the longitude
and latitude information. If used, a point geometry will be
created upon upload to CARTO. Example input: `('long', 'lat')`.
Defaults to `None`.
Returns:
A named-tuple ``(data, metadata)`` containing either a ``data`` Dataset or DataFrame
(same type as the input ``source``) and a ``metadata`` dictionary.
For dry runs the data will be ``None``.
The data contains a ``range_data`` column with a numeric value and a ``the_geom``
geometry with the corresponding area. It will also contain a ``source_id`` column
that identifies the source point corresponding to each area if the source has a
``cartodb_id`` column.
"""
return self._iso_areas(source, ranges, function='isochrone', **args)
def isodistances(self, source, ranges, **args):
"""isodistance areas
This method computes areas delimited by isodistance lines (lines of constant travel distance) based upon public
roads.
Args:
source (Dataset, Dataframe): containing the source points for the isochrones:
travel routes from the source points are computed to determine areas within
specified travel distances.
ranges (list): travel distance values in meters; for each range value and source point a result polygon
will be produced enclosing the area within range of the source.
exclusive (bool, optional): when False (the default), inclusive range areas are generated, each one
containing the areas for smaller distance values (so the area is reachable from the source
within the given distance). When True, areas are exclusive, each one corresponding to distance values
between the immediately smaller range value (or zero) and the area range value.
table_name (str, optional): the resulting areas will be saved in a new
CARTO table with this name.
if_exists (str, optional): Behavior for creating new datasets, only applicable
if table_name isn't None;
Options are 'fail', 'replace', or 'append'. Defaults to 'fail'.
dry_run (bool, optional): no actual computation will be performed,
and metadata will be returned including the required quota.
mode (str, optional): defines the travel mode: ``'car'`` (the default) or ``'walk'``.
is_destination (bool, optional): indicates that the source points are to be taken as
destinations for the routes used to compute the area, rather than origins.
mode_type (str, optional): type of routes computed: ``'shortest'`` (default) or ``'fastests'``.
mode_traffic (str, optional): use traffic data to compute routes: ``'disabled'`` (default) or ``'enabled'``.
resolution (float, optional): level of detail of the polygons in meters per pixel.
Higher resolution may increase the response time of the service.
maxpoints (int, optional): Allows limiting the number of points in the returned polygons.
Increasing maxpoints may increase the response time of the service.
quality (int, optional): Allows you to reduce the quality of the polygons in favor of the response time.
Admitted values: 1/2/3.
with_lnglat (tuple, optional): Two columns that have the longitude
and latitude information. If used, a point geometry will be
created upon upload to CARTO. Example input: `('long', 'lat')`.
Defaults to `None`.
Returns:
A named-tuple ``(data, metadata)`` containing either a ``data`` Dataset or DataFrame
(same type as the input ``source``) and a ``metadata`` dictionary.
For dry runs the data will be ``None``.
The data contains a ``range_data`` column with a numeric value and a ``the_geom``
geometry with the corresponding area. It will also contain a ``source_id`` column
that identifies the source point corresponding to each area if the source has a
``cartodb_id`` column.
"""
return self._iso_areas(source, ranges, function='isodistance', **args)
def _iso_areas(self,
source,
ranges,
dry_run=False,
table_name=None,
if_exists=None,
is_destination=None,
mode='car',
mode_type=None,
mode_traffic=None,
resolution=None,
maxpoints=None,
quality=None,
exclusive=False,
with_lnglat=None,
function=None):
metadata = {}
input_dataframe = None
if isinstance(source, pd.DataFrame):
input_dataframe = source
source = Dataset(input_dataframe)
num_rows = source.get_num_rows()
metadata['required_quota'] = num_rows * len(ranges)
if dry_run:
return self.result(data=None, metadata=metadata)
source_columns = source.get_column_names()
temporary_table_name = False
if source.table_name:
source_query = 'SELECT * FROM {table}'.format(table=source.table_name)
elif source.get_query():
source_query = source.get_query()
else: # source.is_local()
# upload to temporary table
temporary_table_name = self._new_temporary_table_name()
source.upload(table_name=temporary_table_name, credentials=self._credentials, with_lnglat=with_lnglat)
source_query = 'SELECT * FROM {table}'.format(table=temporary_table_name)
source_columns = source.get_column_names()
source_has_id = 'cartodb_id' in source_columns
iso_function = '_cdb_{function}_exception_safe'.format(function=function)
# TODO: use **options argument?
options = {
'is_destination': is_destination,
'mode_type': mode_type,
'mode_traffic': mode_traffic,
'resolution': resolution,
'maxpoints': maxpoints,
'quality': quality
}
iso_options = ["'{}={}'".format(k, v) for k, v in options.items() if v is not None]
iso_options = "ARRAY[{opts}]".format(opts=','.join(iso_options))
iso_ranges = 'ARRAY[{ranges}]'.format(ranges=','.join([str(r) for r in ranges]))
sql = _areas_query(
source_query, source_columns, iso_function, mode, iso_ranges, iso_options, source_has_id or exclusive)
if exclusive:
sql = _rings_query(sql, source_has_id)
dataset = Dataset(sql, credentials=self._credentials)
if table_name:
dataset.upload(table_name=table_name, if_exists=if_exists)
result = Dataset(table_name, credentials=self._credentials)
if input_dataframe is not None:
result = geodataframe_from_dataframe(result.download())
else:
result = geodataframe_from_dataframe(dataset.download())
if input_dataframe is None:
result = Dataset(result, credentials=self._credentials)
if temporary_table_name:
Dataset(temporary_table_name, credentials=self._credentials).delete()
return self.result(data=result, metadata=metadata)
def _areas_query(source_query, source_columns, iso_function, mode, iso_ranges, iso_options, with_source_id):
select_source_id = 'source_id,' if with_source_id else ''
source_id = ''
if with_source_id:
if 'cartodb_id' in source_columns:
source_id = '_source.cartodb_id AS source_id,'
else:
source_id = 'row_number() over () AS source_id,'
return """
WITH _source AS ({source_query}),
_iso_areas AS (
SELECT
{source_id}
{iso_function}(
_source.the_geom,
'{mode}',
{iso_ranges}::integer[],
{iso_options}::text[]
) AS _area
FROM _source
)
SELECT
row_number() OVER () AS cartodb_id,
{select_source_id}
(_area).data_range,
(_area).the_geom
FROM _iso_areas
""".format(
iso_function=iso_function,
source_query=source_query,
source_id=source_id,
select_source_id=select_source_id,
mode=mode,
iso_ranges=iso_ranges,
iso_options=iso_options
)
def _rings_query(areas_query, with_source_id):
if with_source_id:
select_source_id = 'source_id,'
else:
select_source_id = ''
return """
SELECT
cartodb_id,
{select_source_id}
data_range,
COALESCE(
LAG(data_range, 1) OVER (PARTITION BY source_id ORDER BY data_range),
0
) AS lower_data_range,
COALESCE(
ST_DIFFERENCE(the_geom, LAG(the_geom, 1) OVER (PARTITION BY source_id ORDER BY data_range)),
the_geom
) AS the_geom
FROM ({areas_query}) _areas_query
""".format(
select_source_id=select_source_id,
areas_query=areas_query
)
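# Usage sketch (illustrative assumption, not part of the original module): compute
# 5- and 10-minute car isochrones around points stored in a CARTO table. The
# credentials and table name below are placeholders and require a reachable account.
if __name__ == '__main__':
    from cartoframes.auth import Credentials
    creds = Credentials('your_user_name', 'your_api_key')  # placeholder credentials
    iso_service = Isolines(credentials=creds)
    result = iso_service.isochrones(Dataset('stores', credentials=creds),
                                    ranges=[300, 600], mode='car', dry_run=True)
    # dry runs return no data, only the metadata with the quota estimate
    print(result.metadata['required_quota'])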
```
#### File: viz/helpers/color_category_layer.py
```python
from __future__ import absolute_import
from .utils import serialize_palette, get_value
from ..layer import Layer
def color_category_layer(
source, value, title='', top=11, cat=None, palette=None,
size=None, opacity=None, stroke_color=None, stroke_width=None,
description='', footer='', legend=True, popup=True,
widget=False, animate=None, credentials=None):
"""Helper function for quickly creating a category color map.
Args:
source (:py:class:`Dataset <cartoframes.data.Dataset>` or str): Dataset
or text representing a table or query associated with user account.
value (str): Column to symbolize by.
title (str, optional): Title of legend.
top (int, optional): Number of categories for the map. Default is 11. Values
can range from 1 to 16.
cat (list<str>, optional): Category list. Must be a valid list of categories.
palette (str, optional): Palette that can be a named CARTOColor palette
or other valid CARTO VL palette expression. Default is `bold`.
size (int, optional): Size of point or line features.
opacity (int, optional): Opacity value for point color and line features.
Default is '0.8'.
stroke_width (int, optional): Size of the stroke on point features.
stroke_color (str, optional): Color of the stroke on point features.
Default is '#222'.
description (str, optional): Description text legend placed under legend title.
footer (str, optional): Footer text placed under legend items.
legend (bool, optional): Display map legend: "True" or "False".
Set to "True" by default.
popup (bool, optional): Display popups on hover and click: "True" or "False".
Set to "True" by default.
widget (bool, optional): Display a widget for mapped data.
Set to "False" by default.
animate (str, optional): Animate features by date/time or other numeric field.
credentials (:py:class:`Credentials <cartoframes.auth.Credentials>`, optional):
A Credentials instance. This is only used for the simplified Source API.
When a :py:class:`Source <cartoframes.viz.Source>` is passed as source,
these credentials are simply ignored. If not provided, the credentials will be
automatically obtained from the default credentials.
Returns:
cartoframes.viz.Layer: Layer styled by `value`.
Includes a legend, popup and widget on `value`.
"""
func = 'buckets' if cat else 'top'
default_palette = 'bold'
animation_filter = 'animation(linear(${}), 20, fade(1,1))'.format(animate) if animate else '1'
return Layer(
source,
style={
'point': {
'color': 'opacity(ramp({0}(${1}, {2}), {3}),{4})'.format(
func, value, cat or top,
serialize_palette(palette) or default_palette,
get_value(opacity, 'point', 'opacity')
),
'width': get_value(size, 'point', 'width'),
'strokeColor': get_value(stroke_color, 'point', 'strokeColor'),
'strokeWidth': get_value(stroke_width, 'point', 'strokeWidth'),
'filter': animation_filter
},
'line': {
'color': 'opacity(ramp({0}(${1}, {2}), {3}),{4})'.format(
func, value, cat or top,
serialize_palette(palette) or default_palette,
get_value(opacity, 'line', 'opacity')
),
'width': get_value(size, 'line', 'width'),
'filter': animation_filter
},
'polygon': {
'color': 'opacity(ramp({0}(${1}, {2}), {3}), {4})'.format(
func, value, cat or top,
serialize_palette(palette) or default_palette,
get_value(opacity, 'polygon', 'opacity')
),
'strokeColor': get_value(stroke_color, 'polygon', 'strokeColor'),
'strokeWidth': get_value(stroke_width, 'polygon', 'strokeWidth'),
'filter': animation_filter
}
},
popup=popup and not animate and {
'hover': {
'title': title or value,
'value': '$' + value
}
},
legend=legend and {
'type': {
'point': 'color-category-point',
'line': 'color-category-line',
'polygon': 'color-category-polygon'
},
'title': title or value,
'description': description,
'footer': footer
},
widgets=[
animate and {
'type': 'time-series',
'value': animate,
'title': 'Animation'
},
widget and {
'type': 'category',
'value': value,
'title': 'Categories'
}
],
credentials=credentials
)
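# Usage sketch (illustrative assumption, with placeholder table/column names; not
# part of the original module): color a point table by a category column, keeping
# the five most frequent categories.
if __name__ == '__main__':
    from cartoframes.auth import set_default_credentials
    from cartoframes.viz import Map
    set_default_credentials('your_user_name', 'your_api_key')  # placeholder credentials
    Map(color_category_layer('stores_table', 'store_type', title='Store type',
                             top=5, palette='bold'))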
```
#### File: cartoframes/viz/source.py
```python
from __future__ import absolute_import
from ..data import Dataset
from ..data.dataset.registry.base_dataset import BaseDataset
from ..utils.utils import get_query_bounds, get_geodataframe_bounds, encode_geodataframe
from ..utils.geom_utils import compute_geodataframe, reset_geodataframe
class SourceType:
QUERY = 'Query'
GEOJSON = 'GeoJSON'
class Source(object):
"""Source
Args:
data (str, geopandas.GeoDataFrame, pandas.DataFrame,
:py:class:`Dataset <cartoframes.data.Dataset>` ): a table name,
SQL query, GeoJSON file, GeoDataFrame, DataFrame, or Dataset object.
credentials (:py:class:`Credentials <cartoframes.auth.Credentials>`, optional):
A Credentials instance. If not provided, the credentials will be automatically
obtained from the default credentials if available.
bounds (dict or list, optional): a dict with `west`, `south`, `east`, `north`
keys, or an array of floats in the following structure: [[west,
south], [east, north]]. If not provided the bounds will be automatically
calculated to fit all features.
Example:
Table name.
.. code::
from cartoframes.auth import set_default_credentials
from cartoframes.viz import Source
set_default_credentials('your_user_name', 'your api key')
Source('table_name')
SQL query.
.. code::
from cartoframes.auth import set_default_credentials
from cartoframes.viz import Source
set_default_credentials('your_user_name', 'your api key')
Source('SELECT * FROM table_name')
GeoJSON file.
.. code::
from cartoframes.viz import Source
Source('path/to/file.geojson')
Dataset object.
.. code::
from cartoframes.auth import set_default_credentials
from cartoframes.viz import Source
from cartoframes.data import Dataset
set_default_credentials('your_user_name', 'your api key')
ds = Dataset('table_name')
Source(ds)
Setting the credentials.
.. code::
from cartoframes.auth import Credentials
from cartoframes.viz import Source
credentials = Credentials('your_user_name', 'your api key')
Source('table_name', credentials)
"""
def __init__(self, data, credentials=None, schema=None):
if isinstance(data, Dataset):
self.dataset = data
else:
self.dataset = Dataset(data, credentials, schema)
def get_geom_type(self):
return self.dataset.compute_geom_type() or BaseDataset.GEOM_TYPE_POINT
def get_credentials(self):
credentials = self.dataset.credentials
if credentials:
return {
# CARTO VL requires a username but CARTOframes allows passing only the base_url.
# That's why 'user' is used by default if username is empty.
'username': credentials.username or 'user',
'api_key': credentials.api_key,
'base_url': credentials.base_url
}
def compute_metadata(self, columns=None):
if self.dataset.is_local():
gdf = compute_geodataframe(self.dataset)
gdf = gdf[columns] if columns is not None else gdf
self.type = SourceType.GEOJSON
self.data = self._compute_geojson_data(gdf)
self.bounds = self._compute_geojson_bounds(gdf)
reset_geodataframe(self.dataset)
else:
self.type = SourceType.QUERY
self.data = self._compute_query_data()
self.bounds = self._compute_query_bounds()
def _compute_query_data(self):
return self.dataset.get_query()
def _compute_query_bounds(self):
context = self.dataset._strategy._context
return get_query_bounds(context, self.data)
def _compute_geojson_data(self, gdf):
return encode_geodataframe(gdf)
def _compute_geojson_bounds(self, gdf):
return get_geodataframe_bounds(gdf)
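# Usage sketch (illustrative assumption, not part of the original module): a Source
# built from a local GeoDataFrame resolves to a GeoJSON source, while a table name
# or query resolves to a Query source with server-side bounds.
if __name__ == '__main__':
    import geopandas
    from shapely.geometry import Point
    gdf = geopandas.GeoDataFrame({'geometry': [Point(0, 0), Point(1, 1)]})
    local_source = Source(gdf)
    local_source.compute_metadata()
    print(local_source.type)    # expected: 'GeoJSON'
    print(local_source.bounds)  # bounds computed from the local geometries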
```
#### File: observatory/enrichment/test_service.py
```python
import pandas as pd
from shapely.geometry.point import Point
from cartoframes.auth import Credentials
from cartoframes.data import Dataset
from cartoframes.data.clients.bigquery_client import BigQueryClient
from cartoframes.data.observatory.enrichment.enrichment_service import EnrichmentService, prepare_variables
from cartoframes.data.observatory import Variable
try:
from unittest.mock import Mock, patch
except ImportError:
from mock import Mock, patch
class TestEnrichmentService(object):
def setup_method(self):
self.original_init_client = BigQueryClient._init_client
BigQueryClient._init_client = Mock(return_value=True)
self.credentials = Credentials('username', 'apikey')
def teardown_method(self):
self.credentials = None
BigQueryClient._init_client = self.original_init_client
def test_prepare_data(self):
geom_column = 'the_geom'
df = pd.DataFrame([[1, 'POINT (1 1)']], columns=['cartodb_id', geom_column])
ds = Dataset(df)
enrichment_service = EnrichmentService(credentials=self.credentials)
expected_df = pd.DataFrame([[1, '{"coordinates": [1.0, 1.0], "type": "Point"}', 0]],
columns=['cartodb_id', geom_column, 'enrichment_id'])
result = enrichment_service._prepare_data(ds, geom_column)
assert result.equals(expected_df) is True
result = enrichment_service._prepare_data(df, geom_column)
assert result.equals(expected_df) is True
def test_upload_dataframe(self):
expected_project = 'carto-do-customers'
user_dataset = 'test_dataset'
geom_column = 'the_geom'
data_copy = pd.DataFrame([[1, '{"coordinates": [1.0, 1.0], "type": "Point"}', 0]],
columns=['cartodb_id', geom_column, 'enrichment_id'])
expected_schema = {'enrichment_id': 'INTEGER', 'the_geom': 'GEOGRAPHY'}
expected_data_copy = pd.DataFrame([['{"coordinates": [1.0, 1.0], "type": "Point"}', 0]],
columns=[geom_column, 'enrichment_id'])
# mock
def assert_upload_dataframe(_, dataframe, schema, tablename, project, dataset):
assert dataframe.equals(expected_data_copy)
assert schema == expected_schema
assert isinstance(tablename, str) and len(tablename) > 0
assert project == expected_project
assert tablename == user_dataset
assert dataset == 'username'
enrichment_service = EnrichmentService(credentials=self.credentials)
original = BigQueryClient.upload_dataframe
BigQueryClient.upload_dataframe = assert_upload_dataframe
enrichment_service._upload_dataframe(user_dataset, data_copy, geom_column)
BigQueryClient.upload_dataframe = original
def test_execute_enrichment(self):
geom_column = 'the_geom'
df = pd.DataFrame([['{"coordinates": [1.0, 1.0], "type": "Point"}', 0]],
columns=[geom_column, 'enrichment_id'])
df_final = pd.DataFrame([[Point(1, 1), 'new data']], columns=[geom_column, 'var1'])
class EnrichMock():
def to_dataframe(self):
return pd.DataFrame([[0, 'new data']], columns=['enrichment_id', 'var1'])
original = BigQueryClient.query
BigQueryClient.query = Mock(return_value=EnrichMock())
enrichment_service = EnrichmentService(credentials=self.credentials)
result = enrichment_service._execute_enrichment(['fake_query'], df, geom_column)
assert result.equals(df_final)
BigQueryClient.query = original
@patch.object(Variable, 'get')
def test_prepare_variables(self, get_mock):
variable_id = 'project.dataset.table.variable'
variable = Variable({
'id': variable_id,
'column_name': 'column',
'dataset_id': 'fake_name'
})
get_mock.return_value = variable
one_variable_cases = [
variable_id,
variable
]
for case in one_variable_cases:
result = prepare_variables(case)
assert result == [variable]
several_variables_cases = [
[variable_id, variable_id],
[variable, variable],
[variable, variable_id]
]
for case in several_variables_cases:
result = prepare_variables(case)
assert result == [variable, variable]
```
#### File: unit/utils/test_columns.py
```python
import unittest
import pandas as pd
from cartoframes.utils.columns import (Column, DataframeColumnInfo,
DataframeColumnsInfo, normalize_names)
class TestColumns(unittest.TestCase):
"""Tests for functions in columns module"""
def setUp(self):
self.cols = ['Unnamed: 0',
'201moore',
'201moore',
'Acadia 1.2.3',
'old_soaker',
'_testingTesting',
'test-1',
'test-1--2',
'test;',
'test,',
1,
1.0,
'public',
'SELECT',
'à',
'a',
'_a',
'longcolumnshouldbesplittedsomehowanditellyouwhereitsgonnabesplittedrightnow',
'longcolumnshouldbesplittedsomehowanditellyouwhereitsgonnabesplittedrightnow',
'all']
self.cols_ans = ['unnamed_0',
'_201moore',
'_201moore_1',
'acadia_1_2_3',
'old_soaker',
'_testingtesting',
'test_1',
'test_1_2',
'test_',
'test__1',
'_1',
'_1_0',
'public',
'_select',
'a',
'a_1',
'_a',
'longcolumnshouldbesplittedsomehowanditellyouwhereitsgonnabespli',
'longcolumnshouldbesplittedsomehowanditellyouwhereitsgonnabe_1',
'_all']
def test_normalize(self):
other_cols = []
for c, a in zip(self.cols, self.cols_ans):
# changed cols should match answers
column = Column(c)
a_column = Column(a)
column.normalize(other_cols)
a_column.normalize(other_cols)
self.assertEqual(column.name, a)
# already sql-normed cols should match themselves
self.assertEqual(a_column.name, a)
other_cols.append(column.name)
def test_normalize_names(self):
self.assertListEqual(normalize_names(self.cols), self.cols_ans)
def test_normalize_names_unchanged(self):
self.assertListEqual(normalize_names(self.cols_ans), self.cols_ans)
def test_database_column_name_the_geom(self):
geom_column = 'the_geom'
dataframe_column_info = DataframeColumnInfo('other', geom_column)
self.assertEqual(dataframe_column_info.database, 'other')
dataframe_column_info = DataframeColumnInfo('the_geom', geom_column)
self.assertEqual(dataframe_column_info.database, 'the_geom')
geom_column = 'other_geom'
dataframe_column_info = DataframeColumnInfo('other', geom_column)
self.assertEqual(dataframe_column_info.database, 'other')
dataframe_column_info = DataframeColumnInfo('the_geom', geom_column)
self.assertEqual(dataframe_column_info.database, 'the_geom')
def test_column_info_with_geom(self):
df = pd.DataFrame(
[['Gran Vía 46', 'Madrid', 'POINT (0 0)'], ['Ebro 1', 'Sevilla', 'POINT (1 1)']],
columns=['address', 'city', 'geometry'])
expected_columns = [
{
'dataframe': 'address',
'database': 'address',
'database_type': 'text'
},
{
'dataframe': 'city',
'database': 'city',
'database_type': 'text'
},
{
'dataframe': 'geometry',
'database': 'the_geom',
'database_type': 'geometry(Point, 4326)'
}
]
expected_geom_column = 'geometry'
expected_enc_type = 'wkt'
dataframe_columns_info = DataframeColumnsInfo(df, None)
self.assertEqual(expected_columns, dataframe_columns_info.columns)
self.assertEqual(expected_geom_column, dataframe_columns_info.geom_column)
self.assertEqual(expected_enc_type, dataframe_columns_info.enc_type)
def test_column_info_with_lnglat(self):
df = pd.DataFrame([['0', '1'], ['0', '1']], columns=['lng', 'lat'])
expected_columns = [
{
'dataframe': 'lng',
'database': 'lng',
'database_type': 'text'
},
{
'dataframe': 'lat',
'database': 'lat',
'database_type': 'text'
},
{
'dataframe': None,
'database': 'the_geom',
'database_type': 'geometry(Point, 4326)'
}
]
expected_geom_column = None
expected_enc_type = None
dataframe_columns_info = DataframeColumnsInfo(df, ('lng', 'lat'))
self.assertEqual(expected_columns, dataframe_columns_info.columns)
self.assertEqual(expected_geom_column, dataframe_columns_info.geom_column)
self.assertEqual(expected_enc_type, dataframe_columns_info.enc_type)
def test_column_info_without_geom(self):
df = pd.DataFrame(
[['Gran Vía 46', 'Madrid'], ['Ebro 1', 'Sevilla']], columns=['address', 'city'])
expected_columns = [
{
'dataframe': 'address',
'database': 'address',
'database_type': 'text'
},
{
'dataframe': 'city',
'database': 'city',
'database_type': 'text'
}
]
expected_geom_column = None
expected_enc_type = None
dataframe_columns_info = DataframeColumnsInfo(df, None)
self.assertEqual(expected_columns, dataframe_columns_info.columns)
self.assertEqual(expected_geom_column, dataframe_columns_info.geom_column)
self.assertEqual(expected_enc_type, dataframe_columns_info.enc_type)
def test_column_info_basic_troubled_names(self):
df = pd.DataFrame(
[[1, 'POINT (1 1)', 'fake_geom']], columns=['cartodb_id', 'the_geom', 'the_geom_webmercator'])
expected_columns = [
{
'dataframe': 'cartodb_id',
'database': 'cartodb_id',
'database_type': 'bigint'
},
{
'dataframe': 'the_geom',
'database': 'the_geom',
'database_type': 'geometry(Point, 4326)'
}
]
expected_geom_column = 'the_geom'
expected_enc_type = 'wkt'
dataframe_columns_info = DataframeColumnsInfo(df, None)
self.assertEqual(expected_columns, dataframe_columns_info.columns)
self.assertEqual(expected_geom_column, dataframe_columns_info.geom_column)
self.assertEqual(expected_enc_type, dataframe_columns_info.enc_type)
def test_column_info_geometry_troubled_names(self):
df = pd.DataFrame(
[['POINT (0 0)', 'POINT (1 1)', 'POINT (2 2)']], columns=['geom', 'the_geom', 'geometry'])
expected_columns = [
{
'dataframe': 'geom',
'database': 'geom',
'database_type': 'text'
},
{
'dataframe': 'the_geom',
'database': 'the_geom',
'database_type': 'geometry(Point, 4326)'
},
{
'dataframe': 'geometry',
'database': 'geometry',
'database_type': 'text'
},
]
expected_geom_column = 'the_geom'
expected_enc_type = 'wkt'
dataframe_columns_info = DataframeColumnsInfo(df, None)
self.assertEqual(expected_columns, dataframe_columns_info.columns)
self.assertEqual(expected_geom_column, dataframe_columns_info.geom_column)
self.assertEqual(expected_enc_type, dataframe_columns_info.enc_type)
```
#### File: unit/utils/test_utils.py
```python
import unittest
from collections import OrderedDict
import requests
import numpy as np
from cartoframes.utils.utils import (camel_dictionary, cssify, debug_print,
dict_items, importify_params, snake_to_camel,
dtypes2pg, pg2dtypes, encode_row)
class TestUtils(unittest.TestCase):
"""Tests for functions in utils module"""
def setUp(self):
self.point_style = {
"#layer['mapnik::geometry_type'=1]": OrderedDict([
('marker-width', "6"),
('marker-fill', "yellow"),
('marker-fill-opacity', "1"),
('marker-allow-overlap', "true"),
('marker-line-width', "0.5"),
('marker-line-color', "black"),
("marker-line-opacity", "1")])
}
self.polygon_style = {
"#layer['mapnik::geometry_type'=3]": OrderedDict([
('polygon-fill', ('ramp([column], (#ffc6c4, #ee919b, '
'#cc607d, #9e3963, #672044), '
'quantiles)')),
('polygon-opacity', '0.9'),
('polygon-gamma', '0.5'),
('line-color', '#FFF'),
('line-width', '0.5'),
('line-opacity', '0.25'),
('line-comp-op', 'hard-light')])}
self.complex_style = OrderedDict([
("#layer['mapnik::geometry_type'=1]", OrderedDict([
('marker-width', "5"),
('marker-fill', "yellow"),
('marker-fill-opacity', '1'),
('marker-allow-overlap', 'true'),
('marker-line-width', '0.5'),
('marker-line-color', "black"),
('marker-line-opacity', '1')])),
("#layer['mapnik::geometry_type'=2]", OrderedDict([
('line-width', '1.5'),
('line-color', "black")])),
("#layer['mapnik::geometry_type'=3]", OrderedDict([
('polygon-fill', "blue"),
('polygon-opacity', '0.9'),
('polygon-gamma', '0.5'),
('line-color', '#FFF'),
('line-width', '0.5'),
('line-opacity', '0.25'),
('line-comp-op', 'hard-light')]))
])
def test_dict_items(self):
"""utils.dict_items"""
# ensure correct formation of dict items from provided styling
polygon_style_dict = dict_items(self.polygon_style)
self.assertDictEqual(OrderedDict(polygon_style_dict),
self.polygon_style,
msg="pollygon styling")
# point style
point_style_dict = dict_items(self.point_style)
self.assertDictEqual(OrderedDict(point_style_dict),
self.point_style,
msg="point styling")
# multi layer styling
complex_style_dict = dict_items(self.complex_style)
self.assertDictEqual(OrderedDict(complex_style_dict),
self.complex_style,
msg="multi-layer styling")
def test_cssify(self):
"""utils.cssify"""
# point style
point_stylecss = cssify(self.point_style)
self.assertEqual(point_stylecss,
("#layer['mapnik::geometry_type'=1] { "
"marker-width: 6; marker-fill: yellow; "
"marker-fill-opacity: 1; marker-allow-overlap: "
"true; marker-line-width: 0.5; marker-line-color: "
"black; marker-line-opacity: 1;}"),
msg="point style")
# polygon style
polygon_stylecss = cssify(self.polygon_style)
self.assertEqual(polygon_stylecss,
("#layer['mapnik::geometry_type'=3] { "
"polygon-fill: ramp([column], (#ffc6c4, #ee919b, "
"#cc607d, #9e3963, #672044), quantiles); "
"polygon-opacity: 0.9; polygon-gamma: 0.5; "
"line-color: #FFF; line-width: 0.5; line-opacity: "
"0.25; line-comp-op: hard-light;}"),
msg="polygon style")
# complex style
complex_stylecss = cssify(self.complex_style)
self.assertEqual(complex_stylecss,
("#layer['mapnik::geometry_type'=1] { "
"marker-width: 5; marker-fill: yellow; "
"marker-fill-opacity: 1; marker-allow-overlap: "
"true; marker-line-width: 0.5; marker-line-color: "
"black; marker-line-opacity: 1;} "
"#layer['mapnik::geometry_type'=2] { "
"line-width: 1.5; line-color: black;} "
"#layer['mapnik::geometry_type'=3] { "
"polygon-fill: blue; polygon-opacity: 0.9; "
"polygon-gamma: 0.5; line-color: #FFF; line-width: "
"0.5; line-opacity: 0.25; "
"line-comp-op: hard-light;}"),
msg="multi-layer styling")
def test_importify_params(self):
"""utils.importify_params"""
params = [True, False, 'true', '<NAME>', ]
ans = ('true', 'false', 'true', 'gulab jamon', )
for idx, p in enumerate(params):
self.assertTrue(importify_params(p), ans[idx])
def test_dtypes2pg(self):
results = {
'int16': 'smallint',
'int32': 'integer',
'int64': 'bigint',
'float32': 'real',
'float64': 'double precision',
'object': 'text',
'bool': 'boolean',
'datetime64[ns]': 'timestamp',
'datetime64[ns, UTC]': 'timestamp',
'unknown_dtype': 'text'
}
for i in results:
self.assertEqual(dtypes2pg(i), results[i])
def test_pg2dtypes(self):
results = {
'smallint': 'int16', 'int2': 'int16',
'integer': 'int32', 'int4': 'int32', 'int': 'int32',
'bigint': 'int64', 'int8': 'int64',
'real': 'float32', 'float4': 'float32',
'double precision': 'float64', 'float8': 'float64',
'numeric': 'float64', 'decimal': 'float64',
'text': 'object',
'boolean': 'bool',
'date': 'datetime64[D]',
'timestamp': 'datetime64[ns]', 'timestamp without time zone': 'datetime64[ns]',
'timestampz': 'datetime64[ns]', 'timestamp with time zone': 'datetime64[ns]',
'USER-DEFINED': 'object',
}
for i in results:
self.assertEqual(pg2dtypes(i), results[i])
def test_snake_to_camel(self):
self.assertEqual(snake_to_camel('sneaky_snake'), 'sneakySnake')
self.assertEqual(snake_to_camel('coolCamel'), 'coolCamel')
self.assertEqual(snake_to_camel('kinky-kebab'), 'kinky-kebab')
def test_camel_dictionary(self):
test_dictionary = {'sneaky_snake': 'fang', 'coolCamel': 'hunch', 'kinky-kebab': 'spice'}
camel_dictionary(test_dictionary)
self.assertEqual(test_dictionary['sneakySnake'], 'fang')
self.assertEqual(test_dictionary['coolCamel'], 'hunch')
self.assertEqual(test_dictionary['kinky-kebab'], 'spice')
with self.assertRaises(KeyError):
self.assertEqual(test_dictionary['sneaky-snake'], None)
def test_debug_print(self):
# verbose = True
verbose = 1
# request-response usage
resp = requests.get('http://httpbin.org/get')
debug_print(verbose, resp=resp)
debug_print(verbose, resp=resp.text)
# non-requests-response usage
test_str = 'this is a test'
long_test_str = ', '.join([test_str] * 100)
self.assertIsNone(debug_print(verbose, test_str=test_str))
self.assertIsNone(debug_print(verbose, long_str=long_test_str))
# verbose = False
verbose = 0
self.assertIsNone(debug_print(verbose, resp=test_str))
def test_encode_row(self):
assert encode_row('Hello') == b'Hello'
assert encode_row('Hello \'world\'') == b'Hello \'world\''
assert encode_row('Hello "world"') == b'"Hello ""world"""'
assert encode_row('Hello | world') == b'"Hello | world"'
assert encode_row('Hello \n world') == b'"Hello \n world"'
assert encode_row(b'Hello') == b'Hello'
assert encode_row(b'Hello \'world\'') == b'Hello \'world\''
assert encode_row(b'Hello "world"') == b'"Hello ""world"""'
assert encode_row(b'Hello | world') == b'"Hello | world"'
assert encode_row(b'Hello \n world') == b'"Hello \n world"'
assert encode_row(np.inf) == b'Infinity'
assert encode_row(-np.inf) == b'-Infinity'
assert encode_row(np.nan) == b'NaN'
```
#### File: unit/viz/test_source.py
```python
import pytest
from cartoframes.viz import Source
from cartoframes.lib.context.api_context import APIContext
from cartoframes.auth import Credentials
from .utils import simple_dataframe
def setup_mocks(mocker):
mocker.patch.object(Source, 'get_geom_type', return_value='point')
mocker.patch.object(Source, '_compute_query_bounds')
mocker.patch.object(APIContext, 'get_schema', return_value='public')
class TestSource(object):
def test_is_source_defined(self):
"""Source"""
assert Source is not None
def test_source_get_credentials_username(self, mocker):
"""Source should return the correct credentials when username is provided"""
setup_mocks(mocker)
source = Source('faketable', credentials=Credentials(
username='fakeuser', api_key='1234'))
credentials = source.get_credentials()
assert credentials['username'] == 'fakeuser'
assert credentials['api_key'] == '1234'
assert credentials['base_url'] == 'https://fakeuser.carto.com'
def test_source_get_credentials_base_url(self, mocker):
"""Source should return the correct credentials when base_url is provided"""
setup_mocks(mocker)
source = Source('faketable', credentials=Credentials(
base_url='https://fakeuser.carto.com'))
credentials = source.get_credentials()
assert credentials['username'] == 'user'
assert credentials['api_key'] == 'default_public'
assert credentials['base_url'] == 'https://fakeuser.carto.com'
def test_source_no_credentials(self):
"""Source should raise an exception if there are no credentials"""
with pytest.raises(AttributeError) as e:
Source('faketable')
assert str(e.value) == ('Credentials attribute is required. '
'Please pass a `Credentials` instance or use '
'the `set_default_credentials` function.')
def test_source_not_change_dataframe(self):
"""Source should return the correct credentials when username is provided"""
df = simple_dataframe()
source = Source(df.copy())
assert str(df) == str(source.dataset.dataframe)
``` |
{
"source": "jorisvandenbossche/dask",
"score": 2
} |
#### File: dask/dataframe/core.py
```python
from __future__ import absolute_import, division, print_function
from collections import Iterator
from copy import copy
from distutils.version import LooseVersion
import operator
from operator import getitem, setitem
from pprint import pformat
import uuid
import warnings
from toolz import merge, partial, first, unique, partition_all
import pandas as pd
from pandas.util.decorators import cache_readonly
import numpy as np
try:
from chest import Chest as Cache
except ImportError:
Cache = dict
from .. import array as da
from .. import core
from ..array.core import partial_by_order
from .. import threaded
from ..compatibility import apply, operator_div, bind_method
from ..utils import (repr_long_list, random_state_data,
pseudorandom, derived_from, funcname, memory_repr,
put_lines, M)
from ..base import Base, compute, tokenize, normalize_token
from ..async import get_sync
from . import methods
from .utils import (meta_nonempty, make_meta, insert_meta_param_description,
raise_on_meta_error)
no_default = '__no_default__'
pd.computation.expressions.set_use_numexpr(False)
def _concat(args, **kwargs):
""" Generic concat operation """
if not args:
return args
if isinstance(first(core.flatten(args)), np.ndarray):
return da.core.concatenate3(args)
if isinstance(args[0], (pd.DataFrame, pd.Series)):
args2 = [arg for arg in args if len(arg)]
if not args2:
return args[0]
return pd.concat(args2)
if isinstance(args[0], (pd.Index)):
args = [arg for arg in args if len(arg)]
return args[0].append(args[1:])
try:
return pd.Series(args)
except:
return args
def _get_return_type(meta):
if isinstance(meta, _Frame):
meta = meta._meta
if isinstance(meta, pd.Series):
return Series
elif isinstance(meta, pd.DataFrame):
return DataFrame
elif isinstance(meta, pd.Index):
return Index
return Scalar
def new_dd_object(dsk, _name, meta, divisions):
"""Generic constructor for dask.dataframe objects.
Decides the appropriate output class based on the type of `meta` provided.
"""
return _get_return_type(meta)(dsk, _name, meta, divisions)
def optimize(dsk, keys, **kwargs):
from .optimize import optimize
return optimize(dsk, keys, **kwargs)
def finalize(results):
return _concat(results)
class Scalar(Base):
""" A Dask object to represent a pandas scalar"""
_optimize = staticmethod(optimize)
_default_get = staticmethod(threaded.get)
_finalize = staticmethod(first)
def __init__(self, dsk, name, meta, divisions=None):
# divisions is ignored, only present to be compatible with other
# objects.
self.dask = dsk
self._name = name
meta = make_meta(meta)
if isinstance(meta, (pd.DataFrame, pd.Series, pd.Index)):
raise ValueError("Expected meta to specify scalar, got "
"{0}".format(type(meta).__name__))
self._meta = meta
@property
def _meta_nonempty(self):
return self._meta
@property
def dtype(self):
return self._meta.dtype
def __dir__(self):
o = set(dir(type(self)))
o.update(self.__dict__)
if not hasattr(self._meta, 'dtype'):
o.remove('dtype') # dtype only in `dir` if available
return list(o)
@property
def divisions(self):
"""Dummy divisions to be compat with Series and DataFrame"""
return [None, None]
def __repr__(self):
name = self._name if len(self._name) < 10 else self._name[:7] + '...'
if hasattr(self._meta, 'dtype'):
extra = ', dtype=%s' % self._meta.dtype
else:
extra = ', type=%s' % type(self._meta).__name__
return "dd.Scalar<%s%s>" % (name, extra)
def __array__(self):
# array interface is required to support pandas instance + Scalar
# Otherwise, above op results in pd.Series of Scalar (object dtype)
return np.asarray(self.compute())
@property
def _args(self):
return (self.dask, self._name, self._meta)
def __getstate__(self):
return self._args
def __setstate__(self, state):
self.dask, self._name, self._meta = state
@property
def key(self):
return (self._name, 0)
def _keys(self):
return [self.key]
@classmethod
def _get_unary_operator(cls, op):
def f(self):
name = funcname(op) + '-' + tokenize(self)
dsk = {(name, 0): (op, (self._name, 0))}
meta = op(self._meta_nonempty)
return Scalar(merge(dsk, self.dask), name, meta)
return f
@classmethod
def _get_binary_operator(cls, op, inv=False):
return lambda self, other: _scalar_binary(op, self, other, inv=inv)
def _scalar_binary(op, self, other, inv=False):
name = '{0}-{1}'.format(funcname(op), tokenize(self, other))
dsk = self.dask
return_type = _get_return_type(other)
if isinstance(other, Scalar):
dsk = merge(dsk, other.dask)
other_key = (other._name, 0)
elif isinstance(other, Base):
return NotImplemented
else:
other_key = other
if inv:
dsk.update({(name, 0): (op, other_key, (self._name, 0))})
else:
dsk.update({(name, 0): (op, (self._name, 0), other_key)})
other_meta = make_meta(other)
other_meta_nonempty = meta_nonempty(other_meta)
if inv:
meta = op(other_meta_nonempty, self._meta_nonempty)
else:
meta = op(self._meta_nonempty, other_meta_nonempty)
if return_type is not Scalar:
return return_type(dsk, name, meta,
[other.index.min(), other.index.max()])
else:
return Scalar(dsk, name, meta)
class _Frame(Base):
""" Superclass for DataFrame and Series
Parameters
----------
dsk: dict
The dask graph to compute this DataFrame
name: str
The key prefix that specifies which keys in the dask comprise this
particular DataFrame / Series
meta: pandas.DataFrame, pandas.Series, or pandas.Index
An empty pandas object with names, dtypes, and indices matching the
expected output.
divisions: tuple of index values
Values along which we partition our blocks on the index
"""
_optimize = staticmethod(optimize)
_default_get = staticmethod(threaded.get)
_finalize = staticmethod(finalize)
def __init__(self, dsk, name, meta, divisions):
self.dask = dsk
self._name = name
meta = make_meta(meta)
if not isinstance(meta, self._partition_type):
raise ValueError("Expected meta to specify type {0}, got type "
"{1}".format(self._partition_type.__name__,
type(meta).__name__))
self._meta = meta
self.divisions = tuple(divisions)
@property
def _constructor(self):
return new_dd_object
@property
def npartitions(self):
"""Return number of partitions"""
return len(self.divisions) - 1
@property
def size(self):
return self.reduction(methods.size, np.sum, token='size', meta=int,
split_every=False)
@property
def _meta_nonempty(self):
""" A non-empty version of `_meta` with fake data."""
return meta_nonempty(self._meta)
@property
def _args(self):
return (self.dask, self._name, self._meta, self.divisions)
def __getstate__(self):
return self._args
def __setstate__(self, state):
self.dask, self._name, self._meta, self.divisions = state
def _keys(self):
return [(self._name, i) for i in range(self.npartitions)]
def __repr__(self):
name = self._name if len(self._name) < 10 else self._name[:7] + '...'
if self.known_divisions:
div_text = ', divisions=%s' % repr_long_list(self.divisions)
else:
div_text = ''
return ("dd.%s<%s, npartitions=%s%s>" %
(self.__class__.__name__, name, self.npartitions, div_text))
@property
def index(self):
"""Return dask Index instance"""
name = self._name + '-index'
dsk = dict(((name, i), (getattr, key, 'index'))
for i, key in enumerate(self._keys()))
return Index(merge(dsk, self.dask), name,
self._meta.index, self.divisions)
@derived_from(pd.DataFrame)
def reset_index(self, drop=False):
return self.map_partitions(M.reset_index, drop=drop).clear_divisions()
@property
def known_divisions(self):
"""Whether divisions are already known"""
return len(self.divisions) > 0 and self.divisions[0] is not None
def clear_divisions(self):
divisions = (None,) * (self.npartitions + 1)
return type(self)(self.dask, self._name, self._meta, divisions)
def get_partition(self, n):
"""Get a dask DataFrame/Series representing the `nth` partition."""
if 0 <= n < self.npartitions:
name = 'get-partition-%s-%s' % (str(n), self._name)
dsk = {(name, 0): (self._name, n)}
divisions = self.divisions[n:n + 2]
return new_dd_object(merge(self.dask, dsk), name,
self._meta, divisions)
else:
msg = "n must be 0 <= n < {0}".format(self.npartitions)
raise ValueError(msg)
def cache(self, cache=Cache):
""" Evaluate Dataframe and store in local cache
Uses chest by default to store data on disk
"""
warnings.warn("Deprecation Warning: The `cache` method is deprecated, "
"and will be removed in the next release. To achieve "
"the same behavior, either write to disk or use "
"`Client.persist`, from `dask.distributed`.")
if callable(cache):
cache = cache()
# Evaluate and store in cache
name = 'cache' + uuid.uuid1().hex
dsk = dict(((name, i), (setitem, cache, (tuple, list(key)), key))
for i, key in enumerate(self._keys()))
self._get(merge(dsk, self.dask), list(dsk.keys()))
# Create new dataFrame pointing to that cache
name = 'from-cache-' + self._name
dsk2 = dict(((name, i), (getitem, cache, (tuple, list(key))))
for i, key in enumerate(self._keys()))
return new_dd_object(dsk2, name, self._meta, self.divisions)
@derived_from(pd.DataFrame)
def drop_duplicates(self, **kwargs):
split_every = kwargs.pop('split_every', None)
assert all(k in ('keep', 'subset', 'take_last') for k in kwargs)
chunk = M.drop_duplicates
return aca(self, chunk=chunk, aggregate=chunk, meta=self._meta,
token='drop-duplicates', split_every=split_every, **kwargs)
def __len__(self):
return self.reduction(len, np.sum, token='len', meta=int,
split_every=False).compute()
@insert_meta_param_description(pad=12)
def map_partitions(self, func, *args, **kwargs):
""" Apply Python function on each DataFrame partition.
Parameters
----------
func : function
Function applied to each partition.
args, kwargs :
Arguments and keywords to pass to the function. The partition will
be the first argument, and these will be passed *after*.
$META
Examples
--------
Given a DataFrame, Series, or Index, such as:
>>> import dask.dataframe as dd
>>> df = pd.DataFrame({'x': [1, 2, 3, 4, 5],
... 'y': [1., 2., 3., 4., 5.]})
>>> ddf = dd.from_pandas(df, npartitions=2)
One can use ``map_partitions`` to apply a function on each partition.
Extra arguments and keywords can optionally be provided, and will be
passed to the function after the partition.
Here we apply a function with arguments and keywords to a DataFrame,
resulting in a Series:
>>> def myadd(df, a, b=1):
... return df.x + df.y + a + b
>>> res = ddf.map_partitions(myadd, 1, b=2)
>>> res.dtype
dtype('float64')
By default, dask tries to infer the output metadata by running your
provided function on some fake data. This works well in many cases, but
can sometimes be expensive, or even fail. To avoid this, you can
manually specify the output metadata with the ``meta`` keyword. This
can be specified in many forms, for more information see
``dask.dataframe.utils.make_meta``.
Here we specify the output is a Series with no name, and dtype
``float64``:
>>> res = ddf.map_partitions(myadd, 1, b=2, meta=(None, 'f8'))
Here we map a function that takes in a DataFrame, and returns a
DataFrame with a new column:
>>> res = ddf.map_partitions(lambda df: df.assign(z=df.x * df.y))
>>> res.dtypes
x int64
y float64
z float64
dtype: object
As before, the output metadata can also be specified manually. This
time we pass in a ``dict``, as the output is a DataFrame:
>>> res = ddf.map_partitions(lambda df: df.assign(z=df.x * df.y),
... meta={'x': 'i8', 'y': 'f8', 'z': 'f8'})
In the case where the metadata doesn't change, you can also pass in
the object itself directly:
>>> res = ddf.map_partitions(lambda df: df.head(), meta=df)
"""
return map_partitions(func, self, *args, **kwargs)
@insert_meta_param_description(pad=12)
def reduction(self, chunk, aggregate=None, combine=None, meta=no_default,
token=None, split_every=None, chunk_kwargs=None,
aggregate_kwargs=None, combine_kwargs=None, **kwargs):
"""Generic row-wise reductions.
Parameters
----------
chunk : callable
Function to operate on each partition. Should return a
``pandas.DataFrame``, ``pandas.Series``, or a scalar.
aggregate : callable, optional
Function to operate on the concatenated result of ``chunk``. If not
specified, defaults to ``chunk``. Used to do the final aggregation
in a tree reduction.
The input to ``aggregate`` depends on the output of ``chunk``.
If the output of ``chunk`` is a:
- scalar: Input is a Series, with one row per partition.
- Series: Input is a DataFrame, with one row per partition. Columns
are the rows in the output series.
- DataFrame: Input is a DataFrame, with one row per partition.
Columns are the columns in the output dataframes.
Should return a ``pandas.DataFrame``, ``pandas.Series``, or a
scalar.
combine : callable, optional
Function to operate on intermediate concatenated results of
``chunk`` in a tree-reduction. If not provided, defaults to
``aggregate``. The input/output requirements should match that of
``aggregate`` described above.
$META
token : str, optional
The name to use for the output keys.
split_every : int, optional
Group partitions into groups of this size while performing a
tree-reduction. If set to False, no tree-reduction will be used,
and all intermediates will be concatenated and passed to
``aggregate``. Default is 8.
chunk_kwargs : dict, optional
Keyword arguments to pass on to ``chunk`` only.
aggregate_kwargs : dict, optional
Keyword arguments to pass on to ``aggregate`` only.
combine_kwargs : dict, optional
Keyword arguments to pass on to ``combine`` only.
kwargs :
All remaining keywords will be passed to ``chunk``, ``combine``,
and ``aggregate``.
Examples
--------
>>> import pandas as pd
>>> import dask.dataframe as dd
>>> df = pd.DataFrame({'x': range(50), 'y': range(50, 100)})
>>> ddf = dd.from_pandas(df, npartitions=4)
Count the number of rows in a DataFrame. To do this, count the number
of rows in each partition, then sum the results:
>>> res = ddf.reduction(lambda x: x.count(),
... aggregate=lambda x: x.sum())
>>> res.compute()
x 50
y 50
dtype: int64
Count the number of rows in a Series with elements greater than or
equal to a value (provided via a keyword).
>>> def count_greater(x, value=0):
... return (x >= value).sum()
>>> res = ddf.x.reduction(count_greater, aggregate=lambda x: x.sum(),
... chunk_kwargs={'value': 25})
>>> res.compute()
25
Aggregate both the sum and count of a Series at the same time:
>>> def sum_and_count(x):
... return pd.Series({'sum': x.sum(), 'count': x.count()})
>>> res = ddf.x.reduction(sum_and_count, aggregate=lambda x: x.sum())
>>> res.compute()
count 50
sum 1225
dtype: int64
Doing the same, but for a DataFrame. Here ``chunk`` returns a
DataFrame, meaning the input to ``aggregate`` is a DataFrame with an
index with non-unique entries for both 'x' and 'y'. We groupby the
index, and sum each group to get the final result.
>>> def sum_and_count(x):
... return pd.DataFrame({'sum': x.sum(), 'count': x.count()})
>>> res = ddf.reduction(sum_and_count,
... aggregate=lambda x: x.groupby(level=0).sum())
>>> res.compute()
count sum
x 50 1225
y 50 3725
"""
if aggregate is None:
aggregate = chunk
if combine is None:
if combine_kwargs:
raise ValueError("`combine_kwargs` provided with no `combine`")
combine = aggregate
combine_kwargs = aggregate_kwargs
chunk_kwargs = chunk_kwargs.copy() if chunk_kwargs else {}
chunk_kwargs['aca_chunk'] = chunk
combine_kwargs = combine_kwargs.copy() if combine_kwargs else {}
combine_kwargs['aca_combine'] = combine
aggregate_kwargs = aggregate_kwargs.copy() if aggregate_kwargs else {}
aggregate_kwargs['aca_aggregate'] = aggregate
return aca(self, chunk=_reduction_chunk, aggregate=_reduction_aggregate,
combine=_reduction_combine, meta=meta, token=token,
split_every=split_every, chunk_kwargs=chunk_kwargs,
aggregate_kwargs=aggregate_kwargs,
combine_kwargs=combine_kwargs, **kwargs)
@derived_from(pd.DataFrame)
def pipe(self, func, *args, **kwargs):
# Taken from pandas:
# https://github.com/pydata/pandas/blob/master/pandas/core/generic.py#L2698-L2707
if isinstance(func, tuple):
func, target = func
if target in kwargs:
raise ValueError('%s is both the pipe target and a keyword '
'argument' % target)
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
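    # Usage sketch (illustrative, not from the original source): assuming `ddf`
    # is a dask DataFrame, pipe passes the frame as the first argument:
    #   >>> ddf.pipe(lambda df, n: df.head(n), 3)   # same as ddf.head(3)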
def random_split(self, frac, random_state=None):
""" Pseudorandomly split dataframe into different pieces row-wise
Parameters
----------
frac : list
List of floats that should sum to one.
random_state: int or np.random.RandomState
If int create a new RandomState with this as the seed
Otherwise draw from the passed RandomState
Examples
--------
50/50 split
>>> a, b = df.random_split([0.5, 0.5]) # doctest: +SKIP
80/10/10 split, consistent random_state
>>> a, b, c = df.random_split([0.8, 0.1, 0.1], random_state=123) # doctest: +SKIP
See Also
--------
dask.DataFrame.sample
"""
if not np.allclose(sum(frac), 1):
raise ValueError("frac should sum to 1")
state_data = random_state_data(self.npartitions, random_state)
token = tokenize(self, frac, random_state)
name = 'split-' + token
dsk = {(name, i): (pd_split, (self._name, i), frac, state)
for i, state in enumerate(state_data)}
out = []
for i in range(len(frac)):
name2 = 'split-%d-%s' % (i, token)
dsk2 = {(name2, j): (getitem, (name, j), i)
for j in range(self.npartitions)}
out.append(type(self)(merge(self.dask, dsk, dsk2), name2,
self._meta, self.divisions))
return out
def head(self, n=5, npartitions=1, compute=True):
""" First n rows of the dataset
Parameters
----------
n : int, optional
The number of rows to return. Default is 5.
npartitions : int, optional
Elements are only taken from the first ``npartitions``, with a
default of 1. If there are fewer than ``n`` rows in the first
``npartitions`` a warning will be raised and any found rows
returned. Pass -1 to use all partitions.
compute : bool, optional
Whether to compute the result, default is True.
"""
if npartitions <= -1:
npartitions = self.npartitions
if npartitions > self.npartitions:
msg = "only {} partitions, head received {}"
raise ValueError(msg.format(self.npartitions, npartitions))
name = 'head-%d-%d-%s' % (npartitions, n, self._name)
if npartitions > 1:
name_p = 'head-partial-%d-%s' % (n, self._name)
dsk = {}
for i in range(npartitions):
dsk[(name_p, i)] = (M.head, (self._name, i), n)
concat = (_concat, [(name_p, i) for i in range(npartitions)])
dsk[(name, 0)] = (safe_head, concat, n)
else:
dsk = {(name, 0): (safe_head, (self._name, 0), n)}
result = new_dd_object(merge(self.dask, dsk), name, self._meta,
[self.divisions[0], self.divisions[npartitions]])
if compute:
result = result.compute()
return result
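    # Usage sketch (illustrative, not from the original source): assuming `ddf`
    # is a dask DataFrame built with dd.from_pandas(pdf, npartitions=4):
    #   >>> ddf.head()                    # first 5 rows, taken from partition 0
    #   >>> ddf.head(10, npartitions=2)   # draw from the first two partitions
    #   >>> ddf.head(5, compute=False)    # stay lazy; returns a dask DataFrame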
def tail(self, n=5, compute=True):
""" Last n rows of the dataset
        Caveat: this only checks the last n rows of the last partition.
"""
name = 'tail-%d-%s' % (n, self._name)
dsk = {(name, 0): (M.tail, (self._name, self.npartitions - 1), n)}
result = new_dd_object(merge(self.dask, dsk), name,
self._meta, self.divisions[-2:])
if compute:
result = result.compute()
return result
@property
def loc(self):
""" Purely label-location based indexer for selection by label.
>>> df.loc["b"] # doctest: +SKIP
>>> df.loc["b":"d"] # doctest: +SKIP"""
from .indexing import _LocIndexer
return _LocIndexer(self)
# NOTE: `iloc` is not implemented because of performance concerns.
# see https://github.com/dask/dask/pull/507
def repartition(self, divisions=None, npartitions=None, force=False):
""" Repartition dataframe along new divisions
Parameters
----------
divisions : list, optional
List of partitions to be used. If specified npartitions will be
ignored.
npartitions : int, optional
Number of partitions of output, must be less than npartitions of
input. Only used if divisions isn't specified.
force : bool, default False
Allows the expansion of the existing divisions.
If False then the new divisions lower and upper bounds must be
the same as the old divisions.
Examples
--------
>>> df = df.repartition(npartitions=10) # doctest: +SKIP
>>> df = df.repartition(divisions=[0, 5, 10, 20]) # doctest: +SKIP
"""
if npartitions is not None and divisions is not None:
warnings.warn("When providing both npartitions and divisions to "
"repartition only npartitions is used.")
if npartitions is not None:
if npartitions > self.npartitions:
raise ValueError("Can only repartition to fewer partitions")
return repartition_npartitions(self, npartitions)
elif divisions is not None:
return repartition(self, divisions, force=force)
else:
raise ValueError(
"Provide either divisions= or npartitions= to repartition")
@derived_from(pd.Series)
def fillna(self, value):
return self.map_partitions(M.fillna, value=value)
def sample(self, frac, replace=False, random_state=None):
""" Random sample of items
Parameters
----------
frac : float, optional
Fraction of axis items to return.
replace: boolean, optional
Sample with or without replacement. Default = False.
random_state: int or ``np.random.RandomState``
If int we create a new RandomState with this as the seed
Otherwise we draw from the passed RandomState
See Also
--------
dask.DataFrame.random_split, pd.DataFrame.sample
"""
if random_state is None:
random_state = np.random.RandomState()
name = 'sample-' + tokenize(self, frac, replace, random_state)
state_data = random_state_data(self.npartitions, random_state)
dsk = {(name, i): (methods.sample, (self._name, i), state, frac, replace)
for i, state in enumerate(state_data)}
return new_dd_object(merge(self.dask, dsk), name,
self._meta, self.divisions)
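    # Usage sketch (illustrative, not from the original source): `frac` is
    # applied partition-wise, so the returned size is only approximate.
    # Assuming a dask DataFrame `ddf`:
    #   >>> half = ddf.sample(frac=0.5, random_state=123)
    #   >>> boot = ddf.sample(frac=1.0, replace=True)   # bootstrap-style resample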
def to_hdf(self, path_or_buf, key, mode='a', append=False, get=None, **kwargs):
""" Export frame to hdf file(s)
Export dataframe to one or multiple hdf5 files or nodes.
Exported hdf format is pandas' hdf table format only.
Data saved by this function should be read by pandas dataframe
compatible reader.
By providing a single asterisk in either the path_or_buf or key
parameters you direct dask to save each partition to a different file
or node (respectively). The asterisk will be replaced with a zero
padded partition number, as this is the default implementation of
name_function.
When writing to a single hdf node in a single hdf file, all hdf save
tasks are required to execute in a specific order, often becoming the
bottleneck of the entire execution graph. Saving to multiple nodes or
files removes that restriction (order is still preserved by enforcing
order on output, using name_function) and enables executing save tasks
in parallel.
Parameters
----------
path_or_buf: HDFStore object or string
Destination file(s). If string, can contain a single asterisk to
save each partition to a different file. Only one asterisk is
allowed in both path_or_buf and key parameters.
key: string
A node / group path in file, can contain a single asterisk to save
each partition to a different hdf node in a single file. Only one
asterisk is allowed in both path_or_buf and key parameters.
format: optional, default 'table'
Default hdf storage format, currently only pandas' 'table' format
is supported.
mode: optional, {'a', 'w', 'r+'}, default 'a'
``'a'``
Append: Add data to existing file(s) or create new.
``'w'``
Write: overwrite any existing files with new ones.
``'r+'``
Append to existing files, files must already exist.
append: optional, default False
If False, overwrites existing node with the same name otherwise
appends to it.
complevel: optional, 0-9, default 0
compression level, higher means better compression ratio and
possibly more CPU time. Depends on complib.
complib: {'zlib', 'bzip2', 'lzo', 'blosc', None}, default None
If complevel > 0 compress using this compression library when
possible
fletcher32: bool, default False
If True and compression is used, additionally apply the fletcher32
checksum.
get: callable, optional
A scheduler `get` function to use. If not provided, the default is
to check the global settings first, and then fall back to defaults
for the collections.
dask_kwargs: dict, optional
A dictionary of keyword arguments passed to the `get` function
used.
name_function: callable, optional, default None
A callable called for each partition that accepts a single int
representing the partition number. name_function must return a
string representation of a partition's index in a way that will
preserve the partition's location after a string sort.
If None, a default name_function is used. The default name_function
will return a zero padded string of received int. See
dask.utils.build_name_function for more info.
compute: bool, default True
If True, execute computation of resulting dask graph.
If False, return a Delayed object.
lock: bool, None or lock object, default None
In to_hdf locks are needed for two reasons. First, to protect
against writing to the same file from multiple processes or threads
            simultaneously. Second, the default libhdf5 is not thread safe, so
            we must additionally lock on its usage. By default, if lock is None
            the lock will be determined optimally based on path_or_buf, key and
            the scheduler used. Manually setting this parameter is usually not
            required to improve performance.
            Alternatively, you can specify specific values:
            If False, no locking will occur. If True, a default lock object
            will be created (multiprocessing.Manager.Lock on the
            multiprocessing scheduler, threading.Lock otherwise); this can be
            used to force locking in scenarios where the default behavior would
            be to avoid it. Otherwise, the value is assumed to implement the
            lock interface and will be used as the lock object.
See Also
--------
dask.DataFrame.read_hdf: reading hdf files
dask.Series.read_hdf: reading hdf files
Examples
--------
Saving data to a single file:
>>> df.to_hdf('output.hdf', '/data') # doctest: +SKIP
Saving data to multiple nodes:
>>> with pd.HDFStore('output.hdf') as fh:
... df.to_hdf(fh, '/data*')
... fh.keys() # doctest: +SKIP
['/data0', '/data1']
Or multiple files:
>>> df.to_hdf('output_*.hdf', '/data') # doctest: +SKIP
Saving multiple files with the multiprocessing scheduler and manually
disabling locks:
>>> df.to_hdf('output_*.hdf', '/data',
... get=dask.multiprocessing.get, lock=False) # doctest: +SKIP
"""
from .io import to_hdf
return to_hdf(self, path_or_buf, key, mode, append, get=get, **kwargs)
def to_csv(self, filename, **kwargs):
"""Write DataFrame to a series of comma-separated values (csv) files
One filename per partition will be created. You can specify the
filenames in a variety of ways.
Use a globstring::
>>> df.to_csv('/path/to/data/export-*.csv') # doctest: +SKIP
The * will be replaced by the increasing sequence 0, 1, 2, ...
::
/path/to/data/export-0.csv
/path/to/data/export-1.csv
Use a globstring and a ``name_function=`` keyword argument. The
name_function function should expect an integer and produce a string.
Strings produced by name_function must preserve the order of their
respective partition indices.
>>> from datetime import date, timedelta
>>> def name(i):
... return str(date(2015, 1, 1) + i * timedelta(days=1))
>>> name(0)
'2015-01-01'
>>> name(15)
'2015-01-16'
>>> df.to_csv('/path/to/data/export-*.csv', name_function=name) # doctest: +SKIP
::
/path/to/data/export-2015-01-01.csv
/path/to/data/export-2015-01-02.csv
...
You can also provide an explicit list of paths::
>>> paths = ['/path/to/data/alice.csv', '/path/to/data/bob.csv', ...] # doctest: +SKIP
>>> df.to_csv(paths) # doctest: +SKIP
Parameters
----------
filename : string
Path glob indicating the naming scheme for the output files
name_function : callable, default None
Function accepting an integer (partition index) and producing a
string to replace the asterisk in the given filename globstring.
Should preserve the lexicographic order of partitions
compression : string or None
String like 'gzip' or 'xz'. Must support efficient random access.
Filenames with extensions corresponding to known compression
algorithms (gz, bz2) will be compressed accordingly automatically
sep : character, default ','
Field delimiter for the output file
na_rep : string, default ''
Missing data representation
float_format : string, default None
Format string for floating point numbers
columns : sequence, optional
Columns to write
header : boolean or list of string, default True
Write out column names. If a list of string is given it is assumed
to be aliases for the column names
index : boolean, default True
Write row names (index)
index_label : string or sequence, or False, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex. If
False do not print fields for index names. Use index_label=False
for easier importing in R
nanRep : None
deprecated, use na_rep
mode : str
Python write mode, default 'w'
encoding : string, optional
A string representing the encoding to use in the output file,
defaults to 'ascii' on Python 2 and 'utf-8' on Python 3.
compression : string, optional
a string representing the compression to use in the output file,
allowed values are 'gzip', 'bz2', 'xz',
only used when the first argument is a filename
line_terminator : string, default '\\n'
The newline character or character sequence to use in the output
file
quoting : optional constant from csv module
defaults to csv.QUOTE_MINIMAL
quotechar : string (length 1), default '\"'
character used to quote fields
doublequote : boolean, default True
Control quoting of `quotechar` inside a field
escapechar : string (length 1), default None
character used to escape `sep` and `quotechar` when appropriate
chunksize : int or None
rows to write at a time
tupleize_cols : boolean, default False
            write MultiIndex columns as a list of tuples (if True)
            or in the new, expanded format (if False)
date_format : string, default None
Format string for datetime objects
decimal: string, default '.'
Character recognized as decimal separator. E.g. use ',' for
European data
"""
from .io import to_csv
return to_csv(self, filename, **kwargs)
def to_delayed(self):
""" Convert dataframe into dask Delayed objects
Returns a list of delayed values, one value per partition.
"""
from ..delayed import Delayed
return [Delayed(k, [self.dask]) for k in self._keys()]
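    # Usage sketch (illustrative, not from the original source): each Delayed
    # wraps one partition and computes to a pandas object. Assuming `ddf`:
    #   >>> parts = ddf.to_delayed()
    #   >>> len(parts) == ddf.npartitions
    #   >>> parts[0].compute()   # the first partition as a pandas.DataFrame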
@classmethod
def _get_unary_operator(cls, op):
return lambda self: elemwise(op, self)
@classmethod
def _get_binary_operator(cls, op, inv=False):
if inv:
return lambda self, other: elemwise(op, other, self)
else:
return lambda self, other: elemwise(op, self, other)
def rolling(self, window, min_periods=None, freq=None, center=False,
win_type=None, axis=0):
"""Provides rolling transformations.
Parameters
----------
window : int
Size of the moving window. This is the number of observations used
for calculating the statistic. The window size must not be so large
as to span more than one adjacent partition.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
center : boolean, default False
Set the labels at the center of the window.
win_type : string, default None
Provide a window type. The recognized window types are identical
to pandas.
axis : int, default 0
Returns
-------
a Rolling object on which to call a method to compute a statistic
Notes
-----
The `freq` argument is not supported.
"""
from dask.dataframe.rolling import Rolling
if not isinstance(window, int):
raise ValueError('window must be an integer')
if window < 0:
raise ValueError('window must be >= 0')
if min_periods is not None:
if not isinstance(min_periods, int):
raise ValueError('min_periods must be an integer')
if min_periods < 0:
raise ValueError('min_periods must be >= 0')
return Rolling(self, window=window, min_periods=min_periods,
freq=freq, center=center, win_type=win_type, axis=axis)
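    # Usage sketch (illustrative, not from the original source): the returned
    # Rolling object mirrors the pandas rolling API. Assuming a column `ddf.x`:
    #   >>> ddf.x.rolling(window=3).mean()
    #   >>> ddf.x.rolling(window=5, min_periods=1).sum()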
def _reduction_agg(self, name, axis=None, skipna=True,
split_every=False):
axis = self._validate_axis(axis)
meta = getattr(self._meta_nonempty, name)(axis=axis, skipna=skipna)
token = self._token_prefix + name
method = getattr(M, name)
if axis == 1:
return self.map_partitions(method, meta=meta,
token=token, skipna=skipna, axis=axis)
else:
return self.reduction(method, meta=meta, token=token,
skipna=skipna, axis=axis,
split_every=split_every)
@derived_from(pd.DataFrame)
def all(self, axis=None, skipna=True, split_every=False):
return self._reduction_agg('all', axis=axis, skipna=skipna,
split_every=split_every)
@derived_from(pd.DataFrame)
def any(self, axis=None, skipna=True, split_every=False):
return self._reduction_agg('any', axis=axis, skipna=skipna,
split_every=split_every)
@derived_from(pd.DataFrame)
def sum(self, axis=None, skipna=True, split_every=False):
return self._reduction_agg('sum', axis=axis, skipna=skipna,
split_every=split_every)
@derived_from(pd.DataFrame)
def max(self, axis=None, skipna=True, split_every=False):
return self._reduction_agg('max', axis=axis, skipna=skipna,
split_every=split_every)
@derived_from(pd.DataFrame)
def min(self, axis=None, skipna=True, split_every=False):
return self._reduction_agg('min', axis=axis, skipna=skipna,
split_every=split_every)
@derived_from(pd.DataFrame)
def idxmax(self, axis=None, skipna=True, split_every=False):
fn = 'idxmax'
axis = self._validate_axis(axis)
meta = self._meta_nonempty.idxmax(axis=axis, skipna=skipna)
if axis == 1:
return map_partitions(M.idxmax, self, meta=meta,
token=self._token_prefix + fn,
skipna=skipna, axis=axis)
else:
scalar = not isinstance(meta, pd.Series)
return aca([self], chunk=idxmaxmin_chunk, aggregate=idxmaxmin_agg,
combine=idxmaxmin_combine, meta=meta,
aggregate_kwargs={'scalar': scalar},
token=self._token_prefix + fn, split_every=split_every,
skipna=skipna, fn=fn)
@derived_from(pd.DataFrame)
def idxmin(self, axis=None, skipna=True, split_every=False):
fn = 'idxmin'
axis = self._validate_axis(axis)
meta = self._meta_nonempty.idxmax(axis=axis)
if axis == 1:
return map_partitions(M.idxmin, self, meta=meta,
token=self._token_prefix + fn,
skipna=skipna, axis=axis)
else:
scalar = not isinstance(meta, pd.Series)
return aca([self], chunk=idxmaxmin_chunk, aggregate=idxmaxmin_agg,
combine=idxmaxmin_combine, meta=meta,
aggregate_kwargs={'scalar': scalar},
token=self._token_prefix + fn, split_every=split_every,
skipna=skipna, fn=fn)
@derived_from(pd.DataFrame)
def count(self, axis=None, split_every=False):
axis = self._validate_axis(axis)
token = self._token_prefix + 'count'
if axis == 1:
meta = self._meta_nonempty.count(axis=axis)
return self.map_partitions(M.count, meta=meta, token=token,
axis=axis)
else:
meta = self._meta_nonempty.count()
return self.reduction(M.count, aggregate=M.sum, meta=meta,
token=token, split_every=split_every)
@derived_from(pd.DataFrame)
def mean(self, axis=None, skipna=True, split_every=False):
axis = self._validate_axis(axis)
meta = self._meta_nonempty.mean(axis=axis, skipna=skipna)
if axis == 1:
return map_partitions(M.mean, self, meta=meta,
token=self._token_prefix + 'mean',
axis=axis, skipna=skipna)
else:
num = self._get_numeric_data()
s = num.sum(skipna=skipna, split_every=split_every)
n = num.count(split_every=split_every)
name = self._token_prefix + 'mean-%s' % tokenize(self, axis, skipna)
return map_partitions(methods.mean_aggregate, s, n,
token=name, meta=meta)
@derived_from(pd.DataFrame)
def var(self, axis=None, skipna=True, ddof=1, split_every=False):
axis = self._validate_axis(axis)
meta = self._meta_nonempty.var(axis=axis, skipna=skipna)
if axis == 1:
return map_partitions(M.var, self, meta=meta,
token=self._token_prefix + 'var',
axis=axis, skipna=skipna, ddof=ddof)
else:
num = self._get_numeric_data()
x = 1.0 * num.sum(skipna=skipna, split_every=split_every)
x2 = 1.0 * (num ** 2).sum(skipna=skipna, split_every=split_every)
n = num.count(split_every=split_every)
name = self._token_prefix + 'var-%s' % tokenize(self, axis, skipna, ddof)
return map_partitions(methods.var_aggregate, x2, x, n,
token=name, meta=meta, ddof=ddof)
@derived_from(pd.DataFrame)
def std(self, axis=None, skipna=True, ddof=1, split_every=False):
axis = self._validate_axis(axis)
meta = self._meta_nonempty.std(axis=axis, skipna=skipna)
if axis == 1:
return map_partitions(M.std, self, meta=meta,
token=self._token_prefix + 'std',
axis=axis, skipna=skipna, ddof=ddof)
else:
v = self.var(skipna=skipna, ddof=ddof, split_every=split_every)
token = tokenize(self, axis, skipna, ddof)
name = self._token_prefix + 'std-finish--%s' % token
return map_partitions(np.sqrt, v, meta=meta, token=name)
def quantile(self, q=0.5, axis=0):
""" Approximate row-wise and precise column-wise quantiles of DataFrame
Parameters
----------
q : list/array of floats, default 0.5 (50%)
Iterable of numbers ranging from 0 to 1 for the desired quantiles
axis : {0, 1, 'index', 'columns'} (default 0)
0 or 'index' for row-wise, 1 or 'columns' for column-wise
"""
axis = self._validate_axis(axis)
keyname = 'quantiles-concat--' + tokenize(self, q, axis)
if axis == 1:
if isinstance(q, list):
# Not supported, the result will have current index as columns
raise ValueError("'q' must be scalar when axis=1 is specified")
if LooseVersion(pd.__version__) >= '0.19':
name = q
else:
name = None
meta = pd.Series([], dtype='f8', name=name)
return map_partitions(M.quantile, self, q, axis,
token=keyname, meta=meta)
else:
meta = self._meta.quantile(q, axis=axis)
num = self._get_numeric_data()
quantiles = tuple(quantile(self[c], q) for c in num.columns)
dask = {}
dask = merge(dask, *[_q.dask for _q in quantiles])
qnames = [(_q._name, 0) for _q in quantiles]
if isinstance(quantiles[0], Scalar):
dask[(keyname, 0)] = (pd.Series, qnames, num.columns)
divisions = (min(num.columns), max(num.columns))
return Series(dask, keyname, meta, divisions)
else:
from .multi import _pdconcat
dask[(keyname, 0)] = (_pdconcat, qnames, 1)
return DataFrame(dask, keyname, meta, quantiles[0].divisions)
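    # Usage sketch (illustrative, not from the original source): with axis=0 the
    # quantiles are computed per numeric column; with axis=1, `q` must be a
    # scalar. Assuming a dask DataFrame `ddf`:
    #   >>> ddf.quantile(0.5)                 # per-column medians as a Series
    #   >>> ddf.quantile([0.25, 0.5, 0.75])   # a DataFrame, one row per quantile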
@derived_from(pd.DataFrame)
def describe(self, split_every=False):
# currently, only numeric describe is supported
num = self._get_numeric_data()
stats = [num.count(split_every=split_every),
num.mean(split_every=split_every),
num.std(split_every=split_every),
num.min(split_every=split_every),
num.quantile([0.25, 0.5, 0.75]),
num.max(split_every=split_every)]
stats_names = [(s._name, 0) for s in stats]
name = 'describe--' + tokenize(self, split_every)
dsk = merge(num.dask, *(s.dask for s in stats))
dsk[(name, 0)] = (methods.describe_aggregate, stats_names)
return new_dd_object(dsk, name, num._meta, divisions=[None, None])
def _cum_agg(self, token, chunk, aggregate, axis, skipna=True,
chunk_kwargs=None):
""" Wrapper for cumulative operation """
axis = self._validate_axis(axis)
if axis == 1:
name = '{0}{1}(axis=1)'.format(self._token_prefix, token)
return self.map_partitions(chunk, token=name, **chunk_kwargs)
else:
            # cumulate each partition
name1 = '{0}{1}-map'.format(self._token_prefix, token)
cumpart = map_partitions(chunk, self, token=name1, meta=self,
**chunk_kwargs)
name2 = '{0}{1}-take-last'.format(self._token_prefix, token)
cumlast = map_partitions(_take_last, cumpart, skipna,
meta=pd.Series([]), token=name2)
name = '{0}{1}'.format(self._token_prefix, token)
cname = '{0}{1}-cum-last'.format(self._token_prefix, token)
            # aggregate cumulated partitions with the previous partition's last element
dask = {}
dask[(name, 0)] = (cumpart._name, 0)
for i in range(1, self.npartitions):
# store each cumulative step to graph to reduce computation
if i == 1:
dask[(cname, i)] = (cumlast._name, i - 1)
else:
# aggregate with previous cumulation results
dask[(cname, i)] = (aggregate, (cname, i - 1),
(cumlast._name, i - 1))
dask[(name, i)] = (aggregate, (cumpart._name, i), (cname, i))
return new_dd_object(merge(dask, cumpart.dask, cumlast.dask),
name, chunk(self._meta), self.divisions)
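    # Sketch of the pattern above (explanatory comment, not original source):
    # a cumulative op such as cumsum is applied blockwise and then corrected
    # with a running carry of each partition's last cumulated value, roughly:
    #   carry_1 = last(chunk(part_0))
    #   carry_i = aggregate(carry_{i-1}, last(chunk(part_{i-1})))   for i > 1
    #   out_0   = chunk(part_0)
    #   out_i   = aggregate(chunk(part_i), carry_i)                 for i >= 1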
@derived_from(pd.DataFrame)
def cumsum(self, axis=None, skipna=True):
return self._cum_agg('cumsum',
chunk=M.cumsum,
aggregate=operator.add,
axis=axis, skipna=skipna,
chunk_kwargs=dict(axis=axis, skipna=skipna))
@derived_from(pd.DataFrame)
def cumprod(self, axis=None, skipna=True):
return self._cum_agg('cumprod',
chunk=M.cumprod,
aggregate=operator.mul,
axis=axis, skipna=skipna,
chunk_kwargs=dict(axis=axis, skipna=skipna))
@derived_from(pd.DataFrame)
def cummax(self, axis=None, skipna=True):
return self._cum_agg('cummax',
chunk=M.cummax,
aggregate=methods.cummax_aggregate,
axis=axis, skipna=skipna,
chunk_kwargs=dict(axis=axis, skipna=skipna))
@derived_from(pd.DataFrame)
def cummin(self, axis=None, skipna=True):
return self._cum_agg('cummin',
chunk=M.cummin,
aggregate=methods.cummin_aggregate,
axis=axis, skipna=skipna,
chunk_kwargs=dict(axis=axis, skipna=skipna))
@derived_from(pd.DataFrame)
def where(self, cond, other=np.nan):
# cond and other may be dask instance,
# passing map_partitions via keyword will not be aligned
return map_partitions(M.where, self, cond, other)
@derived_from(pd.DataFrame)
def mask(self, cond, other=np.nan):
return map_partitions(M.mask, self, cond, other)
@derived_from(pd.DataFrame)
def notnull(self):
return self.map_partitions(M.notnull)
@derived_from(pd.DataFrame)
def isnull(self):
return self.map_partitions(M.isnull)
@derived_from(pd.DataFrame)
def astype(self, dtype):
return self.map_partitions(M.astype, dtype=dtype,
meta=self._meta.astype(dtype))
@derived_from(pd.Series)
def append(self, other):
# because DataFrame.append will override the method,
# wrap by pd.Series.append docstring
if isinstance(other, (list, dict)):
msg = "append doesn't support list or dict input"
raise NotImplementedError(msg)
if not isinstance(other, _Frame):
from .io import from_pandas
other = from_pandas(other, 1)
from .multi import _append
if self.known_divisions and other.known_divisions:
if self.divisions[-1] < other.divisions[0]:
divisions = self.divisions[:-1] + other.divisions
return _append(self, other, divisions)
else:
msg = ("Unable to append two dataframes to each other with known "
"divisions if those divisions are not ordered. "
"The divisions/index of the second dataframe must be "
"greater than the divisions/index of the first dataframe.")
raise ValueError(msg)
else:
divisions = [None] * (self.npartitions + other.npartitions + 1)
return _append(self, other, divisions)
@derived_from(pd.DataFrame)
def align(self, other, join='outer', axis=None, fill_value=None):
meta1, meta2 = _emulate(M.align, self, other, join, axis=axis,
fill_value=fill_value)
aligned = self.map_partitions(M.align, other, join=join, axis=axis,
fill_value=fill_value)
token = tokenize(self, other, join, axis, fill_value)
name1 = 'align1-' + token
dsk1 = dict(((name1, i), (getitem, key, 0))
for i, key in enumerate(aligned._keys()))
dsk1.update(aligned.dask)
result1 = new_dd_object(dsk1, name1, meta1, aligned.divisions)
name2 = 'align2-' + token
dsk2 = dict(((name2, i), (getitem, key, 1))
for i, key in enumerate(aligned._keys()))
dsk2.update(aligned.dask)
result2 = new_dd_object(dsk2, name2, meta2, aligned.divisions)
return result1, result2
@derived_from(pd.DataFrame)
def combine_first(self, other):
return self.map_partitions(M.combine_first, other)
@classmethod
def _bind_operator_method(cls, name, op):
""" bind operator method like DataFrame.add to this class """
raise NotImplementedError
normalize_token.register((Scalar, _Frame), lambda a: a._name)
class Series(_Frame):
""" Out-of-core Series object
Mimics ``pandas.Series``.
Parameters
----------
dsk: dict
The dask graph to compute this Series
_name: str
The key prefix that specifies which keys in the dask comprise this
particular Series
meta: pandas.Series
An empty ``pandas.Series`` with names, dtypes, and index matching the
expected output.
divisions: tuple of index values
Values along which we partition our blocks on the index
See Also
--------
dask.dataframe.DataFrame
"""
_partition_type = pd.Series
_token_prefix = 'series-'
@property
def name(self):
return self._meta.name
@name.setter
def name(self, name):
self._meta.name = name
renamed = _rename_dask(self, name)
# update myself
self.dask.update(renamed.dask)
self._name = renamed._name
@property
def ndim(self):
""" Return dimensionality """
return 1
@property
def dtype(self):
""" Return data type """
return self._meta.dtype
@cache_readonly
def dt(self):
from .accessor import DatetimeAccessor
return DatetimeAccessor(self)
@derived_from(pd.Series)
def reset_index(self, drop=False):
return super(Series, self).reset_index(drop=drop)
@cache_readonly
def cat(self):
from .accessor import CategoricalAccessor
return CategoricalAccessor(self)
@cache_readonly
def str(self):
from .accessor import StringAccessor
return StringAccessor(self)
def __dir__(self):
o = set(dir(type(self)))
o.update(self.__dict__)
if not hasattr(self._meta, 'cat'):
o.remove('cat') # cat only in `dir` if available
return list(o)
@property
def nbytes(self):
        return self.reduction(methods.nbytes, np.sum, token='nbytes',
meta=int, split_every=False)
def __array__(self, dtype=None, **kwargs):
x = np.array(self.compute())
if dtype and x.dtype != dtype:
x = x.astype(dtype)
return x
def __array_wrap__(self, array, context=None):
return pd.Series(array, name=self.name)
@derived_from(pd.Series)
def round(self, decimals=0):
return elemwise(M.round, self, decimals)
@derived_from(pd.DataFrame)
def to_timestamp(self, freq=None, how='start', axis=0):
df = elemwise(M.to_timestamp, self, freq, how, axis)
df.divisions = tuple(pd.Index(self.divisions).to_timestamp())
return df
def quantile(self, q=0.5):
""" Approximate quantiles of Series
q : list/array of floats, default 0.5 (50%)
Iterable of numbers ranging from 0 to 1 for the desired quantiles
"""
return quantile(self, q)
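    # Usage sketch (illustrative, not from the original source): assuming a dask
    # Series `ds`, a scalar q yields a Scalar and a list yields a Series:
    #   >>> ds.quantile(0.5).compute()           # approximate median
    #   >>> ds.quantile([0.1, 0.9]).compute()    # pandas Series of two quantiles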
def _repartition_quantiles(self, npartitions, upsample=1.0):
""" Approximate quantiles of Series used for repartitioning
"""
from .partitionquantiles import partition_quantiles
return partition_quantiles(self, npartitions, upsample=upsample)
@derived_from(pd.Series)
def resample(self, rule, how=None, closed=None, label=None):
from .tseries.resample import _resample
return _resample(self, rule, how=how, closed=closed, label=label)
def __getitem__(self, key):
if isinstance(key, Series) and self.divisions == key.divisions:
name = 'index-%s' % tokenize(self, key)
dsk = dict(((name, i), (operator.getitem, (self._name, i),
(key._name, i)))
for i in range(self.npartitions))
return Series(merge(self.dask, key.dask, dsk), name,
self._meta, self.divisions)
raise NotImplementedError()
@derived_from(pd.DataFrame)
def _get_numeric_data(self, how='any', subset=None):
return self
@derived_from(pd.Series)
def iteritems(self):
for i in range(self.npartitions):
s = self.get_partition(i).compute()
for item in s.iteritems():
yield item
@classmethod
def _validate_axis(cls, axis=0):
if axis not in (0, 'index', None):
raise ValueError('No axis named {0}'.format(axis))
# convert to numeric axis
return {None: 0, 'index': 0}.get(axis, axis)
@derived_from(pd.Series)
def groupby(self, index, **kwargs):
from dask.dataframe.groupby import SeriesGroupBy
return SeriesGroupBy(self, index, **kwargs)
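    # Usage sketch (illustrative, not from the original source): assuming a dask
    # Series `ds` and a grouping Series `keys` with matching divisions:
    #   >>> ds.groupby(keys).sum().compute()
    #   >>> ds.groupby(ds).count().compute()   # group a series by its own values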
@derived_from(pd.Series)
def count(self, split_every=False):
return super(Series, self).count(split_every=split_every)
def unique(self, split_every=None):
"""
Return Series of unique values in the object. Includes NA values.
Returns
-------
uniques : Series
"""
return aca(self, chunk=methods.unique, aggregate=methods.unique,
meta=self._meta, token='unique', split_every=split_every,
series_name=self.name)
@derived_from(pd.Series)
def nunique(self, split_every=None):
return self.drop_duplicates(split_every=split_every).count()
@derived_from(pd.Series)
def value_counts(self, split_every=None):
return aca(self, chunk=M.value_counts,
aggregate=methods.value_counts_aggregate,
combine=methods.value_counts_combine,
meta=self._meta.value_counts(), token='value-counts',
split_every=split_every)
@derived_from(pd.Series)
def nlargest(self, n=5, split_every=None):
return aca(self, chunk=M.nlargest, aggregate=M.nlargest,
meta=self._meta, token='series-nlargest-n={0}'.format(n),
split_every=split_every, n=n)
@derived_from(pd.Series)
def isin(self, other):
return elemwise(M.isin, self, list(other))
@derived_from(pd.Series)
def map(self, arg, na_action=None, meta=no_default):
if not (isinstance(arg, (pd.Series, dict)) or callable(arg)):
raise TypeError("arg must be pandas.Series, dict or callable."
" Got {0}".format(type(arg)))
name = 'map-' + tokenize(self, arg, na_action)
dsk = dict(((name, i), (M.map, k, arg, na_action)) for i, k in
enumerate(self._keys()))
dsk.update(self.dask)
if meta is no_default:
meta = _emulate(M.map, self, arg, na_action=na_action)
else:
meta = make_meta(meta)
return Series(dsk, name, meta, self.divisions)
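    # Usage sketch (illustrative, not from the original source): `arg` may be a
    # dict, a pandas Series, or a callable; passing `meta` avoids the inference
    # warning. Assuming a dask Series `ds` of strings:
    #   >>> ds.map({'cat': 0, 'dog': 1}, meta=('x', 'i8'))
    #   >>> ds.map(str.upper, meta=ds)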
@derived_from(pd.Series)
def dropna(self):
return self.map_partitions(M.dropna)
@derived_from(pd.Series)
def between(self, left, right, inclusive=True):
return self.map_partitions(M.between, left=left,
right=right, inclusive=inclusive)
@derived_from(pd.Series)
def clip(self, lower=None, upper=None):
return self.map_partitions(M.clip, lower=lower, upper=upper)
@derived_from(pd.Series)
def clip_lower(self, threshold):
return self.map_partitions(M.clip_lower, threshold=threshold)
@derived_from(pd.Series)
def clip_upper(self, threshold):
return self.map_partitions(M.clip_upper, threshold=threshold)
@derived_from(pd.Series)
def align(self, other, join='outer', axis=None, fill_value=None):
return super(Series, self).align(other, join=join, axis=axis,
fill_value=fill_value)
@derived_from(pd.Series)
def combine_first(self, other):
return self.map_partitions(M.combine_first, other)
def to_bag(self, index=False):
"""Convert to a dask Bag.
Parameters
----------
index : bool, optional
If True, the elements are tuples of ``(index, value)``, otherwise
they're just the ``value``. Default is False.
"""
from .io import to_bag
return to_bag(self, index)
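    # Usage sketch (illustrative, not from the original source): assuming a dask
    # Series `ds`:
    #   >>> ds.to_bag().take(3)               # first three values
    #   >>> ds.to_bag(index=True).take(3)     # (index, value) tuples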
@derived_from(pd.Series)
def to_frame(self, name=None):
return self.map_partitions(M.to_frame, name,
meta=self._meta.to_frame(name))
@classmethod
def _bind_operator_method(cls, name, op):
""" bind operator method like DataFrame.add to this class """
def meth(self, other, level=None, fill_value=None, axis=0):
if level is not None:
raise NotImplementedError('level must be None')
axis = self._validate_axis(axis)
meta = _emulate(op, self, other, axis=axis, fill_value=fill_value)
return map_partitions(op, self, other, meta=meta,
axis=axis, fill_value=fill_value)
meth.__doc__ = op.__doc__
bind_method(cls, name, meth)
@classmethod
def _bind_comparison_method(cls, name, comparison):
""" bind comparison method like DataFrame.add to this class """
def meth(self, other, level=None, axis=0):
if level is not None:
raise NotImplementedError('level must be None')
axis = self._validate_axis(axis)
return elemwise(comparison, self, other, axis=axis)
meth.__doc__ = comparison.__doc__
bind_method(cls, name, meth)
@insert_meta_param_description(pad=12)
def apply(self, func, convert_dtype=True, meta=no_default,
name=no_default, args=(), **kwds):
""" Parallel version of pandas.Series.apply
Parameters
----------
func : function
Function to apply
convert_dtype : boolean, default True
Try to find better dtype for elementwise function results.
If False, leave as dtype=object.
$META
name : list, scalar or None, optional
            Deprecated, use `meta` instead. If a list is given, the result is a
            DataFrame whose columns are the specified list. Otherwise, the
            result is a Series whose name is the given scalar or None (no
            name). If the name keyword is not given, dask tries to infer the
            result type from the beginning of the data. This inference may take
            some time and lead to unexpected results.
args : tuple
Positional arguments to pass to function in addition to the value.
Additional keyword arguments will be passed as keywords to the function.
Returns
-------
applied : Series or DataFrame if func returns a Series.
Examples
--------
>>> import dask.dataframe as dd
>>> s = pd.Series(range(5), name='x')
>>> ds = dd.from_pandas(s, npartitions=2)
Apply a function elementwise across the Series, passing in extra
arguments in ``args`` and ``kwargs``:
>>> def myadd(x, a, b=1):
... return x + a + b
>>> res = ds.apply(myadd, args=(2,), b=1.5)
By default, dask tries to infer the output metadata by running your
provided function on some fake data. This works well in many cases, but
can sometimes be expensive, or even fail. To avoid this, you can
manually specify the output metadata with the ``meta`` keyword. This
can be specified in many forms, for more information see
``dask.dataframe.utils.make_meta``.
Here we specify the output is a Series with name ``'x'``, and dtype
``float64``:
>>> res = ds.apply(myadd, args=(2,), b=1.5, meta=('x', 'f8'))
In the case where the metadata doesn't change, you can also pass in
the object itself directly:
>>> res = ds.apply(lambda x: x + 1, meta=ds)
See Also
--------
dask.Series.map_partitions
"""
if name is not no_default:
warnings.warn("`name` is deprecated, please use `meta` instead")
if meta is no_default and isinstance(name, (pd.DataFrame, pd.Series)):
meta = name
if meta is no_default:
msg = ("`meta` is not specified, inferred from partial data. "
"Please provide `meta` if the result is unexpected.\n"
" Before: .apply(func)\n"
" After: .apply(func, meta={'x': 'f8', 'y': 'f8'}) for dataframe result\n"
" or: .apply(func, meta=('x', 'f8')) for series result")
warnings.warn(msg)
meta = _emulate(M.apply, self._meta_nonempty, func,
convert_dtype=convert_dtype,
args=args, **kwds)
return map_partitions(M.apply, self, func,
convert_dtype, args, meta=meta, **kwds)
@derived_from(pd.Series)
def cov(self, other, min_periods=None):
from .multi import concat
if not isinstance(other, Series):
raise TypeError("other must be a dask.dataframe.Series")
df = concat([self, other], axis=1)
return cov_corr(df, min_periods, scalar=True)
@derived_from(pd.Series)
def corr(self, other, method='pearson', min_periods=None):
from .multi import concat
if not isinstance(other, Series):
raise TypeError("other must be a dask.dataframe.Series")
if method != 'pearson':
raise NotImplementedError("Only Pearson correlation has been "
"implemented")
df = concat([self, other], axis=1)
return cov_corr(df, min_periods, corr=True, scalar=True)
class Index(Series):
_partition_type = pd.Index
_token_prefix = 'index-'
@property
def index(self):
msg = "'{0}' object has no attribute 'index'"
raise AttributeError(msg.format(self.__class__.__name__))
def head(self, n=5, compute=True):
""" First n items of the Index.
Caveat, this only checks the first partition.
"""
name = 'head-%d-%s' % (n, self._name)
dsk = {(name, 0): (operator.getitem, (self._name, 0), slice(0, n))}
result = new_dd_object(merge(self.dask, dsk), name,
self._meta, self.divisions[:2])
if compute:
result = result.compute()
return result
@derived_from(pd.Index)
def max(self, split_every=False):
return self.reduction(M.max, meta=self._meta_nonempty.max(),
token=self._token_prefix + 'max',
split_every=split_every)
@derived_from(pd.Index)
def min(self, split_every=False):
return self.reduction(M.min, meta=self._meta_nonempty.min(),
token=self._token_prefix + 'min',
split_every=split_every)
def count(self, split_every=False):
return self.reduction(methods.index_count, np.sum,
token='index-count', meta=int,
split_every=split_every)
class DataFrame(_Frame):
"""
Implements out-of-core DataFrame as a sequence of pandas DataFrames
Parameters
----------
dask: dict
The dask graph to compute this DataFrame
name: str
The key prefix that specifies which keys in the dask comprise this
particular DataFrame
meta: pandas.DataFrame
An empty ``pandas.DataFrame`` with names, dtypes, and index matching
the expected output.
divisions: tuple of index values
Values along which we partition our blocks on the index
"""
_partition_type = pd.DataFrame
_token_prefix = 'dataframe-'
@property
def columns(self):
return self._meta.columns
@columns.setter
def columns(self, columns):
renamed = _rename_dask(self, columns)
self._meta = renamed._meta
self._name = renamed._name
self.dask.update(renamed.dask)
def __getitem__(self, key):
name = 'getitem-%s' % tokenize(self, key)
if np.isscalar(key) or isinstance(key, tuple):
if isinstance(self._meta.index, (pd.DatetimeIndex, pd.PeriodIndex)):
if key not in self._meta.columns:
return self.loc[key]
# error is raised from pandas
meta = self._meta[_extract_meta(key)]
dsk = dict(((name, i), (operator.getitem, (self._name, i), key))
for i in range(self.npartitions))
return new_dd_object(merge(self.dask, dsk), name,
meta, self.divisions)
elif isinstance(key, slice):
return self.loc[key]
if isinstance(key, list):
# error is raised from pandas
meta = self._meta[_extract_meta(key)]
dsk = dict(((name, i), (operator.getitem, (self._name, i), key))
for i in range(self.npartitions))
return new_dd_object(merge(self.dask, dsk), name,
meta, self.divisions)
if isinstance(key, Series):
# do not perform dummy calculation, as columns will not be changed.
#
if self.divisions != key.divisions:
from .multi import _maybe_align_partitions
self, key = _maybe_align_partitions([self, key])
dsk = {(name, i): (M._getitem_array, (self._name, i), (key._name, i))
for i in range(self.npartitions)}
return new_dd_object(merge(self.dask, key.dask, dsk), name,
self, self.divisions)
raise NotImplementedError(key)
def __setitem__(self, key, value):
if isinstance(key, (tuple, list)):
df = self.assign(**{k: value[c]
for k, c in zip(key, value.columns)})
else:
df = self.assign(**{key: value})
self.dask = df.dask
self._name = df._name
self._meta = df._meta
def __setattr__(self, key, value):
try:
columns = object.__getattribute__(self, '_meta').columns
except AttributeError:
columns = ()
if key in columns:
self[key] = value
else:
object.__setattr__(self, key, value)
def __getattr__(self, key):
if key in self.columns:
meta = self._meta[key]
name = 'getitem-%s' % tokenize(self, key)
dsk = dict(((name, i), (operator.getitem, (self._name, i), key))
for i in range(self.npartitions))
return new_dd_object(merge(self.dask, dsk), name,
meta, self.divisions)
raise AttributeError("'DataFrame' object has no attribute %r" % key)
def __dir__(self):
o = set(dir(type(self)))
o.update(self.__dict__)
o.update(c for c in self.columns if
(isinstance(c, pd.compat.string_types) and
pd.compat.isidentifier(c)))
return list(o)
@property
def ndim(self):
""" Return dimensionality """
return 2
@property
def dtypes(self):
""" Return data types """
return self._meta.dtypes
@derived_from(pd.DataFrame)
def get_dtype_counts(self):
return self._meta.get_dtype_counts()
@derived_from(pd.DataFrame)
def get_ftype_counts(self):
return self._meta.get_ftype_counts()
@derived_from(pd.DataFrame)
def select_dtypes(self, include=None, exclude=None):
cs = self._meta.select_dtypes(include=include, exclude=exclude).columns
return self[list(cs)]
def set_index(self, other, drop=True, sorted=False, **kwargs):
""" Set the DataFrame index (row labels) using an existing column
This operation in dask.dataframe is expensive. If the input column is
sorted then we accomplish the set_index in a single full read of that
column. However, if the input column is not sorted then this operation
triggers a full shuffle, which can take a while and only works on a
single machine (not distributed).
Parameters
----------
other: Series or label
drop: boolean, default True
Delete columns to be used as the new index
sorted: boolean, default False
Set to True if the new index column is already sorted
Examples
--------
>>> df.set_index('x') # doctest: +SKIP
>>> df.set_index(d.x) # doctest: +SKIP
>>> df.set_index(d.timestamp, sorted=True) # doctest: +SKIP
"""
if sorted:
return set_sorted_index(self, other, drop=drop, **kwargs)
else:
from .shuffle import set_index
return set_index(self, other, drop=drop, **kwargs)
def set_partition(self, column, divisions, **kwargs):
""" Set explicit divisions for new column index
>>> df2 = df.set_partition('new-index-column', divisions=[10, 20, 50]) # doctest: +SKIP
See Also
--------
set_index
"""
from .shuffle import set_partition
return set_partition(self, column, divisions, **kwargs)
@derived_from(pd.DataFrame)
def nlargest(self, n=5, columns=None, split_every=None):
token = 'dataframe-nlargest-n={0}'.format(n)
return aca(self, chunk=M.nlargest, aggregate=M.nlargest,
meta=self._meta, token=token, split_every=split_every,
n=n, columns=columns)
@derived_from(pd.DataFrame)
def groupby(self, key, **kwargs):
from dask.dataframe.groupby import DataFrameGroupBy
return DataFrameGroupBy(self, key, **kwargs)
def categorize(self, columns=None, **kwargs):
"""
Convert columns of the DataFrame to category dtype
Parameters
----------
columns : list, optional
A list of column names to convert to the category type. By
default any column with an object dtype is converted to a
categorical.
kwargs
Keyword arguments are passed on to compute.
Notes
-----
When dealing with columns of repeated text values converting to
categorical type is often much more performant, both in terms of memory
and in writing to disk or communication over the network.
See also
--------
        dask.dataframe.categorical.categorize
"""
from dask.dataframe.categorical import categorize
return categorize(self, columns, **kwargs)
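    # Illustrative usage sketch (the ``dd``/``ddf`` names are assumptions, not
    # part of this module); converting a repeated-text column to category
    # dtype before further work:
    #
    # >>> import dask.dataframe as dd                              # doctest: +SKIP
    # >>> ddf = dd.from_pandas(pd.DataFrame({'city': ['a', 'b', 'a', 'b']}),
    # ...                      npartitions=2)                      # doctest: +SKIP
    # >>> ddf = ddf.categorize(columns=['city'])                   # doctest: +SKIP
    # >>> ddf.dtypes                                               # doctest: +SKIP
    # city    category
    # dtype: object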
@derived_from(pd.DataFrame)
def assign(self, **kwargs):
for k, v in kwargs.items():
if not (isinstance(v, (Series, Scalar, pd.Series)) or
np.isscalar(v)):
raise TypeError("Column assignment doesn't support type "
"{0}".format(type(v).__name__))
pairs = list(sum(kwargs.items(), ()))
# Figure out columns of the output
df2 = self._meta.assign(**_extract_meta(kwargs))
return elemwise(methods.assign, self, *pairs, meta=df2)
@derived_from(pd.DataFrame)
def rename(self, index=None, columns=None):
if index is not None:
raise ValueError("Cannot rename index.")
# *args here is index, columns but columns arg is already used
return self.map_partitions(M.rename, None, columns)
def query(self, expr, **kwargs):
""" Blocked version of pd.DataFrame.query
This is like the sequential version except that this will also happen
in many threads. This may conflict with ``numexpr`` which will use
multiple threads itself. We recommend that you set numexpr to use a
single thread
import numexpr
numexpr.set_nthreads(1)
The original docstring follows below:\n
""" + (pd.DataFrame.query.__doc__
if pd.DataFrame.query.__doc__ is not None else '')
name = 'query-%s' % tokenize(self, expr)
if kwargs:
name = name + '--' + tokenize(kwargs)
dsk = dict(((name, i), (apply, M.query,
((self._name, i), (expr,), kwargs)))
for i in range(self.npartitions))
else:
dsk = dict(((name, i), (M.query, (self._name, i), expr))
for i in range(self.npartitions))
meta = self._meta.query(expr, **kwargs)
return new_dd_object(merge(dsk, self.dask), name,
meta, self.divisions)
@derived_from(pd.DataFrame)
def eval(self, expr, inplace=None, **kwargs):
if '=' in expr and inplace in (True, None):
raise NotImplementedError("Inplace eval not supported."
" Please use inplace=False")
meta = self._meta.eval(expr, inplace=inplace, **kwargs)
return self.map_partitions(M.eval, expr, meta=meta, inplace=inplace, **kwargs)
@derived_from(pd.DataFrame)
def dropna(self, how='any', subset=None):
return self.map_partitions(M.dropna, how=how, subset=subset)
@derived_from(pd.DataFrame)
def clip(self, lower=None, upper=None):
return self.map_partitions(M.clip, lower=lower, upper=upper)
@derived_from(pd.DataFrame)
def clip_lower(self, threshold):
return self.map_partitions(M.clip_lower, threshold=threshold)
@derived_from(pd.DataFrame)
def clip_upper(self, threshold):
return self.map_partitions(M.clip_upper, threshold=threshold)
@derived_from(pd.DataFrame)
def to_timestamp(self, freq=None, how='start', axis=0):
df = elemwise(M.to_timestamp, self, freq, how, axis)
df.divisions = tuple(pd.Index(self.divisions).to_timestamp())
return df
def to_castra(self, fn=None, categories=None, sorted_index_column=None,
compute=True, get=get_sync):
""" Write DataFrame to Castra on-disk store
See https://github.com/blosc/castra for details
See Also
--------
Castra.to_dask
"""
from .io import to_castra
return to_castra(self, fn, categories, sorted_index_column,
compute=compute, get=get)
def to_bag(self, index=False):
"""Convert to a dask Bag of tuples of each row.
Parameters
----------
index : bool, optional
If True, the index is included as the first element of each tuple.
Default is False.
"""
from .io import to_bag
return to_bag(self, index)
def _get_numeric_data(self, how='any', subset=None):
# calculate columns to avoid unnecessary calculation
numerics = self._meta._get_numeric_data()
if len(numerics.columns) < len(self.columns):
name = self._token_prefix + '-get_numeric_data'
return self.map_partitions(M._get_numeric_data,
meta=numerics, token=name)
else:
# use myself if all numerics
return self
@classmethod
def _validate_axis(cls, axis=0):
if axis not in (0, 1, 'index', 'columns', None):
raise ValueError('No axis named {0}'.format(axis))
# convert to numeric axis
return {None: 0, 'index': 0, 'columns': 1}.get(axis, axis)
@derived_from(pd.DataFrame)
def drop(self, labels, axis=0, dtype=None):
if axis != 1:
raise NotImplementedError("Drop currently only works for axis=1")
if dtype is not None:
return elemwise(drop_columns, self, labels, dtype)
else:
return elemwise(M.drop, self, labels, axis)
@derived_from(pd.DataFrame)
def merge(self, right, how='inner', on=None, left_on=None, right_on=None,
left_index=False, right_index=False, suffixes=('_x', '_y'),
indicator=False, npartitions=None, shuffle=None):
if not isinstance(right, (DataFrame, pd.DataFrame)):
raise ValueError('right must be DataFrame')
from .multi import merge
return merge(self, right, how=how, on=on, left_on=left_on,
right_on=right_on, left_index=left_index,
right_index=right_index, suffixes=suffixes,
npartitions=npartitions, indicator=indicator,
shuffle=shuffle)
@derived_from(pd.DataFrame)
def join(self, other, on=None, how='left',
lsuffix='', rsuffix='', npartitions=None, shuffle=None):
if not isinstance(other, (DataFrame, pd.DataFrame)):
raise ValueError('other must be DataFrame')
from .multi import merge
return merge(self, other, how=how,
left_index=on is None, right_index=True,
left_on=on, suffixes=[lsuffix, rsuffix],
npartitions=npartitions, shuffle=shuffle)
@derived_from(pd.DataFrame)
def append(self, other):
if isinstance(other, Series):
            msg = ('Unable to append dd.Series to dd.DataFrame. '
                   'Use pd.Series to append as a row.')
raise ValueError(msg)
elif isinstance(other, pd.Series):
other = other.to_frame().T
return super(DataFrame, self).append(other)
@derived_from(pd.DataFrame)
def iterrows(self):
for i in range(self.npartitions):
df = self.get_partition(i).compute()
for row in df.iterrows():
yield row
@derived_from(pd.DataFrame)
def itertuples(self):
for i in range(self.npartitions):
df = self.get_partition(i).compute()
for row in df.itertuples():
yield row
@classmethod
def _bind_operator_method(cls, name, op):
""" bind operator method like DataFrame.add to this class """
# name must be explicitly passed for div method whose name is truediv
def meth(self, other, axis='columns', level=None, fill_value=None):
if level is not None:
raise NotImplementedError('level must be None')
axis = self._validate_axis(axis)
if axis in (1, 'columns'):
# When axis=1 and other is a series, `other` is transposed
# and the operator is applied broadcast across rows. This
# isn't supported with dd.Series.
if isinstance(other, Series):
msg = 'Unable to {0} dd.Series with axis=1'.format(name)
raise ValueError(msg)
elif isinstance(other, pd.Series):
# Special case for pd.Series to avoid unwanted partitioning
# of other. We pass it in as a kwarg to prevent this.
meta = _emulate(op, self, other=other, axis=axis,
fill_value=fill_value)
return map_partitions(op, self, other=other, meta=meta,
axis=axis, fill_value=fill_value)
meta = _emulate(op, self, other, axis=axis, fill_value=fill_value)
return map_partitions(op, self, other, meta=meta,
axis=axis, fill_value=fill_value)
meth.__doc__ = op.__doc__
bind_method(cls, name, meth)
@classmethod
def _bind_comparison_method(cls, name, comparison):
""" bind comparison method like DataFrame.add to this class """
def meth(self, other, axis='columns', level=None):
if level is not None:
raise NotImplementedError('level must be None')
axis = self._validate_axis(axis)
return elemwise(comparison, self, other, axis=axis)
meth.__doc__ = comparison.__doc__
bind_method(cls, name, meth)
@insert_meta_param_description(pad=12)
def apply(self, func, axis=0, args=(), meta=no_default,
columns=no_default, **kwds):
""" Parallel version of pandas.DataFrame.apply
This mimics the pandas version except for the following:
1. Only ``axis=1`` is supported (and must be specified explicitly).
2. The user should provide output metadata via the `meta` keyword.
Parameters
----------
func : function
Function to apply to each column/row
axis : {0 or 'index', 1 or 'columns'}, default 0
- 0 or 'index': apply function to each column (NOT SUPPORTED)
- 1 or 'columns': apply function to each row
$META
columns : list, scalar or None
            Deprecated, please use `meta` instead. If a list is given, the
            result is a DataFrame whose columns are the given list. Otherwise,
            the result is a Series whose name is the given scalar or None (no
            name). If the columns keyword is not given, dask tries to infer
            the result type by running the function on a small piece of fake
            data. This inference may take some time and lead to unexpected
            results.
args : tuple
Positional arguments to pass to function in addition to the array/series
Additional keyword arguments will be passed as keywords to the function
Returns
-------
applied : Series or DataFrame
Examples
--------
>>> import dask.dataframe as dd
>>> df = pd.DataFrame({'x': [1, 2, 3, 4, 5],
... 'y': [1., 2., 3., 4., 5.]})
>>> ddf = dd.from_pandas(df, npartitions=2)
Apply a function to row-wise passing in extra arguments in ``args`` and
``kwargs``:
>>> def myadd(row, a, b=1):
... return row.sum() + a + b
>>> res = ddf.apply(myadd, axis=1, args=(2,), b=1.5)
By default, dask tries to infer the output metadata by running your
provided function on some fake data. This works well in many cases, but
can sometimes be expensive, or even fail. To avoid this, you can
manually specify the output metadata with the ``meta`` keyword. This
can be specified in many forms, for more information see
``dask.dataframe.utils.make_meta``.
Here we specify the output is a Series with name ``'x'``, and dtype
``float64``:
>>> res = ddf.apply(myadd, axis=1, args=(2,), b=1.5, meta=('x', 'f8'))
In the case where the metadata doesn't change, you can also pass in
the object itself directly:
>>> res = ddf.apply(lambda row: row + 1, axis=1, meta=ddf)
See Also
--------
dask.DataFrame.map_partitions
"""
axis = self._validate_axis(axis)
if axis == 0:
msg = ("dd.DataFrame.apply only supports axis=1\n"
" Try: df.apply(func, axis=1)")
raise NotImplementedError(msg)
if columns is not no_default:
warnings.warn("`columns` is deprecated, please use `meta` instead")
if meta is no_default and isinstance(columns, (pd.DataFrame, pd.Series)):
meta = columns
if meta is no_default:
msg = ("`meta` is not specified, inferred from partial data. "
"Please provide `meta` if the result is unexpected.\n"
" Before: .apply(func)\n"
" After: .apply(func, meta={'x': 'f8', 'y': 'f8'}) for dataframe result\n"
" or: .apply(func, meta=('x', 'f8')) for series result")
warnings.warn(msg)
meta = _emulate(M.apply, self._meta_nonempty, func,
axis=axis, args=args, **kwds)
return map_partitions(M.apply, self, func, axis,
False, False, None, args, meta=meta, **kwds)
@derived_from(pd.DataFrame)
def applymap(self, func, meta='__no_default__'):
return elemwise(M.applymap, self, func, meta=meta)
@derived_from(pd.DataFrame)
def round(self, decimals=0):
return elemwise(M.round, self, decimals)
@derived_from(pd.DataFrame)
def cov(self, min_periods=None):
return cov_corr(self, min_periods)
@derived_from(pd.DataFrame)
def corr(self, method='pearson', min_periods=None):
if method != 'pearson':
raise NotImplementedError("Only Pearson correlation has been "
"implemented")
return cov_corr(self, min_periods, True)
def info(self, buf=None, verbose=False, memory_usage=False):
"""
Concise summary of a Dask DataFrame.
"""
if buf is None:
import sys
buf = sys.stdout
lines = [str(type(self))]
if len(self.columns) == 0:
lines.append('Index: 0 entries')
lines.append('Empty %s' % type(self).__name__)
put_lines(buf, lines)
return
# Group and execute the required computations
computations = {}
if verbose:
computations.update({'index': self.index, 'count': self.count()})
if memory_usage:
computations.update({'memory_usage': self.map_partitions(M.memory_usage, index=True)})
computations = dict(zip(computations.keys(), da.compute(*computations.values())))
column_template = "{0:<%d} {1}" % (self.columns.str.len().max() + 5)
if verbose:
index = computations['index']
counts = computations['count']
lines.append(index.summary())
column_template = column_template.format('{0}', '{1} non-null {2}')
column_info = [column_template.format(*x) for x in zip(self.columns, counts, self.dtypes)]
else:
column_info = [column_template.format(*x) for x in zip(self.columns, self.dtypes)]
lines.append('Data columns (total {} columns):'.format(len(self.columns)))
lines.extend(column_info)
dtype_counts = ['%s(%d)' % k for k in sorted(self.dtypes.value_counts().iteritems(), key=str)]
lines.append('dtypes: {}'.format(', '.join(dtype_counts)))
if memory_usage:
memory_int = computations['memory_usage'].sum()
lines.append('memory usage: {}\n'.format(memory_repr(memory_int)))
put_lines(buf, lines)
def pivot_table(self, index=None, columns=None,
values=None, aggfunc='mean'):
"""
Create a spreadsheet-style pivot table as a DataFrame. Target ``columns``
must have category dtype to infer result's ``columns``.
``index``, ``columns``, ``values`` and ``aggfunc`` must be all scalar.
Parameters
----------
values : scalar
column to aggregate
index : scalar
column to be index
columns : scalar
column to be columns
aggfunc : {'mean', 'sum', 'count'}, default 'mean'
Returns
-------
table : DataFrame
"""
from .reshape import pivot_table
return pivot_table(self, index=index, columns=columns, values=values,
aggfunc=aggfunc)
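    # Illustrative usage sketch (a hypothetical ``ddf`` with columns ``'day'``,
    # ``'shop'`` and ``'sales'`` is assumed); ``'shop'`` must have category
    # dtype so the output columns can be inferred without computing:
    #
    # >>> ddf = ddf.categorize(columns=['shop'])                   # doctest: +SKIP
    # >>> table = ddf.pivot_table(index='day', columns='shop',
    # ...                         values='sales', aggfunc='sum')   # doctest: +SKIP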
# bind operators
for op in [operator.abs, operator.add, operator.and_, operator_div,
operator.eq, operator.gt, operator.ge, operator.inv,
operator.lt, operator.le, operator.mod, operator.mul,
operator.ne, operator.neg, operator.or_, operator.pow,
operator.sub, operator.truediv, operator.floordiv, operator.xor]:
_Frame._bind_operator(op)
Scalar._bind_operator(op)
for name in ['add', 'sub', 'mul', 'div',
'truediv', 'floordiv', 'mod', 'pow',
'radd', 'rsub', 'rmul', 'rdiv',
'rtruediv', 'rfloordiv', 'rmod', 'rpow']:
meth = getattr(pd.DataFrame, name)
DataFrame._bind_operator_method(name, meth)
meth = getattr(pd.Series, name)
Series._bind_operator_method(name, meth)
for name in ['lt', 'gt', 'le', 'ge', 'ne', 'eq']:
meth = getattr(pd.DataFrame, name)
DataFrame._bind_comparison_method(name, meth)
meth = getattr(pd.Series, name)
Series._bind_comparison_method(name, meth)
def elemwise_property(attr, s):
meta = pd.Series([], dtype=getattr(s._meta, attr).dtype)
return map_partitions(getattr, s, attr, meta=meta)
for name in ['nanosecond', 'microsecond', 'millisecond', 'second', 'minute',
'hour', 'day', 'dayofweek', 'dayofyear', 'week', 'weekday',
'weekofyear', 'month', 'quarter', 'year']:
setattr(Index, name, property(partial(elemwise_property, name)))
def elemwise(op, *args, **kwargs):
""" Elementwise operation for dask.Dataframes """
meta = kwargs.pop('meta', no_default)
_name = funcname(op) + '-' + tokenize(op, kwargs, *args)
args = _maybe_from_pandas(args)
from .multi import _maybe_align_partitions
args = _maybe_align_partitions(args)
dasks = [arg for arg in args if isinstance(arg, (_Frame, Scalar))]
dfs = [df for df in dasks if isinstance(df, _Frame)]
divisions = dfs[0].divisions
n = len(divisions) - 1
other = [(i, arg) for i, arg in enumerate(args)
if not isinstance(arg, (_Frame, Scalar))]
# adjust the key length of Scalar
keys = [d._keys() * n if isinstance(d, Scalar)
else d._keys() for d in dasks]
if other:
dsk = dict(((_name, i),
(apply, partial_by_order, list(frs),
{'function': op, 'other': other}))
for i, frs in enumerate(zip(*keys)))
else:
dsk = dict(((_name, i), (op,) + frs) for i, frs in enumerate(zip(*keys)))
dsk = merge(dsk, *[d.dask for d in dasks])
if meta is no_default:
if len(dfs) >= 2 and len(dasks) != len(dfs):
# should not occur in current funcs
msg = 'elemwise with 2 or more DataFrames and Scalar is not supported'
raise NotImplementedError(msg)
meta = _emulate(op, *args, **kwargs)
return new_dd_object(dsk, _name, meta, divisions)
def _maybe_from_pandas(dfs):
from .io import from_pandas
dfs = [from_pandas(df, 1) if isinstance(df, (pd.Series, pd.DataFrame))
else df for df in dfs]
return dfs
@insert_meta_param_description
def apply_concat_apply(args, chunk=None, aggregate=None, combine=None,
meta=no_default, token=None, split_every=None,
chunk_kwargs=None, aggregate_kwargs=None,
combine_kwargs=None, **kwargs):
"""Apply a function to blocks, then concat, then apply again
Parameters
----------
args :
Positional arguments for the `chunk` function. All `dask.dataframe`
objects should be partitioned and indexed equivalently.
chunk : function [block-per-arg] -> block
Function to operate on each block of data
aggregate : function concatenated-block -> block
Function to operate on the concatenated result of chunk
combine : function concatenated-block -> block, optional
Function to operate on intermediate concatenated results of chunk
in a tree-reduction. If not provided, defaults to aggregate.
$META
token : str, optional
The name to use for the output keys.
split_every : int, optional
Group partitions into groups of this size while performing a
tree-reduction. If set to False, no tree-reduction will be used,
and all intermediates will be concatenated and passed to ``aggregate``.
Default is 8.
chunk_kwargs : dict, optional
Keywords for the chunk function only.
aggregate_kwargs : dict, optional
Keywords for the aggregate function only.
combine_kwargs : dict, optional
Keywords for the combine function only
kwargs :
All remaining keywords will be passed to ``chunk``, ``aggregate``, and
``combine``.
Examples
--------
>>> def chunk(a_block, b_block):
... pass
>>> def agg(df):
... pass
>>> apply_concat_apply([a, b], chunk=chunk, aggregate=agg) # doctest: +SKIP
"""
if chunk_kwargs is None:
chunk_kwargs = dict()
if aggregate_kwargs is None:
aggregate_kwargs = dict()
chunk_kwargs.update(kwargs)
aggregate_kwargs.update(kwargs)
if combine is None:
if combine_kwargs:
raise ValueError("`combine_kwargs` provided with no `combine`")
combine = aggregate
combine_kwargs = aggregate_kwargs
else:
if combine_kwargs is None:
combine_kwargs = dict()
combine_kwargs.update(kwargs)
if not isinstance(args, (tuple, list)):
args = [args]
npartitions = set(arg.npartitions for arg in args
if isinstance(arg, _Frame))
if len(npartitions) > 1:
raise ValueError("All arguments must have same number of partitions")
npartitions = npartitions.pop()
if split_every is None:
split_every = 8
elif split_every is False:
split_every = npartitions
elif split_every < 2 or not isinstance(split_every, int):
raise ValueError("split_every must be an integer >= 2")
token_key = tokenize(token or (chunk, aggregate), meta, args,
chunk_kwargs, aggregate_kwargs, combine_kwargs,
split_every)
# Chunk
a = '{0}-chunk-{1}'.format(token or funcname(chunk), token_key)
if len(args) == 1 and isinstance(args[0], _Frame) and not chunk_kwargs:
dsk = {(a, i): (chunk, key) for i, key in enumerate(args[0]._keys())}
else:
dsk = {(a, i): (apply, chunk, [(x._name, i) if isinstance(x, _Frame)
else x for x in args], chunk_kwargs)
for i in range(args[0].npartitions)}
# Combine
prefix = '{0}-combine-{1}-'.format(token or funcname(combine), token_key)
k = npartitions
b = a
depth = 0
while k > split_every:
b = prefix + str(depth)
for part_i, inds in enumerate(partition_all(split_every, range(k))):
conc = (_concat, [(a, i) for i in inds])
if combine_kwargs:
dsk[(b, part_i)] = (apply, combine, [conc], combine_kwargs)
else:
dsk[(b, part_i)] = (combine, conc)
k = part_i + 1
a = b
depth += 1
# Aggregate
b = '{0}-agg-{1}'.format(token or funcname(aggregate), token_key)
conc = (_concat, [(a, i) for i in range(k)])
if aggregate_kwargs:
dsk[(b, 0)] = (apply, aggregate, [conc], aggregate_kwargs)
else:
dsk[(b, 0)] = (aggregate, conc)
if meta is no_default:
meta_chunk = _emulate(apply, chunk, args, chunk_kwargs)
meta = _emulate(apply, aggregate, [_concat([meta_chunk])],
aggregate_kwargs)
meta = make_meta(meta)
for arg in args:
if isinstance(arg, _Frame):
dsk.update(arg.dask)
return new_dd_object(dsk, b, meta, [None, None])
aca = apply_concat_apply
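# Illustrative sketch of the chunk/concat/aggregate pattern (the ``ddf`` name
# and its ``'key'``/``'value'`` columns are assumptions): a per-partition
# groupby-sum whose partial results are concatenated and summed again in a
# tree-reduction.
#
# >>> def chunk(df):                                               # doctest: +SKIP
# ...     return df.groupby('key')['value'].sum()
# >>> def agg(s):                                                  # doctest: +SKIP
# ...     return s.groupby(level=0).sum()
# >>> totals = aca([ddf], chunk=chunk, aggregate=agg,
# ...              meta=('value', 'f8'), split_every=8)            # doctest: +SKIP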
def _extract_meta(x, nonempty=False):
"""
Extract internal cache data (``_meta``) from dd.DataFrame / dd.Series
"""
if isinstance(x, (_Frame, Scalar)):
return x._meta_nonempty if nonempty else x._meta
elif isinstance(x, list):
return [_extract_meta(_x, nonempty) for _x in x]
elif isinstance(x, tuple):
return tuple([_extract_meta(_x, nonempty) for _x in x])
elif isinstance(x, dict):
res = {}
for k in x:
res[k] = _extract_meta(x[k], nonempty)
return res
else:
return x
def _emulate(func, *args, **kwargs):
"""
Apply a function using args / kwargs. If arguments contain dd.DataFrame /
dd.Series, using internal cache (``_meta``) for calculation
"""
with raise_on_meta_error(funcname(func)):
return func(*_extract_meta(args, True), **_extract_meta(kwargs, True))
@insert_meta_param_description
def map_partitions(func, *args, **kwargs):
""" Apply Python function on each DataFrame partition.
Parameters
----------
func : function
Function applied to each partition.
args, kwargs :
Arguments and keywords to pass to the function. At least one of the
args should be a Dask.dataframe.
$META
"""
meta = kwargs.pop('meta', no_default)
if meta is not no_default:
meta = make_meta(meta)
assert callable(func)
if 'token' in kwargs:
name = kwargs.pop('token')
token = tokenize(meta, *args, **kwargs)
else:
name = funcname(func)
token = tokenize(func, meta, *args, **kwargs)
name = '{0}-{1}'.format(name, token)
from .multi import _maybe_align_partitions
args = _maybe_from_pandas(args)
args = _maybe_align_partitions(args)
if meta is no_default:
meta = _emulate(func, *args, **kwargs)
if all(isinstance(arg, Scalar) for arg in args):
dask = {(name, 0):
(apply, func, (tuple, [(arg._name, 0) for arg in args]), kwargs)}
return Scalar(merge(dask, *[arg.dask for arg in args]), name, meta)
elif not isinstance(meta, (pd.Series, pd.DataFrame, pd.Index)):
# If `meta` is not a pandas object, the concatenated results will be a
# different type
meta = _concat([meta])
meta = make_meta(meta)
dfs = [df for df in args if isinstance(df, _Frame)]
dsk = {}
for i in range(dfs[0].npartitions):
values = [(arg._name, i if isinstance(arg, _Frame) else 0)
if isinstance(arg, (_Frame, Scalar)) else arg for arg in args]
dsk[(name, i)] = (apply_and_enforce, func, values, kwargs, meta)
dasks = [arg.dask for arg in args if isinstance(arg, (_Frame, Scalar))]
return new_dd_object(merge(dsk, *dasks), name, meta, args[0].divisions)
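# Illustrative usage sketch (hypothetical ``ddf`` with float columns ``x`` and
# ``y``); the function receives one pandas object per partition and ``meta``
# describes the expected output, so no sample computation is needed:
#
# >>> summed = map_partitions(lambda df: df.x + df.y, ddf,
# ...                         meta=('sum', 'f8'))                  # doctest: +SKIP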
def apply_and_enforce(func, args, kwargs, meta):
"""Apply a function, and enforce the output to match meta
Ensures the output has the same columns, even if empty."""
df = func(*args, **kwargs)
if isinstance(df, (pd.DataFrame, pd.Series, pd.Index)):
if len(df) == 0:
return meta
c = meta.columns if isinstance(df, pd.DataFrame) else meta.name
return _rename(c, df)
return df
def _rename(columns, df):
"""
Rename columns of pd.DataFrame or name of pd.Series.
Not for dd.DataFrame or dd.Series.
Parameters
----------
columns : tuple, string, pd.DataFrame or pd.Series
Column names, Series name or pandas instance which has the
target column names / name.
df : pd.DataFrame or pd.Series
target DataFrame / Series to be renamed
"""
assert not isinstance(df, _Frame)
if columns is no_default:
return df
if isinstance(columns, Iterator):
columns = list(columns)
if isinstance(df, pd.DataFrame):
if isinstance(columns, pd.DataFrame):
columns = columns.columns
columns = pd.Index(columns)
if len(columns) == len(df.columns):
if columns.equals(df.columns):
# if target is identical, rename is not necessary
return df
            # deep=False doesn't copy any data/indices, so this is cheap
df = df.copy(deep=False)
df.columns = columns
return df
elif isinstance(df, (pd.Series, pd.Index)):
if isinstance(columns, (pd.Series, pd.Index)):
columns = columns.name
if df.name == columns:
return df
return df.rename(columns)
# map_partition may pass other types
return df
def _rename_dask(df, names):
"""
Destructively rename columns of dd.DataFrame or name of dd.Series.
Not for pd.DataFrame or pd.Series.
Internaly used to overwrite dd.DataFrame.columns and dd.Series.name
We can't use map_partition because it applies function then rename
Parameters
----------
df : dd.DataFrame or dd.Series
target DataFrame / Series to be renamed
names : tuple, string
Column names/Series name
"""
assert isinstance(df, _Frame)
metadata = _rename(names, df._meta)
name = 'rename-{0}'.format(tokenize(df, metadata))
dsk = {}
for i in range(df.npartitions):
dsk[name, i] = (_rename, metadata, (df._name, i))
return new_dd_object(merge(dsk, df.dask), name, metadata, df.divisions)
def quantile(df, q):
"""Approximate quantiles of Series.
Parameters
----------
q : list/array of floats
        Iterable of numbers ranging from 0 to 1 for the desired quantiles
"""
assert isinstance(df, Series)
from dask.array.percentile import _percentile, merge_percentiles
# currently, only Series has quantile method
if isinstance(df, Index):
meta = pd.Series(df._meta_nonempty).quantile(q)
else:
meta = df._meta_nonempty.quantile(q)
if isinstance(meta, pd.Series):
# Index.quantile(list-like) must be pd.Series, not pd.Index
df_name = df.name
finalize_tsk = lambda tsk: (pd.Series, tsk, q, None, df_name)
return_type = Series
else:
finalize_tsk = lambda tsk: (getitem, tsk, 0)
return_type = Scalar
q = [q]
# pandas uses quantile in [0, 1]
# numpy / everyone else uses [0, 100]
qs = np.asarray(q) * 100
token = tokenize(df, qs)
if len(qs) == 0:
name = 'quantiles-' + token
empty_index = pd.Index([], dtype=float)
return Series({(name, 0): pd.Series([], name=df.name, index=empty_index)},
name, df._meta, [None, None])
else:
new_divisions = [np.min(q), np.max(q)]
name = 'quantiles-1-' + token
val_dsk = dict(((name, i), (_percentile, (getattr, key, 'values'), qs))
for i, key in enumerate(df._keys()))
name2 = 'quantiles-2-' + token
len_dsk = dict(((name2, i), (len, key)) for i, key in enumerate(df._keys()))
name3 = 'quantiles-3-' + token
merge_dsk = {(name3, 0): finalize_tsk((merge_percentiles, qs,
[qs] * df.npartitions,
sorted(val_dsk), sorted(len_dsk)))}
dsk = merge(df.dask, val_dsk, len_dsk, merge_dsk)
return return_type(dsk, name3, meta, new_divisions)
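# Illustrative usage sketch (hypothetical dask Series ``s``); the requested
# quantiles follow the pandas convention of fractions between 0 and 1:
#
# >>> quartiles = quantile(s, [0.25, 0.5, 0.75])                   # doctest: +SKIP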
def cov_corr(df, min_periods=None, corr=False, scalar=False):
"""DataFrame covariance and pearson correlation.
Computes pairwise covariance or correlation of columns, excluding NA/null
values.
Parameters
----------
df : DataFrame
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result.
corr : bool, optional
If True, compute the Pearson correlation. If False [default], compute
the covariance.
scalar : bool, optional
If True, compute covariance between two variables as a scalar. Only
valid if `df` has 2 columns. If False [default], compute the entire
covariance/correlation matrix.
"""
if min_periods is None:
min_periods = 2
elif min_periods < 2:
raise ValueError("min_periods must be >= 2")
prefix = 'corr' if corr else 'cov'
df = df._get_numeric_data()
name = '{0}-agg-{1}'.format(prefix, tokenize(df, min_periods, scalar))
if scalar and len(df.columns) != 2:
raise ValueError("scalar only valid for 2 column dataframe")
k = '{0}-chunk-{1}'.format(prefix, df._name)
dsk = dict(((k, i), (cov_corr_chunk, f, corr))
for (i, f) in enumerate(df._keys()))
dsk[(name, 0)] = (cov_corr_agg, list(dsk.keys()), df._meta, min_periods,
corr, scalar)
dsk = merge(df.dask, dsk)
if scalar:
return Scalar(dsk, name, 'f8')
meta = make_meta([(c, 'f8') for c in df.columns], index=df._meta.columns)
return DataFrame(dsk, name, meta, (df.columns[0], df.columns[-1]))
def cov_corr_chunk(df, corr=False):
"""Chunk part of a covariance or correlation computation"""
mat = df.values
mask = np.isfinite(mat)
keep = np.bitwise_and(mask[:, None, :], mask[:, :, None])
x = np.where(keep, mat[:, None, :], np.nan)
sums = np.nansum(x, 0)
counts = keep.astype('int').sum(0)
cov = df.cov().values
dtype = [('sum', sums.dtype), ('count', counts.dtype), ('cov', cov.dtype)]
if corr:
m = np.nansum((x - sums / np.where(counts, counts, np.nan)) ** 2, 0)
dtype.append(('m', m.dtype))
out = np.empty(counts.shape, dtype=dtype)
out['sum'] = sums
out['count'] = counts
out['cov'] = cov * (counts - 1)
if corr:
out['m'] = m
return out
def cov_corr_agg(data, meta, min_periods=2, corr=False, scalar=False):
"""Aggregation part of a covariance or correlation computation"""
data = np.concatenate(data).reshape((len(data),) + data[0].shape)
sums = np.nan_to_num(data['sum'])
counts = data['count']
cum_sums = np.cumsum(sums, 0)
cum_counts = np.cumsum(counts, 0)
s1 = cum_sums[:-1]
s2 = sums[1:]
n1 = cum_counts[:-1]
n2 = counts[1:]
d = (s2 / n2) - (s1 / n1)
C = (np.nansum((n1 * n2) / (n1 + n2) * (d * d.transpose((0, 2, 1))), 0) +
np.nansum(data['cov'], 0))
C[cum_counts[-1] < min_periods] = np.nan
nobs = np.where(cum_counts[-1], cum_counts[-1], np.nan)
if corr:
mu = cum_sums[-1] / nobs
counts_na = np.where(counts, counts, np.nan)
m2 = np.nansum(data['m'] + counts * (sums / counts_na - mu) ** 2,
axis=0)
den = np.sqrt(m2 * m2.T)
else:
den = nobs - 1
mat = C / den
if scalar:
return mat[0, 1]
return pd.DataFrame(mat, columns=meta.columns, index=meta.columns)
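# The aggregation above uses the standard pairwise co-moment update: for two
# blocks with sums s1, s2, counts n1, n2 and co-moments C1, C2,
#     C = C1 + C2 + (n1 * n2) / (n1 + n2) * d * d.T,  with d = s2/n2 - s1/n1
# which is what the cumulative sums/counts and the ``d * d.transpose(...)``
# term implement across all partitions, before dividing by ``nobs - 1`` for
# the covariance or by ``sqrt(m2 * m2.T)`` for the correlation.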
def pd_split(df, p, random_state=None):
""" Split DataFrame into multiple pieces pseudorandomly
>>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
... 'b': [2, 3, 4, 5, 6, 7]})
>>> a, b = pd_split(df, [0.5, 0.5], random_state=123) # roughly 50/50 split
>>> a
a b
1 2 3
2 3 4
5 6 7
>>> b
a b
0 1 2
3 4 5
4 5 6
"""
p = list(p)
index = pseudorandom(len(df), p, random_state)
return [df.iloc[index == i] for i in range(len(p))]
def _take_last(a, skipna=True):
"""
take last row (Series) of DataFrame / last value of Series
considering NaN.
Parameters
----------
a : pd.DataFrame or pd.Series
skipna : bool, default True
Whether to exclude NaN
"""
if skipna is False:
return a.iloc[-1]
else:
# take last valid value excluding NaN, NaN location may be different
# in each columns
group_dummy = np.ones(len(a.index))
last_row = a.groupby(group_dummy).last()
if isinstance(a, pd.DataFrame):
return pd.Series(last_row.values[0], index=a.columns)
else:
return last_row.values[0]
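# Illustrative behaviour sketch:
#
# >>> _take_last(pd.Series([1.0, np.nan]))                         # doctest: +SKIP
# 1.0
# >>> _take_last(pd.Series([1.0, np.nan]), skipna=False)           # doctest: +SKIP
# nan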
def repartition_divisions(a, b, name, out1, out2, force=False):
""" dask graph to repartition dataframe by new divisions
Parameters
----------
a : tuple
old divisions
b : tuple, list
new divisions
name : str
name of old dataframe
out1 : str
name of temporary splits
out2 : str
name of new dataframe
force : bool, default False
Allows the expansion of the existing divisions.
If False then the new divisions lower and upper bounds must be
the same as the old divisions.
Examples
--------
>>> repartition_divisions([1, 3, 7], [1, 4, 6, 7], 'a', 'b', 'c') # doctest: +SKIP
{('b', 0): (<function _loc_repartition at ...>, ('a', 0), 1, 3, False),
('b', 1): (<function _loc_repartition at ...>, ('a', 1), 3, 4, False),
('b', 2): (<function _loc_repartition at ...>, ('a', 1), 4, 6, False),
     ('b', 3): (<function _loc_repartition at ...>, ('a', 1), 6, 7, False),
('c', 0): (<function concat at ...>,
(<type 'list'>, [('b', 0), ('b', 1)])),
('c', 1): ('b', 2),
('c', 2): ('b', 3)}
"""
if not isinstance(b, (list, tuple)):
raise ValueError('New division must be list or tuple')
b = list(b)
if len(b) < 2:
# minimum division is 2 elements, like [0, 0]
        raise ValueError('New division must have at least 2 elements')
if b != sorted(b):
raise ValueError('New division must be sorted')
if len(b[:-1]) != len(list(unique(b[:-1]))):
msg = 'New division must be unique, except for the last element'
raise ValueError(msg)
if force:
if a[0] < b[0]:
msg = ('left side of the new division must be equal or smaller '
'than old division')
raise ValueError(msg)
if a[-1] > b[-1]:
msg = ('right side of the new division must be equal or larger '
'than old division')
raise ValueError(msg)
else:
if a[0] != b[0]:
msg = 'left side of old and new divisions are different'
raise ValueError(msg)
if a[-1] != b[-1]:
msg = 'right side of old and new divisions are different'
raise ValueError(msg)
def _is_single_last_div(x):
"""Whether last division only contains single label"""
return len(x) >= 2 and x[-1] == x[-2]
c = [a[0]]
d = dict()
low = a[0]
i, j = 1, 1 # indices for old/new divisions
k = 0 # index for temp divisions
last_elem = _is_single_last_div(a)
# process through old division
# left part of new division can be processed in this loop
while (i < len(a) and j < len(b)):
if a[i] < b[j]:
# tuple is something like:
# (methods._loc_partition, ('from_pandas-#', 0), 3, 4, False))
d[(out1, k)] = (methods._loc_repartition, (name, i - 1), low, a[i], False)
low = a[i]
i += 1
elif a[i] > b[j]:
d[(out1, k)] = (methods._loc_repartition, (name, i - 1), low, b[j], False)
low = b[j]
j += 1
else:
d[(out1, k)] = (methods._loc_repartition, (name, i - 1), low, b[j], False)
low = b[j]
i += 1
j += 1
c.append(low)
k += 1
# right part of new division can remain
if a[-1] < b[-1] or b[-1] == b[-2]:
for _j in range(j, len(b)):
# always use right-most of old division
# because it may contain last element
m = len(a) - 2
d[(out1, k)] = (methods._loc_repartition, (name, m), low, b[_j], False)
low = b[_j]
c.append(low)
k += 1
else:
# even if new division is processed through,
# right-most element of old division can remain
if last_elem and i < len(a):
d[(out1, k)] = (methods._loc_repartition, (name, i - 1), a[i], a[i], False)
k += 1
c.append(a[-1])
# replace last element of tuple with True
d[(out1, k - 1)] = d[(out1, k - 1)][:-1] + (True,)
i, j = 0, 1
last_elem = _is_single_last_div(c)
while j < len(b):
tmp = []
while c[i] < b[j]:
tmp.append((out1, i))
i += 1
if last_elem and c[i] == b[-1] and (b[-1] != b[-2] or j == len(b) - 1) and i < k:
# append if last split is not included
tmp.append((out1, i))
i += 1
if len(tmp) == 0:
# dummy slice to return empty DataFrame or Series,
# which retain original data attributes (columns / name)
d[(out2, j - 1)] = (methods._loc_repartition, (name, 0), a[0], a[0], False)
elif len(tmp) == 1:
d[(out2, j - 1)] = tmp[0]
else:
if not tmp:
raise ValueError('check for duplicate partitions\nold:\n%s\n\n'
'new:\n%s\n\ncombined:\n%s'
% (pformat(a), pformat(b), pformat(c)))
d[(out2, j - 1)] = (pd.concat, tmp)
j += 1
return d
def repartition_npartitions(df, npartitions):
""" Repartition dataframe to a smaller number of partitions """
npartitions_ratio = df.npartitions / npartitions
new_partitions_boundaries = [int(new_partition_index * npartitions_ratio)
for new_partition_index in range(npartitions + 1)]
new_name = 'repartition-%d-%s' % (npartitions, tokenize(df))
dsk = {}
for new_partition_index in range(npartitions):
value = (pd.concat, [(df._name, old_partition_index)
for old_partition_index in
range(new_partitions_boundaries[new_partition_index],
new_partitions_boundaries[new_partition_index + 1])])
dsk[new_name, new_partition_index] = value
divisions = [df.divisions[new_partition_index]
for new_partition_index in new_partitions_boundaries]
return DataFrame(merge(df.dask, dsk), new_name, df._meta, divisions)
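# Illustrative usage sketch (hypothetical ``ddf`` with 10 partitions); only a
# reduction of the partition count is supported here, neighbouring partitions
# are simply concatenated:
#
# >>> smaller = repartition_npartitions(ddf, 3)                    # doctest: +SKIP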
def repartition(df, divisions=None, force=False):
""" Repartition dataframe along new divisions
Dask.DataFrame objects are partitioned along their index. Often when
multiple dataframes interact we need to align these partitionings. The
``repartition`` function constructs a new DataFrame object holding the same
data but partitioned on different values. It does this by performing a
sequence of ``loc`` and ``concat`` calls to split and merge the previous
generation of partitions.
Parameters
----------
divisions : list
List of partitions to be used
force : bool, default False
Allows the expansion of the existing divisions.
If False then the new divisions lower and upper bounds must be
the same as the old divisions.
Examples
--------
>>> df = df.repartition([0, 5, 10, 20]) # doctest: +SKIP
Also works on Pandas objects
>>> ddf = dd.repartition(df, [0, 5, 10, 20]) # doctest: +SKIP
"""
token = tokenize(df, divisions)
if isinstance(df, _Frame):
tmp = 'repartition-split-' + token
out = 'repartition-merge-' + token
dsk = repartition_divisions(df.divisions, divisions,
df._name, tmp, out, force=force)
return new_dd_object(merge(df.dask, dsk), out,
df._meta, divisions)
elif isinstance(df, (pd.Series, pd.DataFrame)):
name = 'repartition-dataframe-' + token
from .utils import shard_df_on_index
dfs = shard_df_on_index(df, divisions[1:-1])
dsk = dict(((name, i), df) for i, df in enumerate(dfs))
return new_dd_object(dsk, name, df, divisions)
raise ValueError('Data must be DataFrame or Series')
def set_sorted_index(df, index, drop=True, **kwargs):
if not isinstance(index, Series):
meta = df._meta.set_index(index, drop=drop)
else:
meta = df._meta.set_index(index._meta, drop=drop)
result = map_partitions(M.set_index, df, index, drop=drop, meta=meta)
return compute_divisions(result, **kwargs)
def compute_divisions(df, **kwargs):
mins = df.index.map_partitions(M.min, meta=df.index)
maxes = df.index.map_partitions(M.max, meta=df.index)
mins, maxes = compute(mins, maxes, **kwargs)
if (sorted(mins) != list(mins) or
sorted(maxes) != list(maxes) or
any(a > b for a, b in zip(mins, maxes))):
raise ValueError("Partitions must be sorted ascending with the index",
mins, maxes)
divisions = tuple(mins) + (list(maxes)[-1],)
df = copy(df)
df.divisions = divisions
return df
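# Illustrative usage sketch (hypothetical ``ddf`` whose column ``'ts'`` is
# already sorted); ``set_sorted_index`` skips the shuffle and then computes
# the real per-partition min/max to fill in the divisions:
#
# >>> ddf2 = set_sorted_index(ddf, 'ts')                           # doctest: +SKIP
# >>> ddf2.known_divisions                                         # doctest: +SKIP
# True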
def _reduction_chunk(x, aca_chunk=None, **kwargs):
o = aca_chunk(x, **kwargs)
# Return a dataframe so that the concatenated version is also a dataframe
return o.to_frame().T if isinstance(o, pd.Series) else o
def _reduction_combine(x, aca_combine=None, **kwargs):
if isinstance(x, list):
x = pd.Series(x)
o = aca_combine(x, **kwargs)
# Return a dataframe so that the concatenated version is also a dataframe
return o.to_frame().T if isinstance(o, pd.Series) else o
def _reduction_aggregate(x, aca_aggregate=None, **kwargs):
if isinstance(x, list):
x = pd.Series(x)
return aca_aggregate(x, **kwargs)
def drop_columns(df, columns, dtype):
df = df.drop(columns, axis=1)
df.columns = df.columns.astype(dtype)
return df
def idxmaxmin_chunk(x, fn=None, skipna=True):
idx = getattr(x, fn)(skipna=skipna)
minmax = 'max' if fn == 'idxmax' else 'min'
value = getattr(x, minmax)(skipna=skipna)
if isinstance(x, pd.DataFrame):
return pd.DataFrame({'idx': idx, 'value': value})
return pd.DataFrame({'idx': [idx], 'value': [value]})
def idxmaxmin_row(x, fn=None, skipna=True):
x = x.set_index('idx')
idx = getattr(x.value, fn)(skipna=skipna)
minmax = 'max' if fn == 'idxmax' else 'min'
value = getattr(x.value, minmax)(skipna=skipna)
return pd.DataFrame({'idx': [idx], 'value': [value]})
def idxmaxmin_combine(x, fn=None, skipna=True):
return (x.groupby(level=0)
.apply(idxmaxmin_row, fn=fn, skipna=skipna)
.reset_index(level=1, drop=True))
def idxmaxmin_agg(x, fn=None, skipna=True, scalar=False):
res = idxmaxmin_combine(x, fn, skipna=skipna)['idx']
if scalar:
return res[0]
res.name = None
return res
def safe_head(df, n):
r = df.head(n=n)
if len(r) != n:
msg = ("Insufficient elements for `head`. {0} elements "
"requested, only {1} elements available. Try passing larger "
"`npartitions` to `head`.")
warnings.warn(msg.format(n, len(r)))
return r
``` |
{
"source": "jorisvandenbossche/ICES-python-data",
"score": 3
} |
#### File: notebooks/data/load_casualties.py
```python
import argparse
import urllib.request
import logging
from tempfile import gettempdir
from pathlib import Path
import pandas as pd
import numpy as np
logger = logging.getLogger(__name__)
def clean_casualties_data(casualties_raw):
"""Convert raw casualties data to english and restructured format"""
casualties = (
casualties_raw
.drop(columns=[col for col in casualties_raw.columns
if col.endswith("_FR")])
.drop(columns=[col for col in casualties_raw.columns
if col.startswith("CD_") and not col.endswith("_REFNIS")])
.rename(columns={name: name.removeprefix("TX_").removesuffix("_DESCR_NL")
for name in casualties_raw.columns})
.replace("Onbekend", None)
)
casualties["gender"] = casualties["SEX"].replace(
{"Vrouwelijk": "female", "Mannelijk": "male"}
)
casualties["DT_HOUR"] = casualties["DT_HOUR"].replace(99, 0)
casualties["datetime"] = pd.to_datetime(
casualties["DT_DAY"] + " " + casualties["DT_HOUR"].astype(str) + ":00"
)
casualties["age"] = casualties["AGE_CLS"].str.replace(
" tot ", " - ").str.removesuffix("jaar").str.strip()
casualties["age"] = casualties["age"].replace(
{"": None, "75 jaar en meer": ">75", ' ': None})
casualties["DAY_OF_WEEK"] = casualties["DAY_OF_WEEK"].replace({
"maandag": "Monday", "dinsdag": "Tuesday", "woensdag": "Wednesday",
"donderdag": "Thursday", "vrijdag": "Friday", "zaterdag": "Saturday",
"zondag": "Sunday"})
casualties["week_day"] = pd.Categorical(
casualties["DAY_OF_WEEK"],
categories=["Monday", "Tuesday", "Wednesday",
"Thursday", "Friday", "Saturday", "Sunday"],
ordered=True
)
casualties["victim_type"] = casualties["VICT_TYPE"].replace({
"Bestuurder": "Driver", "Bromfietser": "Moped driver",
"Passagier": "Passenger", "Motorfietser": 'Motorcyclist',
"Fietser": "Cyclist", "Voetganger": "Pedestrian",
"Autres victimes": None})
casualties["build_up_area"] = casualties["BUILD_UP_AREA"].replace({
"Binnen bebouwde kom": "Inside built-up area",
"Buiten bebouwde kom": "Outside built-up area",
" ": None})
casualties["ROAD_USR_TYPE"] = casualties["ROAD_USR_TYPE"].replace({
'Personenauto': 'Passenger car',
'Auto voor dubbel gebruik': 'Dual-purpose vehicle',
'Lichte vrachtauto': 'Light truck',
'Bromfiets': 'Moped',
'Bromfiets A (tweewielige)': 'Moped',
'Bromfiets B (tweewielige)': 'Moped',
'Bromfiets met 3 of 4 wielen': 'Moped',
'Motorfiets': 'Motorbike',
'Motorfiets meer dan 400 cc': 'Motorbike',
'Motorfiets niet meer dan 400 cc': 'Motorbike',
'Fiets': 'Bicycle',
'Elektrische fiets': 'Electric bicycle',
'Fiets met elektrische hulpmotor (<=250W en <=25km/u)': 'Electric bicycle',
'Gemotoriseerde fiets (<=1000W en <=25km/u)': 'Electric bicycle',
'Speed pedelec (<= 4000W en <=45km/u)': 'Speed pedelec',
'Gemotoriseerd voortbewegingstoestel (<=18km/u)': 'Electric bicycle',
'Trekker + aanhangwagen': 'Trailer',
'Trekker alleen': 'Trailer',
'Vrachtwagen': 'Truck',
'Ruiter': 'Horse rider',
'Bespannen voertuig': 'Horse rider',
'Andere voetganger': 'Pedestrian',
'Gehandicapte in rolstoel': 'Disabled person in a wheelchair',
'Voetganger die zijn (brom)fiets duwt': 'Pedestrian',
'Trolleybus, Tram': 'Tram',
'Minibus': 'Van',
'Autobus': 'Bus',
'Autocar': 'Bus',
'Autobus/Autocar': 'Bus',
'Kampeerwagen': 'Campervan',
'Landbouwtractor': 'Tractor',
'Andere weggebruiker': None,
'Niet ingevuld': None,
np.nan: None
})
casualties["LIGHT_COND"] = casualties["LIGHT_COND"].replace(
{'Bij klaarlichte dag': 'In broad daylight',
'Nacht, ontstoken openbare verlichting': 'Night, public lighting lit',
'Dageraad - schemering': 'Dawn',
'Nacht, openbare verlichting aanwezig, maar niet ontstoken': 'Night, no public lighting',
'Nacht, geen openbare verlichting': 'Night, no public lighting',
' ': None
})
casualties["ROAD_TYPE"] = casualties["ROAD_TYPE"].replace({
'Gemeenteweg': 'Municipal road',
'Gewestweg': 'Regional road',
'Autosnelweg': 'Motorway'
})
casualties["RGN"] = casualties["RGN"].replace({
'Vlaams Gewest': 'Flemish Region',
'Brussels Hoofdstedelijk Gewest': 'Brussels-Capital Region',
'Waals Gewest': 'Walloon Region'
})
casualties["CD_RGN_REFNIS"] = casualties["CD_RGN_REFNIS"].replace(
{'02000': 2000, '03000': 3000, '04000': 4000, ' ': None}
)
casualties = casualties.replace(" ", None)
casualties = casualties.rename(columns={
"MS_VICT": "n_victims",
"MS_VIC_OK": "n_victims_ok",
"MS_SLY_INJ": "n_slightly_injured",
"MS_SERLY_INJ": "n_seriously_injured",
"MS_DEAD_30_DAYS": "n_dead_30days",
"ROAD_USR_TYPE": "road_user_type",
"LIGHT_COND": "light_conditions",
"ROAD_TYPE": "road_type",
"RGN": "region",
"CD_RGN_REFNIS": "refnis_region",
"CD_MUNTY_REFNIS": "refnis_municipality",
"MUNTY": "municipality"
})
casualties_clean = casualties.drop(
columns=[
"DT_DAY", "DT_HOUR", "DAY_OF_WEEK", "SEX", "VICT_TYPE",
"BUILD_UP_AREA", "AGE_CLS", "CD_PROV_REFNIS", "PROV",
"CD_DSTR_REFNIS", "ADM_DSTR"]
)
return casualties_clean
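# Illustrative usage sketch (assumes a raw statbel export with the original
# Dutch column names, as downloaded in ``main`` below):
#
# raw = pd.read_csv("TF_ACCIDENTS_VICTIMS_2020.zip", compression="zip",
#                   sep="|", low_memory=False)
# clean = clean_casualties_data(raw)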
def main(start_year=2005, end_year=2020,
processed_file_name="casualties.csv"):
"""Download casualties data, run cleaning function, concat and save as CSV
Parameters
----------
start_year : int, default 2005
Start year to download data from.
    end_year : int, default 2020
End year to download data from.
processed_file_name : str
File name of the concatenated clean data set.
"""
    download_folder = Path(gettempdir()) / "casualties"
download_folder.mkdir(exist_ok=True)
logger.info("Start processing causalties Belgium open data from {start_year} till {end_year}.")
casualties_all = []
for year in range(start_year, end_year+1):
logger.info(f"Handling year {year}")
file_name = download_folder / f"TF_ACCIDENTS_VICTIMS_{year}_.zip"
if not file_name.exists():
logger.info(f"Download year {year}.")
urllib.request.urlretrieve(
f"https://statbel.fgov.be/sites/default/files/files/opendata/Verkeersslachtoffers/TF_ACCIDENTS_VICTIMS_{year}.zip",
file_name)
casualties = pd.read_csv(file_name, compression='zip',
sep="|", low_memory=False)
try:
casualties_clean = clean_casualties_data(casualties)
casualties_all.append(casualties_clean)
        except Exception:
            logger.exception(f"Data processing of year {year} failed")
logger.info("All casualties raw data set donwloads ready.")
logger.info("Combining individual years to single DataFrame.")
casualties_all = pd.concat(casualties_all).sort_values("datetime")
if 'n_victims_ok' in casualties_all.columns:
casualties = casualties_all[["datetime", "week_day",
"n_victims", "n_victims_ok", "n_slightly_injured",
"n_seriously_injured", "n_dead_30days",
"road_user_type", "victim_type", "gender", "age",
"road_type", "build_up_area", "light_conditions",
"refnis_municipality", "municipality",
"refnis_region", "region"
]]
else:
casualties = casualties_all[["datetime", "week_day",
"n_victims", "n_slightly_injured",
"n_seriously_injured", "n_dead_30days",
"road_user_type", "victim_type", "gender", "age",
"road_type", "build_up_area", "light_conditions",
"refnis_municipality", "municipality",
"refnis_region", "region"
]]
logger.info("Writing combined casualties data file to disk.")
casualties.to_csv(Path("./data") / processed_file_name, index=False)
logger.info("Combined casualties data file ready.")
if __name__ == "__main__":
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser(
description='Collect and prepare casualties open data Belgium.'
)
parser.add_argument('start_year', metavar='start-year', type=int, default=2015,
help='First year to download casualties data.')
    parser.add_argument('end_year', metavar='end-year', type=int, default=2020,
help='Last year to download casualties data.')
args = parser.parse_args()
print("Start casualties data preparation...")
main(args.start_year, args.end_year)
print("...done!")
``` |
{
"source": "jorisvandenbossche/ircelsos",
"score": 2
} |
#### File: ircelsos/tests/test_interactive.py
```python
from __future__ import print_function, division
import unittest
import datetime
import pytest
import ircelsos
from ircelsos.query_ircelsos import query_ircelsos
from ircelsos.parser import get_observations, parse_observation
try:
import pandas as pd
from pandas.util.testing import assert_frame_equal
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
@pytest.mark.skipif(not HAS_PANDAS, reason='Skipping interactive tests because '
'pandas is not installed.')
class TestInteractiveQuery(unittest.TestCase):
@pytest.mark.network
def test_query(self):
df = ircelsos.query(pollutant='o3', station='BETN060',
utc_start='2015-03-27T00:00:00',
utc_end='2015-03-27T3:00:00')
expected = pd.DataFrame({'43N060':
{'2015-03-27T01:00:00.000+01:00': 48.0,
'2015-03-27T02:00:00.000+01:00': 51.0,
'2015-03-27T03:00:00.000+01:00': 52.0,
'2015-03-27T04:00:00.000+01:00': 47.5}})
expected.index.name = 'time'
assert_frame_equal(df, expected)
``` |
{
"source": "jorisvandenbossche/kartothek",
"score": 2
} |
#### File: kartothek/cli/_cleanup.py
```python
import click
from kartothek.io.dask.bag_cube import cleanup_cube_bag
__all__ = ("cleanup",)
@click.pass_context
def cleanup(ctx):
"""
Remove non-required files from store.
"""
cube = ctx.obj["cube"]
store = ctx.obj["store"]
cleanup_cube_bag(cube=cube, store=store).compute()
```
#### File: kartothek/cli/__init__.py
```python
import logging
from multiprocessing.pool import ThreadPool
import click
import dask
import pandas as pd
from dask.diagnostics import ProgressBar
from kartothek.cli._cleanup import cleanup
from kartothek.cli._copy import copy
from kartothek.cli._delete import delete
from kartothek.cli._index import index
from kartothek.cli._info import info
from kartothek.cli._query import query
from kartothek.cli._stats import stats
from kartothek.cli._utils import get_cube, get_store
__all__ = ("cli",)
@click.group(context_settings=dict(help_option_names=["-h", "--help"]))
@click.option(
"--skv", default="skv.yml", help="Storefact config file.", show_default=True
)
@click.option("--store", default="dataset", help="Store to use.", show_default=True)
@click.option(
"--n_threads",
"-j",
default=0,
type=int,
help="Number of threads to use (use 0 for number of cores).",
show_default=True,
)
@click.option(
"--color",
type=click.Choice(["always", "auto", "off"]),
default="auto",
help="Whether to use colorized outputs or not. Use ``always``, ``auto`` (default), or ``off``.",
show_default=True,
)
@click.argument("cube")
@click.pass_context
def cli(ctx, store, cube, skv, n_threads, color):
"""
Execute certain operations on the given Kartothek cube.
If possible, the operations will be performed in parallel on the current machine.
"""
ctx.ensure_object(dict)
store_obj = get_store(skv, store)
cube, datasets = get_cube(store_obj, cube)
dask.config.set(scheduler="threads")
if n_threads > 0:
dask.config.set(pool=ThreadPool(n_threads))
if color == "always":
ctx.color = True
elif color == "off":
ctx.color = False
pbar = ProgressBar()
pbar.register()
ctx.call_on_close(pbar.unregister)
# silence extremely verbose azure logging
azure_logger = logging.getLogger("azure.storage.common.storageclient")
azure_logger.setLevel(logging.FATAL)
# pandas perf tuning
chained_assignment_old = pd.options.mode.chained_assignment
def reset_pd():
pd.options.mode.chained_assignment = chained_assignment_old
ctx.call_on_close(reset_pd)
pd.options.mode.chained_assignment = None
ctx.obj["skv"] = skv
ctx.obj["store"] = store_obj
ctx.obj["store_name"] = store
ctx.obj["cube"] = cube
ctx.obj["datasets"] = datasets
ctx.obj["pbar"] = pbar
cli.command()(cleanup)
cli.command()(copy)
cli.command()(delete)
cli.command()(index)
cli.command()(info)
cli.command()(query)
cli.command()(stats)
if __name__ == "__main__":
cli()
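# Example invocations (an illustrative sketch; the cube name "my_cube" and the
# store id "cubes" are placeholders, while the entry point and sub-command names
# are the ones registered above):
#
#   kartothek_cube --skv=skv.yml --store=cubes my_cube info
#   kartothek_cube -j 4 my_cube cleanup
#
# Global options such as --skv/--store/-j/--color come first, followed by the
# CUBE argument and one of the registered sub-commands
# (cleanup, copy, delete, index, info, query, stats).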
```
#### File: kartothek/cli/_utils.py
```python
import fnmatch
from functools import partial
import click
import storefact
import yaml
from kartothek.api.discover import discover_cube
__all__ = ("filter_items", "get_cube", "get_store", "to_bold", "to_header")
def get_cube(store, uuid_prefix):
"""
Get cube from store.
Parameters
----------
uuid_prefix: str
Dataset UUID prefix.
store: Union[Callable[[], simplekv.KeyValueStore], simplekv.KeyValueStore]
KV store.
Returns
-------
cube: Cube
Cube specification.
datasets: Dict[str, kartothek.core.dataset.DatasetMetadata]
All discovered datasets.
Raises
------
click.UsageError
In case cube was not found.
"""
try:
return discover_cube(uuid_prefix, store)
except ValueError as e:
raise click.UsageError("Could not load cube: {e}".format(e=e))
def get_store(skv, store):
"""
Get simplekv store from storefact config file.
Parameters
----------
skv: str
Name of the storefact yaml. Normally ``'skv.yml'``.
store: str
ID of the store.
Returns
-------
store_factory: Callable[[], simplekv.KeyValueStore]
Store object.
Raises
------
click.UsageError
In case something went wrong.
"""
try:
with open(skv, "rb") as fp:
store_cfg = yaml.safe_load(fp)
except IOError as e:
raise click.UsageError("Could not open load store YAML: {e}".format(e=e))
except yaml.YAMLError as e:
raise click.UsageError("Could not parse provided YAML file: {e}".format(e=e))
if store not in store_cfg:
raise click.UsageError(
"Could not find store {store} in {skv}".format(store=store, skv=skv)
)
return partial(storefact.get_store, **store_cfg[store])
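# A minimal sketch of the storefact config file consumed above (illustrative
# only -- the store id "dataset" and its parameters are assumed placeholders;
# any keyword accepted by storefact.get_store may appear per store id):
#
#   # skv.yml
#   dataset:
#     type: hfs
#     path: ./cube_data
#
# get_store("skv.yml", "dataset") then returns a zero-argument factory that
# builds the corresponding simplekv store on demand.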
def _match_pattern(what, items, pattern):
"""
Match given pattern against given items.
Parameters
----------
what: str
        Describes what is filtered.
    items: Iterable[str]
        Items to be filtered.
    pattern: str
        Comma separated items which should be included. Can contain glob patterns.
"""
result = set()
for part in pattern.split(","):
found = set(fnmatch.filter(items, part.strip()))
if not found:
raise click.UsageError(
"Could not find {what} {part}".format(what=what, part=part)
)
result |= found
return result
def filter_items(what, items, include_pattern=None, exclude_pattern=None):
"""
Filter given string items based on include and exclude patterns
Parameters
----------
what: str
        Describes what is filtered.
    items: Iterable[str]
        Items to be filtered.
include_pattern: str
Comma separated items which should be included. Can contain glob patterns.
exclude_pattern: str
Comma separated items which should be excluded. Can contain glob patterns.
Returns
-------
filtered_datasets: Set[str]
Filtered set of items after applying include and exclude patterns
"""
items = set(items)
if include_pattern is not None:
include_datasets = _match_pattern(what, items, include_pattern)
else:
include_datasets = items
if exclude_pattern is not None:
exclude_datasets = _match_pattern(what, items, exclude_pattern)
else:
exclude_datasets = set()
return include_datasets - exclude_datasets
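# Example of the include/exclude semantics implemented above (a sketch; the
# dataset names are invented):
#
#   >>> filter_items("dataset", {"seed", "enrich", "enrich_extra"},
#   ...              include_pattern="enrich*", exclude_pattern="enrich_extra")
#   {'enrich'}
#
# Patterns are comma separated and every part is matched via fnmatch, so the
# include pattern "seed,enrich*" would select all three items.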
def to_header(s):
"""
Create header.
Parameters
----------
s: str
Header content.
Returns
-------
s: str
        Header content including terminal escape sequences.
"""
return click.style(s, bold=True, underline=True, fg="yellow")
def to_bold(s):
"""
Create bold text.
Parameters
----------
s: str
Bold text content.
Returns
-------
s: str
        Given text including terminal escape sequences.
"""
return click.style(s, bold=True)
```
#### File: io_components/cube/remove.py
```python
from functools import reduce
from kartothek.core.cube.conditions import Conjunction
from kartothek.core.cube.constants import KTK_CUBE_METADATA_VERSION
from kartothek.io_components.metapartition import MetaPartition
from kartothek.utils.converters import converter_str_set_optional
from kartothek.utils.ktk_adapters import get_partition_dataframe
__all__ = ("prepare_metapartitions_for_removal_action",)
def prepare_metapartitions_for_removal_action(
cube, store, conditions, ktk_cube_dataset_ids, existing_datasets
):
"""
Prepare MetaPartition to express removal of given data range from cube.
The MetaPartition must still be written using ``mp.store_dataframes(...)`` and added to the Dataset using a
kartothek update method.
Parameters
----------
cube: kartothek.core.cube.cube.Cube
Cube spec.
store: Union[simplekv.KeyValueStore, Callable[[], simplekv.KeyValueStore]]
Store.
conditions: Union[None, Condition, Iterable[Condition], Conjunction]
Conditions that should be applied, optional. Defaults to "entire cube".
ktk_cube_dataset_ids: Optional[Union[Iterable[Union[Str, Bytes]], Union[Str, Bytes]]]
        Ktk_cube dataset IDs to apply the remove action to, optional. Defaults to "all".
existing_datasets: Dict[str, kartothek.core.dataset.DatasetMetadata]
Existing datasets.
Returns
-------
metapartitions: Dict[str, Tuple[kartothek.core.dataset.DatasetMetadata,
kartothek.io_components.metapartition.MetaPartition, List[Dict[str, Any]]]]
        MetaPartitions that should be written and updated to the kartothek datasets as well as the ``delete_scope`` for
kartothek.
"""
conditions = Conjunction(conditions)
conditions_split = conditions.split_by_column()
if set(conditions_split.keys()) - set(cube.partition_columns):
raise ValueError(
"Can only remove partitions with conditions concerning cubes physical partition columns."
)
ktk_cube_dataset_ids = converter_str_set_optional(ktk_cube_dataset_ids)
if ktk_cube_dataset_ids is not None:
unknown_dataset_ids = ktk_cube_dataset_ids - set(existing_datasets.keys())
if unknown_dataset_ids:
raise ValueError(
"Unknown ktk_cube_dataset_ids: {}".format(
", ".join(sorted(unknown_dataset_ids))
)
)
else:
ktk_cube_dataset_ids = set(existing_datasets.keys())
metapartitions = {}
for ktk_cube_dataset_id in ktk_cube_dataset_ids:
ds = existing_datasets[ktk_cube_dataset_id]
ds = ds.load_partition_indices()
mp = _prepare_mp_empty(ds)
if not ds.partition_keys:
# no partition keys --> delete all
delete_scope = [{}]
else:
df_partitions = get_partition_dataframe(dataset=ds, cube=cube)
df_partitions = df_partitions.drop_duplicates()
local_condition = reduce(
lambda a, b: a & b,
(
cond
for col, cond in conditions_split.items()
if col in df_partitions.columns
),
Conjunction([]),
)
df_partitions = local_condition.filter_df(df_partitions)
delete_scope = df_partitions.to_dict(orient="records")
metapartitions[ktk_cube_dataset_id] = (ds, mp, delete_scope)
return metapartitions
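# Shape of the returned mapping, sketched with invented ids and an invented
# partition column "p" (conditions selecting p == 1 on a cube partitioned by
# "p"):
#
#   {
#       "seed":   (<DatasetMetadata>, <empty MetaPartition>, [{"p": 1}]),
#       "enrich": (<DatasetMetadata>, <empty MetaPartition>, [{"p": 1}]),
#   }
#
# The third element is the kartothek ``delete_scope``; a dataset without
# partition keys instead gets the "delete everything" scope ``[{}]``.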
def _prepare_mp_empty(dataset):
"""
Generate empty partition w/o any data for given cube.
Parameters
----------
dataset: kartothek.core.dataset.DatasetMetadata
Dataset to build empty MetaPartition for.
Returns
-------
mp: kartothek.io_components.metapartition.MetaPartition
MetaPartition, must still be added to the Dataset using a kartothek update method.
"""
return MetaPartition(
label=None,
metadata_version=KTK_CUBE_METADATA_VERSION,
partition_keys=dataset.partition_keys,
)
```
#### File: kartothek/io_components/read.py
```python
import warnings
from typing import Callable, Iterator, List, Optional, Set, Union, cast, overload
import pandas as pd
from kartothek.core.factory import DatasetFactory
from kartothek.core.index import ExplicitSecondaryIndex
from kartothek.core.typing import StoreInput
from kartothek.io_components.metapartition import MetaPartition
from kartothek.io_components.utils import normalize_args
from kartothek.serialization import (
PredicatesType,
check_predicates,
columns_in_predicates,
)
@overload
def dispatch_metapartitions_from_factory(
dataset_factory: DatasetFactory,
label_filter: Optional[Callable] = None,
concat_partitions_on_primary_index: bool = False,
predicates: PredicatesType = None,
store: Optional[StoreInput] = None,
dispatch_by: None = None,
dispatch_metadata: bool = False,
) -> Iterator[MetaPartition]:
...
@overload
def dispatch_metapartitions_from_factory(
dataset_factory: DatasetFactory,
label_filter: Optional[Callable],
concat_partitions_on_primary_index: bool,
predicates: PredicatesType,
store: Optional[StoreInput],
dispatch_by: List[str],
dispatch_metadata: bool,
) -> Iterator[List[MetaPartition]]:
...
@normalize_args
def dispatch_metapartitions_from_factory(
dataset_factory: DatasetFactory,
label_filter: Optional[Callable] = None,
concat_partitions_on_primary_index: bool = False,
predicates: PredicatesType = None,
store: Optional[StoreInput] = None,
dispatch_by: Optional[List[str]] = None,
dispatch_metadata: bool = False,
) -> Union[Iterator[MetaPartition], Iterator[List[MetaPartition]]]:
if dispatch_metadata:
warnings.warn(
"The dispatch of metadata and index information as part of the MetaPartition instance is deprecated. "
"The future behaviour will be that this metadata is not dispatched. To set the future behaviour, "
"specifiy ``dispatch_metadata=False``",
DeprecationWarning,
)
if dispatch_by and concat_partitions_on_primary_index:
raise ValueError(
"Both `dispatch_by` and `concat_partitions_on_primary_index` are provided, "
"`concat_partitions_on_primary_index` is deprecated and will be removed in the next major release. "
"Please only provide the `dispatch_by` argument. "
)
if concat_partitions_on_primary_index:
warnings.warn(
"The keyword `concat_partitions_on_primary_index` is deprecated and will be removed in the next major release. Use `dispatch_by=dataset_factory.partition_keys` to achieve the same behavior instead.",
DeprecationWarning,
)
dispatch_by = dataset_factory.partition_keys
if dispatch_by and not set(dispatch_by).issubset(
set(dataset_factory.index_columns)
):
raise RuntimeError(
f"Dispatch columns must be indexed.\nRequested index: {dispatch_by} but available index columns: {sorted(dataset_factory.index_columns)}"
)
check_predicates(predicates)
# Determine which indices need to be loaded.
index_cols: Set[str] = set()
if dispatch_by:
index_cols |= set(dispatch_by)
if predicates:
predicate_cols = set(columns_in_predicates(predicates))
predicate_index_cols = predicate_cols & set(dataset_factory.index_columns)
index_cols |= predicate_index_cols
for col in index_cols:
dataset_factory.load_index(col)
base_df = dataset_factory.get_indices_as_dataframe(
list(index_cols), predicates=predicates
)
if label_filter:
base_df = base_df[base_df.index.map(label_filter)]
indices_to_dispatch = {
name: ix.unload()
for name, ix in dataset_factory.indices.items()
if isinstance(ix, ExplicitSecondaryIndex)
}
if dispatch_by:
base_df = cast(pd.DataFrame, base_df)
        # Group the resulting MetaPartitions by partition keys or a subset of those keys
merged_partitions = base_df.groupby(
by=list(dispatch_by), sort=True, as_index=False
)
for group_name, group in merged_partitions:
if not isinstance(group_name, tuple):
group_name = (group_name,)
mps = []
logical_conjunction = list(
zip(dispatch_by, ["=="] * len(dispatch_by), group_name)
)
for label in group.index.unique():
mps.append(
MetaPartition.from_partition(
partition=dataset_factory.partitions[label],
dataset_metadata=dataset_factory.metadata
if dispatch_metadata
else None,
indices=indices_to_dispatch if dispatch_metadata else None,
metadata_version=dataset_factory.metadata_version,
table_meta=dataset_factory.table_meta,
partition_keys=dataset_factory.partition_keys,
logical_conjunction=logical_conjunction,
)
)
yield mps
else:
for part_label in base_df.index.unique():
part = dataset_factory.partitions[part_label]
yield MetaPartition.from_partition(
partition=part,
dataset_metadata=dataset_factory.metadata
if dispatch_metadata
else None,
indices=indices_to_dispatch if dispatch_metadata else None,
metadata_version=dataset_factory.metadata_version,
table_meta=dataset_factory.table_meta,
partition_keys=dataset_factory.partition_keys,
)
def dispatch_metapartitions(
dataset_uuid: str,
store: StoreInput,
load_dataset_metadata: bool = True,
keep_indices: bool = True,
keep_table_meta: bool = True,
label_filter: Optional[Callable] = None,
concat_partitions_on_primary_index: bool = False,
predicates: PredicatesType = None,
dispatch_by: Optional[List[str]] = None,
dispatch_metadata: bool = False,
) -> Union[Iterator[MetaPartition], Iterator[List[MetaPartition]]]:
dataset_factory = DatasetFactory(
dataset_uuid=dataset_uuid,
store_factory=store,
load_schema=True,
load_all_indices=False,
load_dataset_metadata=load_dataset_metadata,
)
return dispatch_metapartitions_from_factory(
dataset_factory=dataset_factory,
store=None,
label_filter=label_filter,
predicates=predicates,
dispatch_by=dispatch_by,
concat_partitions_on_primary_index=concat_partitions_on_primary_index,
dispatch_metadata=dispatch_metadata,
)
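# Minimal usage sketch (the dataset uuid, the store factory and the column "p"
# are assumed placeholders; dispatch_by columns must be indexed):
#
#   mps = dispatch_metapartitions(
#       dataset_uuid="my_dataset",
#       store=store_factory,
#       dispatch_by=["p"],
#   )
#   for group in mps:
#       # with dispatch_by set, each item is a list of MetaPartitions sharing
#       # one value of "p"; without it, single MetaPartitions are yielded
#       ...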
```
#### File: kartothek/io/eager_cube.py
```python
from collections import defaultdict
import pandas as pd
from kartothek.api.consistency import get_cube_payload
from kartothek.api.discover import discover_datasets, discover_datasets_unchecked
from kartothek.core.cube.constants import (
KTK_CUBE_DF_SERIALIZER,
KTK_CUBE_METADATA_STORAGE_FORMAT,
KTK_CUBE_METADATA_VERSION,
)
from kartothek.io.eager import (
store_dataframes_as_dataset,
update_dataset_from_dataframes,
)
from kartothek.io_components.cube.append import check_existing_datasets
from kartothek.io_components.cube.cleanup import get_keys_to_clean
from kartothek.io_components.cube.common import assert_stores_different
from kartothek.io_components.cube.copy import get_copy_keys
from kartothek.io_components.cube.query import load_group, plan_query, quick_concat
from kartothek.io_components.cube.remove import (
prepare_metapartitions_for_removal_action,
)
from kartothek.io_components.cube.stats import (
collect_stats_block,
get_metapartitions_for_stats,
reduce_stats,
)
from kartothek.io_components.cube.write import (
MultiTableCommitAborted,
apply_postwrite_checks,
check_datasets_prebuild,
check_datasets_preextend,
check_provided_metadata_dict,
multiplex_user_input,
prepare_data_for_ktk,
prepare_ktk_metadata,
prepare_ktk_partition_on,
)
from kartothek.io_components.update import update_dataset_from_partitions
from kartothek.utils.ktk_adapters import get_dataset_keys, metadata_factory_from_dataset
from kartothek.utils.pandas import concat_dataframes
from kartothek.utils.store import copy_keys
__all__ = (
"append_to_cube",
"build_cube",
"cleanup_cube",
"collect_stats",
"copy_cube",
"delete_cube",
"extend_cube",
"query_cube",
"remove_partitions",
)
def build_cube(data, cube, store, metadata=None, overwrite=False, partition_on=None):
"""
Store given dataframes as Ktk_cube cube.
``data`` can be formatted in multiple ways:
- single DataFrame::
pd.DataFrame({
'x': [0, 1, 2, 3],
'p': [0, 0, 1, 1],
'v': [42, 45, 20, 10],
})
In that case, the seed dataset will be written.
- dictionary of DataFrames::
{
'seed': pd.DataFrame({
'x': [0, 1, 2, 3],
'p': [0, 0, 1, 1],
'v1': [42, 45, 20, 10],
}),
'enrich': pd.DataFrame({
'x': [0, 1, 2, 3],
'p': [0, 0, 1, 1],
'v2': [False, False, True, False],
}),
}
In that case, multiple datasets can be written at the same time. Note that the seed dataset MUST be included.
- list of anything above::
[
# seed data only
pd.DataFrame({
'x': [0, 1, 2, 3],
'p': [0, 0, 1, 1],
'v1': [42, 45, 20, 10],
}),
# seed data only, explicit way
{
'seed': pd.DataFrame({
'x': [4, 5, 6, 7],
'p': [0, 0, 1, 1],
'v1': [12, 32, 22, 9],
}),
},
# multiple datasets
{
'seed': pd.DataFrame({
'x': [8, 9, 10, 11],
'p': [0, 0, 1, 1],
'v1': [9, 2, 4, 11],
}),
'enrich': pd.DataFrame({
'x': [8, 9, 10, 11],
'p': [0, 0, 1, 1],
'v2': [True, True, False, False],
}),
},
# non-seed data only
{
'enrich': pd.DataFrame({
'x': [1, 2, 3, 4],
'p': [0, 0, 1, 1],
'v2': [False, True, False, False],
}),
},
]
In that case, multiple datasets may be written. Note that at least a single list element must contain seed data.
    Extra metadata may be preserved w/ every dataset, e.g.::
{
'seed': {
'source': 'db',
'host': 'db1.cluster20.company.net',
'last_event': '230c6edb-b69a-4d30-b56d-28f5dfe20948',
},
'enrich': {
'source': 'python',
'commit_hash': '8b5d717518439921e6d17c7495956bdad687bc54',
},
}
Note that the given data must be JSON-serializable.
If the cube already exists, the ``overwrite`` flag must be given. In that case, all datasets that are part of the
existing cube must be overwritten. Partial overwrites are not allowed.
Parameters
----------
data: Union[pd.DataFrame, Dict[str, pd.DataFrame], List[Union[pd.DataFrame, Dict[str, pd.DataFrame]]]]
Data that should be written to the cube. If only a single dataframe is given, it is assumed to be the seed
dataset.
cube: kartothek.core.cube.cube.Cube
Cube specification.
store: simplekv.KeyValueStore
Store to which the data should be written to.
metadata: Optional[Dict[str, Dict[str, Any]]]
Metadata for every dataset.
overwrite: bool
If possibly existing datasets should be overwritten.
partition_on: Optional[Dict[str, Iterable[str]]]
        Optional partition-on attributes for datasets (dictionary mapping :term:`Dataset ID` -> columns).
See :ref:`Dimensionality and Partitioning Details` for details.
Returns
-------
datasets: Dict[str, kartothek.core.dataset.DatasetMetadata]
DatasetMetadata for every dataset written.
"""
data = _normalize_user_input(data, cube)
ktk_cube_dataset_ids = set(data.keys())
partition_on = prepare_ktk_partition_on(cube, ktk_cube_dataset_ids, partition_on)
metadata = check_provided_metadata_dict(metadata, ktk_cube_dataset_ids)
existing_datasets = discover_datasets_unchecked(cube.uuid_prefix, store)
check_datasets_prebuild(data, cube, existing_datasets)
# do all data preparation before writing anything
data = _prepare_data_for_ktk_all(
data=data, cube=cube, existing_payload=set(), partition_on=partition_on
)
datasets = {}
for ktk_cube_dataset_id, part in data.items():
datasets[ktk_cube_dataset_id] = store_dataframes_as_dataset(
store=store,
dataset_uuid=cube.ktk_dataset_uuid(ktk_cube_dataset_id),
dfs=part,
metadata=prepare_ktk_metadata(cube, ktk_cube_dataset_id, metadata),
partition_on=list(partition_on[ktk_cube_dataset_id]),
metadata_storage_format=KTK_CUBE_METADATA_STORAGE_FORMAT,
metadata_version=KTK_CUBE_METADATA_VERSION,
df_serializer=KTK_CUBE_DF_SERIALIZER,
overwrite=overwrite,
)
return apply_postwrite_checks(
datasets=datasets, cube=cube, store=store, existing_datasets=existing_datasets
)
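# Minimal end-to-end sketch (not taken from the original docs; ``store`` below
# stands for any simplekv store supporting nested keys, e.g. one obtained via
# storefact as in the CLI helpers):
#
#   import pandas as pd
#   from kartothek.core.cube.cube import Cube
#
#   cube = Cube(dimension_columns=["x"], partition_columns=["p"],
#               uuid_prefix="my_cube")
#   df = pd.DataFrame({"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v": [10, 11, 12, 13]})
#   datasets = build_cube(data=df, cube=cube, store=store)
#
# A single DataFrame is written as the seed dataset; a dict such as
# {cube.seed_dataset: df, "enrich": df2} writes several datasets at once.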
def extend_cube(data, cube, store, metadata=None, overwrite=False, partition_on=None):
"""
Store given dataframes into an existing Kartothek cube.
For details on ``data`` and ``metadata``, see :meth:`build_cube`.
Parameters
----------
data: Union[pd.DataFrame, Dict[str, pd.DataFrame], List[Union[pd.DataFrame, Dict[str, pd.DataFrame]]]]
Data that should be written to the cube. If only a single dataframe is given, it is assumed to be the seed
dataset.
cube: kartothek.core.cube.cube.Cube
Cube specification.
store: simplekv.KeyValueStore
Store to which the data should be written to.
metadata: Optional[Dict[str, Dict[str, Any]]]
Metadata for every dataset.
overwrite: bool
If possibly existing datasets should be overwritten.
partition_on: Optional[Dict[str, Iterable[str]]]
        Optional partition-on attributes for datasets (dictionary mapping :term:`Dataset ID` -> columns).
See :ref:`Dimensionality and Partitioning Details` for details.
Returns
-------
datasets: Dict[str, kartothek.core.dataset.DatasetMetadata]
DatasetMetadata for every dataset written.
"""
data = _normalize_user_input(data, cube)
ktk_cube_dataset_ids = set(data.keys())
partition_on = prepare_ktk_partition_on(cube, ktk_cube_dataset_ids, partition_on)
metadata = check_provided_metadata_dict(metadata, ktk_cube_dataset_ids)
check_datasets_preextend(data, cube)
existing_datasets = discover_datasets(cube, store)
if overwrite:
existing_datasets_cut = {
ktk_cube_dataset_id: ds
for ktk_cube_dataset_id, ds in existing_datasets.items()
if ktk_cube_dataset_id not in data
}
else:
existing_datasets_cut = existing_datasets
existing_payload = get_cube_payload(existing_datasets_cut, cube)
# do all data preparation before writing anything
data = _prepare_data_for_ktk_all(
data=data,
cube=cube,
existing_payload=existing_payload,
partition_on=partition_on,
)
datasets = {}
for ktk_cube_dataset_id, part in data.items():
datasets[ktk_cube_dataset_id] = store_dataframes_as_dataset(
store=store,
dataset_uuid=cube.ktk_dataset_uuid(ktk_cube_dataset_id),
dfs=part,
metadata=prepare_ktk_metadata(cube, ktk_cube_dataset_id, metadata),
partition_on=list(partition_on[ktk_cube_dataset_id]),
metadata_storage_format=KTK_CUBE_METADATA_STORAGE_FORMAT,
metadata_version=KTK_CUBE_METADATA_VERSION,
df_serializer=KTK_CUBE_DF_SERIALIZER,
overwrite=overwrite,
)
return apply_postwrite_checks(
datasets=datasets, cube=cube, store=store, existing_datasets=existing_datasets
)
def query_cube(
cube,
store,
conditions=None,
datasets=None,
dimension_columns=None,
partition_by=None,
payload_columns=None,
):
"""
Query cube.
.. note::
        In case of ``partition_by=None`` (the default), only a single partition is generated. If that partition is
        empty (e.g. due to the provided conditions), an empty list is returned; otherwise a single-element list is
        returned.
Parameters
----------
cube: Cube
Cube specification.
store: simplekv.KeyValueStore
KV store that preserves the cube.
conditions: Union[None, Condition, Iterable[Condition], Conjunction]
Conditions that should be applied, optional.
datasets: Union[None, Iterable[str], Dict[str, kartothek.core.dataset.DatasetMetadata]]
Datasets to query, must all be part of the cube. May be either the result of :meth:`discover_datasets`, a list
of Ktk_cube dataset ID or ``None`` (in which case auto-discovery will be used).
dimension_columns: Union[None, str, Iterable[str]]
Dimension columns of the query, may result in projection. If not provided, dimension columns from cube
specification will be used.
partition_by: Union[None, str, Iterable[str]]
By which column logical partitions should be formed. If not provided, a single partition will be generated.
payload_columns: Union[None, str, Iterable[str]]
Which columns apart from ``dimension_columns`` and ``partition_by`` should be returned.
Returns
-------
dfs: List[pandas.DataFrame]
        List of non-empty DataFrames, ordered by ``partition_by``. Columns of each DataFrame are alphabetically ordered. Data
types are provided on best effort (they are restored based on the preserved data, but may be different due to
Pandas NULL-handling, e.g. integer columns may be floats).
"""
intention, _empty, groups = plan_query(
cube=cube,
store=store,
conditions=conditions,
datasets=datasets,
dimension_columns=dimension_columns,
partition_by=partition_by,
payload_columns=payload_columns,
)
dfs = [load_group(group=g, store=store, cube=cube) for g in groups]
dfs = [df for df in dfs if not df.empty]
if not intention.partition_by and (len(dfs) > 0):
dfs = [
quick_concat(
dfs=dfs,
dimension_columns=intention.dimension_columns,
partition_columns=cube.partition_columns,
)
]
return dfs
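# Minimal usage sketch (cube/store as in build_cube above; the partition column
# "p" is an assumed placeholder):
#
#   dfs = query_cube(cube=cube, store=store, partition_by=None)
#   if dfs:
#       df = dfs[0]  # a single partition when partition_by is None
#
# With ``partition_by="p"`` one DataFrame per value of "p" is returned instead,
# and ``conditions`` restricts which rows are loaded.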
def delete_cube(cube, store, datasets=None):
"""
Delete cube from store.
.. important::
This routine only deletes tracked files. Garbage and leftovers from old cubes and failed operations are NOT
removed.
Parameters
----------
cube: Cube
Cube specification.
store: Union[simplekv.KeyValueStore, Callable[[], simplekv.KeyValueStore]]
KV store.
datasets: Union[None, Iterable[str], Dict[str, kartothek.core.dataset.DatasetMetadata]]
Datasets to delete, must all be part of the cube. May be either the result of :meth:`discover_datasets`, a list
of Ktk_cube dataset ID or ``None`` (in which case entire cube will be deleted).
"""
if callable(store):
store = store()
if not isinstance(datasets, dict):
datasets = discover_datasets_unchecked(
uuid_prefix=cube.uuid_prefix,
store=store,
filter_ktk_cube_dataset_ids=datasets,
)
keys = set()
for ktk_cube_dataset_id in sorted(datasets.keys()):
ds = datasets[ktk_cube_dataset_id]
keys |= get_dataset_keys(ds)
for k in sorted(keys):
store.delete(k)
def copy_cube(cube, src_store, tgt_store, overwrite=False, datasets=None):
"""
Copy cube from one store to another.
Parameters
----------
cube: Cube
Cube specification.
src_store: Union[simplekv.KeyValueStore, Callable[[], simplekv.KeyValueStore]]
Source KV store.
tgt_store: Union[simplekv.KeyValueStore, Callable[[], simplekv.KeyValueStore]]
Target KV store.
overwrite: bool
If possibly existing datasets in the target store should be overwritten.
datasets: Union[None, Iterable[str], Dict[str, kartothek.core.dataset.DatasetMetadata]]
Datasets to copy, must all be part of the cube. May be either the result of :meth:`discover_datasets`, a list
of Ktk_cube dataset ID or ``None`` (in which case entire cube will be copied).
"""
if callable(src_store):
src_store = src_store()
if callable(tgt_store):
tgt_store = tgt_store()
assert_stores_different(
src_store, tgt_store, cube.ktk_dataset_uuid(cube.seed_dataset)
)
keys = get_copy_keys(
cube=cube,
src_store=src_store,
tgt_store=tgt_store,
overwrite=overwrite,
datasets=datasets,
)
copy_keys(keys, src_store, tgt_store)
def collect_stats(cube, store, datasets=None):
"""
Collect statistics for given cube.
Parameters
----------
cube: Cube
Cube specification.
store: simplekv.KeyValueStore
KV store that preserves the cube.
datasets: Union[None, Iterable[str], Dict[str, kartothek.core.dataset.DatasetMetadata]]
Datasets to query, must all be part of the cube. May be either the result of :meth:`discover_datasets`, a list
of Ktk_cube dataset ID or ``None`` (in which case auto-discovery will be used).
Returns
-------
stats: Dict[str, Dict[str, int]]
Statistics per ktk_cube dataset ID.
"""
if callable(store):
store = store()
if not isinstance(datasets, dict):
datasets = discover_datasets_unchecked(
uuid_prefix=cube.uuid_prefix,
store=store,
filter_ktk_cube_dataset_ids=datasets,
)
all_metapartitions = get_metapartitions_for_stats(datasets)
return reduce_stats([collect_stats_block(all_metapartitions, store)])
def cleanup_cube(cube, store):
"""
Remove unused keys from cube datasets.
.. important::
All untracked keys which start with the cube's `uuid_prefix` followed by the `KTK_CUBE_UUID_SEPERATOR`
(e.g. `my_cube_uuid++seed...`) will be deleted by this routine. These keys may be leftovers from past
overwrites or index updates.
Parameters
----------
cube: Cube
Cube specification.
store: Union[simplekv.KeyValueStore, Callable[[], simplekv.KeyValueStore]]
KV store.
"""
if callable(store):
store = store()
datasets = discover_datasets_unchecked(uuid_prefix=cube.uuid_prefix, store=store)
keys = get_keys_to_clean(cube.uuid_prefix, datasets, store)
for k in sorted(keys):
store.delete(k)
def remove_partitions(
cube, store, conditions=None, ktk_cube_dataset_ids=None, metadata=None
):
"""
Remove given partition range from cube using a transaction.
Remove the partitions selected by ``conditions``. If no ``conditions`` are given,
remove all partitions. For each considered dataset, only the subset of
``conditions`` that refers to the partition columns of the respective dataset
is used. In particular, a dataset that is not partitioned at all is always considered
selected by ``conditions``.
Parameters
----------
cube: kartothek.core.cube.cube.Cube
Cube spec.
store: Union[simplekv.KeyValueStore, Callable[[], simplekv.KeyValueStore]]
Store.
conditions: Union[None, Condition, Iterable[Condition], Conjunction]
Select the partitions to be removed. Must be a condition only on partition columns.
ktk_cube_dataset_ids: Optional[Union[Iterable[Union[Str, Bytes]], Union[Str, Bytes]]]
        Ktk_cube dataset IDs to apply the remove action to, optional. Defaults to "all".
metadata: Optional[Dict[str, Dict[str, Any]]]
        Metadata for every dataset, optional. Only given keys are updated/replaced. Deletion of
metadata keys is not possible.
Returns
-------
datasets: Dict[str, kartothek.core.dataset.DatasetMetadata]
Datasets, updated.
"""
if callable(store):
store_instance = store()
store_factory = store
else:
store_instance = store
def store_factory():
return store
existing_datasets = discover_datasets(cube, store)
for (
ktk_cube_dataset_id,
(ds, mp, delete_scope),
) in prepare_metapartitions_for_removal_action(
cube=cube,
store=store_instance,
conditions=conditions,
ktk_cube_dataset_ids=ktk_cube_dataset_ids,
existing_datasets=existing_datasets,
).items():
mp = mp.store_dataframes(
store=store_instance,
dataset_uuid=ds.uuid,
df_serializer=KTK_CUBE_DF_SERIALIZER,
)
ds_factory = metadata_factory_from_dataset(
ds, with_schema=True, store=store_factory
)
existing_datasets[ktk_cube_dataset_id] = update_dataset_from_partitions(
mp,
store_factory=store_factory,
dataset_uuid=ds.uuid,
ds_factory=ds_factory,
metadata=prepare_ktk_metadata(cube, ktk_cube_dataset_id, metadata),
metadata_merger=None,
delete_scope=delete_scope,
)
return existing_datasets
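# Minimal usage sketch (ids are placeholders): removing every partition of the
# non-seed dataset "enrich" while leaving the remaining datasets untouched:
#
#   remove_partitions(
#       cube=cube,
#       store=store_factory,
#       conditions=None,                  # no condition -> all partitions
#       ktk_cube_dataset_ids=["enrich"],
#   )
#
# ``conditions`` may alternatively be a Condition/Conjunction on the partition
# columns to restrict the removal to a single partition range.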
def append_to_cube(data, cube, store, metadata=None):
"""
Append data to existing cube.
For details on ``data`` and ``metadata``, see :meth:`build_cube`.
.. important::
Physical partitions must be updated as a whole. If only single rows within a physical partition are updated, the
old data is treated as "removed".
.. hint::
To have better control over the overwrite "mask" (i.e. which partitions are overwritten), you should use
:meth:`remove_partitions` beforehand.
Parameters
----------
data: Union[pd.DataFrame, Dict[str, pd.DataFrame], List[Union[pd.DataFrame, Dict[str, pd.DataFrame]]]]
Data that should be written to the cube. If only a single dataframe is given, it is assumed to be the seed
dataset.
cube: kartothek.core.cube.cube.Cube
Cube specification.
store: simplekv.KeyValueStore
Store to which the data should be written to.
metadata: Optional[Dict[str, Dict[str, Any]]]
Metadata for every dataset, optional. For every dataset, only given keys are updated/replaced. Deletion of
metadata keys is not possible.
Returns
-------
datasets: Dict[str, kartothek.core.dataset.DatasetMetadata]
DatasetMetadata for every dataset written.
"""
data = _normalize_user_input(data, cube)
existing_datasets = discover_datasets(cube, store)
partition_on = {k: v.partition_keys for k, v in existing_datasets.items()}
check_existing_datasets(
existing_datasets=existing_datasets, ktk_cube_dataset_ids=set(data.keys())
)
# do all data preparation before writing anything
# existing_payload is set to empty because we're not checking against any existing payload. ktk will account for the
# compat check within 1 dataset
data = _prepare_data_for_ktk_all(
data=data, cube=cube, existing_payload=set(), partition_on=partition_on
)
# update_dataset_from_dataframes requires a store factory, so create one
# if not provided
if not callable(store):
def store_factory():
return store
else:
store_factory = store
updated_datasets = {}
for ktk_cube_dataset_id, part in data.items():
updated_datasets[ktk_cube_dataset_id] = update_dataset_from_dataframes(
store=store_factory,
dataset_uuid=cube.ktk_dataset_uuid(ktk_cube_dataset_id),
df_list=part,
partition_on=list(partition_on[ktk_cube_dataset_id]),
df_serializer=KTK_CUBE_DF_SERIALIZER,
metadata=prepare_ktk_metadata(cube, ktk_cube_dataset_id, metadata),
)
return apply_postwrite_checks(
datasets=updated_datasets,
cube=cube,
store=store,
existing_datasets=existing_datasets,
)
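# Minimal usage sketch (data layout as for build_cube; "source" and ``cond`` are
# assumed placeholders). Because physical partitions are replaced as a whole, a
# common pattern is to clear the affected range first and then append:
#
#   remove_partitions(cube=cube, store=store_factory, conditions=cond)
#   append_to_cube(data={"source": df_new}, cube=cube, store=store_factory())
#
# where ``cond`` selects the partitions being rewritten and "source" is the id
# of the dataset that df_new belongs to.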
def _normalize_user_input(data, cube):
if isinstance(data, (dict, pd.DataFrame)):
data = [data]
else:
data = list(data)
data_lists = defaultdict(list)
for part in data:
part = multiplex_user_input(part, cube)
for k, v in part.items():
data_lists[k].append(v)
return {
k: concat_dataframes([df for df in v if df is not None])
for k, v in data_lists.items()
}
def _prepare_data_for_ktk_all(data, cube, existing_payload, partition_on):
data = {
ktk_cube_dataset_id: prepare_data_for_ktk(
df=df,
ktk_cube_dataset_id=ktk_cube_dataset_id,
cube=cube,
existing_payload=existing_payload,
partition_on=partition_on[ktk_cube_dataset_id],
)
for ktk_cube_dataset_id, df in data.items()
}
empty_datasets = {
ktk_cube_dataset_id
for ktk_cube_dataset_id, part in data.items()
if part.is_sentinel
}
if empty_datasets:
cause = ValueError(
"Cannot write empty datasets: {empty_datasets}".format(
empty_datasets=", ".join(sorted(empty_datasets))
)
)
exc = MultiTableCommitAborted("Aborting commit.")
exc.__cause__ = cause
raise exc
return data
```
#### File: io/testing/append_cube.py
```python
import pandas as pd
import pytest
from kartothek.core.cube.constants import (
KTK_CUBE_METADATA_DIMENSION_COLUMNS,
KTK_CUBE_METADATA_KEY_IS_SEED,
KTK_CUBE_METADATA_PARTITION_COLUMNS,
)
from kartothek.core.cube.cube import Cube
from kartothek.core.dataset import DatasetMetadata
from kartothek.io.eager_cube import build_cube
__all__ = (
"existing_cube",
"test_append_partitions",
"test_append_partitions_no_ts",
"test_fails_incompatible_dtypes",
"test_fails_missing_column",
"test_fails_unknown_dataset",
"test_indices",
"test_metadata",
)
@pytest.fixture
def existing_cube(function_store):
df_source = pd.DataFrame(
{
"x": [0, 1, 2, 3],
"p": [0, 0, 1, 1],
"v1": [10, 11, 12, 13],
"i1": [10, 11, 12, 13],
}
)
df_enrich = pd.DataFrame(
{
"x": [0, 1, 2, 3],
"p": [0, 0, 1, 1],
"v2": [10, 11, 12, 13],
"i2": [10, 11, 12, 13],
}
)
cube = Cube(
dimension_columns=["x"],
partition_columns=["p"],
uuid_prefix="cube",
seed_dataset="source",
index_columns=["i1", "i2", "i3"],
)
build_cube(
data={"source": df_source, "enrich": df_enrich},
cube=cube,
store=function_store,
metadata={"source": {"a": 10, "b": 11}, "enrich": {"a": 20, "b": 21}},
)
return cube
def test_append_partitions(driver, function_store, existing_cube):
partitions_source_1 = set(
DatasetMetadata.load_from_store(
existing_cube.ktk_dataset_uuid("source"), function_store()
).partitions.keys()
)
partitions_enrich_1 = set(
DatasetMetadata.load_from_store(
existing_cube.ktk_dataset_uuid("enrich"), function_store()
).partitions.keys()
)
df_source = pd.DataFrame(
{
"x": [0, 1, 2, 3],
"p": [0, 0, 1, 1],
"v1": [20, 21, 22, 23],
"i1": [20, 21, 22, 23],
}
)
result = driver(
data={"source": df_source}, cube=existing_cube, store=function_store
)
assert set(result.keys()) == {"source"}
ds_source = result["source"]
ds_enrich = DatasetMetadata.load_from_store(
existing_cube.ktk_dataset_uuid("enrich"), function_store()
)
partitions_source_2 = set(ds_source.partitions.keys())
partitions_enrich_2 = set(ds_enrich.partitions.keys())
assert len(partitions_source_2) > len(partitions_source_1)
assert partitions_source_1.issubset(partitions_source_2)
assert partitions_enrich_2 == partitions_enrich_1
def test_append_partitions_no_ts(driver, function_store):
df_source1 = pd.DataFrame(
{
"x": [0, 1, 2, 3],
"p": [0, 0, 1, 1],
"v1": [10, 11, 12, 13],
"i1": [10, 11, 12, 13],
}
)
df_enrich1 = pd.DataFrame(
{"x": [0, 1, 2, 3], "v2": [10, 11, 12, 13], "i2": [10, 11, 12, 13]}
)
cube = Cube(
dimension_columns=["x"],
partition_columns=["p"],
uuid_prefix="cube",
seed_dataset="source",
index_columns=["i1", "i2", "i3"],
)
build_cube(
data={"source": df_source1, "enrich": df_enrich1},
cube=cube,
store=function_store,
metadata={"source": {"a": 10, "b": 11}, "enrich": {"a": 20, "b": 21}},
partition_on={"enrich": []},
)
partitions_source_1 = set(
DatasetMetadata.load_from_store(
cube.ktk_dataset_uuid("source"), function_store()
).partitions.keys()
)
partitions_enrich_1 = set(
DatasetMetadata.load_from_store(
cube.ktk_dataset_uuid("enrich"), function_store()
).partitions.keys()
)
df_source2 = pd.DataFrame(
{
"x": [0, 1, 2, 3],
"p": [0, 0, 1, 1],
"v1": [20, 21, 22, 23],
"i1": [20, 21, 22, 23],
}
)
df_enrich2 = pd.DataFrame(
{"x": [0, 1, 2, 3], "v2": [20, 21, 22, 23], "i2": [20, 21, 22, 23]}
)
result = driver(
data={"source": df_source2, "enrich": df_enrich2},
cube=cube,
store=function_store,
)
assert set(result.keys()) == {"source", "enrich"}
ds_source = result["source"]
ds_enrich = result["enrich"]
partitions_source_2 = set(ds_source.partitions.keys())
partitions_enrich_2 = set(ds_enrich.partitions.keys())
assert len(partitions_source_2) > len(partitions_source_1)
assert partitions_source_1.issubset(partitions_source_2)
assert len(partitions_enrich_2) > len(partitions_enrich_1)
assert partitions_enrich_1.issubset(partitions_enrich_2)
def test_indices(driver, function_store, existing_cube):
idx1_1 = set(
DatasetMetadata.load_from_store(
existing_cube.ktk_dataset_uuid("source"), function_store()
)
.load_all_indices(function_store())
.indices["i1"]
.index_dct.keys()
)
idx2_1 = set(
DatasetMetadata.load_from_store(
existing_cube.ktk_dataset_uuid("enrich"), function_store()
)
.load_all_indices(function_store())
.indices["i2"]
.index_dct.keys()
)
df_source = pd.DataFrame(
{
"x": [0, 1, 2, 3],
"p": [0, 0, 1, 1],
"v1": [20, 21, 22, 23],
"i1": [20, 21, 22, 23],
}
)
result = driver(
data={"source": df_source}, cube=existing_cube, store=function_store
)
assert set(result.keys()) == {"source"}
ds_source = result["source"]
ds_enrich = DatasetMetadata.load_from_store(
existing_cube.ktk_dataset_uuid("enrich"), function_store()
)
idx1_2 = set(
ds_source.load_all_indices(function_store()).indices["i1"].index_dct.keys()
)
idx2_2 = set(
ds_enrich.load_all_indices(function_store()).indices["i2"].index_dct.keys()
)
assert idx1_1.issubset(idx1_2)
assert len(idx1_1) < len(idx1_2)
assert idx2_1 == idx2_2
def test_fails_incompatible_dtypes(driver, function_store, existing_cube):
"""
Should also cross check w/ seed dataset.
"""
df_source = pd.DataFrame(
{
"x": [0, 1, 2, 3],
"p": [0, 0, 1, 1],
"v1": [10.0, 11.0, 12.0, 13.0],
"i1": [10, 11, 12, 13],
}
)
with pytest.raises(ValueError, match="Schema violation"):
driver(data={"source": df_source}, cube=existing_cube, store=function_store)
def test_fails_missing_column(driver, function_store, existing_cube):
df_source = pd.DataFrame(
{"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "i1": [10, 11, 12, 13]}
)
with pytest.raises(ValueError, match="Schema violation"):
driver(data={"source": df_source}, cube=existing_cube, store=function_store)
def test_fails_unknown_dataset(driver, function_store, existing_cube):
df_source = pd.DataFrame(
{
"x": [0, 1, 2, 3],
"p": [0, 0, 1, 1],
"v1": [10, 11, 12, 13],
"i1": [10, 11, 12, 13],
}
)
df_zoo = pd.DataFrame(
{
"x": [0, 1, 2, 3],
"p": [0, 0, 1, 1],
"v3": [10, 11, 12, 13],
"i3": [10, 11, 12, 13],
}
)
keys_pre = set(function_store().keys())
with pytest.raises(ValueError, match="Unknown / non-existing datasets: zoo"):
driver(
data={"source": df_source, "zoo": df_zoo},
cube=existing_cube,
store=function_store,
)
keys_post = set(function_store().keys())
assert keys_pre == keys_post
def test_metadata(driver, function_store, existing_cube):
"""
Test auto- and user-generated metadata.
"""
df_source = pd.DataFrame(
{
"x": [0, 1, 2, 3],
"p": [0, 0, 1, 1],
"v1": [20, 21, 22, 23],
"i1": [20, 21, 22, 23],
}
)
result = driver(
data={"source": df_source},
cube=existing_cube,
store=function_store,
metadata={"source": {"a": 12, "c": 13}},
)
assert set(result.keys()) == {"source"}
ds_source = result["source"]
assert set(ds_source.metadata.keys()) == {
"a",
"b",
"c",
"creation_time",
KTK_CUBE_METADATA_DIMENSION_COLUMNS,
KTK_CUBE_METADATA_KEY_IS_SEED,
KTK_CUBE_METADATA_PARTITION_COLUMNS,
}
assert ds_source.metadata["a"] == 12
assert ds_source.metadata["b"] == 11
assert ds_source.metadata["c"] == 13
assert ds_source.metadata[KTK_CUBE_METADATA_DIMENSION_COLUMNS] == list(
existing_cube.dimension_columns
)
assert ds_source.metadata[KTK_CUBE_METADATA_KEY_IS_SEED] is True
assert ds_source.metadata[KTK_CUBE_METADATA_PARTITION_COLUMNS] == list(
existing_cube.partition_columns
)
ds_enrich = DatasetMetadata.load_from_store(
existing_cube.ktk_dataset_uuid("enrich"), function_store()
)
assert set(ds_enrich.metadata.keys()) == {
"a",
"b",
"creation_time",
KTK_CUBE_METADATA_DIMENSION_COLUMNS,
KTK_CUBE_METADATA_KEY_IS_SEED,
KTK_CUBE_METADATA_PARTITION_COLUMNS,
}
assert ds_enrich.metadata["a"] == 20
assert ds_enrich.metadata["b"] == 21
assert ds_enrich.metadata[KTK_CUBE_METADATA_DIMENSION_COLUMNS] == list(
existing_cube.dimension_columns
)
assert ds_enrich.metadata[KTK_CUBE_METADATA_KEY_IS_SEED] is False
assert ds_enrich.metadata[KTK_CUBE_METADATA_PARTITION_COLUMNS] == list(
existing_cube.partition_columns
)
```
#### File: io/testing/delete_cube.py
```python
import pandas as pd
import pytest
from kartothek.api.discover import discover_datasets_unchecked
from kartothek.core.cube.cube import Cube
from kartothek.io.eager_cube import build_cube
from kartothek.utils.ktk_adapters import get_dataset_keys
__all__ = (
"test_delete_twice",
"test_fail_blocksize_negative",
"test_fail_blocksize_wrong_type",
"test_fail_blocksize_zero",
"test_fail_no_store_factory",
"test_keep_garbage_due_to_no_listing",
"test_keep_other",
"test_partial_delete",
"test_simple",
)
def test_simple(driver, function_store):
df_seed = pd.DataFrame(
{"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v1": [10, 11, 12, 13]}
)
df_enrich = pd.DataFrame(
{"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v2": [10, 11, 12, 13]}
)
cube = Cube(dimension_columns=["x"], partition_columns=["p"], uuid_prefix="cube")
build_cube(
data={cube.seed_dataset: df_seed, "enrich": df_enrich},
cube=cube,
store=function_store,
)
driver(cube=cube, store=function_store)
assert set(function_store().keys()) == set()
def test_keep_other(driver, function_store):
df = pd.DataFrame({"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v": [10, 11, 12, 13]})
cube1 = Cube(dimension_columns=["x"], partition_columns=["p"], uuid_prefix="cube1")
cube2 = cube1.copy(uuid_prefix="cube2")
build_cube(data=df, cube=cube1, store=function_store)
keys = set(function_store().keys())
build_cube(data=df, cube=cube2, store=function_store)
driver(cube=cube2, store=function_store)
assert set(function_store().keys()) == keys
def test_keep_garbage_due_to_no_listing(driver, function_store):
df1 = pd.DataFrame({"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v": [10, 11, 12, 13]})
df2 = pd.DataFrame({"x": [0, 1, 2, 3], "p": [2, 2, 3, 3], "v": [10, 11, 12, 13]})
cube = Cube(dimension_columns=["x"], partition_columns=["p"], uuid_prefix="cube")
# test build DF1 to see which keys are created
build_cube(data=df1, cube=cube, store=function_store)
keys1 = set(function_store().keys())
# wipe
for k in list(function_store().keys()):
function_store().delete(k)
# test build DF2 to see which keys are created
build_cube(data=df2, cube=cube, store=function_store)
keys2 = set(function_store().keys())
# wipe again
for k in list(function_store().keys()):
function_store().delete(k)
    # some keys are obviously present every time (like central metadata and
# common metadata)
keys_common = keys1 & keys2
# build DF1 and overwrite w/ DF2
build_cube(data=df1, cube=cube, store=function_store)
keys3 = set(function_store().keys())
build_cube(data=df2, cube=cube, store=function_store, overwrite=True)
    # now some keys of DF1 must be leftovers/garbage that cannot be deleted w/o listing the entire store (which would
# be too expensive)
gargabe = keys3 - keys_common
assert len(gargabe) > 0
driver(cube=cube, store=function_store)
assert set(function_store().keys()) == gargabe
def test_delete_twice(driver, function_store):
df = pd.DataFrame({"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v": [10, 11, 12, 13]})
cube = Cube(dimension_columns=["x"], partition_columns=["p"], uuid_prefix="cube")
build_cube(data=df, cube=cube, store=function_store)
driver(cube=cube, store=function_store)
driver(cube=cube, store=function_store)
assert set(function_store().keys()) == set()
def test_partial_delete(driver, function_store):
df_seed = pd.DataFrame(
{"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v": [10, 11, 12, 13]}
)
df_1 = pd.DataFrame({"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "a": [20, 21, 22, 23]})
df_2 = pd.DataFrame({"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "b": [20, 21, 22, 23]})
cube = Cube(dimension_columns=["x"], partition_columns=["p"], uuid_prefix="cube")
datasets = build_cube(
data={cube.seed_dataset: df_seed, "enrich-1": df_1, "enrich-2": df_2},
cube=cube,
store=function_store,
)
enrich_1_keys = get_dataset_keys(
discover_datasets_unchecked(
uuid_prefix=cube.uuid_prefix,
store=function_store,
filter_ktk_cube_dataset_ids=["enrich-1"],
)["enrich-1"]
)
enrich_2_keys = get_dataset_keys(
discover_datasets_unchecked(
uuid_prefix=cube.uuid_prefix,
store=function_store,
filter_ktk_cube_dataset_ids=["enrich-2"],
)["enrich-2"]
)
all_keys = set(function_store().keys())
driver(cube=cube, store=function_store, datasets=["enrich-1"])
assert set(function_store().keys()) == all_keys - enrich_1_keys
driver(cube=cube, store=function_store, datasets={"enrich-2": datasets["enrich-2"]})
assert set(function_store().keys()) == all_keys - enrich_1_keys - enrich_2_keys
def test_fail_no_store_factory(driver, function_store, skip_eager):
df_seed = pd.DataFrame(
{"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v1": [10, 11, 12, 13]}
)
df_enrich = pd.DataFrame(
{"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v2": [10, 11, 12, 13]}
)
cube = Cube(dimension_columns=["x"], partition_columns=["p"], uuid_prefix="cube")
build_cube(
data={cube.seed_dataset: df_seed, "enrich": df_enrich},
cube=cube,
store=function_store,
)
store = function_store()
with pytest.raises(TypeError) as exc:
driver(cube=cube, store=store, no_run=True)
assert str(exc.value) == "store must be a factory but is HFilesystemStore"
def test_fail_blocksize_wrong_type(driver, function_store, skip_eager):
cube = Cube(dimension_columns=["x"], partition_columns=["p"], uuid_prefix="cube")
with pytest.raises(TypeError, match="blocksize must be an integer but is str"):
driver(cube=cube, store=function_store, blocksize="foo")
def test_fail_blocksize_negative(driver, function_store, skip_eager):
cube = Cube(dimension_columns=["x"], partition_columns=["p"], uuid_prefix="cube")
with pytest.raises(ValueError, match="blocksize must be > 0 but is -1"):
driver(cube=cube, store=function_store, blocksize=-1)
def test_fail_blocksize_zero(driver, function_store, skip_eager):
cube = Cube(dimension_columns=["x"], partition_columns=["p"], uuid_prefix="cube")
with pytest.raises(ValueError, match="blocksize must be > 0 but is 0"):
driver(cube=cube, store=function_store, blocksize=0)
```
#### File: io/testing/delete.py
```python
from .utils import create_dataset
def test_delete_dataset(store_factory, metadata_version, bound_delete_dataset):
"""
Ensure that a dataset can be deleted
"""
create_dataset("dataset", store_factory, metadata_version)
store = store_factory()
assert len(list(store.keys())) > 0
bound_delete_dataset("dataset", store_factory)
assert len(list(store.keys())) == 0
def test_delete_single_dataset(store_factory, metadata_version, bound_delete_dataset):
"""
Ensure that only the specified dataset is deleted
"""
create_dataset("dataset", store_factory, metadata_version)
create_dataset("another_dataset", store_factory, metadata_version)
store = store_factory()
amount_of_keys = len(list(store.keys()))
assert len(list(store.keys())) > 0
bound_delete_dataset("dataset", store_factory)
assert len(list(store.keys())) == amount_of_keys / 2, store.keys()
def test_delete_only_dataset(store_factory, metadata_version, bound_delete_dataset):
"""
Ensure that files including the UUID but not starting with it
are not deleted
"""
create_dataset("UUID", store_factory, metadata_version)
store = store_factory()
store.put(key="prefixUUID", data=b"")
bound_delete_dataset("UUID", store_factory)
assert "prefixUUID" in store.keys()
def test_delete_missing_dataset(store_factory, store_factory2, bound_delete_dataset):
"""
Ensure that a dataset can be deleted even though some keys are already removed.
"""
metadata_version = 4
create_dataset("dataset", store_factory, metadata_version)
store = store_factory()
keys = sorted(store.keys())
assert len(keys) > 0
store2 = store_factory2()
for missing in keys:
if missing == "dataset.by-dataset-metadata.json":
continue
for k in keys:
if k != missing:
store2.put(k, store.get(k))
bound_delete_dataset("dataset", store_factory2)
assert len(list(store2.keys())) == 0
def test_delete_dataset_unreferenced_files(
store_factory, metadata_version, bound_delete_dataset
):
"""
Ensure that unreferenced files of a dataset are also removed when a dataset is deleted
"""
uuid = "dataset"
create_dataset(uuid, store_factory, metadata_version)
store = store_factory()
store.put(f"{uuid}/table/trash.parquet", b"trash")
assert len(list(store.keys())) > 0
bound_delete_dataset(uuid, store_factory)
assert len(list(store.keys())) == 0
```
#### File: kartothek/utils/store.py
```python
import logging
import time
from urllib.parse import quote
from simplekv.contrib import VALID_KEY_RE_EXTENDED
try:
# azure-storage-blob < 12
from azure.storage.blob import BlockBlobService as _BlockBlobService
from azure.common import (
AzureMissingResourceHttpError as _AzureMissingResourceHttpError,
)
except ImportError:
class _BlockBlobService: # type: ignore
"""
Dummy class.
"""
class _AzureMissingResourceHttpError: # type: ignore
"""
Dummy class.
"""
try:
# azure-storage-blob >= 12
from azure.storage.blob import ContainerClient as _ContainerClient
from azure.core.exceptions import ResourceNotFoundError as _ResourceNotFoundError
except ImportError:
class _ContainerClient: # type: ignore
"""
Dummy class.
"""
class _ResourceNotFoundError: # type: ignore
"""
Dummy class.
"""
__all__ = ("copy_keys",)
_logger = logging.getLogger(__name__)
# Specialized implementation for azure-storage-blob < 12, using BlockBlobService (`bbs`):
def _has_azure_bbs(store):
try:
# store decorators will forward getattr calls
return isinstance(store.block_blob_service, _BlockBlobService)
except AttributeError:
return False
def _azure_bbs_content_md5(block_blob_service, container, key, accept_missing=False):
try:
return block_blob_service.get_blob_properties(
container, key
).properties.content_settings.content_md5
except _AzureMissingResourceHttpError:
if accept_missing:
return None
else:
raise KeyError(key)
def _copy_azure_bbs(keys, src_store, tgt_store):
src_container = src_store.container
tgt_container = tgt_store.container
src_bbs = src_store.block_blob_service
tgt_bbs = tgt_store.block_blob_service
cprops = {}
for k in keys:
source_md5 = _azure_bbs_content_md5(
src_bbs, src_container, k, accept_missing=False
)
if source_md5 is None:
_logger.debug("Missing hash for {}".format(k))
else:
tgt_md5 = _azure_bbs_content_md5(
tgt_bbs, tgt_container, k, accept_missing=True
)
if source_md5 == tgt_md5:
_logger.debug("Omitting copy to {} (checksum match)".format(k))
continue
copy_source = src_bbs.make_blob_url(
src_container, quote(k), sas_token=src_bbs.sas_token
)
cprops[k] = tgt_bbs.copy_blob(tgt_container, k, copy_source)
for k, cprop in cprops.items():
while True:
blob = tgt_bbs.get_blob_properties(tgt_container, k)
cprop_current = blob.properties.copy
assert cprop.id == cprop_current.id, "Concurrent copy to {}".format(k)
if cprop_current.status == "pending":
_logger.debug("Waiting for pending copy to {}...".format(k))
time.sleep(0.1)
continue
elif cprop_current.status == "success":
_logger.debug("Copy to {} completed".format(k))
break # break from while, continue in for-loop
else:
raise RuntimeError(
"Error while copying: status is {}: {}".format(
cprop_current.status, cprop_current.status_description
)
)
# Specialized implementation for azure-storage-blob >= 12, using ContainerClient (`cc`):
def _has_azure_cc(store):
try:
# store decorators will forward getattr calls
return isinstance(store.blob_container_client, _ContainerClient)
except AttributeError:
return False
def _azure_cc_content_md5(cc, key, accept_missing=False):
try:
bc = cc.get_blob_client(key)
return bc.get_blob_properties().content_settings.content_md5
except _ResourceNotFoundError:
if accept_missing:
return None
else:
raise KeyError(key)
def _copy_azure_cc(keys, src_store, tgt_store):
src_cc = src_store.blob_container_client
tgt_cc = tgt_store.blob_container_client
copy_ids = {}
for k in keys:
source_md5 = _azure_cc_content_md5(src_cc, k, accept_missing=False)
if source_md5 is None:
_logger.debug("Missing hash for {}".format(k))
else:
tgt_md5 = _azure_cc_content_md5(tgt_cc, k, accept_missing=True)
if source_md5 == tgt_md5:
_logger.debug("Omitting copy to {} (checksum match)".format(k))
continue
copy_source = src_cc.get_blob_client(k).url
copy_ids[k] = tgt_cc.get_blob_client(k).start_copy_from_url(copy_source)[
"copy_id"
]
for k, copy_id in copy_ids.items():
while True:
cprop_current = tgt_cc.get_blob_client(k).get_blob_properties().copy
assert copy_id == cprop_current.id, "Concurrent copy to {}".format(k)
if cprop_current.status == "pending":
_logger.debug("Waiting for pending copy to {}...".format(k))
time.sleep(0.1)
continue
elif cprop_current.status == "success":
_logger.debug("Copy to {} completed".format(k))
break # break from while, continue in for-loop
else:
raise RuntimeError(
"Error while copying: status is {}: {}".format(
cprop_current.status, cprop_current.status_description
)
)
def _copy_naive(keys, src_store, tgt_store):
for k in keys:
tgt_store.put(k, src_store.get(k))
def copy_keys(keys, src_store, tgt_store):
"""
    Copy keys from one store to another.
Parameters
----------
keys: Iterable[str]
Keys to copy.
src_store: Union[simplekv.KeyValueStore, Callable[[], simplekv.KeyValueStore]]
Source KV store.
tgt_store: Union[simplekv.KeyValueStore, Callable[[], simplekv.KeyValueStore]]
Target KV store.
"""
if callable(src_store):
src_store = src_store()
if callable(tgt_store):
tgt_store = tgt_store()
keys = sorted(keys)
for k in keys:
if (k is None) or (not VALID_KEY_RE_EXTENDED.match(k)) or (k == "/"):
raise ValueError("Illegal key: {}".format(k))
if _has_azure_bbs(src_store) and _has_azure_bbs(tgt_store):
_logger.debug(
"Azure stores based on BlockBlobStorage class detected, use fast-path."
)
_copy_azure_bbs(keys, src_store, tgt_store)
elif _has_azure_cc(src_store) and _has_azure_cc(tgt_store):
_logger.debug(
"Azure stores based on ContainerClient class detected, use fast-path."
)
_copy_azure_cc(keys, src_store, tgt_store)
else:
_logger.debug("Use naive slow-path.")
_copy_naive(keys, src_store, tgt_store)
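# Minimal usage sketch (``src`` and ``tgt`` stand for simplekv stores or
# zero-argument store factories; the key below is an invented placeholder):
#
#   copy_keys({"my_cube++seed/table/part-0.parquet"}, src, tgt)
#
# Plain stores fall back to the naive get/put loop above; the server-side copy
# fast-paths are only used when both sides expose the matching
# azure-storage-blob client objects.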
```
#### File: tests/cli/test_base.py
```python
from subprocess import check_call
import pytest
from dask.callbacks import Callback
def test_entry_point(cli):
check_call("kartothek_cube")
def test_noop(cli):
result = cli()
assert result.exit_code == 0
assert result.output.startswith("Usage: cli")
@pytest.mark.parametrize("arg", ["--help", "-h"])
def test_help(cli, arg):
result = cli(arg)
assert result.exit_code == 0
assert result.output.startswith("Usage: cli")
def test_missing_command(cli):
result = cli("my_cube")
assert result.exit_code == 2
assert "Error: Missing command." in result.output
def test_unknown_command(cli):
result = cli("my_cube", "foo")
assert result.exit_code == 2
assert (
'Error: No such command "foo".' in result.output
or "Error: No such command 'foo'." in result.output
)
def test_cleanup(cli, built_cube, skv):
    # test that the interpreter is clean after the CLI exits
cli("--store=cubes", "my_cube", "info")
assert Callback.active == set()
```
#### File: core/cube/test_constants.py
```python
from kartothek.core.cube.constants import KTK_CUBE_UUID_SEPERATOR
from kartothek.core.dataset import _validate_uuid
def test_uuid_seperator_valid():
assert _validate_uuid(KTK_CUBE_UUID_SEPERATOR)
```
#### File: tests/core/test_dataset_dyn_part.py
```python
import os
import random
import tempfile
from urllib.parse import quote
import numpy as np
import pandas as pd
import simplejson
import storefact
from kartothek.core.common_metadata import (
_get_common_metadata_key,
make_meta,
store_schema_metadata,
)
from kartothek.core.dataset import DatasetMetadata, create_partition_key, naming
from kartothek.core.urlencode import quote_indices, unquote_indices
def test_create_partition_key():
key = create_partition_key(
"my-uuid", "testtable", [("index1", "value1"), ("index2", "value2")]
)
assert key == "my-uuid/testtable/index1=value1/index2=value2/data"
def test_index_quote_roundtrip():
indices = [
(1, b"Muenchen"),
("location", b"Muenchen"),
("location", "München"),
("product", "å\\ øß"),
]
expected = [
("1", "Muenchen"),
("location", "Muenchen"),
("location", "München"),
("product", "å\\ øß"),
]
assert expected == unquote_indices(quote_indices(indices))
def test_unquote_indices():
index_strings = [
"{}={}".format(quote("location"), quote("München".encode("utf-8"))),
"{}={}".format(quote("product"), quote("å\\ øß".encode("utf-8"))),
]
indices = unquote_indices(index_strings)
assert indices == [("location", "München"), ("product", "å\\ øß")]
def test_dynamic_partitions(store):
"""
Do not specify partitions in metadata, but read them dynamically from store
"""
partition_suffix = "suffix"
dataset_uuid = "uuid+namespace-attribute12_underscored"
partition0_core = create_partition_key(
dataset_uuid,
"core",
[("location", "L-0")],
"{}.parquet".format(partition_suffix),
)
partition1_core = create_partition_key(
dataset_uuid,
"core",
[("location", "L-1")],
"{}.parquet".format(partition_suffix),
)
partition0_ext = create_partition_key(
dataset_uuid,
"extension",
[("location", "L-0")],
"{}.parquet".format(partition_suffix),
)
partition1_ext = create_partition_key(
dataset_uuid,
"extension",
[("location", "L-1")],
"{}.parquet".format(partition_suffix),
)
metadata = {"dataset_metadata_version": 4, "dataset_uuid": dataset_uuid}
expected_partitions = {
"location=L-0/{}".format(partition_suffix): {
"files": {"core": partition0_core, "extension": partition0_ext}
},
"location=L-1/{}".format(partition_suffix): {
"files": {"core": partition1_core, "extension": partition1_ext}
},
}
expected_indices = {
"location": {
"L-0": ["location=L-0/{}".format(partition_suffix)],
"L-1": ["location=L-1/{}".format(partition_suffix)],
}
}
# put two partitions for two tables each to store
store.put(
"{}{}.json".format(dataset_uuid, naming.METADATA_BASE_SUFFIX),
simplejson.dumps(metadata).encode("utf-8"),
)
store.put(partition0_core, b"test")
store.put(partition1_core, b"test")
store.put(partition0_ext, b"test")
store.put(partition1_ext, b"test")
store_schema_metadata(
make_meta(
pd.DataFrame({"location": ["L-0/{}".format(partition_suffix)]}),
origin="stored",
),
dataset_uuid,
store,
"core",
)
    # instantiate metadata to write table metadata
core_schema = make_meta(
pd.DataFrame(
{
"column_0": pd.Series([1], dtype=int),
"column_1": pd.Series([1], dtype=int),
"location": pd.Series(["str"]),
}
),
origin="core",
)
extension_schema = make_meta(
pd.DataFrame(
{
"column_77": pd.Series([1], dtype=int),
"column_78": pd.Series([1], dtype=int),
"location": pd.Series(["str"]),
}
),
origin="extension",
)
store_schema_metadata(core_schema, dataset_uuid, store, "core")
store_schema_metadata(extension_schema, dataset_uuid, store, "extension")
dmd = DatasetMetadata.load_from_store(dataset_uuid, store)
# reload metadata to use table metadata
dmd = DatasetMetadata.load_from_store(dataset_uuid, store)
dmd = dmd.load_partition_indices()
dmd_dict = dmd.to_dict()
assert dmd_dict["partitions"] == expected_partitions
assert dmd_dict["indices"] == expected_indices
def test_dynamic_partitions_multiple_indices(store):
"""
Do not specify partitions in metadata, but read them dynamically from store
"""
suffix = "suffix"
dataset_uuid = "uuid+namespace-attribute12_underscored"
partition0_core = create_partition_key(
dataset_uuid,
"core",
[("location", "L-0"), ("product", "P-0")],
"{}.parquet".format(suffix),
)
partition1_core = create_partition_key(
dataset_uuid,
"core",
[("location", "L-1"), ("product", "P-0")],
"{}.parquet".format(suffix),
)
metadata = {"dataset_metadata_version": 4, "dataset_uuid": dataset_uuid}
expected_partitions = {
"location=L-0/product=P-0/{}".format(suffix): {
"files": {"core": partition0_core}
},
"location=L-1/product=P-0/{}".format(suffix): {
"files": {"core": partition1_core}
},
}
expected_indices = {
"location": {
"L-0": ["location=L-0/product=P-0/{}".format(suffix)],
"L-1": ["location=L-1/product=P-0/{}".format(suffix)],
},
"product": {
"P-0": [
"location=L-0/product=P-0/{}".format(suffix),
"location=L-1/product=P-0/{}".format(suffix),
]
},
}
store.put(partition0_core, b"test")
store.put(partition1_core, b"test")
store_schema_metadata(
make_meta(pd.DataFrame({"location": ["L-0"], "product": ["P-0"]}), origin="1"),
dataset_uuid,
store,
"core",
)
dmd = DatasetMetadata.load_from_dict(metadata, store)
dmd = dmd.load_partition_indices()
dmd_dict = dmd.to_dict()
assert dmd_dict["partitions"] == expected_partitions
    # The order within the index lists may differ. That is fine at runtime
    # but produces flaky tests, so sort them before comparing.
sorted_result = {
column: {label: sorted(x) for label, x in index.items()}
for column, index in dmd_dict["indices"].items()
}
assert sorted_result == expected_indices
def test_dynamic_partitions_with_garbage(store):
"""
In case there are unknown files, dataset and indices still load correctly
"""
dataset_uuid = "uuid+namespace-attribute12_underscored"
partition_suffix = "suffix"
partition0_core = create_partition_key(
dataset_uuid,
"core",
[("location", "L-0"), ("product", "P-0")],
"{}.parquet".format(partition_suffix),
)
partition1_core = create_partition_key(
dataset_uuid,
"core",
[("location", "L-1"), ("product", "P-0")],
"{}.parquet".format(partition_suffix),
)
metadata = {"dataset_metadata_version": 4, "dataset_uuid": dataset_uuid}
expected_partitions = {
"location=L-0/product=P-0/{}".format(partition_suffix): {
"files": {"core": partition0_core}
},
"location=L-1/product=P-0/{}".format(partition_suffix): {
"files": {"core": partition1_core}
},
}
expected_indices = {
"location": {
"L-0": ["location=L-0/product=P-0/{}".format(partition_suffix)],
"L-1": ["location=L-1/product=P-0/{}".format(partition_suffix)],
},
"product": {
"P-0": [
"location=L-0/product=P-0/{}".format(partition_suffix),
"location=L-1/product=P-0/{}".format(partition_suffix),
]
},
}
store.put(partition0_core, b"test")
store.put(partition1_core, b"test")
store_schema_metadata(
make_meta(pd.DataFrame({"location": ["L-0"], "product": ["P-0"]}), origin="1"),
dataset_uuid,
store,
"core",
)
# the following files are garbage and should not interfere with the indices and/or partitions
for suffix in ["", ".json", ".msgpack", ".my_own_file_format"]:
store.put("this_should_not_exist{}".format(suffix), b"ignore me")
store.put(
"{}/this_should_not_exist{}".format(dataset_uuid, suffix), b"ignore me"
)
store.put(
"{}/{}/this_should_not_exist{}".format(dataset_uuid, "core", suffix),
b"ignore me",
)
store.put(
"{}/{}/location=L-0/this_should_not_exist{}".format(
dataset_uuid, "core", suffix
),
b"ignore me",
)
dmd = DatasetMetadata.load_from_dict(metadata, store)
dmd = dmd.load_partition_indices()
dmd_dict = dmd.to_dict()
assert dmd_dict["partitions"] == expected_partitions
    # The order within the index lists may differ. That is fine at runtime
    # but produces flaky tests, so sort them before comparing.
sorted_result = {
column: {label: sorted(x) for label, x in index.items()}
for column, index in dmd_dict["indices"].items()
}
assert sorted_result == expected_indices
def test_dynamic_partitions_quote(store, metadata_version):
"""
Do not specify partitions in metadata, but read them dynamically from store
"""
dataset_uuid = "uuid-namespace-attribute12_underscored"
partition0_core = create_partition_key(
dataset_uuid, "core", [("location", "München")], "data.parquet"
)
partition1_core = create_partition_key(
dataset_uuid, "core", [("location", "å\\ øß")], "data.parquet"
)
metadata = {
"dataset_metadata_version": metadata_version,
"dataset_uuid": dataset_uuid,
}
expected_partitions = {
"location=M%C3%BCnchen/data": {"files": {"core": partition0_core}},
"location=%C3%A5%5C%20%C3%B8%C3%9F/data": {"files": {"core": partition1_core}},
}
expected_indices = {
"location": {
"München": ["location=M%C3%BCnchen/data"],
"å\\ øß": ["location=%C3%A5%5C%20%C3%B8%C3%9F/data"],
}
}
store.put(partition0_core, b"test")
store.put(partition1_core, b"test")
store_schema_metadata(
make_meta(pd.DataFrame({"location": ["L-0"]}), origin="1"),
dataset_uuid,
store,
"core",
)
dmd = DatasetMetadata.load_from_dict(metadata, store)
dmd = dmd.load_partition_indices()
dmd_dict = dmd.to_dict()
assert dmd_dict["partitions"] == expected_partitions
assert dmd_dict["indices"] == expected_indices
def test_dask_partitions(metadata_version):
"""
Create partitions for one table with dask
and check that it can be read with kartothek
"""
import dask.dataframe
bucket_dir = tempfile.mkdtemp()
dataset_uuid = "uuid+namespace-attribute12_underscored"
os.mkdir("{}/{}".format(bucket_dir, dataset_uuid))
table_dir = "{}/{}/core".format(bucket_dir, dataset_uuid)
os.mkdir(table_dir)
store = storefact.get_store_from_url("hfs://{}".format(bucket_dir))
locations = ["L-{}".format(i) for i in range(2)]
df = pd.DataFrame()
for location in locations:
core = pd.DataFrame(
data={
"date": np.array(
["2017-11-23", "2017-11-23", "2017-11-24", "2017-11-24"]
),
"product": np.array(["P-0", "P-1", "P-0", "P-1"]),
"location": location,
"value": np.array(random.sample(range(1, 100), 4)),
}
)
df = pd.concat([df, core])
ddf = dask.dataframe.from_pandas(df, npartitions=1)
dask.dataframe.to_parquet(ddf, table_dir, partition_on=["location"])
partition0 = "{}/core/location=L-0/part.0.parquet".format(dataset_uuid)
partition1 = "{}/core/location=L-1/part.0.parquet".format(dataset_uuid)
metadata = {
"dataset_metadata_version": metadata_version,
"dataset_uuid": dataset_uuid,
}
expected_partitions = {
"partitions": {
"location=L-0": {"files": {"core": partition0}},
"location=L-1": {"files": {"core": partition1}},
}
}
expected_tables = {"tables": {"core": ["date", "product", "value"]}}
store.put(
"{}.by-dataset-metadata.json".format(dataset_uuid),
simplejson.dumps(metadata).encode(),
)
metadata.update(expected_partitions)
metadata.update(expected_tables)
dmd = DatasetMetadata.load_from_store(dataset_uuid, store)
actual_partitions = dmd.to_dict()["partitions"]
# we partition on location ID which has two values
assert len(actual_partitions) == 2
assert dmd.partition_keys == ["location"]
def test_overlap_keyspace(store, metadata_version):
dataset_uuid1 = "uuid+namespace-attribute12_underscored"
dataset_uuid2 = "uuid+namespace-attribute12_underscored_ext"
table = "core"
for dataset_uuid in (dataset_uuid1, dataset_uuid2):
partition0 = "location=L-0"
partition0_key = "{}/{}/{}/data.parquet".format(dataset_uuid, table, partition0)
metadata = {
"dataset_metadata_version": metadata_version,
"dataset_uuid": dataset_uuid,
}
        # put dataset metadata and a single partition per dataset to store
store.put(
"{}{}.json".format(dataset_uuid, naming.METADATA_BASE_SUFFIX),
simplejson.dumps(metadata).encode("utf-8"),
)
store.put(partition0_key, b"test")
store_schema_metadata(
make_meta(pd.DataFrame({"location": ["L-0"]}), origin="1"),
dataset_uuid,
store,
"core",
)
for dataset_uuid in (dataset_uuid1, dataset_uuid2):
partition0_label = "location=L-0/data"
partition0_key = "{}/{}/{}.parquet".format(
dataset_uuid, table, partition0_label
)
expected_partitions = {"location=L-0/data": {"files": {"core": partition0_key}}}
expected_indices = {"location": {"L-0": ["location=L-0/data"]}}
assert DatasetMetadata.storage_keys(dataset_uuid, store) == [
"{}{}.json".format(dataset_uuid, naming.METADATA_BASE_SUFFIX),
_get_common_metadata_key(dataset_uuid, "core"),
partition0_key,
]
dmd = DatasetMetadata.load_from_store(dataset_uuid, store)
dmd = dmd.load_partition_indices()
dmd_dict = dmd.to_dict()
assert dmd_dict["partitions"] == expected_partitions
assert dmd_dict["indices"] == expected_indices
```
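The tests above exercise how partition storage keys embed URL-quoted index values. A short sketch (requires kartothek) of the key layout and the quote/unquote round-trip these tests rely on:
```python
# Illustrative sketch of the partition-key layout and index quoting exercised
# by the tests above (requires kartothek).
from kartothek.core.dataset import create_partition_key
from kartothek.core.urlencode import quote_indices, unquote_indices

key = create_partition_key(
    "my-uuid", "core", [("location", "München")], "data.parquet"
)
# Index values are percent-encoded inside the storage key, e.g.
# "my-uuid/core/location=M%C3%BCnchen/data.parquet"
print(key)

# Quoting and unquoting index tuples is a lossless round-trip for strings.
quoted = quote_indices([("location", "München")])
assert unquote_indices(quoted) == [("location", "München")]
```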
#### File: io/cube/conftest.py
```python
import dask
import pytest
@pytest.fixture(params=["dask_bag_bs1", "dask_bag_bs3", "dask_dataframe", "eager"])
def driver_name(request):
return request.param
@pytest.fixture(autouse=True, scope="session")
def setup_dask():
dask.config.set(scheduler="synchronous")
@pytest.fixture
def skip_eager(driver_name):
if driver_name == "eager":
pytest.skip("Skipped for eager backend.")
```
#### File: io/cube/test_extend.py
```python
import uuid
import dask
import dask.bag as db
import dask.core
import pandas as pd
import pytest
from tests.io.cube.utils import wrap_bag_write, wrap_ddf_write
from kartothek.core.cube.cube import Cube
from kartothek.io.dask.bag_cube import extend_cube_from_bag
from kartothek.io.dask.dataframe_cube import extend_cube_from_dataframe
from kartothek.io.eager_cube import extend_cube
from kartothek.io.testing.extend_cube import * # noqa
@pytest.fixture
def driver(driver_name):
if driver_name == "dask_bag_bs1":
return wrap_bag_write(extend_cube_from_bag, blocksize=1)
elif driver_name == "dask_bag_bs3":
return wrap_bag_write(extend_cube_from_bag, blocksize=3)
elif driver_name == "dask_dataframe":
return wrap_ddf_write(extend_cube_from_dataframe)
elif driver_name == "eager":
return extend_cube
else:
raise ValueError("Unknown driver: {}".format(driver_name))
def _count_execution_to_store(obj, store):
store = store()
key = "counter.{}".format(uuid.uuid4().hex)
store.put(key, b"")
return obj
def test_dask_bag_fusing(
driver, function_store, driver_name, skip_eager, existing_cube
):
"""
See kartothek/tests/io/cube/test_build.py::test_dask_bag_fusing
"""
if driver_name == "dask_dataframe":
pytest.skip("not relevant for dask.dataframe")
partition_size = 1 if driver_name == "dask_bag_bs1" else 3
n_partitions = 4
dfs = [
{
"a": pd.DataFrame({"x": [2 * i, 2 * i + 1], "p": i, "v3": 42}),
"b": pd.DataFrame({"x": [2 * i, 2 * i + 1], "p": i, "v4": 1337}),
}
for i in range(partition_size * n_partitions)
]
cube = Cube(
dimension_columns=["x"],
partition_columns=["p"],
uuid_prefix="cube",
seed_dataset="source",
)
bag = db.from_sequence(dfs, partition_size=partition_size).map(
_count_execution_to_store, store=function_store
)
bag = extend_cube_from_bag(
data=bag, cube=cube, store=function_store, ktk_cube_dataset_ids=["a", "b"]
)
dct = dask.optimize(bag)[0].__dask_graph__()
tasks = {k for k, v in dct.items() if dask.core.istask(v)}
assert len(tasks) == (n_partitions + 1)
def test_function_executed_once(driver, function_store, driver_name, existing_cube):
"""
Test that the payload function is only executed once per branch.
This was a bug in the dask_bag backend.
"""
if driver_name == "eager":
pytest.skip("not relevant for eager")
if driver_name == "dask_dataframe":
pytest.skip("not relevant for dask.dataframe")
df_a1 = pd.DataFrame({"x": [0, 1], "p": [0, 0], "v3": [10, 11]})
df_a2 = pd.DataFrame({"x": [2, 3], "p": [1, 1], "v3": [12, 13]})
df_b1 = pd.DataFrame({"x": [0, 1], "p": [0, 0], "v4": [20, 21]})
df_b2 = pd.DataFrame({"x": [2, 3], "p": [1, 1], "v4": [22, 23]})
dfs = [{"a": df_a1, "b": df_b1}, {"a": df_a2, "b": df_b2}]
cube = Cube(
dimension_columns=["x"],
partition_columns=["p"],
uuid_prefix="cube",
seed_dataset="source",
)
if driver_name in ("dask_bag_bs1", "dask_bag_bs3"):
bag = db.from_sequence(
dfs, partition_size=1 if driver_name == "dask_bag_bs1" else 3
).map(_count_execution_to_store, store=function_store)
bag = extend_cube_from_bag(
data=bag, cube=cube, store=function_store, ktk_cube_dataset_ids=["a", "b"]
)
bag.compute()
else:
raise ValueError("Missing implementation for driver: {}".format(driver_name))
assert len(function_store().keys(prefix="counter.")) == 2
```
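`test_dask_bag_fusing` verifies fusion by counting the tasks that remain in the optimized dask graph. A standalone sketch of that counting technique on a plain dask bag follows; exact task counts can vary between dask versions, so treat the number as illustrative.
```python
# Counting tasks in an optimized dask bag graph, the same check used by
# test_dask_bag_fusing above. Exact counts depend on the dask version.
import dask
import dask.bag as db
import dask.core

bag = (
    db.from_sequence(range(8), partition_size=2)
    .map(lambda x: x + 1)
    .map(lambda x: x * 2)
)
graph = dask.optimize(bag)[0].__dask_graph__()
tasks = {key for key, value in graph.items() if dask.core.istask(value)}
# The chained map calls are fused, leaving roughly one task per partition.
print(len(tasks))
```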
#### File: tests/serialization/test_dataframe.py
```python
import datetime
import numpy as np
import pandas as pd
import pandas.testing as pdt
import pyarrow as pa
import pytest
from pyarrow.parquet import ParquetFile
from kartothek.serialization import (
CsvSerializer,
DataFrameSerializer,
ParquetSerializer,
default_serializer,
)
from kartothek.serialization._util import ensure_unicode_string_type
TYPE_STABLE_SERIALISERS = [ParquetSerializer()]
SERLIALISERS = TYPE_STABLE_SERIALISERS + [
CsvSerializer(),
CsvSerializer(compress=False),
default_serializer(),
]
type_stable_serialisers = pytest.mark.parametrize("serialiser", TYPE_STABLE_SERIALISERS)
predicate_serialisers = pytest.mark.parametrize(
"serialiser",
[
ParquetSerializer(chunk_size=1),
ParquetSerializer(chunk_size=2),
ParquetSerializer(chunk_size=4),
]
+ SERLIALISERS,
)
def test_load_df_from_store_unsupported_format(store):
with pytest.raises(ValueError):
DataFrameSerializer.restore_dataframe(store, "test.unknown")
def test_store_df_to_store(store):
df = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0], "c": ["∆", "€"]})
dataframe_format = default_serializer()
assert isinstance(dataframe_format, ParquetSerializer)
key = dataframe_format.store(store, "prefix", df)
pdt.assert_frame_equal(DataFrameSerializer.restore_dataframe(store, key), df)
@pytest.mark.parametrize("serialiser", SERLIALISERS)
def test_store_table_to_store(serialiser, store):
df = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0], "c": ["∆", "€"]})
table = pa.Table.from_pandas(df)
key = serialiser.store(store, "prefix", table)
pdt.assert_frame_equal(DataFrameSerializer.restore_dataframe(store, key), df)
@pytest.mark.parametrize("serialiser", SERLIALISERS)
def test_dataframe_roundtrip(serialiser, store):
if serialiser in TYPE_STABLE_SERIALISERS:
df = pd.DataFrame(
{"a": [1, 2], "b": [3.0, 4.0], "c": ["∆", "€"], b"d": ["#", ";"]}
)
key = serialiser.store(store, "prefix", df)
df.columns = [ensure_unicode_string_type(col) for col in df.columns]
else:
df = pd.DataFrame(
{"a": [1, 2], "b": [3.0, 4.0], "c": ["∆", "€"], "d": ["#", ";"]}
)
key = serialiser.store(store, "prefix", df)
pdt.assert_frame_equal(DataFrameSerializer.restore_dataframe(store, key), df)
# Test partial restore
pdt.assert_frame_equal(
DataFrameSerializer.restore_dataframe(store, key, columns=["a", "c"]),
df[["a", "c"]],
)
    # Test that all serialisers accept the predicate_pushdown_to_io flag
pdt.assert_frame_equal(
DataFrameSerializer.restore_dataframe(
store, key, columns=["a", "c"], predicate_pushdown_to_io=False
),
df[["a", "c"]],
)
# Test that all serialisers can deal with categories
expected = df[["c", "d"]].copy()
expected["c"] = expected["c"].astype("category")
# Check that the dtypes match but don't care about the order of the categoricals.
pdt.assert_frame_equal(
DataFrameSerializer.restore_dataframe(
store, key, columns=["c", "d"], categories=["c"]
),
expected,
check_categorical=False,
)
# Test restore w/ empty col list
pdt.assert_frame_equal(
DataFrameSerializer.restore_dataframe(store, key, columns=[]), df[[]]
)
@pytest.mark.parametrize("serialiser", SERLIALISERS)
def test_missing_column(serialiser, store):
df = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0], "c": ["∆", "€"], "d": ["#", ";"]})
key = serialiser.store(store, "prefix", df)
with pytest.raises(ValueError):
DataFrameSerializer.restore_dataframe(store, key, columns=["a", "x"])
@pytest.mark.parametrize("serialiser", SERLIALISERS)
def test_dataframe_roundtrip_empty(serialiser, store):
df = pd.DataFrame({})
key = serialiser.store(store, "prefix", df)
pdt.assert_frame_equal(DataFrameSerializer.restore_dataframe(store, key), df)
# Test partial restore
pdt.assert_frame_equal(DataFrameSerializer.restore_dataframe(store, key), df)
@pytest.mark.parametrize("serialiser", SERLIALISERS)
def test_dataframe_roundtrip_no_rows(serialiser, store):
df = pd.DataFrame({"a": [], "b": [], "c": []}).astype(object)
key = serialiser.store(store, "prefix", df)
pdt.assert_frame_equal(DataFrameSerializer.restore_dataframe(store, key), df)
# Test partial restore
pdt.assert_frame_equal(
DataFrameSerializer.restore_dataframe(store, key, columns=["a", "c"]),
df[["a", "c"]],
)
def test_filter_query_predicate_exclusion(store):
with pytest.raises(ValueError):
DataFrameSerializer.restore_dataframe(
store, "test.parquet", predicates=[[("a", "==", 1)]], filter_query="True"
)
def assert_frame_almost_equal(df_left, df_right):
"""
    Compare two frames while tolerating dtypes that are not preserved during
    the round-trips.
    """
    # FIXME: this needs better documentation
for col in df_left.columns:
if pd.api.types.is_datetime64_dtype(
df_left[col].dtype
) and pd.api.types.is_object_dtype(df_right[col].dtype):
df_right[col] = pd.to_datetime(df_right[col])
elif pd.api.types.is_object_dtype(
df_left[col].dtype
) and pd.api.types.is_datetime64_dtype(df_right[col].dtype):
df_left[col] = pd.to_datetime(df_left[col])
elif (
len(df_left) > 0
and pd.api.types.is_object_dtype(df_left[col].dtype)
and pd.api.types.is_object_dtype(df_right[col].dtype)
):
if isinstance(df_left[col].iloc[0], datetime.date) or isinstance(
df_right[col].iloc[0], datetime.date
):
df_left[col] = pd.to_datetime(df_left[col])
df_right[col] = pd.to_datetime(df_right[col])
elif pd.api.types.is_object_dtype(
df_left[col].dtype
) and pd.api.types.is_categorical_dtype(df_right[col].dtype):
df_left[col] = df_left[col].astype(df_right[col].dtype)
pdt.assert_frame_equal(
df_left.reset_index(drop=True), df_right.reset_index(drop=True)
)
@pytest.mark.parametrize(
"df, read_kwargs",
[
(pd.DataFrame({"string_ü": ["abc", "affe", "banane", "buchstabe_ü"]}), {}),
(pd.DataFrame({"integer_ü": np.arange(4)}), {}),
(pd.DataFrame({"float_ü": [-3.141591, 0.0, 3.141593, 3.141595]}), {}),
(
pd.DataFrame(
{
"date_ü": [
datetime.date(2011, 1, 31),
datetime.date(2011, 2, 3),
datetime.date(2011, 2, 4),
datetime.date(2011, 3, 10),
]
}
),
{"date_as_object": False},
),
(
pd.DataFrame(
{
"date_ü": [
datetime.date(2011, 1, 31),
datetime.date(2011, 2, 3),
datetime.date(2011, 2, 4),
datetime.date(2011, 3, 10),
]
}
),
{"date_as_object": True},
),
(
pd.DataFrame(
{"categorical_ü": list("abcd")},
dtype=pd.api.types.CategoricalDtype(list("abcd"), ordered=True),
),
{},
),
],
)
@predicate_serialisers
@pytest.mark.parametrize("predicate_pushdown_to_io", [True, False])
def test_predicate_pushdown(
store, df, read_kwargs, predicate_pushdown_to_io, serialiser
):
"""
Test predicate pushdown for several types and operations.
    The DataFrame parameters all need to be of the same length for this test to
work universally. Also the values in the DataFrames need to be sorted in
ascending order.
"""
# All test dataframes need to have the same length
assert len(df) == 4
assert df[df.columns[0]].is_monotonic and df.iloc[0, 0] < df.iloc[-1, 0]
# This is due to the limitation that dates cannot be expressed in
# Pandas' query() method.
if isinstance(serialiser, CsvSerializer) and isinstance(
df.iloc[0, 0], datetime.date
):
pytest.skip("CsvSerialiser cannot filter on dates")
key = serialiser.store(store, "prefix", df)
# Test `<` and `>` operators
expected = df.iloc[[1, 2], :].copy()
predicates = [
[(df.columns[0], "<", df.iloc[3, 0]), (df.columns[0], ">", df.iloc[0, 0])]
]
result = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert_frame_almost_equal(result, expected)
    # Test `<=` and `>=` operators
expected = df.iloc[[1, 2, 3], :].copy()
predicates = [
[(df.columns[0], "<=", df.iloc[3, 0]), (df.columns[0], ">=", df.iloc[1, 0])]
]
result = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert_frame_almost_equal(result, expected)
# Test `==` operator
expected = df.iloc[[1], :].copy()
predicates = [[(df.columns[0], "==", df.iloc[1, 0])]]
result = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert_frame_almost_equal(result, expected)
# Test `in` operator
expected = df.iloc[[1], :].copy()
predicates = [[(df.columns[0], "in", [df.iloc[1, 0]])]]
result = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert_frame_almost_equal(result, expected)
# Test `!=` operator
expected = df.iloc[[0, 2, 3], :].copy()
predicates = [[(df.columns[0], "!=", df.iloc[1, 0])]]
result = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert_frame_almost_equal(result, expected)
# Test empty DataFrame
expected = df.head(0)
predicates = [[(df.columns[0], "<", df.iloc[0, 0])]]
result = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert_frame_almost_equal(result, expected)
# Test in empty list
expected = df.head(0)
predicates = [[(df.columns[0], "in", [])]]
result = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert_frame_almost_equal(result, expected)
# Test in numpy array
expected = df.iloc[[1], :].copy()
predicates = [[(df.columns[0], "in", np.asarray([df.iloc[1, 0], df.iloc[1, 0]]))]]
result = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert_frame_almost_equal(result, expected)
# Test malformed predicates 1
predicates = []
with pytest.raises(ValueError) as exc:
serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert str(exc.value) == "Empty predicates"
# Test malformed predicates 2
predicates = [[]]
with pytest.raises(ValueError) as exc:
serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert str(exc.value) == "Invalid predicates: Conjunction 0 is empty"
# Test malformed predicates 3
predicates = [[(df.columns[0], "<", df.iloc[0, 0])], []]
with pytest.raises(ValueError) as exc:
serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert str(exc.value) == "Invalid predicates: Conjunction 1 is empty"
# Test malformed predicates 4
predicates = [[(df.columns[0], "<", df.iloc[0, 0])], ["foo"]]
with pytest.raises(ValueError) as exc:
serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert (
str(exc.value)
== "Invalid predicates: Clause 0 in conjunction 1 should be a 3-tuple, got object of type <class 'str'> instead"
)
@predicate_serialisers
@pytest.mark.parametrize("predicate_pushdown_to_io", [True, False])
def test_predicate_float_equal_big(predicate_pushdown_to_io, store, serialiser):
df = pd.DataFrame({"float": [3141590.0, 3141592.0, 3141594.0]})
key = serialiser.store(store, "prefix", df)
predicates = [[("float", "==", 3141592.0)]]
result_df = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
)
expected_df = df.iloc[[1], :].copy()
pdt.assert_frame_equal(
result_df.reset_index(drop=True), expected_df.reset_index(drop=True)
)
@predicate_serialisers
@pytest.mark.parametrize("predicate_pushdown_to_io", [True, False])
def test_predicate_float_equal_small(predicate_pushdown_to_io, store, serialiser):
df = pd.DataFrame({"float": [0.3141590, 0.3141592, 0.3141594]})
key = serialiser.store(store, "prefix", df)
predicates = [[("float", "==", 0.3141592)]]
result_df = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
)
expected_df = df.iloc[[1], :].copy()
pdt.assert_frame_equal(
result_df.reset_index(drop=True), expected_df.reset_index(drop=True)
)
@type_stable_serialisers
@pytest.mark.parametrize("predicate_pushdown_to_io", [True, False])
def test_predicate_eval_string_types(serialiser, store, predicate_pushdown_to_io):
df = pd.DataFrame({b"a": [1, 2], "b": [3.0, 4.0]})
key = serialiser.store(store, "prefix", df)
df.columns = [ensure_unicode_string_type(col) for col in df.columns]
pdt.assert_frame_equal(DataFrameSerializer.restore_dataframe(store, key), df)
for col in ["a", b"a", "a"]:
predicates = [[(col, "==", 1)]]
result_df = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
)
expected_df = df.iloc[[0], :].copy()
pdt.assert_frame_equal(
result_df.reset_index(drop=True), expected_df.reset_index(drop=True)
)
for col in ["b", b"b", "b"]:
predicates = [[(col, "==", 3.0)]]
result_df = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
)
expected_df = df.iloc[[0], :].copy()
pdt.assert_frame_equal(
result_df.reset_index(drop=True), expected_df.reset_index(drop=True)
)
for preds in (
[[("a", "==", 1), ("b", "==", 3.0)]],
[[("a", "==", 1), (b"b", "==", 3.0)]],
[[(b"a", "==", 1), ("b", "==", 3.0)]],
):
result_df = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=preds,
)
expected_df = df.iloc[[0], :].copy()
pdt.assert_frame_equal(
result_df.reset_index(drop=True), expected_df.reset_index(drop=True)
)
@pytest.mark.parametrize(
"df,value",
[
(pd.DataFrame({"u": pd.Series([None], dtype=object)}), "foo"),
(pd.DataFrame({"b": pd.Series([None], dtype=object)}), b"foo"),
(pd.DataFrame({"f": pd.Series([np.nan], dtype=float)}), 1.2),
(
pd.DataFrame({"t": pd.Series([pd.NaT], dtype="datetime64[ns]")}),
pd.Timestamp("2017"),
),
],
)
@predicate_serialisers
@pytest.mark.parametrize("predicate_pushdown_to_io", [True, False])
def test_predicate_pushdown_null_col(
store, df, value, predicate_pushdown_to_io, serialiser
):
key = serialiser.store(store, "prefix", df)
expected = df.iloc[[]].copy()
predicates = [[(df.columns[0], "==", value)]]
result = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
)
check_datetimelike_compat = (
isinstance(value, pd.Timestamp) and not serialiser.type_stable
)
pdt.assert_frame_equal(
result.reset_index(drop=True),
expected.reset_index(drop=True),
check_dtype=serialiser.type_stable,
check_datetimelike_compat=check_datetimelike_compat,
)
@pytest.mark.parametrize(
"df, op, value, expected_index",
[
(
pd.DataFrame({"u": pd.Series([None, "x", np.nan], dtype=object)}),
"==",
None,
[0, 2],
),
(
pd.DataFrame({"u": pd.Series([None, "x", np.nan], dtype=object)}),
"in",
[None],
[0, 2],
),
(
pd.DataFrame({"u": pd.Series([None, "x", np.nan], dtype=object)}),
"!=",
None,
[1],
),
(
pd.DataFrame({"u": pd.Series([None, "x", np.nan], dtype=object)}),
"in",
[None, "x"],
[0, 1, 2],
),
(
pd.DataFrame({"f": pd.Series([np.nan, 1.0, np.nan], dtype=float)}),
"==",
np.nan,
[0, 2],
),
(
pd.DataFrame({"f": pd.Series([np.nan, 1.0, np.nan], dtype=float)}),
"in",
[np.nan],
[0, 2],
),
(
pd.DataFrame({"f": pd.Series([np.nan, 1.0, np.nan], dtype=float)}),
"!=",
np.nan,
[1],
),
(
pd.DataFrame({"f": pd.Series([np.nan, 1.0, np.nan], dtype=float)}),
"in",
[np.nan, 1.0],
[0, 1, 2],
),
],
)
@predicate_serialisers
@pytest.mark.parametrize("predicate_pushdown_to_io", [True, False])
def test_predicate_parsing_null_values(
store, df, op, value, expected_index, predicate_pushdown_to_io, serialiser
):
key = serialiser.store(store, "prefix", df)
expected = df.iloc[expected_index].copy()
predicates = [[(df.columns[0], op, value)]]
result = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
)
pdt.assert_frame_equal(
result.reset_index(drop=True),
expected.reset_index(drop=True),
check_dtype=serialiser.type_stable,
)
@pytest.mark.parametrize("op", ["<", "<=", ">", ">="])
@predicate_serialisers
@pytest.mark.parametrize("predicate_pushdown_to_io", [True, False])
def test_predicate_parsing_null_values_failing(
store, op, predicate_pushdown_to_io, serialiser
):
df = pd.DataFrame({"u": pd.Series([1.0, np.nan])})
key = serialiser.store(store, "prefix", df)
predicates = [[(df.columns[0], op, np.nan)]]
with pytest.raises(ValueError, match="Only operators supporting null values"):
serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
)
@pytest.mark.parametrize(
"column, expected_null_count",
[
("no_nulls_int", (0, 0, 0)),
("partial_nulls_int", (0, 1, 2)),
("no_nulls_float", (0, 0, 0)),
("partial_nulls_float", (0, 1, 2)),
("partial_nulls_obj", (0, 1, 2)),
("no_nulls_obj", (0, 0, 0)),
("partial_nulls_obj_mixed", (0, 2, 1)),
("nulls_reverse_rg", (1, 0, 1)),
],
)
def test_null_count(store, column, expected_null_count):
serialiser = ParquetSerializer(chunk_size=2)
df = pd.DataFrame(
{
"no_nulls_int": [1, 2, 3, 4, 5, 6],
"partial_nulls_int": [1, 2, 3, None, None, None],
"no_nulls_float": [1.1, 2.2, 3.3, 4.4, 5.5, 6.6],
"partial_nulls_float": [1.0, 2.2, 3.3, np.nan, np.nan, np.nan],
"partial_nulls_obj": [1.0, 2.2, 3.3, np.nan, np.nan, np.nan],
"no_nulls_obj": ["1.1", "2", "3", "vier", "fuenfeinhalb", "6.6"],
"partial_nulls_obj_mixed": [1.0, 2.2, None, np.nan, np.nan, 6.6],
"nulls_reverse_rg": [3.3, np.nan, 1.0, 2.0, np.nan, -1.1],
}
)
key = serialiser.store(store, "prefix", df)
reader = pa.BufferReader(store.get(key))
parquet_file = ParquetFile(reader)
col_idx = parquet_file.reader.column_name_idx(column)
assert parquet_file.num_row_groups == 3
for idx in range(0, 3):
rg = parquet_file.metadata.row_group(idx)
assert rg.column(col_idx).statistics.null_count == expected_null_count[idx]
@pytest.mark.parametrize(
"df,value",
[
(pd.DataFrame({"nan": pd.Series([np.nan, -1.0, 1.0], dtype=float)}), 0.0),
(pd.DataFrame({"inf": pd.Series([np.inf, -1.0, 1.0], dtype=float)}), 0.0),
(pd.DataFrame({"ninf": pd.Series([-np.inf, -1.0, 1.0], dtype=float)}), 0.0),
(
pd.DataFrame(
{"inf2": pd.Series([-np.inf, np.inf, -1.0, 1.0], dtype=float)}
),
0.0,
),
(
pd.DataFrame(
{"inf2": pd.Series([-np.inf, np.inf, -1.0, 1.0], dtype=float)}
),
0.0,
),
(
pd.DataFrame(
{"inf2": pd.Series([-np.inf, np.inf, -1.0, 1.0], dtype=float)}
),
np.inf,
),
(
pd.DataFrame(
{"inf2": pd.Series([-np.inf, np.inf, -1.0, 1.0], dtype=float)}
),
-np.inf,
),
],
)
@predicate_serialisers
@pytest.mark.parametrize("predicate_pushdown_to_io", [True, False])
def test_predicate_pushdown_weird_floats_col(
store, df, value, predicate_pushdown_to_io, serialiser
):
key = serialiser.store(store, "prefix", df)
col = df.columns[0]
expected = df.loc[df[col] >= value].copy()
predicates = [[(col, ">=", value)]]
result = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
)
assert_frame_almost_equal(result, expected)
``` |
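The serializer tests above all express filters through `predicates`: a list of OR-ed conjunctions, each conjunction a list of `(column, operator, value)` 3-tuples. The pandas-only sketch below illustrates that semantics; it is not kartothek's actual evaluator and skips the null handling and type checks the real code performs.
```python
# Pandas-only illustration of the predicate layout used in the tests above:
# [[clause, clause, ...], ...] where the outer list is OR-ed and each inner
# list of (column, op, value) clauses is AND-ed. Not kartothek's evaluator.
import operator
import pandas as pd

OPS = {
    "==": operator.eq, "!=": operator.ne,
    "<": operator.lt, "<=": operator.le,
    ">": operator.gt, ">=": operator.ge,
    "in": lambda column, values: column.isin(values),
}


def evaluate_predicates(df, predicates):
    result = pd.Series(False, index=df.index)
    for conjunction in predicates:
        mask = pd.Series(True, index=df.index)
        for column, op, value in conjunction:
            mask &= OPS[op](df[column], value)
        result |= mask
    return df[result]


df = pd.DataFrame({"x": [0, 1, 2, 3], "p": [0, 0, 1, 1]})
# (x > 0 AND x < 3) OR (p == 1) -> rows 1, 2 and 3
print(evaluate_predicates(df, [[("x", ">", 0), ("x", "<", 3)], [("p", "==", 1)]]))
```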
{
"source": "jorisvandenbossche/pydov",
"score": 3
} |
#### File: tests/data/update_test_data.py
```python
import os
import sys
from owslib.etree import etree
from owslib.util import openURL
from pydov.types.boring import Boring
from pydov.types.grondwaterfilter import GrondwaterFilter
from pydov.types.grondwatermonster import GrondwaterMonster
from pydov.types.grondmonster import Grondmonster
from pydov.types.interpretaties import (
GeotechnischeCodering,
GecodeerdeLithologie,
LithologischeBeschrijvingen,
HydrogeologischeStratigrafie,
FormeleStratigrafie,
InformeleStratigrafie,
QuartairStratigrafie,
InformeleHydrogeologischeStratigrafie,
)
from pydov.types.sondering import Sondering
from pydov.util.dovutil import build_dov_url
def get_first_featuremember(wfs_response):
tree = etree.fromstring(wfs_response.encode('utf-8'))
feature_members = tree.find('.//{http://www.opengis.net/gml}'
'featureMembers')
if feature_members is not None:
for ft in feature_members:
return etree.tostring(ft).decode('utf-8')
def update_file(filepath, url, process_fn=None):
sys.stdout.write('Updating {} ...'.format(filepath))
filepath = os.path.join(os.path.dirname(__file__), filepath)
try:
data = openURL(url).read()
if type(data) is bytes:
data = data.decode('utf-8')
except Exception as e:
sys.stdout.write(' FAILED:\n {}.\n'.format(e))
return
else:
with open(filepath, 'wb') as f:
if process_fn:
data = process_fn(data)
f.write(data.encode('utf-8'))
sys.stdout.write(' OK.\n')
if __name__ == '__main__':
# types/boring
update_file('types/boring/boring.xml',
build_dov_url('data/boring/2004-103984.xml'))
update_file('types/boring/wfsgetfeature.xml',
build_dov_url('geoserver/ows?service=WFS'
'&version=1.1.0&request=GetFeature&typeName=dov-pub:Boringen'
'&maxFeatures=1&CQL_Filter=fiche=%27' + build_dov_url(
'data/boring/2004-103984%27')))
update_file('types/boring/feature.xml',
build_dov_url('geoserver/ows?service=WFS'
'&version=1.1.0&request=GetFeature&typeName=dov-pub:Boringen'
'&maxFeatures=1&CQL_Filter=fiche=%27' + build_dov_url(
'data/boring/2004-103984%27')),
get_first_featuremember)
update_file('types/boring/fc_featurecatalogue.xml',
build_dov_url('geonetwork/srv/dut/csw'
'?Service=CSW&Request=GetRecordById&Version=2.0.2'
'&outputSchema=http://www.isotc211.org/2005/gfc'
'&elementSetName=full&id=c0cbd397-520f-4ee1-aca7'
'-d70e271eeed6'))
update_file('types/boring/md_metadata.xml',
build_dov_url('geonetwork/srv/dut/csw'
'?Service=CSW&Request=GetRecordById&Version=2.0.2'
'&outputSchema=http://www.isotc211.org/2005/gmd'
'&elementSetName=full&id=4e20bf9c-3a5c-42be-b5b6'
'-bef6214d1fa7'))
update_file('types/boring/wfsdescribefeaturetype.xml',
build_dov_url('geoserver/dov-pub/Boringen'
'/ows?service=wfs&version=1.1.0&request=DescribeFeatureType'))
for xsd_schema in Boring.get_xsd_schemas():
update_file(
'types/boring/xsd_{}.xml'.format(xsd_schema.split('/')[-1]),
xsd_schema)
# types/sondering
update_file('types/sondering/sondering.xml',
build_dov_url('data/sondering/2002-018435.xml'))
update_file('types/sondering/wfsgetfeature.xml',
build_dov_url('geoserver/ows?service=WFS'
'&version=1.1.0&request=GetFeature&typeName=dov-pub'
':Sonderingen&maxFeatures=1&CQL_Filter=fiche=%27' +
build_dov_url('data/sondering/2002-018435%27')))
update_file('types/sondering/feature.xml',
build_dov_url('geoserver/ows?service=WFS'
'&version=1.1.0&request=GetFeature&typeName=dov-pub'
':Sonderingen&maxFeatures=1&CQL_Filter=fiche=%27' +
build_dov_url('data/sondering/2002-018435%27')),
get_first_featuremember)
update_file('types/sondering/fc_featurecatalogue.xml',
build_dov_url('geonetwork/srv/dut/csw'
'?Service=CSW&Request=GetRecordById&Version=2.0.2'
'&outputSchema=http://www.isotc211.org/2005/gfc'
'&elementSetName=full&id=bd539ba5-5f4d-4c43-9662'
'-51c16caea351'))
update_file('types/sondering/md_metadata.xml',
build_dov_url('geonetwork/srv/dut/csw'
'?Service=CSW&Request=GetRecordById&Version=2.0.2'
'&outputSchema=http://www.isotc211.org/2005/gmd'
'&elementSetName=full&id=b397faec-1b64-4854-8000'
'-2375edb3b1a8'))
update_file('types/sondering/wfsdescribefeaturetype.xml',
build_dov_url('geoserver/dov-pub/Sonderingen'
'/ows?service=wfs&version=1.1.0&request=DescribeFeatureType'))
for xsd_schema in Sondering.get_xsd_schemas():
update_file(
'types/sondering/xsd_{}.xml'.format(xsd_schema.split('/')[-1]),
xsd_schema)
# types/interpretaties/informele_stratigrafie
update_file('types/interpretaties/informele_stratigrafie'
'/informele_stratigrafie.xml',
build_dov_url('data/interpretatie/1962-101692.xml'))
update_file('types/interpretaties/informele_stratigrafie'
'/wfsgetfeature.xml',
build_dov_url('geoserver/ows?service=WFS'
'&version=1.1.0&request=GetFeature&typeName=interpretaties'
':informele_stratigrafie&maxFeatures=1&CQL_Filter'
'=Interpretatiefiche=%27') + build_dov_url('data'
'/interpretatie/1962-101692%27'))
update_file('types/interpretaties/informele_stratigrafie/feature.xml',
build_dov_url('geoserver/ows?service=WFS'
'&version=1.1.0&request=GetFeature&typeName=interpretaties'
':informele_stratigrafie&maxFeatures=1&CQL_Filter'
'=Interpretatiefiche=%27') + build_dov_url('data'
'/interpretatie/1962-101692%27'),
get_first_featuremember)
update_file(
'types/interpretaties/informele_stratigrafie/fc_featurecatalogue.xml',
build_dov_url('geonetwork/srv/dut/csw'
'?Service=CSW&Request=GetRecordById&Version=2.0.2'
'&outputSchema=http://www.isotc211.org/2005/gfc'
'&elementSetName=full&id=b6c651f9-5972-4252-ae10-ad69ad08e78d'))
update_file('types/interpretaties/informele_stratigrafie/md_metadata.xml',
build_dov_url('geonetwork/srv/dut/csw'
'?Service=CSW&Request=GetRecordById&Version=2.0.2'
'&outputSchema=http://www.isotc211.org/2005/gmd'
'&elementSetName=full&id=bd171ea4-2509-478d-a21c'
'-c2728d3a9051'))
update_file(
'types/interpretaties/informele_stratigrafie/wfsdescribefeaturetype'
'.xml',
build_dov_url('geoserver/interpretaties'
'/informele_stratigrafie/ows?service=wfs&version=1.1.0&request'
'=DescribeFeatureType'))
for xsd_schema in InformeleStratigrafie.get_xsd_schemas():
update_file(
'types/interpretaties/informele_stratigrafie/xsd_%s.xml' %
xsd_schema.split('/')[-1], xsd_schema)
# types/interpretaties/formele_stratigrafie
update_file('types/interpretaties/formele_stratigrafie'
'/formele_stratigrafie.xml',
build_dov_url('data/interpretatie/2011-249333.xml'))
update_file('types/interpretaties/formele_stratigrafie'
'/wfsgetfeature.xml',
build_dov_url('geoserver/ows?service=WFS'
'&version=1.1.0&request=GetFeature&typeName=interpretaties'
':formele_stratigrafie&maxFeatures=1&CQL_Filter'
'=Interpretatiefiche=%27') + build_dov_url('data'
'/interpretatie/2011-249333%27'))
update_file('types/interpretaties/formele_stratigrafie/feature.xml',
build_dov_url('geoserver/ows?service=WFS'
'&version=1.1.0&request=GetFeature&typeName=interpretaties'
':formele_stratigrafie&maxFeatures=1&CQL_Filter'
'=Interpretatiefiche=%27') + build_dov_url('data'
'/interpretatie/2011-249333%27'),
get_first_featuremember)
update_file(
'types/interpretaties/formele_stratigrafie/fc_featurecatalogue.xml',
build_dov_url('geonetwork/srv/dut/csw'
'?Service=CSW&Request=GetRecordById&Version=2.0.2'
'&outputSchema=http://www.isotc211.org/2005/gfc'
'&elementSetName=full&id=68405b5d-51e6-44d0-b634-b580bc2f9eb6'))
update_file('types/interpretaties/formele_stratigrafie/md_metadata.xml',
build_dov_url('geonetwork/srv/dut/csw'
'?Service=CSW&Request=GetRecordById&Version=2.0.2'
'&outputSchema=http://www.isotc211.org/2005/gmd'
'&elementSetName=full&id=212af8cd-bffd-423c-9d2b'
'-69c544ab3b04'))
update_file(
'types/interpretaties/formele_stratigrafie/wfsdescribefeaturetype'
'.xml',
build_dov_url('geoserver/interpretaties'
'/formele_stratigrafie/ows?service=wfs&version=1.1.0&request'
'=DescribeFeatureType'))
for xsd_schema in FormeleStratigrafie.get_xsd_schemas():
update_file(
'types/interpretaties/formele_stratigrafie/xsd_%s.xml' %
xsd_schema.split('/')[-1], xsd_schema)
# types/interpretaties/hydrogeologische_stratigrafie
update_file('types/interpretaties/hydrogeologische_stratigrafie'
'/hydrogeologische_stratigrafie.xml',
build_dov_url('data/interpretatie/2001-186543.xml'))
update_file('types/interpretaties/hydrogeologische_stratigrafie'
'/wfsgetfeature.xml',
build_dov_url('geoserver/ows?service=WFS'
'&version=1.1.0&request=GetFeature&typeName=interpretaties'
':hydrogeologische_stratigrafie&maxFeatures=1&CQL_Filter'
'=Interpretatiefiche=%27') + build_dov_url('data'
'/interpretatie/2001-186543%27'))
update_file('types/interpretaties/hydrogeologische_stratigrafie'
'/feature.xml',
build_dov_url('geoserver/ows?service=WFS'
'&version=1.1.0&request=GetFeature&typeName=interpretaties'
':hydrogeologische_stratigrafie&maxFeatures=1&CQL_Filter'
'=Interpretatiefiche=%27') + build_dov_url('data/'
'interpretatie/2001-186543%27'),
get_first_featuremember)
update_file(
'types/interpretaties/hydrogeologische_stratigrafie/'
'fc_featurecatalogue.xml',
build_dov_url('geonetwork/srv/dut/csw'
'?Service=CSW&Request=GetRecordById&Version=2.0.2'
'&outputSchema=http://www.isotc211.org/2005/gfc'
'&elementSetName=full&id=b89e72de-35a9-4bca-8d0b-712d1e881ea6'))
update_file('types/interpretaties/hydrogeologische_stratigrafie/'
'md_metadata.xml',
build_dov_url('geonetwork/srv/dut/csw'
'?Service=CSW&Request=GetRecordById&Version=2.0.2'
'&outputSchema=http://www.isotc211.org/2005/gmd'
'&elementSetName=full&id=25c5d9fa-c2ba-4184-b796'
'-fde790e73d39'))
update_file(
'types/interpretaties/hydrogeologische_stratigrafie/'
'wfsdescribefeaturetype.xml',
build_dov_url('geoserver/interpretaties'
'/hydrogeologische_stratigrafie/ows?service=wfs&version=1.1.0&request'
'=DescribeFeatureType'))
for xsd_schema in HydrogeologischeStratigrafie.get_xsd_schemas():
update_file(
'types/interpretaties/hydrogeologische_stratigrafie/xsd_%s.xml' %
xsd_schema.split('/')[-1], xsd_schema)
# types/interpretaties/lithologische_beschrijvingen
update_file('types/interpretaties/lithologische_beschrijvingen'
'/lithologische_beschrijvingen.xml',
build_dov_url('data/interpretatie/1958-003925.xml'))
update_file('types/interpretaties/lithologische_beschrijvingen'
'/wfsgetfeature.xml',
build_dov_url('geoserver/ows?service=WFS'
'&version=1.1.0&request=GetFeature&typeName=interpretaties'
':lithologische_beschrijvingen&maxFeatures=1&CQL_Filter'
'=Interpretatiefiche=%27') + build_dov_url('data'
'/interpretatie/1958-003925%27'))
update_file('types/interpretaties/lithologische_beschrijvingen/feature.xml',
build_dov_url('geoserver/ows?service=WFS'
'&version=1.1.0&request=GetFeature&typeName=interpretaties'
':lithologische_beschrijvingen&maxFeatures=1&CQL_Filter'
'=Interpretatiefiche=%27') + build_dov_url('data'
'/interpretatie/1958-003925%27'),
get_first_featuremember)
update_file(
'types/interpretaties/lithologische_beschrijvingen/fc_featurecatalogue.xml',
build_dov_url('geonetwork/srv/dut/csw'
'?Service=CSW&Request=GetRecordById&Version=2.0.2'
'&outputSchema=http://www.isotc211.org/2005/gfc'
'&elementSetName=full&id=2450d592-29bc-4970-a89f-a7b14bd38dc2'))
update_file('types/interpretaties/lithologische_beschrijvingen/md_metadata.xml',
build_dov_url('geonetwork/srv/dut/csw'
'?Service=CSW&Request=GetRecordById&Version=2.0.2'
'&outputSchema=http://www.isotc211.org/2005/gmd'
'&elementSetName=full&id=45b5610e-9a66-42bd-b920'
'-af099e399f3b'))
update_file(
'types/interpretaties/lithologische_beschrijvingen/wfsdescribefeaturetype'
'.xml',
build_dov_url('geoserver/interpretaties'
'/lithologische_beschrijvingen/ows?service=wfs&version=1.1.0&request'
'=DescribeFeatureType'))
for xsd_schema in LithologischeBeschrijvingen.get_xsd_schemas():
update_file(
'types/interpretaties/lithologische_beschrijvingen/xsd_%s.xml' %
xsd_schema.split('/')[-1], xsd_schema)
# types/interpretaties/gecodeerde_lithologie
update_file('types/interpretaties/gecodeerde_lithologie'
'/gecodeerde_lithologie.xml',
build_dov_url('data/interpretatie/2001-046845.xml'))
update_file('types/interpretaties/gecodeerde_lithologie'
'/wfsgetfeature.xml',
build_dov_url('geoserver/ows?service=WFS'
'&version=1.1.0&request=GetFeature&typeName=interpretaties'
':gecodeerde_lithologie&maxFeatures=1&CQL_Filter'
'=Interpretatiefiche=%27') + build_dov_url('data'
'/interpretatie/2001-046845%27'))
update_file('types/interpretaties/gecodeerde_lithologie/feature.xml',
build_dov_url('geoserver/ows?service=WFS'
'&version=1.1.0&request=GetFeature&typeName=interpretaties'
':gecodeerde_lithologie&maxFeatures=1&CQL_Filter'
'=Interpretatiefiche=%27') + build_dov_url('data'
'/interpretatie/2001-046845%27'),
get_first_featuremember)
update_file(
'types/interpretaties/gecodeerde_lithologie/fc_featurecatalogue.xml',
build_dov_url('geonetwork/srv/dut/csw'
'?Service=CSW&Request=GetRecordById&Version=2.0.2'
'&outputSchema=http://www.isotc211.org/2005/gfc'
'&elementSetName=full&id=0032241d-8920-415e-b1d8-fa0a48154904'))
update_file('types/interpretaties/gecodeerde_lithologie/md_metadata.xml',
build_dov_url('geonetwork/srv/dut/csw'
'?Service=CSW&Request=GetRecordById&Version=2.0.2'
'&outputSchema=http://www.isotc211.org/2005/gmd'
'&elementSetName=full&id=35d630e4-9145-46f9-b7dc'
'-da290a0adc55'))
update_file(
'types/interpretaties/gecodeerde_lithologie/wfsdescribefeaturetype'
'.xml',
build_dov_url('geoserver/interpretaties'
'/gecodeerde_lithologie/ows?service=wfs&version=1.1.0&request'
'=DescribeFeatureType'))
for xsd_schema in GecodeerdeLithologie.get_xsd_schemas():
update_file(
'types/interpretaties/gecodeerde_lithologie/xsd_%s.xml' %
xsd_schema.split('/')[-1], xsd_schema)
# types/interpretaties/geotechnische_codering
update_file('types/interpretaties/geotechnische_codering'
'/geotechnische_codering.xml',
build_dov_url('data/interpretatie/2016-298511.xml'))
update_file('types/interpretaties/geotechnische_codering'
'/wfsgetfeature.xml',
build_dov_url('geoserver/ows?service=WFS'
'&version=1.1.0&request=GetFeature&typeName=interpretaties'
':geotechnische_coderingen&maxFeatures=1&CQL_Filter'
'=Interpretatiefiche=%27') + build_dov_url('data'
'/interpretatie/2016-298511%27'))
update_file('types/interpretaties/geotechnische_codering/feature.xml',
build_dov_url('geoserver/ows?service=WFS'
'&version=1.1.0&request=GetFeature&typeName=interpretaties'
':geotechnische_coderingen&maxFeatures=1&CQL_Filter'
'=Interpretatiefiche=%27') + build_dov_url('data'
'/interpretatie/2016-298511%27'),
get_first_featuremember)
update_file(
'types/interpretaties/geotechnische_codering/fc_featurecatalogue.xml',
build_dov_url('geonetwork/srv/dut/csw'
'?Service=CSW&Request=GetRecordById&Version=2.0.2'
'&outputSchema=http://www.isotc211.org/2005/gfc'
'&elementSetName=full&id=85404aa6-2d88-46f6-ae5a-575aece71efd'))
update_file('types/interpretaties/geotechnische_codering/md_metadata.xml',
build_dov_url('geonetwork/srv/dut/csw'
'?Service=CSW&Request=GetRecordById&Version=2.0.2'
'&outputSchema=http://www.isotc211.org/2005/gmd'
'&elementSetName=full&id=6a3dc5d4-0744-4d9c-85ce'
'-da50913851cc'))
update_file(
'types/interpretaties/geotechnische_codering/wfsdescribefeaturetype'
'.xml',
build_dov_url('geoserver/interpretaties'
'/geotechnische_coderingen/ows?service=wfs&version=1.1.0&request'
'=DescribeFeatureType'))
for xsd_schema in GeotechnischeCodering.get_xsd_schemas():
update_file(
'types/interpretaties/geotechnische_codering/xsd_%s.xml' %
xsd_schema.split('/')[-1], xsd_schema)
# types/interpretaties/informele_hydrogeologische_stratigrafie
update_file('types/interpretaties/informele_hydrogeologische_stratigrafie'
'/informele_hydrogeologische_stratigrafie.xml',
build_dov_url('data/interpretatie/2003-297774.xml'))
update_file('types/interpretaties/informele_hydrogeologische_stratigrafie'
'/wfsgetfeature.xml',
build_dov_url('geoserver/ows?service=WFS'
'&version=1.1.0&request=GetFeature&typeName=interpretaties'
':informele_hydrogeologische_stratigrafie&maxFeatures=1'
'&CQL_Filter=Interpretatiefiche=%27') + build_dov_url('data'
'/interpretatie/2003-297774%27'))
update_file('types/interpretaties/informele_hydrogeologische_stratigrafie'
'/feature.xml',
build_dov_url('geoserver/ows?service=WFS'
'&version=1.1.0&request=GetFeature&typeName=interpretaties'
':informele_hydrogeologische_stratigrafie&maxFeatures=1'
'&CQL_Filter=Interpretatiefiche=%27') + build_dov_url('data'
'/interpretatie/2003-297774%27'),
get_first_featuremember)
update_file(
'types/interpretaties/informele_hydrogeologische_stratigrafie'
'/fc_featurecatalogue.xml',
build_dov_url('geonetwork/srv/dut/csw'
'?Service=CSW&Request=GetRecordById&Version=2.0.2'
'&outputSchema=http://www.isotc211.org/2005/gfc'
'&elementSetName=full&id=69f71840-bd29-4b59-9b02-4e36aafaa041'))
update_file('types/interpretaties/informele_hydrogeologische_stratigrafie'
'/md_metadata.xml',
build_dov_url('geonetwork/srv/dut/csw'
'?Service=CSW&Request=GetRecordById&Version=2.0.2'
'&outputSchema=http://www.isotc211.org/2005/gmd'
'&elementSetName=full'
'&id=ca1d704a-cdee-4968-aa65-9c353863e4b1'))
update_file(
'types/interpretaties/informele_hydrogeologische_stratigrafie/'
'wfsdescribefeaturetype.xml',
build_dov_url('geoserver/interpretaties'
'/informele_hydrogeologische_stratigrafie/'
'ows?service=wfs&version=1.1.0&request=DescribeFeatureType'))
for xsd_schema in InformeleHydrogeologischeStratigrafie.get_xsd_schemas():
update_file(
'types/interpretaties/informele_hydrogeologische_stratigrafie/'
'xsd_%s.xml' % xsd_schema.split('/')[-1], xsd_schema)
# types/grondwaterfilter
update_file('types/grondwaterfilter/grondwaterfilter.xml',
build_dov_url('data/filter/2003-004471.xml'))
update_file('types/grondwaterfilter/wfsgetfeature.xml',
build_dov_url('geoserver/ows?service=WFS'
'&version=1.1.0&request=GetFeature&typeName='
'gw_meetnetten:meetnetten&maxFeatures=1&'
'CQL_Filter=filterfiche=%27' + build_dov_url(
'data/filter/2003-004471%27')))
update_file('types/grondwaterfilter/feature.xml',
build_dov_url('geoserver/ows?service=WFS'
'&version=1.1.0&request=GetFeature&typeName='
'gw_meetnetten:meetnetten&maxFeatures=1&'
'CQL_Filter=filterfiche=%27' + build_dov_url(
'data/filter/2003-004471%27')),
get_first_featuremember)
update_file('types/grondwaterfilter/fc_featurecatalogue.xml',
build_dov_url('geonetwork/srv/dut/csw'
'?Service=CSW&Request=GetRecordById&Version=2.0.2'
'&outputSchema=http://www.isotc211.org/2005/gfc'
'&elementSetName=full&id=b142965f-b2aa-429e-86ff'
'-a7cb0e065d48'))
update_file('types/grondwaterfilter/md_metadata.xml',
build_dov_url('geonetwork/srv/dut/csw'
'?Service=CSW&Request=GetRecordById&Version=2.0.2'
'&outputSchema=http://www.isotc211.org/2005/gmd'
'&elementSetName=full&id=6c39d716-aecc-4fbc-bac8'
'-4f05a49a78d5'))
update_file('types/grondwaterfilter/wfsdescribefeaturetype.xml',
build_dov_url('geoserver/gw_meetnetten/'
'meetnetten/ows?service=wfs&version=1.1.0&'
'request=DescribeFeatureType'))
for xsd_schema in GrondwaterFilter.get_xsd_schemas():
update_file(
'types/grondwaterfilter/xsd_%s.xml' % xsd_schema.split('/')[-1],
xsd_schema)
# types/grondwatermonster
update_file('types/grondwatermonster/grondwatermonster.xml',
build_dov_url('data/watermonster/2006-115684.xml'))
update_file('types/grondwatermonster/wfsgetfeature.xml',
build_dov_url('geoserver/ows?service=WFS'
'&version=1.1.0&request=GetFeature&typeName='
'gw_meetnetten:grondwatermonsters&maxFeatures=1&'
'CQL_Filter=grondwatermonsterfiche=%27' + build_dov_url(
'data/watermonster/2006-115684') + '%27'))
update_file('types/grondwatermonster/feature.xml',
build_dov_url('geoserver/ows?service=WFS'
'&version=1.1.0&request=GetFeature&typeName='
'gw_meetnetten:grondwatermonsters&maxFeatures=1&'
'CQL_Filter=grondwatermonsterfiche=%27' + build_dov_url(
'data/watermonster/2006-115684') + '%27'),
get_first_featuremember)
update_file('types/grondwatermonster/fc_featurecatalogue.xml',
build_dov_url('geonetwork/srv/dut/csw'
'?Service=CSW&Request=GetRecordById&Version=2.0.2'
'&outputSchema=http://www.isotc211.org/2005/gfc'
'&elementSetName=full&'
'id=639c9612-4bbb-4826-86fd-fec9afd49bf7'))
update_file('types/grondwatermonster/md_metadata.xml',
build_dov_url('geonetwork/srv/dut/csw'
'?Service=CSW&Request=GetRecordById&Version=2.0.2'
'&outputSchema=http://www.isotc211.org/2005/gmd'
'&elementSetName=full&'
'id=0b378716-39fb-4151-96c5-2021672f4762'))
update_file('types/grondwatermonster/wfsdescribefeaturetype.xml',
build_dov_url('geoserver/gw_meetnetten/'
'grondwatermonsters/ows?service=wfs&version=1.1.0&'
'request=DescribeFeatureType'))
for xsd_schema in GrondwaterMonster.get_xsd_schemas():
update_file(
'types/grondwatermonster/xsd_%s.xml' % xsd_schema.split('/')[-1],
xsd_schema)
# util/owsutil
update_file('util/owsutil/fc_featurecatalogue_notfound.xml',
build_dov_url('geonetwork/srv/dut/csw'
'?Service=CSW&Request=GetRecordById&Version=2.0.2'
'&outputSchema=http://www.isotc211.org/2005/gfc'
'&elementSetName=full&id=badfc000-0000-0000-0000'
'-badfc00badfc'))
update_file('util/owsutil/wfscapabilities.xml',
build_dov_url('geoserver/wfs?request'
'=getcapabilities&service=wfs&version=1.1.0'))
# types/interpretaties/quartaire_stratigrafie
update_file('types/interpretaties/quartaire_stratigrafie'
'/quartaire_stratigrafie.xml',
build_dov_url('data/interpretatie/1999-057087.xml'))
update_file('types/interpretaties/quartaire_stratigrafie'
'/wfsgetfeature.xml',
build_dov_url('geoserver/ows?service=WFS'
'&version=1.1.0&request=GetFeature&typeName=interpretaties'
':quartaire_stratigrafie&maxFeatures=1&CQL_Filter'
'=Interpretatiefiche=%27') + build_dov_url('data'
'/interpretatie/1999-057087%27'))
update_file('types/interpretaties/quartaire_stratigrafie/feature.xml',
build_dov_url('geoserver/ows?service=WFS'
'&version=1.1.0&request=GetFeature&typeName=interpretaties'
':quartaire_stratigrafie&maxFeatures=1&CQL_Filter'
'=Interpretatiefiche=%27') + build_dov_url('data'
'/interpretatie/1999-057087%27'),
get_first_featuremember)
update_file(
'types/interpretaties/quartaire_stratigrafie/fc_featurecatalogue.xml',
build_dov_url('geonetwork/srv/dut/csw'
'?Service=CSW&Request=GetRecordById&Version=2.0.2'
'&outputSchema=http://www.isotc211.org/2005/gfc'
'&elementSetName=full&id=d40ef884-3278-45db-ad69-2c2a8c3981c3'))
update_file('types/interpretaties/quartaire_stratigrafie/md_metadata.xml',
build_dov_url('geonetwork/srv/dut/csw'
'?Service=CSW&Request=GetRecordById&Version=2.0.2'
'&outputSchema=http://www.isotc211.org/2005/gmd'
'&elementSetName=full&id=8b204ed6-e44c-4567-bbe8'
'-bd427eba082c'))
update_file(
'types/interpretaties/quartaire_stratigrafie/wfsdescribefeaturetype'
'.xml',
build_dov_url('geoserver/interpretaties'
'/quartaire_stratigrafie/ows?service=wfs&version=1.1.0&request'
'=DescribeFeatureType'))
for xsd_schema in QuartairStratigrafie.get_xsd_schemas():
update_file(
'types/interpretaties/quartaire_stratigrafie/xsd_%s.xml' %
xsd_schema.split('/')[-1], xsd_schema)
# types/grondmonster
update_file('types/grondmonster/grondmonster.xml',
build_dov_url('data/grondmonster/2017-168758.xml'))
update_file('types/grondmonster/wfsgetfeature.xml',
build_dov_url('geoserver/ows?service=WFS'
'&version=1.1.0&request=GetFeature&typeName='
'boringen:grondmonsters&maxFeatures=1&CQL_Filter'
'=grondmonsterfiche=%27' + build_dov_url('data'
'/grondmonster/2017-168758') + '%27'))
update_file('types/grondmonster/feature.xml',
build_dov_url('geoserver/ows?service=WFS'
'&version=1.1.0&request=GetFeature&typeName='
'boringen:grondmonsters&maxFeatures=1&CQL_Filter'
'=grondmonsterfiche=%27' + build_dov_url('data'
'/grondmonster/2017-168758') + '%27'),
get_first_featuremember)
update_file(
'types/grondmonster/fc_featurecatalogue.xml',
build_dov_url('geonetwork/srv/dut/csw'
'?Service=CSW&Request=GetRecordById&Version=2.0.2'
'&outputSchema=http://www.isotc211.org/2005/gfc'
'&elementSetName=full&id=b9338fb5-fc9c-4229-858b-06a5fa3ee49d'))
update_file('types/grondmonster/md_metadata.xml',
build_dov_url('geonetwork/srv/dut/csw'
'?Service=CSW&Request=GetRecordById&Version=2.0.2'
'&outputSchema=http://www.isotc211.org/2005/gmd'
'&elementSetName=full&'
'id=6edeab46-2cfc-4aa2-ae03-307d772f34ae'))
update_file(
'types/grondmonster/wfsdescribefeaturetype'
'.xml',
build_dov_url('geoserver/boringen'
'/grondmonsters/ows?service=wfs&version=1.1.0&request'
'=DescribeFeatureType'))
for xsd_schema in Grondmonster.get_xsd_schemas():
update_file(
'types/grondmonster/xsd_%s.xml' %
xsd_schema.split('/')[-1], xsd_schema)
```
#### File: pydov/tests/test_search.py
```python
import glob
from io import open
import pytest
import owslib
import pydov
from owslib.etree import etree
from owslib.fes import (
SortBy,
SortProperty,
And,
)
from owslib.wfs import WebFeatureService
from pydov.search.boring import BoringSearch
from pydov.search.grondwaterfilter import GrondwaterFilterSearch
from numpy.compat import unicode
from pydov.search.grondwatermonster import GrondwaterMonsterSearch
from pydov.search.interpretaties import (
InformeleStratigrafieSearch,
FormeleStratigrafieSearch,
InformeleHydrogeologischeStratigrafieSearch,
GeotechnischeCoderingSearch,
QuartairStratigrafieSearch,
)
from pydov.search.interpretaties import HydrogeologischeStratigrafieSearch
from pydov.search.interpretaties import GecodeerdeLithologieSearch
from pydov.search.interpretaties import LithologischeBeschrijvingenSearch
from pydov.search.sondering import SonderingSearch
from pydov.search.grondmonster import GrondmonsterSearch
from pydov.util.dovutil import build_dov_url
from pydov.util.errors import (
InvalidSearchParameterError,
)
from pydov.util.location import (
WithinDistance,
Point,
)
from tests.abstract import service_ok
search_objects = [BoringSearch(),
SonderingSearch(),
GrondwaterFilterSearch(),
GrondwaterMonsterSearch(),
FormeleStratigrafieSearch(),
InformeleHydrogeologischeStratigrafieSearch(),
GeotechnischeCoderingSearch(),
QuartairStratigrafieSearch(),
InformeleStratigrafieSearch(),
HydrogeologischeStratigrafieSearch(),
GecodeerdeLithologieSearch(),
LithologischeBeschrijvingenSearch(),
GrondmonsterSearch()]
@pytest.fixture(scope='module')
def mp_wfs(monkeymodule):
"""Monkeypatch the call to the remote GetCapabilities request.
Parameters
----------
monkeymodule : pytest.fixture
PyTest monkeypatch fixture with module scope.
"""
def read(*args, **kwargs):
with open('tests/data/util/owsutil/wfscapabilities.xml', 'r',
encoding='utf-8') as f:
data = f.read()
if type(data) is not bytes:
data = data.encode('utf-8')
data = etree.fromstring(data)
return data
monkeymodule.setattr(
owslib.feature.common.WFSCapabilitiesReader, 'read', read)
@pytest.fixture(scope='module')
def wfs(mp_wfs):
"""PyTest fixture providing an instance of a WebFeatureService based on
a local copy of a GetCapabilities request.
Parameters
----------
mp_wfs : pytest.fixture
Monkeypatch the call to the remote GetCapabilities request.
Returns
-------
owslib.wfs.WebFeatureService
WebFeatureService based on the local GetCapabilities.
"""
return WebFeatureService(
url=build_dov_url('geoserver/wfs'), version="1.1.0")
@pytest.fixture()
def mp_remote_fc_notfound(monkeypatch):
"""Monkeypatch the call to get an inexistent remote featurecatalogue.
Parameters
----------
monkeypatch : pytest.fixture
PyTest monkeypatch fixture.
"""
def __get_remote_fc(*args, **kwargs):
with open('tests/data/util/owsutil/fc_featurecatalogue_notfound.xml',
'r') as f:
data = f.read()
if type(data) is not bytes:
data = data.encode('utf-8')
return data
monkeypatch.setattr(pydov.util.owsutil, '__get_remote_fc', __get_remote_fc)
@pytest.fixture(scope='module')
def mp_remote_md(wfs, monkeymodule, request):
"""Monkeypatch the call to get the remote metadata of the layer.
This monkeypatch requires a module variable ``location_md_metadata``
with the path to the md_metadata file on disk.
Parameters
----------
wfs : pytest.fixture returning owslib.wfs.WebFeatureService
WebFeatureService based on the local GetCapabilities.
monkeymodule : pytest.fixture
PyTest monkeypatch fixture with module scope.
    request : pytest.fixture
PyTest fixture providing request context.
"""
def __get_remote_md(*args, **kwargs):
file_path = getattr(request.module, "location_md_metadata")
with open(file_path, 'r') as f:
data = f.read()
if type(data) is not bytes:
data = data.encode('utf-8')
return data
monkeymodule.setattr(pydov.util.owsutil, '__get_remote_md',
__get_remote_md)
@pytest.fixture(scope='module')
def mp_remote_fc(monkeymodule, request):
"""Monkeypatch the call to get the remote feature catalogue.
This monkeypatch requires a module variable
``location_fc_featurecatalogue`` with the path to the fc_featurecatalogue
file on disk.
Parameters
----------
monkeymodule : pytest.fixture
PyTest monkeypatch fixture with module scope.
    request : pytest.fixture
PyTest fixture providing request context.
"""
def __get_remote_fc(*args, **kwargs):
file_path = getattr(request.module, "location_fc_featurecatalogue")
with open(file_path, 'r') as f:
data = f.read()
if type(data) is not bytes:
data = data.encode('utf-8')
return data
monkeymodule.setattr(pydov.util.owsutil, '__get_remote_fc',
__get_remote_fc)
@pytest.fixture(scope='module')
def mp_remote_describefeaturetype(monkeymodule, request):
"""Monkeypatch the call to a remote DescribeFeatureType.
This monkeypatch requires a module variable
``location_wfs_describefeaturetype`` with the path to the
wfs_describefeaturetype file on disk.
Parameters
----------
monkeymodule : pytest.fixture
PyTest monkeypatch fixture with module scope.
    request : pytest.fixture
PyTest fixture providing request context.
"""
def __get_remote_describefeaturetype(*args, **kwargs):
file_path = getattr(request.module, "location_wfs_describefeaturetype")
with open(file_path, 'r') as f:
data = f.read()
if type(data) is not bytes:
data = data.encode('utf-8')
return data
monkeymodule.setattr(pydov.util.owsutil,
'__get_remote_describefeaturetype',
__get_remote_describefeaturetype)
@pytest.fixture(scope='module')
def wfs_getfeature(request):
"""PyTest fixture providing a WFS GetFeature response.
This monkeypatch requires a module variable ``location_wfs_getfeature``
with the path to the wfs_getfeature file on disk.
Parameters
----------
    request : pytest.fixture
PyTest fixture providing request context.
Returns
-------
str
WFS response of a GetFeature call to the dov-pub:Boringen layer.
"""
file_path = getattr(request.module, "location_wfs_getfeature")
with open(file_path, 'r') as f:
data = f.read()
return data
@pytest.fixture(scope='module')
def wfs_feature(request):
"""PyTest fixture providing an XML of a WFS feature element.
This monkeypatch requires a module variable ``location_wfs_feature``
with the path to the wfs_feature file on disk.
Parameters
----------
    request : pytest.fixture
PyTest fixture providing request context.
Returns
-------
etree.Element
XML element representing a single record of the Boring WFS layer.
"""
file_path = getattr(request.module, "location_wfs_feature")
with open(file_path, 'r') as f:
return etree.fromstring(f.read())
@pytest.fixture(scope='module')
def mp_remote_wfs_feature(monkeymodule, request):
"""Monkeypatch the call to get WFS features.
This monkeypatch requires a module variable ``location_wfs_getfeature``
with the path to the wfs_getfeature file on disk.
Parameters
----------
monkeymodule : pytest.fixture
PyTest monkeypatch fixture with module scope.
    request : pytest.fixture
PyTest fixture providing request context.
"""
def __get_remote_wfs_feature(*args, **kwargs):
file_path = getattr(request.module, "location_wfs_getfeature")
with open(file_path, 'r') as f:
data = f.read()
if type(data) is not bytes:
data = data.encode('utf-8')
return data
monkeymodule.setattr(pydov.util.owsutil,
'wfs_get_feature',
__get_remote_wfs_feature)
@pytest.fixture(scope='module')
def mp_dov_xml(monkeymodule, request):
"""Monkeypatch the call to get the remote XML data.
This monkeypatch requires a module variable ``location_dov_xml``
with the path to the dov_xml file on disk.
Parameters
----------
monkeymodule : pytest.fixture
PyTest monkeypatch fixture with module scope.
    request : pytest.fixture
PyTest fixture providing request context.
"""
def _get_xml_data(*args, **kwargs):
file_path = getattr(request.module, "location_dov_xml")
with open(file_path, 'r', encoding="utf-8") as f:
data = f.read()
if type(data) is not bytes:
data = data.encode('utf-8')
return data
monkeymodule.setattr(pydov.types.abstract.AbstractDovType,
'_get_xml_data', _get_xml_data)
@pytest.fixture()
def mp_dov_xml_broken(monkeypatch):
"""Monkeypatch the call to break the fetching of remote XML data.
Parameters
----------
monkeypatch : pytest.fixture
PyTest monkeypatch fixture.
"""
def _get_xml_data(*args, **kwargs):
raise RuntimeError
monkeypatch.setattr(pydov.types.abstract.AbstractDovType,
'_get_xml_data', _get_xml_data)
@pytest.fixture()
def mp_remote_xsd(monkeymodule, request):
"""Monkeypatch the call to get the remote XSD schemas.
This monkeypatch requires a module variable ``location_xsd_base``
with a glob expression to the XSD file(s) on disk.
Parameters
----------
monkeymodule : pytest.fixture
PyTest monkeypatch fixture with module scope.
    request : pytest.fixture
PyTest fixture providing request context.
"""
def _get_remote_xsd(*args, **kwargs):
xsd_base_path = getattr(request.module, "location_xsd_base")
schemas = []
for xsd_file in glob.glob(xsd_base_path):
with open(xsd_file, 'r', encoding="utf-8") as f:
data = f.read()
if type(data) is not bytes:
data = data.encode('utf-8')
schemas.append(etree.fromstring(data))
return schemas
monkeymodule.setattr(pydov.search.abstract.AbstractSearch,
'_get_remote_xsd_schemas', _get_remote_xsd)
@pytest.mark.parametrize("objectsearch", search_objects)
def test_get_description(mp_wfs, objectsearch):
"""Test the get_description method.
Test whether the method returns a non-empty string.
Parameters
----------
mp_wfs : pytest.fixture
Monkeypatch the call to the remote GetCapabilities request.
objectsearch : pytest.fixture
An instance of a subclass of AbstractTestSearch to perform search
operations on the corresponding DOV type.
"""
description = objectsearch.get_description()
assert type(description) in (str, unicode)
assert len(description) > 0
@pytest.mark.online
@pytest.mark.skipif(not service_ok(), reason="DOV service is unreachable")
@pytest.mark.parametrize("objectsearch", search_objects)
def test_search_location(objectsearch):
"""Test the get_description method.
Test whether the method returns a non-empty string.
Parameters
----------
mp_wfs : pytest.fixture
Monkeypatch the call to the remote GetCapabilities request.
objectsearch : pytest.fixture
An instance of a subclass of AbstractTestSearch to perform search
operations on the corresponding DOV type.
"""
objectsearch.search(location=WithinDistance(Point(100000, 100000), 100))
@pytest.mark.online
@pytest.mark.skipif(not service_ok(), reason="DOV service is unreachable")
@pytest.mark.parametrize("objectsearch", search_objects)
def test_search_maxfeatures(objectsearch):
"""Test the search method with a max_features parameter.
Test whether no error is raised.
Parameters
----------
objectsearch : pytest.fixture
An instance of a subclass of AbstractTestSearch to perform search
operations on the corresponding DOV type.
"""
objectsearch.search(location=WithinDistance(Point(100000, 100000), 100),
max_features=10)
@pytest.mark.online
@pytest.mark.skipif(not service_ok(), reason="DOV service is unreachable")
@pytest.mark.parametrize("objectsearch", search_objects)
def test_search_maxfeatures_only(objectsearch):
"""Test the search method with only the max_features parameter.
Test whether no error is raised.
Parameters
----------
objectsearch : pytest.fixture
An instance of a subclass of AbstractTestSearch to perform search
operations on the corresponding DOV type.
"""
objectsearch.search(max_features=1)
@pytest.mark.parametrize("objectsearch", search_objects)
def test_search_nolocation_noquery(objectsearch):
"""Test the search method without providing a location or a query.
Test whether an InvalidSearchParameterError is raised.
Parameters
----------
objectsearch : pytest.fixture
An instance of a subclass of AbstractTestSearch to perform search
operations on the corresponding DOV type.
"""
with pytest.raises(InvalidSearchParameterError):
objectsearch.search(location=None, query=None)
@pytest.mark.parametrize("objectsearch", search_objects)
def test_search_both_location_query_wrongquerytype(objectsearch):
"""Test the search method providing both a location and a query,
using a query with an invalid type.
Test whether an InvalidSearchParameterError is raised.
Parameters
----------
objectsearch : pytest.fixture
An instance of a subclass of AbstractTestSearch to perform search
operations on the corresponding DOV type.
"""
with pytest.raises(InvalidSearchParameterError):
objectsearch.search(location=(1, 2, 3, 4),
query='computer says no')
@pytest.mark.parametrize("objectsearch", search_objects)
def test_search_query_wrongtype(objectsearch):
"""Test the search method with the query parameter using a wrong
query type.
Test whether an InvalidSearchParameterError is raised.
Parameters
----------
objectsearch : pytest.fixture
An instance of a subclass of AbstractTestSearch to perform search
operations on the corresponding DOV type.
"""
with pytest.raises(InvalidSearchParameterError):
objectsearch.search(query='computer says no')
```
#### File: pydov/tests/test_types_grondwatermonster.py
```python
from pydov.types.grondwatermonster import GrondwaterMonster
from pydov.util.dovutil import build_dov_url
from tests.abstract import AbstractTestTypes
from tests.test_search_grondwatermonster import (
wfs_getfeature,
wfs_feature,
mp_dov_xml,
location_wfs_getfeature,
location_wfs_feature,
location_dov_xml,
)
class TestGrondwaterMonster(AbstractTestTypes):
"""Class grouping tests for the
    pydov.types.grondwatermonster.GrondwaterMonster class."""
def get_type(self):
"""Get the class reference for this datatype.
Returns
-------
pydov.types.grondwatermonster.GrondwaterMonster
Class reference for the GrondwaterMonster class.
"""
return GrondwaterMonster
def get_namespace(self):
"""Get the WFS namespace associated with this datatype.
Returns
-------
str
WFS namespace for this type.
"""
return 'http://dov.vlaanderen.be/grondwater/gw_meetnetten'
def get_pkey_base(self):
"""Get the base URL for the permanent keys of this datatype.
Returns
-------
str
Base URL for the permanent keys of this datatype. For example
"https://www.dov.vlaanderen.be/data/boring/"
"""
return build_dov_url('data/watermonster/')
def get_field_names(self):
"""Get the field names for this type
Returns
-------
list
List of field names.
"""
return ['pkey_grondwatermonster', 'grondwatermonsternummer',
'pkey_grondwaterlocatie', 'gw_id', 'pkey_filter',
'filternummer', 'x', 'y', 'start_grondwaterlocatie_mtaw',
'gemeente', 'datum_monstername', 'parametergroep',
'parameter', 'detectie', 'waarde', 'eenheid', 'veld_labo']
def get_field_names_subtypes(self):
"""Get the field names of this type that originate from subtypes only.
Returns
-------
list<str>
List of field names from subtypes.
"""
return ['parametergroep', 'parameter', 'detectie',
'waarde', 'eenheid', 'veld_labo']
def get_field_names_nosubtypes(self):
"""Get the field names for this type, without including fields from
subtypes.
Returns
-------
list<str>
List of field names.
"""
return ['pkey_grondwatermonster', 'grondwatermonsternummer',
'pkey_grondwaterlocatie', 'gw_id', 'pkey_filter',
'filternummer', 'x', 'y', 'start_grondwaterlocatie_mtaw',
'gemeente', 'datum_monstername']
def get_valid_returnfields(self):
"""Get a list of valid return fields from the main type.
Returns
-------
tuple
A tuple containing only valid return fields.
"""
return ('y', 'gemeente')
def get_valid_returnfields_subtype(self):
"""Get a list of valid return fields, including fields from a subtype.
Returns
-------
tuple
A tuple containing valid return fields, including fields from a
subtype.
"""
return ('pkey_filter', 'pkey_grondwatermonster', 'eenheid')
def get_inexistent_field(self):
"""Get the name of a field that doesn't exist.
Returns
-------
str
The name of an inexistent field.
"""
return 'onbestaand'
```
#### File: pydov/tests/test_util_caching.py
```python
import datetime
import gzip
import os
import tempfile
from io import open
import time
import pytest
import pydov
from pydov.util.caching import (
PlainTextFileCache,
GzipTextFileCache,
)
from pydov.util.dovutil import build_dov_url
@pytest.fixture
def mp_remote_xml(monkeypatch):
"""Monkeypatch the call to get the remote Boring XML data.
Parameters
----------
monkeypatch : pytest.fixture
PyTest monkeypatch fixture.
"""
def _get_remote_data(*args, **kwargs):
with open('tests/data/types/boring/boring.xml', 'r') as f:
data = f.read()
if type(data) is not bytes:
data = data.encode('utf-8')
return data
monkeypatch.setattr(pydov.util.caching.AbstractFileCache,
'_get_remote', _get_remote_data)
@pytest.fixture
def plaintext_cache(request):
"""Fixture for a temporary cache.
This fixture should be parametrized, with a list of parameters in the
order described below.
    Parameters
----------
max_age : datetime.timedelta
The maximum age to use for the cache.
"""
orig_cache = pydov.cache
if len(request.param) == 0:
max_age = datetime.timedelta(seconds=1)
else:
max_age = request.param[0]
plaintext_cache = PlainTextFileCache(
cachedir=os.path.join(tempfile.gettempdir(), 'pydov_tests'),
max_age=max_age)
pydov.cache = plaintext_cache
yield plaintext_cache
plaintext_cache.remove()
pydov.cache = orig_cache
@pytest.fixture
def gziptext_cache(request):
"""Fixture for a temporary cache.
This fixture should be parametrized, with a list of parameters in the
order described below.
Paramaters
----------
max_age : datetime.timedelta
The maximum age to use for the cache.
"""
orig_cache = pydov.cache
if len(request.param) == 0:
max_age = datetime.timedelta(seconds=1)
else:
max_age = request.param[0]
gziptext_cache = GzipTextFileCache(
cachedir=os.path.join(tempfile.gettempdir(), 'pydov_tests'),
max_age=max_age)
pydov.cache = gziptext_cache
yield gziptext_cache
gziptext_cache.remove()
pydov.cache = orig_cache
@pytest.fixture
def nocache():
"""Fixture to temporarily disable caching."""
orig_cache = pydov.cache
pydov.cache = None
yield
pydov.cache = orig_cache
class TestPlainTextFileCacheCache(object):
"""Class grouping tests for the pydov.util.caching.PlainTextFileCache
class."""
@pytest.mark.parametrize('plaintext_cache', [[]],
indirect=['plaintext_cache'])
def test_clean(self, plaintext_cache, mp_remote_xml):
"""Test the clean method.
        Test whether an expired cached file is removed by the clean method,
        while the cache directory itself is kept.
Parameters
----------
plaintext_cache : pytest.fixture providing
pydov.util.caching.PlainTextFileCache
PlainTextFileCache using a temporary directory and a maximum age
of 1 second.
mp_remote_xml : pytest.fixture
Monkeypatch the call to the remote DOV service returning an XML
document.
"""
cached_file = os.path.join(
plaintext_cache.cachedir, 'boring', '2004-103984.xml')
plaintext_cache.get(
build_dov_url('data/boring/2004-103984.xml'))
assert os.path.exists(cached_file)
plaintext_cache.clean()
assert os.path.exists(cached_file)
assert os.path.exists(plaintext_cache.cachedir)
time.sleep(1.5)
plaintext_cache.clean()
assert not os.path.exists(cached_file)
assert os.path.exists(plaintext_cache.cachedir)
@pytest.mark.parametrize('plaintext_cache', [[]],
indirect=['plaintext_cache'])
def test_remove(self, plaintext_cache, mp_remote_xml):
"""Test the remove method.
Test whether the cache directory is nonexistent after the remove
method has been called.
Parameters
----------
plaintext_cache : pytest.fixture providing
pydov.util.caching.PlainTextFileCache
PlainTextFileCache using a temporary directory and a maximum age
of 1 second.
mp_remote_xml : pytest.fixture
Monkeypatch the call to the remote DOV service returning an XML
document.
"""
cached_file = os.path.join(
plaintext_cache.cachedir, 'boring', '2004-103984.xml')
plaintext_cache.get(
build_dov_url('data/boring/2004-103984.xml'))
assert os.path.exists(cached_file)
plaintext_cache.remove()
assert not os.path.exists(cached_file)
assert not os.path.exists(plaintext_cache.cachedir)
@pytest.mark.parametrize('plaintext_cache', [[]],
indirect=['plaintext_cache'])
def test_get_save(self, plaintext_cache, mp_remote_xml):
"""Test the get method.
Test whether the document is saved in the cache.
Parameters
----------
plaintext_cache : pytest.fixture providing
pydov.util.caching.PlainTextFileCache
PlainTextFileCache using a temporary directory and a maximum age
of 1 second.
mp_remote_xml : pytest.fixture
Monkeypatch the call to the remote DOV service returning an XML
document.
"""
cached_file = os.path.join(
plaintext_cache.cachedir, 'boring', '2004-103984.xml')
plaintext_cache.clean()
assert not os.path.exists(cached_file)
plaintext_cache.get(
build_dov_url('data/boring/2004-103984.xml'))
assert os.path.exists(cached_file)
@pytest.mark.parametrize('plaintext_cache', [[]],
indirect=['plaintext_cache'])
def test_get_reuse(self, plaintext_cache, mp_remote_xml):
"""Test the get method.
Test whether the document is saved in the cache and reused in a
second function call.
Parameters
----------
plaintext_cache : pytest.fixture providing
pydov.util.caching.PlainTextFileCache
PlainTextFileCache using a temporary directory and a maximum age
of 1 second.
mp_remote_xml : pytest.fixture
Monkeypatch the call to the remote DOV service returning an XML
document.
"""
cached_file = os.path.join(
plaintext_cache.cachedir, 'boring', '2004-103984.xml')
plaintext_cache.clean()
assert not os.path.exists(cached_file)
plaintext_cache.get(
build_dov_url('data/boring/2004-103984.xml'))
assert os.path.exists(cached_file)
first_download_time = os.path.getmtime(cached_file)
time.sleep(0.5)
plaintext_cache.get(
build_dov_url('data/boring/2004-103984.xml'))
# assure we didn't redownload the file:
assert os.path.getmtime(cached_file) == first_download_time
@pytest.mark.parametrize('plaintext_cache', [[]],
indirect=['plaintext_cache'])
def test_get_invalid(self, plaintext_cache, mp_remote_xml):
"""Test the get method.
        Test whether the document is saved in the cache and not reused if the
second function call is after the maximum age of the cached file.
Parameters
----------
plaintext_cache : pytest.fixture providing
pydov.util.caching.PlainTextFileCache
PlainTextFileCache using a temporary directory and a maximum age
of 1 second.
mp_remote_xml : pytest.fixture
Monkeypatch the call to the remote DOV service returning an XML
document.
"""
cached_file = os.path.join(
plaintext_cache.cachedir, 'boring', '2004-103984.xml')
plaintext_cache.clean()
assert not os.path.exists(cached_file)
plaintext_cache.get(
build_dov_url('data/boring/2004-103984.xml'))
assert os.path.exists(cached_file)
first_download_time = os.path.getmtime(cached_file)
time.sleep(1.5)
plaintext_cache.get(
build_dov_url('data/boring/2004-103984.xml'))
# assure we did redownload the file, since original is invalid now:
assert os.path.getmtime(cached_file) > first_download_time
@pytest.mark.parametrize('plaintext_cache', [[]],
indirect=['plaintext_cache'])
def test_save_content(self, plaintext_cache, mp_remote_xml):
"""Test whether the data is saved in the cache.
Test if the contents of the saved document are the same as the
original data.
Parameters
----------
plaintext_cache : pytest.fixture providing
pydov.util.caching.PlainTextFileCache
PlainTextFileCache using a temporary directory and a maximum age
of 1 second.
mp_remote_xml : pytest.fixture
Monkeypatch the call to the remote DOV service returning an XML
document.
"""
cached_file = os.path.join(
plaintext_cache.cachedir, 'boring', '2004-103984.xml')
plaintext_cache.clean()
assert not os.path.exists(cached_file)
plaintext_cache.get(
build_dov_url('data/boring/2004-103984.xml'))
assert os.path.exists(cached_file)
with open('tests/data/types/boring/boring.xml', 'r',
encoding='utf-8') as ref:
ref_data = ref.read()
with open(cached_file, 'r', encoding='utf-8') as cached:
cached_data = cached.read()
assert cached_data == ref_data
@pytest.mark.parametrize('plaintext_cache', [[]],
indirect=['plaintext_cache'])
def test_reuse_content(self, plaintext_cache, mp_remote_xml):
"""Test whether the saved data is reused.
Test if the contents returned by the cache are the same as the
original data.
Parameters
----------
plaintext_cache : pytest.fixture providing
pydov.util.caching.PlainTextFileCache
PlainTextFileCache using a temporary directory and a maximum age
of 1 second.
mp_remote_xml : pytest.fixture
Monkeypatch the call to the remote DOV service returning an XML
document.
"""
cached_file = os.path.join(
plaintext_cache.cachedir, 'boring', '2004-103984.xml')
plaintext_cache.clean()
assert not os.path.exists(cached_file)
plaintext_cache.get(
build_dov_url('data/boring/2004-103984.xml'))
assert os.path.exists(cached_file)
with open('tests/data/types/boring/boring.xml', 'r') as ref:
ref_data = ref.read().encode('utf-8')
cached_data = plaintext_cache.get(
build_dov_url('data/boring/2004-103984.xml'))
assert cached_data == ref_data
@pytest.mark.parametrize('plaintext_cache', [[]],
indirect=['plaintext_cache'])
def test_return_type(self, plaintext_cache, mp_remote_xml):
"""The the return type of the get method.
Test wether the get method returns the data in the same datatype (
i.e. bytes) regardless of the data was cached or not.
Parameters
----------
plaintext_cache : pytest.fixture providing
pydov.util.caching.PlainTextFileCache
PlainTextFileCache using a temporary directory and a maximum age
of 1 second.
mp_remote_xml : pytest.fixture
Monkeypatch the call to the remote DOV service returning an XML
document.
"""
cached_file = os.path.join(
plaintext_cache.cachedir, 'boring', '2004-103984.xml')
plaintext_cache.clean()
assert not os.path.exists(cached_file)
ref_data = plaintext_cache.get(
build_dov_url('data/boring/2004-103984.xml'))
assert type(ref_data) is bytes
assert os.path.exists(cached_file)
cached_data = plaintext_cache.get(
build_dov_url('data/boring/2004-103984.xml'))
assert type(cached_data) is bytes
class TestGzipTextFileCacheCache(object):
"""Class grouping tests for the pydov.util.caching.PlainTextFileCache
class."""
@pytest.mark.parametrize('gziptext_cache', [[]],
indirect=['gziptext_cache'])
def test_clean(self, gziptext_cache, mp_remote_xml):
"""Test the clean method.
        Test whether an expired cached file is removed by the clean method,
        while the cache directory itself is kept.
Parameters
----------
gziptext_cache : pytest.fixture providing
pydov.util.caching.GzipTextFileCache
GzipTextFileCache using a temporary directory and a maximum age
of 1 second.
mp_remote_xml : pytest.fixture
Monkeypatch the call to the remote DOV service returning an XML
document.
"""
cached_file = os.path.join(
gziptext_cache.cachedir, 'boring', '2004-103984.xml.gz')
gziptext_cache.get(
build_dov_url('data/boring/2004-103984.xml'))
assert os.path.exists(cached_file)
gziptext_cache.clean()
assert os.path.exists(cached_file)
assert os.path.exists(gziptext_cache.cachedir)
time.sleep(1.5)
gziptext_cache.clean()
assert not os.path.exists(cached_file)
assert os.path.exists(gziptext_cache.cachedir)
@pytest.mark.parametrize('gziptext_cache', [[]],
indirect=['gziptext_cache'])
def test_remove(self, gziptext_cache, mp_remote_xml):
"""Test the remove method.
Test whether the cache directory is nonexistent after the remove
method has been called.
Parameters
----------
gziptext_cache : pytest.fixture providing
pydov.util.caching.GzipTextFileCache
GzipTextFileCache using a temporary directory and a maximum age
of 1 second.
mp_remote_xml : pytest.fixture
Monkeypatch the call to the remote DOV service returning an XML
document.
"""
cached_file = os.path.join(
gziptext_cache.cachedir, 'boring', '2004-103984.xml.gz')
gziptext_cache.get(
build_dov_url('data/boring/2004-103984.xml'))
assert os.path.exists(cached_file)
gziptext_cache.remove()
assert not os.path.exists(cached_file)
assert not os.path.exists(gziptext_cache.cachedir)
@pytest.mark.parametrize('gziptext_cache', [[]],
indirect=['gziptext_cache'])
def test_get_save(self, gziptext_cache, mp_remote_xml):
"""Test the get method.
Test whether the document is saved in the cache.
Parameters
----------
gziptext_cache : pytest.fixture providing
pydov.util.caching.GzipTextFileCache
GzipTextFileCache using a temporary directory and a maximum age
of 1 second.
mp_remote_xml : pytest.fixture
Monkeypatch the call to the remote DOV service returning an XML
document.
"""
cached_file = os.path.join(
gziptext_cache.cachedir, 'boring', '2004-103984.xml.gz')
gziptext_cache.clean()
assert not os.path.exists(cached_file)
gziptext_cache.get(
build_dov_url('data/boring/2004-103984.xml'))
assert os.path.exists(cached_file)
@pytest.mark.parametrize('gziptext_cache', [[]],
indirect=['gziptext_cache'])
def test_get_reuse(self, gziptext_cache, mp_remote_xml):
"""Test the get method.
Test whether the document is saved in the cache and reused in a
second function call.
Parameters
----------
gziptext_cache : pytest.fixture providing
pydov.util.caching.GzipTextFileCache
GzipTextFileCache using a temporary directory and a maximum age
of 1 second.
mp_remote_xml : pytest.fixture
Monkeypatch the call to the remote DOV service returning an XML
document.
"""
cached_file = os.path.join(
gziptext_cache.cachedir, 'boring', '2004-103984.xml.gz')
gziptext_cache.clean()
assert not os.path.exists(cached_file)
gziptext_cache.get(
build_dov_url('data/boring/2004-103984.xml'))
assert os.path.exists(cached_file)
first_download_time = os.path.getmtime(cached_file)
time.sleep(0.5)
gziptext_cache.get(
build_dov_url('data/boring/2004-103984.xml'))
# assure we didn't redownload the file:
assert os.path.getmtime(cached_file) == first_download_time
@pytest.mark.parametrize('gziptext_cache', [[]],
indirect=['gziptext_cache'])
def test_get_invalid(self, gziptext_cache, mp_remote_xml):
"""Test the get method.
        Test whether the document is saved in the cache and not reused if the
second function call is after the maximum age of the cached file.
Parameters
----------
gziptext_cache : pytest.fixture providing
pydov.util.caching.GzipTextFileCache
GzipTextFileCache using a temporary directory and a maximum age
of 1 second.
mp_remote_xml : pytest.fixture
Monkeypatch the call to the remote DOV service returning an XML
document.
"""
cached_file = os.path.join(
gziptext_cache.cachedir, 'boring', '2004-103984.xml.gz')
gziptext_cache.clean()
assert not os.path.exists(cached_file)
gziptext_cache.get(
build_dov_url('data/boring/2004-103984.xml'))
assert os.path.exists(cached_file)
first_download_time = os.path.getmtime(cached_file)
time.sleep(1.5)
gziptext_cache.get(
build_dov_url('data/boring/2004-103984.xml'))
# assure we did redownload the file, since original is invalid now:
assert os.path.getmtime(cached_file) > first_download_time
@pytest.mark.parametrize('gziptext_cache', [[]],
indirect=['gziptext_cache'])
def test_save_content(self, gziptext_cache, mp_remote_xml):
"""Test whether the data is saved in the cache.
Test if the contents of the saved document are the same as the
original data.
Parameters
----------
gziptext_cache : pytest.fixture providing
pydov.util.caching.GzipTextFileCache
GzipTextFileCache using a temporary directory and a maximum age
of 1 second.
mp_remote_xml : pytest.fixture
Monkeypatch the call to the remote DOV service returning an XML
document.
"""
cached_file = os.path.join(
gziptext_cache.cachedir, 'boring', '2004-103984.xml.gz')
gziptext_cache.clean()
assert not os.path.exists(cached_file)
gziptext_cache.get(
build_dov_url('data/boring/2004-103984.xml'))
assert os.path.exists(cached_file)
with open('tests/data/types/boring/boring.xml', 'r',
encoding='utf-8') as ref:
ref_data = ref.read()
with gzip.open(cached_file, 'rb') as cached:
cached_data = cached.read().decode('utf-8')
assert cached_data == ref_data
@pytest.mark.parametrize('gziptext_cache', [[]],
indirect=['gziptext_cache'])
def test_reuse_content(self, gziptext_cache, mp_remote_xml):
"""Test whether the saved data is reused.
Test if the contents returned by the cache are the same as the
original data.
Parameters
----------
gziptext_cache : pytest.fixture providing
pydov.util.caching.GzipTextFileCache
GzipTextFileCache using a temporary directory and a maximum age
of 1 second.
mp_remote_xml : pytest.fixture
Monkeypatch the call to the remote DOV service returning an XML
document.
"""
cached_file = os.path.join(
gziptext_cache.cachedir, 'boring', '2004-103984.xml.gz')
gziptext_cache.clean()
assert not os.path.exists(cached_file)
gziptext_cache.get(
build_dov_url('data/boring/2004-103984.xml'))
assert os.path.exists(cached_file)
with open('tests/data/types/boring/boring.xml', 'r') as ref:
ref_data = ref.read().encode('utf-8')
cached_data = gziptext_cache.get(
build_dov_url('data/boring/2004-103984.xml'))
assert cached_data == ref_data
@pytest.mark.parametrize('gziptext_cache', [[]],
indirect=['gziptext_cache'])
def test_return_type(self, gziptext_cache, mp_remote_xml):
"""The the return type of the get method.
Test wether the get method returns the data in the same datatype (
i.e. bytes) regardless of the data was cached or not.
Parameters
----------
gziptext_cache : pytest.fixture providing
pydov.util.caching.GzipTextFileCache
GzipTextFileCache using a temporary directory and a maximum age
of 1 second.
mp_remote_xml : pytest.fixture
Monkeypatch the call to the remote DOV service returning an XML
document.
"""
cached_file = os.path.join(
gziptext_cache.cachedir, 'boring', '2004-103984.xml.gz')
gziptext_cache.clean()
assert not os.path.exists(cached_file)
ref_data = gziptext_cache.get(
build_dov_url('data/boring/2004-103984.xml'))
assert type(ref_data) is bytes
assert os.path.exists(cached_file)
cached_data = gziptext_cache.get(
build_dov_url('data/boring/2004-103984.xml'))
assert type(cached_data) is bytes
``` |
{
"source": "jorisvandenbossche/pygeos",
"score": 3
} |
#### File: pygeos/pygeos/measurements.py
```python
from . import ufuncs
__all__ = ["area", "distance", "length", "hausdorff_distance"]
def area(geometries):
return ufuncs.area(geometries)
def distance(a, b):
return ufuncs.distance(a, b)
def length(geometries):
return ufuncs.length(geometries)
def hausdorff_distance(a, b, densify=None):
if densify is None:
return ufuncs.hausdorff_distance(a, b)
else:
return ufuncs.haussdorf_distance_densify(a, b, densify)
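# Hypothetical usage sketch (not part of the original module), assuming the
# top-level pygeos package re-exports these wrappers and that pygeos.points /
# pygeos.Geometry behave as in the tests further below:
#
#   import pygeos
#   a = pygeos.points(0, 0)
#   b = pygeos.points(3, 4)
#   pygeos.distance(a, b)  # 5.0
#   pygeos.area(pygeos.Geometry("POLYGON ((0 0, 2 0, 2 2, 0 2, 0 0))"))  # 4.0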
```
#### File: pygeos/test/test_geometry.py
```python
import numpy as np
import pygeos
import pytest
from .common import line_string
from .common import point
from .common import point_z
from .common import all_types
def test_get_type_id():
assert pygeos.get_type_id(all_types).tolist()[:-1] == list(range(8))
def test_get_num_points():
assert pygeos.get_num_points(line_string) == 3
def test_get_point():
actual = pygeos.get_point(line_string, 1)
assert pygeos.equals(actual, pygeos.points(1, 0))
def test_get_set_srid():
actual = pygeos.set_srid(point, 4326)
assert pygeos.get_srid(point) == 0
assert pygeos.get_srid(actual) == 4326
def test_new_from_wkt():
geom = point
actual = pygeos.Geometry(geom.to_wkt())
assert pygeos.equals(actual, geom)
def test_new_from_wkb():
geom = point
actual = pygeos.Geometry(geom.to_wkb())
assert pygeos.equals(actual, geom)
def test_adapt_ptr_raises():
geom = pygeos.clone(point)
with pytest.raises(AttributeError):
geom._ptr += 1
def test_to_wkt():
assert point.to_wkt() == "POINT (2 2)"
assert point.to_wkt(trim=False) == "POINT (2.000000 2.000000)"
assert point.to_wkt(trim=False, precision=3) == "POINT (2.000 2.000)"
assert point_z.to_wkt(dimension=2) == "POINT (1 1)"
assert point_z.to_wkt(dimension=3) == "POINT Z (1 1 1)"
assert point_z.to_wkt(dimension=3, use_old_3d=True) == "POINT (1 1 1)"
def test_to_wkb():
be = b"\x00"
le = b"\x01"
point_type = b"\x01\x00\x00\x00" # 1 as 32-bit uint (LE)
point_type_3d = b"\x01\x00\x00\x80"
coord = b"\x00\x00\x00\x00\x00\x00\xf0?" # 1.0 as double (LE)
assert point_z.to_wkb(dimension=2) == le + point_type + 2 * coord
assert point_z.to_wkb(dimension=3) == le + point_type_3d + 3 * coord
assert (
point_z.to_wkb(dimension=2, byte_order=0)
== be + point_type[::-1] + 2 * coord[::-1]
)
def test_to_wkb_with_srid():
point_with_srid = pygeos.set_srid(point, np.int32(4326))
result = point_with_srid.to_wkb(include_srid=True)
assert np.frombuffer(result[5:9], "<u4").item() == 4326
def test_to_wkb_hex():
le = b"01"
point_type = b"01000000"
coord = b"000000000000F03F" # 1.0 as double (LE)
assert point_z.to_wkb(hex=True, dimension=2) == le + point_type + 2 * coord
@pytest.mark.parametrize("geom", all_types)
def test_from_wkt(geom):
wkt = geom.to_wkt()
actual = pygeos.Geometry.from_wkt(wkt)
assert pygeos.equals(actual, geom)
def test_from_wkt_bytes():
actual = pygeos.Geometry.from_wkt(b"POINT (2 2)")
assert pygeos.equals(actual, point)
def test_from_wkt_exceptions():
with pytest.raises(TypeError):
pygeos.Geometry.from_wkt(list("POINT (2 2)"))
with pytest.raises(TypeError):
pygeos.Geometry.from_wkt(None)
with pytest.raises(pygeos.GEOSException):
pygeos.Geometry.from_wkt("")
with pytest.raises(pygeos.GEOSException):
pygeos.Geometry.from_wkt("NOT A WKT STRING")
@pytest.mark.parametrize("geom", all_types)
@pytest.mark.parametrize("use_hex", [False, True])
@pytest.mark.parametrize("byte_order", [0, 1])
def test_from_wkb(geom, use_hex, byte_order):
wkb = geom.to_wkb(hex=use_hex, byte_order=byte_order)
actual = pygeos.Geometry.from_wkb(wkb)
assert pygeos.equals(actual, geom)
def test_from_wkb_typeerror():
with pytest.raises(TypeError):
pygeos.Geometry.from_wkb("\x01")
with pytest.raises(TypeError):
pygeos.Geometry.from_wkb(None)
with pytest.raises(pygeos.GEOSException):
pygeos.Geometry.from_wkb(b"POINT (2 2)")
```
#### File: pygeos/test/test_linear.py
```python
import pygeos
import numpy as np
def test_project():
line = pygeos.linestrings([[0, 0], [1, 1], [2, 2]])
points = pygeos.points([1, 3], [0, 3])
actual = pygeos.project(line, points)
expected = [0.5 * 2 ** 0.5, 2 * 2 ** 0.5]
np.testing.assert_allclose(actual, expected)
``` |
{
"source": "jorisvandenbossche/ramp-board",
"score": 2
} |
#### File: ramp-board/databoard/deploy.py
```python
import os
from shutil import rmtree
from databoard import (db, deployment_path, ramp_config, ramp_data_path,
ramp_kits_path)
def recreate_db():
"""Initialisation of a test database."""
db.session.close()
db.drop_all()
db.create_all()
print(db)
def deploy():
if os.getenv('DATABOARD_STAGE') in ['TEST', 'TESTING']:
rmtree(deployment_path)
os.makedirs(deployment_path)
os.system('rsync -rultv fabfile.py {}'.format(deployment_path))
os.makedirs(ramp_kits_path)
os.makedirs(ramp_data_path)
os.makedirs(
os.path.join(deployment_path, ramp_config['submissions_dir'])
)
recreate_db()
else:
raise AttributeError('DATABOARD_STAGE should be set to TESTING for '
'`deploy` to work')
``` |
{
"source": "jorisvandenbossche/scipy-lecture-notes",
"score": 4
} |
#### File: mathematical_optimization/examples/cost_functions.py
```python
import numpy as np
###############################################################################
# Gaussian functions with varying conditionning
def gaussian(x):
return np.exp(-np.sum(x**2))
def gaussian_prime(x):
return -2*x*np.exp(-np.sum(x**2))
def gaussian_prime_prime(x):
return -2*np.exp(-x**2) + 4*x**2*np.exp(-x**2)
def mk_gauss(epsilon, ndim=2):
def f(x):
x = np.asarray(x)
y = x.copy()
y *= np.power(epsilon, np.arange(ndim))
return -gaussian(.5*y) + 1
def f_prime(x):
x = np.asarray(x)
y = x.copy()
scaling = np.power(epsilon, np.arange(ndim))
y *= scaling
return -.5*scaling*gaussian_prime(.5*y)
def hessian(x):
epsilon = .07
x = np.asarray(x)
y = x.copy()
scaling = np.power(epsilon, np.arange(ndim))
y *= .5*scaling
H = -.25*np.ones((ndim, ndim))*gaussian(y)
d = 4*y*y[:, np.newaxis]
d.flat[::ndim+1] += -2
H *= d
return H
return f, f_prime, hessian
###############################################################################
# Quadratic functions with varying conditionning
def mk_quad(epsilon, ndim=2):
def f(x):
x = np.asarray(x)
y = x.copy()
y *= np.power(epsilon, np.arange(ndim))
return .33*np.sum(y**2)
def f_prime(x):
x = np.asarray(x)
y = x.copy()
scaling = np.power(epsilon, np.arange(ndim))
y *= scaling
return .33*2*scaling*y
def hessian(x):
scaling = np.power(epsilon, np.arange(ndim))
return .33*2*np.diag(scaling)
return f, f_prime, hessian
###############################################################################
# Super ill-conditionned problem: the Rosenbrock function
def rosenbrock(x):
y = 4*x
y[0] += 1
y[1:] += 3
return np.sum(.5*(1 - y[:-1])**2 + (y[1:] - y[:-1]**2)**2)
def rosenbrock_prime(x):
y = 4*x
y[0] += 1
y[1:] += 3
xm = y[1:-1]
xm_m1 = y[:-2]
xm_p1 = y[2:]
der = np.zeros_like(y)
der[1:-1] = 2*(xm - xm_m1**2) - 4*(xm_p1 - xm**2)*xm - .5*2*(1 - xm)
der[0] = -4*y[0]*(y[1] - y[0]**2) - .5*2*(1 - y[0])
der[-1] = 2*(y[-1] - y[-2]**2)
return 4*der
def rosenbrock_hessian_(x):
x, y = x
x = 4*x + 1
y = 4*y + 3
return 4*4*np.array((
(1 - 4*y + 12*x**2, -4*x),
( -4*x, 2),
))
def rosenbrock_hessian(x):
y = 4*x
y[0] += 1
y[1:] += 3
H = np.diag(-4*y[:-1], 1) - np.diag(4*y[:-1], -1)
diagonal = np.zeros_like(y)
diagonal[0] = 12*y[0]**2 - 4*y[1] + 2*.5
diagonal[-1] = 2
diagonal[1:-1] = 3 + 12*y[1:-1]**2 - 4*y[2:]*.5
H = H + np.diag(diagonal)
return 4*4*H
###############################################################################
# Helpers to wrap the functions
class LoggingFunction(object):
def __init__(self, function, counter=None):
self.function = function
if counter is None:
counter = list()
self.counter = counter
self.all_x_i = list()
self.all_y_i = list()
self.all_f_i = list()
self.counts = list()
def __call__(self, x0):
x_i, y_i = x0[:2]
self.all_x_i.append(x_i)
self.all_y_i.append(y_i)
f_i = self.function(np.asarray(x0))
self.all_f_i.append(f_i)
self.counter.append('f')
self.counts.append(len(self.counter))
return f_i
class CountingFunction(object):
def __init__(self, function, counter=None):
self.function = function
if counter is None:
counter = list()
self.counter = counter
def __call__(self, x0):
self.counter.append('f_prime')
return self.function(x0)
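# Hypothetical usage sketch (not part of the original helpers), assuming
# scipy is available: mk_quad returns (f, f_prime, hessian) for a quadratic
# with conditioning epsilon, and LoggingFunction records every evaluation so
# the optimization path can be inspected or plotted afterwards.
#
#   from scipy import optimize
#   f, f_prime, hessian = mk_quad(.7)
#   logged_f = LoggingFunction(f)
#   optimize.minimize(logged_f, x0=[1.6, 1.1], jac=f_prime, method="BFGS")
#   # logged_f.all_x_i / logged_f.all_y_i now hold the visited points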
```
#### File: mathematical_optimization/examples/plot_constraints.py
```python
import numpy as np
import pylab as pl
from scipy import optimize
x, y = np.mgrid[-2.9:5.8:.05, -2.5:5:.05]
x = x.T
y = y.T
for i in (1, 2):
# Create 2 figure: only the second one will have the optimization
# path
pl.figure(i, figsize=(3, 2.5))
pl.clf()
pl.axes([0, 0, 1, 1])
contours = pl.contour(np.sqrt((x - 3)**2 + (y - 2)**2),
extent=[-3, 6, -2.5, 5],
cmap=pl.cm.gnuplot)
pl.clabel(contours,
inline=1,
fmt='%1.1f',
fontsize=14)
pl.plot([-1.5, -1.5, 1.5, 1.5, -1.5],
[-1.5, 1.5, 1.5, -1.5, -1.5], 'k', linewidth=2)
pl.fill_between([ -1.5, 1.5],
[ -1.5, -1.5],
[ 1.5, 1.5],
color='.8')
pl.axvline(0, color='k')
pl.axhline(0, color='k')
pl.text(-.9, 4.4, '$x_2$', size=20)
pl.text(5.6, -.6, '$x_1$', size=20)
pl.axis('equal')
pl.axis('off')
# And now plot the optimization path
accumulator = list()
def f(x):
# Store the list of function calls
accumulator.append(x)
return np.sqrt((x[0] - 3)**2 + (x[1] - 2)**2)
# We don't use the gradient, as with the gradient, L-BFGS is too fast,
# and finds the optimum without showing us a pretty path
def f_prime(x):
        r = np.sqrt((x[0] - 3)**2 + (x[1] - 2)**2)
        return np.array(((x[0] - 3)/r, (x[1] - 2)/r))
optimize.fmin_l_bfgs_b(f, np.array([0, 0]), approx_grad=1,
bounds=((-1.5, 1.5), (-1.5, 1.5)))
accumulated = np.array(accumulator)
pl.plot(accumulated[:, 0], accumulated[:, 1])
pl.show()
```
#### File: matplotlib/examples/plot_contour.py
```python
import pylab as pl
import numpy as np
def f(x,y):
return (1 - x / 2 + x ** 5 + y ** 3) * np.exp(-x ** 2 - y ** 2)
n = 256
x = np.linspace(-3, 3, n)
y = np.linspace(-3, 3, n)
X, Y = np.meshgrid(x, y)
pl.contourf(X, Y, f(X, Y), 8, alpha=.75, cmap=pl.cm.hot)
C = pl.contour(X, Y, f(X,Y), 8, colors='black', linewidth=.5)
pl.clabel(C, inline=1, fontsize=10)
pl.xticks(())
pl.yticks(())
pl.text(-0.05, 1.02, " Contour Plot: pl.contour(..)\n",
horizontalalignment='left',
verticalalignment='top',
size='xx-large',
bbox=dict(facecolor='white', alpha=1.0, width=400, height=65),
transform=pl.gca().transAxes)
pl.text(-0.05, 1.01, "\n\n Draw contour lines and filled contours ",
horizontalalignment='left',
verticalalignment='top',
size='large',
transform=pl.gca().transAxes)
pl.show()
```
#### File: matplotlib/examples/plot_imshow_ex.py
```python
import pylab as pl
import numpy as np
def f(x, y):
return (1 - x / 2 + x ** 5 + y ** 3 ) * np.exp(-x ** 2 - y ** 2)
n = 10
x = np.linspace(-3, 3, int(3.5 * n))
y = np.linspace(-3, 3, int(3.0 * n))
X, Y = np.meshgrid(x, y)
Z = f(X, Y)
pl.axes([0.025, 0.025, 0.95, 0.95])
pl.imshow(Z, interpolation='nearest', cmap='bone', origin='lower')
pl.colorbar(shrink=.92)
pl.xticks(())
pl.yticks(())
pl.show()
```
#### File: 3d_plotting/examples/mlab_interactive_dialog.py
```python
import numpy as np
from traits.api import HasTraits, Instance
from traitsui.api import View, Item, HGroup
from mayavi.core.ui.api import SceneEditor, MlabSceneModel
def curve(n_turns):
phi = np.linspace(0, 2*np.pi, 2000)
return [np.cos(phi) * (1 + 0.5*np.cos(n_turns*phi)),
np.sin(phi) * (1 + 0.5*np.cos(n_turns*phi)),
0.5*np.sin(n_turns*phi)]
# The class that contains the dialog
from traits.api import Range, on_trait_change
class Visualization(HasTraits):
n_turns = Range(0, 30, 11)
scene = Instance(MlabSceneModel, ())
def __init__(self):
HasTraits.__init__(self)
x, y, z = curve(self.n_turns)
self.plot = self.scene.mlab.plot3d(x, y, z)
@on_trait_change('n_turns')
def update_plot(self):
x, y, z = curve(self.n_turns)
self.plot.mlab_source.set(x=x, y=y, z=z)
view = View(Item('scene', height=300, show_label=False,
editor=SceneEditor()),
HGroup('n_turns'), resizable=True)
# Fire up the dialog
Visualization().configure_traits()
``` |
{
"source": "jorisvandenbossche/Shapely",
"score": 2
} |
#### File: shapely/geometry/proxy.py
```python
from shapely.geometry.base import EMPTY
from shapely.geos import lgeos
class CachingGeometryProxy(object):
context = None
factory = None
__geom__ = EMPTY
_gtag = None
def __init__(self, context):
self.context = context
@property
def _is_empty(self):
return self.__geom__ in [EMPTY, None]
def empty(self, val=EMPTY):
if not self._is_empty and self.__geom__:
lgeos.GEOSGeom_destroy(self.__geom__)
self.__geom__ = val
@property
def _geom(self):
"""Keeps the GEOS geometry in synch with the context."""
gtag = self.gtag()
if gtag != self._gtag or self._is_empty:
self.empty()
self.__geom__, n = self.factory(self.context)
self._gtag = gtag
return self.__geom__
def gtag(self):
return hash(repr(self.context))
class PolygonProxy(CachingGeometryProxy):
@property
def _geom(self):
"""Keeps the GEOS geometry in synch with the context."""
gtag = self.gtag()
if gtag != self._gtag or self._is_empty:
self.empty()
self.__geom__, n = self.factory(self.context[0], self.context[1])
self._gtag = gtag
return self.__geom__
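# Design note (an added observation, not part of the original module): both
# proxies tag the cached GEOS pointer with hash(repr(context)); when the
# Python-side context changes, the tag differs, empty() destroys the stale
# geometry, and factory() rebuilds it lazily on the next _geom access.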
``` |
{
"source": "jorisvandenbossche/sphinx-theme-builder",
"score": 2
} |
#### File: sphinx_theme_builder/_internal/nodejs.py
```python
import os
import shlex
import shutil
import subprocess
import sys
import textwrap
from pathlib import Path
from typing import Any, List, Optional
from unittest.mock import patch
from .errors import DiagnosticError
from .passthrough import passthrough_run
from .project import Project
from .ui import log
_NODEENV_DIR = ".nodeenv"
_BIN_DIR = "Scripts" if os.name == "nt" else "bin"
def run_in(
nodeenv: Path, args: List[str], *, production: bool = False, **kwargs: Any
) -> "Optional[subprocess.CompletedProcess[bytes]]":
"""Run a command, using a binary from `nodeenv`."""
assert nodeenv.name == _NODEENV_DIR
log(f"[magenta](nodeenv) $[/] [blue]{' '.join(args)}[/]")
env = {
"NPM_CONFIG_PREFIX": os.fsdecode(nodeenv),
"npm_config_prefix": os.fsdecode(nodeenv),
"NODE_PATH": os.fsdecode(nodeenv / "lib" / "node_modules"),
"PATH": os.pathsep.join([os.fsdecode(nodeenv / "bin"), os.environ["PATH"]]),
"NODE_ENV": "production" if production else "development",
}
# Fully qualify the first argument.
resolved_name = shutil.which(args[0], path=env["PATH"])
if not resolved_name:
raise FileNotFoundError(resolved_name)
args[0] = resolved_name
with patch.dict("os.environ", env):
if not kwargs:
returncode = passthrough_run(args)
if returncode:
cmd = " ".join(shlex.quote(arg) for arg in args)
raise subprocess.CalledProcessError(returncode=returncode, cmd=cmd)
return None
else:
return subprocess.run(args, check=True, **kwargs)
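# Hypothetical usage sketch (not part of the original module), assuming
# `project` is a Project instance as used in generate_assets below: with
# extra keyword arguments the command goes through subprocess.run and the
# CompletedProcess is returned; without them output is streamed via
# passthrough_run and None is returned.
#
#   run_in(project.location / _NODEENV_DIR, ["npm", "--version"],
#          stdout=subprocess.PIPE)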
def _run_python_nodeenv(*args: str) -> None:
presentation = ["python", "-m", "nodeenv", *args]
log(f"[magenta]$[/] [blue]{' '.join(presentation)}[/]")
command = [
sys.executable,
"-c",
textwrap.dedent(
"""
import runpy
import rich
import rich.traceback
import urllib.request
rich.traceback.install(
width=rich.get_console().width,
show_locals=True,
suppress=[runpy, urllib.request],
)
runpy.run_module("nodeenv", run_name="__main__", alter_sys=True)
"""
),
"-v",
*args,
]
try:
subprocess.run(command, check=True)
except subprocess.CalledProcessError as error:
raise DiagnosticError(
reference="nodeenv-creation-failed",
message="Failed to create a `nodeenv`",
context="See above for failure output from the underlying tooling.",
hint_stmt=(
"A `urllib.error.HTTPError` would indicate mean that the issue is "
"related to the network, or the NodeJS servers, or the node version "
"that this tool is trying to fetch is no longer available."
),
) from error
def _should_use_system_node(node_version: str) -> bool:
try:
process = subprocess.run(["node", "--version"], capture_output=True, check=True)
except FileNotFoundError:
log("[yellow]#[/] [cyan]Could not find a `node` executable.[/]")
return False
except subprocess.CalledProcessError as error:
log(
"[yellow]#[/] [cyan]`node` executable did not exit cleanly.[/]\n"
f"{error.stderr.decode()}"
)
return False
if process.stdout.decode().strip() != f"v{node_version}":
log("[yellow]#[/] [cyan]The system `node` has a different version:[/] {}")
return False
return True
def create_nodeenv(nodeenv: Path, node_version: str) -> None:
log(
"[yellow]#[/] [cyan]Generating new [magenta]nodeenv[/] with "
f"NodeJS [green]{node_version}[/]!"
)
if _should_use_system_node(node_version=node_version):
log("[yellow]#[/] Will use system nodeJS, since version matches.")
node_version = "system"
_run_python_nodeenv(
f"--node={node_version}",
os.fsdecode(nodeenv),
)
def run_npm_build(nodeenv: Path, *, production: bool) -> None:
try:
run_in(nodeenv, ["npm", "run-script", "build"], production=production)
except subprocess.CalledProcessError as error:
raise DiagnosticError(
reference="js-build-failed",
message="The Javascript-based build pipeline failed.",
context="See above for failure output from the underlying tooling.",
hint_stmt=None,
) from error
def populate_npm_packages(nodeenv: Path, node_modules: Path) -> None:
try:
run_in(nodeenv, ["npm", "install", "--include=dev", "--no-save"])
except FileNotFoundError as error:
raise DiagnosticError(
reference="nodeenv-unhealthy-npm-not-found",
message="The `nodeenv` for this project is unhealthy.",
context=str(error),
hint_stmt=(
f"Deleting the {_NODEENV_DIR} directory and trying again may work."
),
) from error
except subprocess.CalledProcessError as error:
raise DiagnosticError(
reference="js-install-failed",
message="Javascript dependency installation failed.",
context="See above for failure output from the underlying tooling.",
hint_stmt=None,
) from error
if node_modules.is_dir():
node_modules.touch()
def generate_assets(project: Project, *, production: bool) -> None:
package_json = project.location / "package.json"
nodeenv = project.location / _NODEENV_DIR
node_modules = project.location / "node_modules"
assert package_json.exists()
created_new_nodeenv = False
if not nodeenv.exists():
log("[yellow]#[/] [magenta]nodeenv[cyan] does not exist.[/]")
create_nodeenv(nodeenv, project.node_version)
created_new_nodeenv = True
# Checking the node version is a sanity check, and ensures that the environment is
# "healthy".
try:
process = run_in(nodeenv, ["node", "--version"], stdout=subprocess.PIPE)
except FileNotFoundError as error:
raise DiagnosticError(
reference="nodeenv-unhealthy-file-not-found",
message="The `nodeenv` for this project is unhealthy.",
context=str(error),
hint_stmt=(
f"Deleting the {_NODEENV_DIR} directory and trying again may work."
),
) from error
except subprocess.CalledProcessError as error:
raise DiagnosticError(
reference="nodeenv-unhealthy-subprocess-failure",
message="The `nodeenv` for this project is unhealthy.",
context="See above for failure output from the underlying tooling.",
hint_stmt=(
f"Deleting the {_NODEENV_DIR} directory and trying again may work."
),
) from error
# Present the `node --version` output to the user.
assert process
got = process.stdout.decode().strip()
print(got)
# Sanity-check the node version.
expected = f"v{project.node_version}"
if got != expected:
raise DiagnosticError(
reference="nodeenv-version-mismatch",
message="The `nodeenv` for this project is unhealthy.",
context=(
"There is a mismatch between what is present in the environment "
f"({got}) and the expected version of NodeJS ({expected})."
),
hint_stmt=(
f"Deleting the {_NODEENV_DIR} directory and trying again may work."
),
)
need_to_populate = False
if created_new_nodeenv:
need_to_populate = True
elif not node_modules.exists():
need_to_populate = True
elif node_modules.stat().st_mtime < package_json.stat().st_mtime:
log("[yellow]#[/] [cyan]Detected changes in [magenta]package.json[cyan].[/]")
need_to_populate = True
if need_to_populate:
if node_modules.exists():
log("[yellow]#[/] [cyan]Cleaning up [magenta]node_modules[cyan].[/]")
try:
shutil.rmtree(node_modules)
except OSError as error:
raise DiagnosticError(
reference="unable-to-cleanup-node-modules",
message="Could not remove node_modules directory.",
context=str(error),
hint_stmt=f"Deleting {node_modules} and trying again may work.",
) from error
log("[yellow]#[/] [cyan]Installing NodeJS packages.[/]")
populate_npm_packages(nodeenv, node_modules)
run_npm_build(nodeenv=nodeenv, production=production)
log("[green]Done![/]")
``` |
{
"source": "Jorisvansteenbrugge/GapFiller",
"score": 3
} |
#### File: GapFiller/Hydraslayer/Utility.py
```python
import logging
logger = logging.getLogger("Hydraslayer")
def get_consensusbase(bases, mincov=3):
"""
:param mincov:
:type bases: list
"""
bases = "".join(bases)
a = bases.count('A')
t = bases.count('T')
c = bases.count('C')
g = bases.count('G')
n = bases.count("N") + bases.count('-')
counts = [(a, 'A'), (t, 'T'), (c, 'C'), (g, 'G')]
s_dic = sorted(counts, key=lambda x: x[0], reverse=True)
max = s_dic[0]
if max[0] < mincov:
return "N"
else:
return max[1]
def get_gap_pos_alignment(record):
sequence = str(record.seq)
N_pos = [x for x, nuc in enumerate(sequence) if nuc == "N"]
return N_pos
def extract_positions(seq, positions):
return "".join([seq[idx] for idx in positions])
def pretty_print_alignment(fasta_sequences):
alignment_len = len(fasta_sequences[0])
for x in range(alignment_len):
row = []
for alignment in fasta_sequences:
row.append(alignment[x])
print(" ".join(row))
def get_consensus(fasta_seqs, mincov):
"""Get the per-position consensus sequence, excluding gaps.
All read sequences (not the assembly sequence) are merged into a consensus sequence.
"""
consensus = []
alignment_len = len(fasta_seqs[0])
for x in range(alignment_len):
bases = [fasta[x] for fasta in fasta_seqs]
consensus.append(get_consensusbase(bases, mincov))
# logger.debug(f'Consensus {"".join(consensus)}')
return "".join(consensus)
``` |
{
"source": "Jorisvansteenbrugge/GROS_genomes",
"score": 3
} |
#### File: GROS_genomes/Assembly/swap_fasta_header.py
```python
import argparse
from Bio import SeqIO
def parse_arguments():
parser = argparse.ArgumentParser(description="Swap fasta header name with a prefix and number them. Output is printed to stdout")
parser.add_argument('prefix', metavar='PREFIX', help='the prefix in the fasta header contig/scaffold/chr/..')
parser.add_argument('fasta', metavar='FASTA', help='Fasta to swap')
return parser.parse_args()
args = parse_arguments()
count = 1
for record in SeqIO.parse(args.fasta, 'fasta'):
print(f">{args.prefix}{count}")
print(record.seq)
count += 1
``` |
{
"source": "jorisvanzundert/cx2tree",
"score": 3
} |
#### File: jorisvanzundert/cx2tree/cx2vartab.py
```python
from docopt import docopt
import csv
def to_variant_table( input_filepath, output_filepath, gap_as_variant=False, ignore_invariants=False ):
witnesses = []
with open( input_filepath, 'r' ) as csv_file:
csv_reader = csv.reader( csv_file )
for row in csv_reader:
witnesses.append( row )
ranks = list( zip(*witnesses) )
variant_table = [ [ 'variant', *ranks[0] ] ]
for rank in ranks[1:]:
variants = set( rank )
if( ( not ignore_invariants ) or ( len( variants ) > 1 ) ):
for variant in variants:
if( gap_as_variant ):
nexus_code = [ ( '-' if not variant.strip() else variant.strip() ) ]
for token in rank:
nexus_code.append( int( token==variant ) )
variant_table.append( nexus_code )
else:
nexus_code = [ variant.strip() ]
if( nexus_code[0] != '' ):
for token in rank:
nexus_code.append( int( token==variant ) ) if( token.strip() != '' ) else nexus_code.append( '-' )
variant_table.append( nexus_code )
if( not output_filepath ):
output_filepath = input_filepath + '.csv'
with open( output_filepath, 'w' ) as csv_file:
writer = csv.writer( csv_file )
writer.writerows( variant_table )
doc = """cx2vartabl.
Usage:
cx2vartabl.py [-gn] [-o <outputfile>] <inputfile>
cx2vartabl.py (-h | --help)
Options:
-h --help Show this help information.
-o <outputfile> Specify output file. If none the name of the input file
will be used with an added extension of .csv.
-g --gap-variants Encode gaps as variants (not -).
-n --no-invariants Do not encode non-variants.
"""
if __name__ == '__main__':
arguments = docopt( doc, version='vartable2nex 0.1' )
to_variant_table( arguments['<inputfile>'], arguments['-o'], arguments['--gap-variants'], arguments['--no-invariants'] )
``` |
{
"source": "jorisvanzundert/timelessness",
"score": 3
} |
#### File: jorisvanzundert/timelessness/gradient_grid.py
```python
import numpy as np
import matplotlib.pyplot as plt
import math
def gauss_grid(size_x, size_y=None):
if size_y == None:
size_y = size_x
sigma_x = 0.17 * size_x
sigma_y = 0.17 * size_y
assert isinstance(size_x, int)
assert isinstance(size_y, int)
x0 = size_x // 2
y0 = size_y // 2
x = np.arange(0, size_x, dtype=float)
y = np.arange(0, size_y, dtype=float)[:,np.newaxis]
x -= x0
y -= y0
exp_part = x**2/(2*sigma_x**2)+ y**2/(2*sigma_y**2)
dist = 1/(2*np.pi*sigma_x*sigma_y) * np.exp(-exp_part)
return dist * (256/dist.max())
def alpha_gradient_grid( size_x, size_y=None, color="#888888", alpha=0.1 ):
arr = []
for row in gauss_grid( size_x, size_y ).tolist():
for item in row:
arr.append( "{}{:02X}".format( color, math.floor( alpha * item ) ) )
return arr
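# --- Usage sketch (added for illustration; not part of the original module) ---
# alpha_gradient_grid() returns one "#RRGGBBAA" string per grid cell, with the
# alpha channel following a 2-D Gaussian centred on the grid.
if __name__ == "__main__":
    colours = alpha_gradient_grid(4, color="#888888", alpha=0.5)
    print(len(colours))  # 16 cells for a 4x4 grid
    print(colours[:4])   # first row of hex colour strings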
```
#### File: jorisvanzundert/timelessness/src_to_txt.py
```python
from spellchecker import SpellChecker
import nltk
import utils
def find_untxted_PDFs():
for dir in [ utils.POPULAR_FULLTEXT, utils.BEST_SELLING_FULLTEXT ]:
pdfs = set( utils.get_file_names_by_extension( dir, "pdf", utils.DROP_EXTENSION ) )
txts = set( utils.get_file_names_by_extension( dir, "txt", utils.DROP_EXTENSION ) )
print( dir, len(pdfs) )
for pdf_without_txt in (pdfs-txts):
print( pdf_without_txt )
def list_misspellings( file_path ):
spell = SpellChecker()
a_text = open( file_path, "r" ).read()
text_tokens = nltk.word_tokenize( a_text )
vocabulary = sorted( set( text_tokens ) )
misspelled = list( spell.unknown( vocabulary ) )
concordance = nltk.text.ConcordanceIndex( text_tokens )
misspelled.remove("’")
misspelled.remove("‘")
misspelled.remove("”")
misspelled.remove("“")
misspelled.remove("``")
misspelled.remove("—")
print( len( misspelled ) )
for token in misspelled:
kwics = concordance.find_concordance( token, width=40 )
for kwic in kwics:
print( kwic.query, ":", kwic.line )
# MAIN
find_untxted_PDFs()
# Steps taken to turn initial data into parseable
#
# 1) From alternative English sources (because files were German/Spanish), pdf/ocr:
# * POP_1910_The Notebooks of Malte Laurids Brigge
# * POP_1913_Petersburg
# * POP_1914_Niebla
# 2) Removes POP_POP_1917_Cuentos de amor, de locura y de muerte as it is in Spanish
# and no alternative source found.
# 3) pdftotext
# * POP_1919_In the Shadow of Young Girls in Flower (In Search of Lost Time, #2)
# * POP_1914_The Dead.pdf
# 4) mv POP_1921_The\ Good\ Soldier\ Svejk.txt POP_1921_The\ Good\ Soldier\ Svejk.pdf
# 5) OCR'd Sveijk
# 6) Use list_misspellings to improve the OCR
# 7) rm *.pdf
# 8) remove space in front of filename: " BS_1917_The Light in the Clearing"
# 9) convert to UTF-8 (from CP1252):
# utils.correct_wrong_encodings( POPULAR_FULLTEXT )
# --
``` |
{
"source": "jorisvddonk/gpt2-movie-suggestions",
"score": 3
} |
#### File: jorisvddonk/gpt2-movie-suggestions/main.py
```python
from fastapi import FastAPI
from starlette.responses import HTMLResponse
import gpt_2_simple as gpt2
from pydantic import BaseModel
from datetime import datetime
import re
sess = gpt2.start_tf_sess()
gpt2.load_gpt2(sess, run_name='run1')
def generate_texts(text: str, samples: int = 1):
if samples > 5:
raise Exception("Samples can not be greater than 5!")
actual_text = "<REQUEST>\n%s\n\n<REPLY>\n" % text
with sess.graph.as_default():
datas = gpt2.generate(sess,
length=250,
temperature=0.7,
prefix=actual_text,
nsamples=samples,
batch_size=samples,
run_name='run1',
return_as_list=True
)
retval = {
"raw": [],
"text": []
}
for data in datas:
retval["raw"].append(data)
m = re.search("\\<REQUEST\\>(.*?)\\<REPLY\\>(.*?)\\<END\\>",
"%s<END>" % data, re.MULTILINE | re.DOTALL)
actual_text = m.group(2).strip()
retval["text"].append(actual_text)
return retval
class Input(BaseModel):
text: str
samples: int
app = FastAPI()
indexhtml = open('./index.html', 'r').read()
@app.get("/.*", include_in_schema=False)
def root():
return HTMLResponse(indexhtml)
@app.post("/generate")
def generate(data: Input):
data = generate_texts(data.text, data.samples)
return {"data": data['text'], "raw": data['raw']}
``` |
{
"source": "jorisvos/making-things-smart",
"score": 3
} |
#### File: smart-lamp/modes/sun.py
```python
from sense_hat import SenseHat
import time
import datetime
import requests
import json
import dateutil.parser
import pytz
import sys  # needed for sys.exit() in the error handler below
sense = SenseHat()
white = (255, 255, 255)
orange_hues = [(246,120,88), (247,133,104), (248,147,121), (249,160,138), (250,174,155), (250,187,171), (251,201,188), (252,214,205), (253,228,222), (254,241,238)]
orange = (245, 106, 71)
zwolle = "https://api.sunrise-sunset.org/json?lat=52.5055809&lng=6.0905981&date=today&formatted=0"
sydney = "https://api.sunrise-sunset.org/json?lat=-33.8688197&lng=151.2092955&date=today&formatted=0"
san = "https://api.sunrise-sunset.org/json?lat=37.7749295&lng=-122.4194155&date=today&formatted=0"
try:
r = requests.get(zwolle)
except:
print("Error when getting request!")
sys.exit(255)
parsed = json.loads(r.content)
results = parsed['results']
#print(json.dumps(parsed, indent=4, sort_keys=True)) ## Comment this line
# Functions
# @staticmethod
def toDataTime(datestring):
parsed = dateutil.parser.parse(datestring) # ISO 8601 basic format
return parsed
# Format data to proper types
sunrise = toDataTime(results['sunrise'])
sunrise_unix = time.mktime(sunrise.timetuple())
sunset = toDataTime(results['sunset'])
sunset_unix = time.mktime(sunset.timetuple())
civil_twilight_begin = toDataTime(results['civil_twilight_begin'])
civil_twilight_begin_unix = time.mktime(civil_twilight_begin.timetuple())
civil_twilight_end = toDataTime(results['civil_twilight_end'])
civil_twilight_end_unix = time.mktime(civil_twilight_end.timetuple())
utc = datetime.datetime.utcnow()
utc_unix = time.mktime(utc.timetuple())
if sunrise_unix <= utc_unix <= sunset_unix:
print("Het is dag")
sense.clear(white)
else:
# Morning
if civil_twilight_begin_unix <= utc_unix <= sunrise_unix:
dif_sunrise = (sunrise_unix - civil_twilight_begin_unix) / 10
        print(dif_sunrise)
        interval_1 = civil_twilight_begin_unix + dif_sunrise
        if civil_twilight_begin_unix <= utc_unix <= (civil_twilight_begin_unix + dif_sunrise):
            sense.clear(orange_hues[0])
print("1")
for x in range(1, 10):
if (civil_twilight_begin_unix + (dif_sunrise * x)) <= utc_unix <= (civil_twilight_begin_unix + (dif_sunrise * (x+1))):
sense.clear(orange_hues[x])
print(x)
break
#sense.clear(255,0,255)
# print("Sleepy sleepy, need to go to bed soon!")
# Evening
if sunset_unix <= utc_unix <= civil_twilight_end_unix:
dif_sunset = (civil_twilight_end_unix - sunset_unix) / 10
if sunset_unix <= utc_unix <= (sunset_unix + dif_sunset):
            sense.clear(orange_hues[9])
            print("10")
        for x in range(9, 0, -1):
            print(x)
if (sunset_unix + (dif_sunset * x)) <= utc_unix <= (sunset_unix + (dif_sunset * (x+1))):
sense.clear(orange_hues[x])
print(x)
break
# Nighttime
if (utc_unix < civil_twilight_begin_unix) or (utc_unix > civil_twilight_end_unix):
print("Nighty night, sleep well!")
sense.clear(orange)
``` |
{
"source": "jorisvos/rasplock",
"score": 2
} |
#### File: rasplock/smart-lock/main.py
```python
import os, sys, signal, subprocess
from sense_hat import SenseHat
from time import sleep
from libs.clear import *
from modules.joystick import *
from modules.check import *
import variables.vars as v
sense = SenseHat()
sense.clear()
# Function -----------------
def exit(signal, frame):
clear()
print(c.bcolors.OKGREEN+"Bye!"+c.bcolors.ENDC)
sys.exit(0)
# Main program -------------
if __name__ == '__main__':
sense.stick.direction_middle = joystick_move_middle
signal.signal(signal.SIGINT, exit)
print("initialized")
while True:
if v.enabled:
check()
sleep(0.05)
# nearby_devices = bluetooth.discover_devices(duration=4, lookup_names=True, flush_cache=True, lookup_class=False)
```
#### File: smart-lock/modules/joystick.py
```python
import variables.vars as v
from libs.lock import *
from libs.clear import *
def joystick_move_middle(event):
if event.action == "pressed":
if v.enabled:
v.enabled = False
clear()
else:
v.enabled = True
lock(False)
print("enabled="+str(v.enabled))
``` |
{
"source": "joriwinderickx-fm/pyngsi",
"score": 3
} |
#### File: pyngsi/examples/example2_basic_agent_no_processing.py
```python
from pyngsi.agent import NgsiAgent
from pyngsi.sources.source import Row
def basic_processing(data: Row):
# A data row is the output of the Source.
    # It is a simple dataclass composed of:
    # - the name of the datasource provider: here stdin
    # - the data record itself: here a line typed in by the user
    # The function can take additional arguments if needed by the processing logic
_ = data.provider # for this example, we don't consider the datasource provider
return data.record.replace("ping", "pong")
def main():
agent = NgsiAgent.create_agent(process=basic_processing)
    # We could have used a lambda, or any callable that takes a Row as its first argument, even an object method
# agent = NgsiAgent(process=lambda x: x.record.replace("ping", "pong"))
# agent = NgsiAgent(process=myObject.basic_processing)
agent.run()
agent.close()
if __name__ == '__main__':
main()
```
#### File: pyngsi/pyngsi/scheduler.py
```python
import json
import socket
import signal
import time
import schedule
import _thread
from flask import Flask, request, jsonify
from cheroot.wsgi import Server as WSGIServer
from loguru import logger
from datetime import datetime
from typing import Callable
from dataclasses import dataclass
from enum import Enum, auto
from pyngsi.sink import Sink
from pyngsi.agent import NgsiAgent, NgsiAgentPull
from pyngsi.__init__ import __version__
class UNIT(Enum):
seconds = "s"
minutes = "m"
hours = "h"
days = "d"
class SchedulerException(Exception):
pass
@dataclass
class SchedulerStatus:
version = __version__
starttime: datetime = None
lastcalltime: datetime = None
calls: int = 0
calls_success: int = 0
calls_error: int = 0
stats: NgsiAgent.Stats = None
def __init__(self):
self.starttime = datetime.now()
self.stats = NgsiAgent.Stats()
class Scheduler():
"""
    The Scheduler takes an agent and polls it at periodic intervals.
    It updates statistics and provides information (status and version).
"""
def __init__(self,
agent: NgsiAgentPull,
host: str = "0.0.0.0",
port: int = 8081,
wsgi_port: int = 8880,
debug: bool = False,
interval: int = 1,
unit: UNIT = UNIT.minutes):
self.agent = agent
self.host = host
self.port = port
self.wsgi_port = wsgi_port
self.debug = debug
self.interval = interval
self.unit = unit
self.status = SchedulerStatus()
self.app = Flask(__name__)
self.app.add_url_rule("/version", 'version',
self._version, methods=['GET'])
self.app.add_url_rule("/status", 'status',
self._status, methods=['GET'])
def _flaskthread(self):
if self.debug:
self.app.run(host=self.host, port=self.port, debug=self.debug)
else:
wsgi_server = WSGIServer(bind_addr=(
"0.0.0.0", self.wsgi_port), wsgi_app=self.app, numthreads=100)
try:
wsgi_server.start()
except KeyboardInterrupt:
wsgi_server.stop()
def _job(self):
logger.info(f"start new job at {datetime.now()}")
self.status.lastcalltime = datetime.now()
self.status.calls += 1
# run the NGSI Agent
try:
self.agent.run()
except Exception as e:
logger.error(f"Error while running job : {e}")
self.status.calls_error += 1
else:
self.status.calls_success += 1
logger.info(self.agent.stats)
self.status.stats += self.agent.stats
self.agent.reset()
def run(self):
logger.info(
f"HTTP server listens on http://{self.host}:{self.port}")
self.status.starttime = datetime.now()
_thread.start_new_thread(self._flaskthread, ())
logger.info("run job now")
self._job()
logger.info("schedule job")
if self.unit == UNIT.seconds:
schedule.every(self.interval).seconds.do(self._job)
tick = 1
elif self.unit == UNIT.minutes:
schedule.every(self.interval).minutes.do(self._job)
tick = 4
elif self.unit == UNIT.hours:
schedule.every(self.interval).hours.do(self._job)
tick = 32
elif self.unit == UNIT.days:
schedule.every(self.interval).days.do(self._job)
tick = 128
while True:
logger.trace("tick")
schedule.run_pending()
time.sleep(tick)
def _version(self):
logger.trace("ask for version")
return jsonify(name="pyngsi", version=__version__)
def _status(self):
logger.trace("ask for status")
remote_status = self.agent.sink.status()
if remote_status:
return jsonify(poll_status=self.status,
orion_status=remote_status)
else:
return jsonify(poll_status=self.status)
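# --- Usage sketch (added for illustration; not part of the original module) ---
# Assuming an NgsiAgentPull named `agent` has been built elsewhere, the
# scheduler below re-runs it every 5 minutes and exposes the /version and
# /status endpoints through its embedded HTTP server:
#
#   scheduler = Scheduler(agent, interval=5, unit=UNIT.minutes)
#   scheduler.run()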
```
#### File: pyngsi/sources/source_json.py
```python
import sys
import json
import gzip
from typing import Tuple, List, Callable
from loguru import logger
from os.path import basename
from zipfile import ZipFile
from io import TextIOWrapper
from pyngsi.sources.source import Row, Source
class SourceJson(Source):
"""Read JSON formatted data from Standard Input"""
def __init__(self, input: str, provider: str = "user", jsonpath: str = None):
self.json_obj = input
self.provider = provider
self.path = jsonpath
def __iter__(self):
obj = self.json_obj
if self.path:
obj = self.jsonpath(self.path)
if isinstance(obj, list):
for j in obj:
yield Row(self.provider, j)
else:
yield Row(self.provider, obj)
def jsonpath(self, path: List):
obj = self.json_obj
for p in path:
if isinstance(p, int):
obj = obj[p]
else:
obj = obj.get(p)
return obj
def reset(self):
pass
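# --- Usage sketch (added for illustration; not part of the original module) ---
# Wrapping an in-memory document and narrowing it with a json path; iterating
# the source yields one Row per element of the selected list.
if __name__ == "__main__":
    doc = {"rooms": [{"id": 1, "temp": 22.5}, {"id": 2, "temp": 19.0}]}
    src = SourceJson(doc, provider="demo", jsonpath=["rooms"])
    for row in src:
        print(row.provider, row.record)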
```
#### File: pyngsi/tests/test_ngsi.py
```python
import pytest
from datetime import datetime, timedelta, timezone
from geojson import Point
from pyngsi.ngsi import DataModel, NgsiException, unescape, ONE_WEEK
def test_create():
m = DataModel("id", "type")
assert m["id"] == "id"
assert m["type"] == "type"
def test_add_field_str():
m = DataModel("id", "type")
m.add("projectName", "Pixel")
assert m.json(
) == r'{"id": "id", "type": "type", "projectName": {"value": "Pixel", "type": "Text"}}'
def test_add_field_str_escaped():
m = DataModel("id", "type")
m.add("forbiddenCharacters", r"""BEGIN<>"'=;()END""", urlencode=True)
assert m.json(
) == r'{"id": "id", "type": "type", "forbiddenCharacters": {"value": "BEGIN%3C%3E%22%27%3D%3B%28%29END", "type": "STRING_URL_ENCODED"}}'
assert unescape(m["forbiddenCharacters"]["value"]
) == r"""BEGIN<>"'=;()END"""
def test_add_field_int():
m = DataModel("id", "type")
m.add("temperature", 37)
assert m.json(
) == r'{"id": "id", "type": "type", "temperature": {"value": 37, "type": "Number"}}'
def test_add_field_float():
m = DataModel("id", "type")
m.add("temperature", 37.2)
assert m.json(
) == r'{"id": "id", "type": "type", "temperature": {"value": 37.2, "type": "Number"}}'
def test_add_field_bool():
m = DataModel("id", "type")
m.add("loading", True)
assert m.json(
) == r'{"id": "id", "type": "type", "loading": {"value": true, "type": "Boolean"}}'
def test_add_field_date_from_str_old_way():
m = DataModel("id", "type")
m.add("dateObserved", "2018-01-01T15:00:00", isdate=True)
assert m.json(
) == r'{"id": "id", "type": "type", "dateObserved": {"value": "2018-01-01T15:00:00", "type": "DateTime"}}'
def test_add_field_date_from_str():
m = DataModel("id", "type")
m.add_date("dateObserved", "2018-01-01T15:00:00")
assert m.json(
) == r'{"id": "id", "type": "type", "dateObserved": {"value": "2018-01-01T15:00:00", "type": "DateTime"}}'
def test_add_field_url_from_str_old_way():
m = DataModel("id", "type")
m.add("dataProvider", "https://www.fiware.org", isurl=True)
assert m.json(
) == r'{"id": "id", "type": "type", "dataProvider": {"value": "https://www.fiware.org", "type": "URL"}}'
def test_add_field_url_from_str():
m = DataModel("id", "type")
m.add_url("dataProvider", "https://www.fiware.org")
assert m.json(
) == r'{"id": "id", "type": "type", "dataProvider": {"value": "https://www.fiware.org", "type": "URL"}}'
def test_add_field_date_from_datetime():
m = DataModel("id", "type")
d = datetime(2019, 6, 1, 18, 30, 0)
m.add("dateObserved", d)
assert m.json(
) == r'{"id": "id", "type": "type", "dateObserved": {"value": "2019-06-01T18:30:00Z", "type": "DateTime"}}'
def test_add_location_from_tuple():
m = DataModel("id", "type")
m.add("location", (44.8333, -0.5667))
assert m.json(
) == r'{"id": "id", "type": "type", "location": {"value": {"type": "Point", "coordinates": [-0.5667, 44.8333]}, "type": "geo:json"}}'
def test_add_location_from_geojson():
m = DataModel("id", "type")
location = Point((-0.5667, 44.8333))
m.add("location", location)
assert m.json(
) == r'{"id": "id", "type": "type", "location": {"value": {"type": "Point", "coordinates": [-0.5667, 44.8333]}, "type": "geo:json"}}'
def test_add_location_invalid():
m = DataModel("id", "type")
with pytest.raises(NgsiException, match=r".*JSON compliant.*"):
m.add("location", ('A', -0.5667))
def test_cannot_map_ngsi_type():
m = DataModel("id", "type")
with pytest.raises(NgsiException, match=r".*Cannot map.*"):
m.add("unknown", None)
def test_add_field_sequence():
m = DataModel("id", "type")
d1 = {}
d1["major"] = "standard"
d1["minor"] = "surface"
d1["dateObserved"] = datetime(2019, 6, 1, 18, 30, 0)
seq = [{"major": "standard", "minor": "surface", "elapsed": 3600},
{"major": "standard", "minor": "tropopause", "elapsed": 1800},
d1]
m.add("collection", seq)
assert m.json() == r'{"id": "id", "type": "type", ' \
r'"collection": {"value": [{"major": "standard", "minor": "surface", "elapsed": 3600}, ' \
r'{"major": "standard", "minor": "tropopause", "elapsed": 1800}, ' \
r'{"major": "standard", "minor": "surface", "dateObserved": "2019-06-01 18:30:00"}], ' \
r'"type": "Array"}}'
# https://fiware-datamodels.readthedocs.io/en/latest/Environment/AirQualityObserved/doc/spec/index.html#representing-air-pollutants
def test_metadata():
m = DataModel("AirQualityObserved", "AirQualityObserved")
unitsGP = {"unitCode": {"value": "GP"}}
unitsGQ = {"unitCode": {"value": "GQ"}}
m.add("CO", 500, metadata=unitsGP)
m.add("NO", 45, metadata=unitsGQ)
assert m.json() == r'{"id": "AirQualityObserved", "type": "AirQualityObserved", ' \
r'"CO": {"value": 500, "type": "Number", "metadata": {"unitCode": {"value": "GP"}}}, ' \
r'"NO": {"value": 45, "type": "Number", "metadata": {"unitCode": {"value": "GQ"}}}}'
def test_add_relationship():
# https://github.com/Fiware/tutorials.Entity-Relationships
m = DataModel("id", "type")
m.add_relationship("refStore", "Shelf", "001")
assert m.json() == r'{"id": "id", "type": "type", ' \
r'"refStore": {"value": "urn:ngsi-ld:Shelf:001", "type": "Relationship"}}'
def test_add_relationship_bad_ref():
m = DataModel("id", "type")
with pytest.raises(NgsiException, match=r".*Bad relationship.*"):
m.add_relationship("store", "Shelf", "001")
def test_add_dict():
m = DataModel("id", "type")
m.add("property", {"a": 1, "b": 2})
assert m.json() == r'{"id": "id", "type": "type", ' \
r'"property": {"value": {"a": 1, "b": 2}, "type": "Property"}}'
def test_add_address():
m = DataModel("id", "type")
addr = {"addressLocality": "London",
"postalCode": "EC4N 8AF",
"streetAddress": "25 Walbrook"}
m.add_address(addr)
assert m.json() == r'{"id": "id", "type": "type", ' \
r'"address": {"value": {"addressLocality": "London", ' \
r'"postalCode": "EC4N 8AF", "streetAddress": "25 Walbrook"}, ' \
r'"type": "PostalAddress"}}'
def test_add_transient_expire_date():
christmas_under_lockdown = datetime(
2020, 12, 25, 12, 00, tzinfo=timezone.utc)
m = DataModel("id", "type")
m.add_transient(expire=christmas_under_lockdown)
assert m.json() == r'{"id": "id", "type": "type", ' \
r'"dateExpires": {"value": "2020-12-25T12:00:00Z", "type": "DateTime"}}'
def test_add_transient_timeout_1d():
now = datetime.utcnow()
m = DataModel("id", "type")
m.add_transient(timeout=86400)
tomorrow = now + timedelta(days=1)
assert m['dateExpires']['value'][:10] == tomorrow.strftime("%Y-%m-%d")
def test_set_implicit_transient():
now = datetime.utcnow()
DataModel.set_transient(timeout=ONE_WEEK)
m = DataModel("id", "type")
a_week_later = now + timedelta(weeks=1)
assert m['dateExpires']['value'][:10] == a_week_later.strftime("%Y-%m-%d")
def test_unset_implicit_transient():
DataModel.set_transient()
DataModel.unset_transient()
assert DataModel.transient_timeout is None
m = DataModel("id", "type")
assert m.json() == r'{"id": "id", "type": "type"}'
``` |
{
"source": "jorjao81/lambda-wortschatz",
"score": 3
} |
#### File: jorjao81/lambda-wortschatz/lambda_function.py
```python
import boto3
import json
def lambda_handler(event, context):
print "PRINT hello world"
return {
"statusCode": 200,
"headers": {},
"body": "Hello World"}
if __name__ == "__main__":
print "Invoked outside of lambda"
print lambda_handler(None, None)
``` |
{
"source": "jorjao81/zh-learn",
"score": 3
} |
#### File: jorjao81/zh-learn/segment.py
```python
from pysubparser import parser
from pysubparser.util import time_to_millis
from pydub import AudioSegment
# find segments of conversation
import sys
FIVE_SECONDS = 5000
def get_segments(subtitles):
segments = []
prev_end = -1000000
curr_segment = None
for subtitle in subtitles:
this_start = time_to_millis(subtitle.start)
if this_start - prev_end > FIVE_SECONDS:
            if curr_segment is not None:
segments.append(curr_segment)
curr_segment = []
curr_segment.append(subtitle)
prev_end = time_to_millis(subtitle.end)
# append last segment
segments.append(curr_segment)
return segments
def print_segment(seg):
print(seg[0].start)
for sub in seg:
print(sub.text)
print(seg[-1].end)
print("------------------------------------")
print("Segment duration: " + str((time_to_millis(seg[-1].end) - time_to_millis(seg[0].start))/1000))
print("====================================")
audio_filename = sys.argv[1]
subtitle_filename = sys.argv[2]
subtitles = parser.parse(subtitle_filename)
segments = get_segments(subtitles)
song = AudioSegment.from_mp3(audio_filename)
folder = "out/"
episode = "e01"
n = 1
for seg in segments:
start = time_to_millis(seg[0].start) - 1000
end = time_to_millis(seg[-1].end) + 1500
cut = song[start:end]
cut.export(folder + episode + "_seg" + str(n) + ".mp3", format="mp3")
print("===== Segment " + str(n) + " ========")
print_segment(seg)
n += 1
``` |
{
"source": "jorje1908/MHMM",
"score": 3
} |
#### File: MHMM/MHMM/_misc.py
```python
import numpy as np
from scipy.special import logsumexp
def make_supervised( states_matrix, value = 0, value2 = None, value3 = 2):
"""
    Takes a matrix of values (in general 0 or 1) and produces
    a matrix in which every entry equal to `value` is replaced by -inf.
    Optionally, entries equal to `value2` are replaced by `value3`.
"""
dim0 = states_matrix.shape[0]
new_mat = np.zeros_like( states_matrix )
for i in np.arange( dim0 ):
rowi = states_matrix[i,:]
rowi[np.where(rowi == value)] = -np.Inf
if value2 is not None:
rowi[np.where(rowi == value2)] = value3
new_mat[i,:] = rowi
return new_mat
def make_supervised2( states_matrix, drop = 0.7):
"""
    Randomly replaces a fraction `drop` of the entries of states_matrix
    with -inf, hiding those labels so the data is only partially supervised.
"""
perc = drop
N, T = states_matrix.shape
states_flat = states_matrix.reshape(N*T)
#pick a random percentage of indexes to hide
indx = np.random.choice( np.arange(N*T), size = int(perc*N*T),
replace = False)
states_flat[indx] = -np.inf
new_mat = states_flat.reshape([N,T])
return new_mat
def compute_forw(hmm, data):
"""
computes the forward probabilities for all data
gets an hmm and the data
"""
N = data.shape[0]
T = data.shape[1]
zers = np.zeros(shape = [N,T])
ones = np.zeros( shape = [N,T])
for i in range(N):
forw = np.exp( hmm.log_forward(data[i,:,:]) )
zers[i] = forw[0,:]
ones[i] = forw[1,:]
return zers.reshape(N*T), ones.reshape(N*T)
######### check functions #########
def checkShape( arg1, arg2, name):
if arg1.shape != arg2.shape:
print( "Warning shapes does not match in " + name)
return
def checkSum_one( matrix, axis, name):
"""
Checks if the matrix entries along the given axis
sum to 1
"""
result = matrix.sum( axis = axis ).round(5)
value = np.all( result == 1 )
if not value:
print(" Warning: Elements do not sum to 1 in {} ".format(name))
return
def checkSum_zero( matrix, axis, name):
"""
Checks if the matrix entries along the given axis
sum to 0
"""
result = logsumexp(matrix, axis = axis ).round(5)
value = np.all( result == 0 )
if not value:
print(" Warning: Elements do not sum to 0 in {} ".format(name))
return
def make_dataset(X, points):
"""
helper function for the Kmeans Initialization
returns a dataset with points number of observations from X
"""
T = X[0].shape[0]
N = len( X )
d = X[0].shape[1]
#see how many points we need to concatenate together
indx_num = int( np.ceil( points/ T ) )
#choose random indexes
indx = np.random.choice( np.arange(N), size = indx_num, replace = False)
indx = indx.astype(int)
#return the Kmeans dataset
X_kmeans = X[indx]
X_kmeans = np.reshape( X_kmeans, [-1, d])
return X_kmeans
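# --- Usage sketch (added for illustration; not part of the original module) ---
# make_supervised turns every entry equal to `value` (default 0) into -inf,
# while make_supervised2 hides a random fraction of all entries.
if __name__ == "__main__":
    labels = np.array([[0., 1., 1., 0.],
                       [1., 0., 0., 1.]])
    print(make_supervised(labels.copy()))        # zeros become -inf
    print(make_supervised2(labels.copy(), 0.5))  # ~half the entries become -inf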
``` |
{
"source": "jorj-kare/The-Colourant-Mapping-Project",
"score": 3
} |
#### File: The-Colourant-Mapping-Project/colors/utils.py
```python
from colors.models import Colourants
from folium.plugins import MarkerCluster
import folium
def format_number(n):
return n.split('/')
def chr_format(period_from, period_to, chr_from, chr_to):
if period_from == 'bce':
chr_f = 0 - chr_from
else:
chr_f = chr_from
if period_to == 'bce':
chr_t = 0 - chr_to
else:
chr_t = chr_to
return {'chr_from': chr_f, 'chr_to': chr_t}
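# Example (added for illustration): chr_format('bce', 'ce', 500, 200) returns
# {'chr_from': -500, 'chr_to': 200}; BCE years are mapped to negative numbers
# so chronology ranges can be compared numerically further down.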
def map_generator(Lat, Lon, Loc, Col,Pig, selection):
# for chronology
if (type(selection) == dict):
period_from = 'BCE' if selection['chr_from'] < 0 else 'CE'
period_to = 'BCE' if selection['chr_to'] < 0 else 'CE'
counter = Col.count(), "colourants found from {} {} to {} {} period".format(
abs(selection['chr_from']), period_from, abs(selection['chr_to']), period_to)
else:
counter = Col.count(), selection, "found"
counter = str(counter).replace("(", "").replace(")","").replace("'", "").replace(",", "")
marker_c = MarkerCluster()
kw = {"prefix": "fa", "color": 'darkblue',"icon": "circle-o" }
for lt, ln, lc, col,pig in zip(Lat, Lon, Loc, Col,Pig):
html = f"""
<h5 style="margin:1.5px; color:#232323;">Colour:</h5>{col}
<h5 style="margin:1.5px; color:#232323;">Pigment:</h5>{pig}
</*>
"""
folium.Marker(location=[lt, ln], popup=html,icon=folium.Icon(**kw)).add_to(marker_c)
m = folium.Map(zoom_start=2, min_zoom=2, location=[lt, ln]).add_child(marker_c)
m = m._repr_html_()
return {'map': m, 'counter': counter}
def not_selection(select_by):
if not select_by:
select_by = 'colour'
selection = ''
colourant_list = Colourants.objects.all().filter(
check=True).order_by('pigment')
Lat = Colourants.objects.filter(
check=True).values_list('latitude', flat=True)
Lon = Colourants.objects.filter(
check=True).values_list('longitude', flat=True)
Loc = Colourants.objects.filter(
check=True).values_list('location', flat=True)
Col = Colourants.objects.filter(
check=True).values_list('colour', flat=True)
Pig = Colourants.objects.filter(
check=True, ).values_list('pigment', flat=True)
m = map_generator(Lat, Lon, Loc, Col,Pig, selection)['map']
counter = map_generator(Lat, Lon, Loc, Col,Pig, ('colourants'))['counter']
msg = None
return {'map': m, 'counter': counter, 'colourant_list': colourant_list, 'msg': msg}
def select(select_by, selection):
msg = None
if select_by == 'colour' and selection :
colour =selection[0]
pigment = selection[1]
if colour and not pigment:
Lat = Colourants.objects.filter(
check=True, colour=colour).values_list('latitude', flat=True)
Lon = Colourants.objects.filter(
check=True, colour=colour).values_list('longitude', flat=True)
Loc = Colourants.objects.filter(
check=True, colour=colour).values_list('location', flat=True)
Col = Colourants.objects.filter(
check=True, colour=colour).values_list('colour', flat=True)
colourant_list = Colourants.objects.all().filter(check=True, colour=colour, ).order_by('pigment')
Pig = Colourants.objects.filter(
check=True, colour=colour).values_list('pigment', flat=True)
m = map_generator(Lat, Lon, Loc, Col,Pig, colour)['map']
counter = map_generator(Lat, Lon, Loc, Col,Pig, colour)['counter']
if pigment and not colour:
Lat = Colourants.objects.filter(
check=True, pigment=pigment).values_list('latitude', flat=True)
Lon = Colourants.objects.filter(
check=True, pigment=pigment).values_list('longitude', flat=True)
Loc = Colourants.objects.filter(
check=True, pigment=pigment).values_list('location', flat=True)
Col = Colourants.objects.filter(
check=True, pigment=pigment).values_list('colour', flat=True)
Pig = Colourants.objects.filter(
check=True, pigment=pigment ).values_list('pigment', flat=True)
colourant_list = Colourants.objects.all().filter(check=True, pigment=pigment, ).order_by('pigment')
m = map_generator(Lat, Lon, Loc, Col,Pig, pigment)['map']
counter = map_generator(Lat, Lon, Loc, Col,Pig, pigment)['counter']
if colour and pigment :
Lat = Colourants.objects.filter(
check=True, colour=colour, pigment=pigment).values_list('latitude', flat=True)
Lon = Colourants.objects.filter(
check=True, colour=colour, pigment=pigment).values_list('longitude', flat=True)
Loc = Colourants.objects.filter(
check=True, colour=colour, pigment=pigment).values_list('location', flat=True)
Col = Colourants.objects.filter(
check=True, colour=colour, pigment=pigment).values_list('colour', flat=True)
Pig = Colourants.objects.filter(
check=True,colour=colour, pigment=pigment).values_list('pigment', flat=True)
if not Lat:
msg = 'colour-pigment'
m = not_selection(select_by)['map']
counter = not_selection(select_by)['counter']
colourant_list = not_selection(select_by)['colourant_list']
else:
colourant_list = Colourants.objects.all().filter(check=True,pigment=pigment,colour=colour ).order_by('pigment')
m = map_generator(Lat, Lon, Loc, Col,Pig, pigment)['map']
counter = map_generator(Lat, Lon, Loc, Col,Pig, (colour, pigment))['counter']
if select_by == 'context' and selection:
colourant_list = Colourants.objects.all().filter(check=True, category_of_find=selection).order_by('pigment')
Lat = Colourants.objects.filter(
check=True, category_of_find=selection).values_list('latitude', flat=True)
Lon = Colourants.objects.filter(
check=True, category_of_find=selection).values_list('longitude', flat=True)
Loc = Colourants.objects.filter(
check=True, category_of_find=selection).values_list('location', flat=True)
Col = Colourants.objects.filter(
check=True, category_of_find=selection).values_list('colour', flat=True)
Pig = Colourants.objects.filter(
check=True,category_of_find=selection).values_list('pigment', flat=True)
m = map_generator(Lat, Lon, Loc, Col,Pig, selection)['map']
counter = map_generator(Lat, Lon, Loc, Col,Pig, selection)['counter']
if select_by == 'chronology_from' and selection:
colourant_list = Colourants.objects.all().filter(
check=True, chronology_from__range=(selection['chr_from'], selection['chr_to']), chronology_to__range=(selection['chr_from'], selection['chr_to'])).order_by('pigment')
Lat = Colourants.objects.filter(
check=True, chronology_from__range=(selection['chr_from'], selection['chr_to']), chronology_to__range=(selection['chr_from'], selection['chr_to'])).values_list('latitude', flat=True)
Lon = Colourants.objects.filter(
check=True, chronology_from__range=(selection['chr_from'], selection['chr_to']), chronology_to__range=(selection['chr_from'], selection['chr_to'])).values_list('longitude', flat=True)
Loc = Colourants.objects.filter(
check=True, chronology_from__range=(selection['chr_from'], selection['chr_to']), chronology_to__range=(selection['chr_from'], selection['chr_to'])).values_list('location', flat=True)
Col = Colourants.objects.filter(
check=True, chronology_from__range=(selection['chr_from'], selection['chr_to']), chronology_to__range=(selection['chr_from'], selection['chr_to'])).values_list('colour', flat=True)
        Pig = Colourants.objects.filter(
            check=True, chronology_from__range=(selection['chr_from'], selection['chr_to']), chronology_to__range=(selection['chr_from'], selection['chr_to'])).values_list('pigment', flat=True)
if not Lat:
msg = 'chronology'
m = not_selection(select_by)['map']
counter = not_selection(select_by)['counter']
colourant_list = not_selection(select_by)['colourant_list']
else:
m = map_generator(Lat, Lon, Loc, Col,Pig, selection)['map']
counter = map_generator(Lat, Lon, Loc, Col,Pig, selection)['counter']
if not selection:
m = not_selection(select_by)['map']
counter = not_selection(select_by)['counter']
colourant_list = not_selection(select_by)['colourant_list']
msg = not_selection(select_by)['msg']
return {'map': m, 'counter': counter, 'colourant_list': colourant_list, 'msg': msg}
``` |
{
"source": "JorjMcKie/Nuitka",
"score": 2
} |
#### File: tools/specialize/__main__.py
```python
from __future__ import print_function
import os
from abc import abstractmethod
import jinja2
import nuitka.codegen.OperationCodes
from nuitka.tools.quality.autoformat.Autoformat import autoformat
from nuitka.__past__ import getMetaClassBase
class TypeDescBase(getMetaClassBase("Type")):
# To be overloaded
type_name = None
type_desc = None
type_decl = None
python_requirement = None
def __init__(self):
assert self.type_name
assert self.type_desc
assert self.type_decl
def __repr__(self):
return "<%s %s %s>" % (self.__class__.__name__, self.type_name, self.type_desc)
@classmethod
def getHelperCodeName(cls):
return cls.type_name.upper()
@classmethod
def getVariableDecl(cls, variable_name):
if cls.type_decl.endswith("*"):
return cls.type_decl + variable_name
else:
return cls.type_decl + " " + variable_name
@classmethod
def getCheckValueCode(cls, operand):
return "CHECK_OBJECT(%s);" % operand
@classmethod
def getTypeValueExpression(cls, operand):
return "Py_TYPE(%s)" % operand
@abstractmethod
def getNewStyleNumberTypeCheckExpression(self, operand):
pass
@staticmethod
def needsIndexConversion():
return True
def canTypeCoerceObjects(self, left):
if left is self and left is not object_desc:
return "0"
# TODO: Provide hook for float to say it can do int.
return (
"1"
if self.getSlotValueCheckExpression("type2", "nb_coerce") != "false"
else "0"
)
@classmethod
def getIntCheckExpression(cls, operand):
if cls.type_name == "int":
return "1"
elif cls.type_name == "object":
return "PyInt_CheckExact(%s)" % operand
else:
return "0"
def getIndexCheckExpression(self, operand):
if self.hasSlot("nb_index"):
return "1"
elif self.type_name == "object":
return "PyIndex_Check(%s)" % operand
else:
return "0"
def getTypeIdenticalCheckExpression(self, other, operand1, operand2):
if self is object_desc or other is object_desc:
return "%s == %s" % (operand1, operand2)
elif self is other:
return "1"
else:
return "0"
@staticmethod
def getRealSubTypeCheckCode(right, operand2, operand1):
if right is object_desc:
return "PyType_IsSubtype(%s, %s)" % (operand2, operand1)
else:
return 0
def getSlotComparisonEqualExpression(self, right, operand1, operand2):
if right is object_desc or self is object_desc:
return "%s == %s" % (operand1, operand2)
else:
return "0"
@abstractmethod
def hasSlot(self, slot):
pass
def _getSlotValueExpression(self, operand, slot):
if slot.startswith("nb_"):
return "(%s) ? %s : NULL" % (
operand
+ "->tp_as_number != NULL && "
+ self.getNewStyleNumberTypeCheckExpression(operand),
operand + "->tp_as_number->" + slot,
)
elif slot.startswith("sq_"):
return "%s ? %s : NULL" % (
operand + "->tp_as_sequence" + " != NULL",
operand + "->tp_as_sequence->" + slot,
)
else:
assert False, slot
def getSlotValueExpression(self, operand, slot):
if not self.hasSlot(slot):
return "NULL"
return self._getSlotValueExpression(operand, slot)
def getSlotValueCheckExpression(self, operand, slot):
# Virtual method, pylint: disable=unused-argument
return "true" if self.hasSlot(slot) else "false"
def getRaiseUnsupportedTypeError(self, operation, other, operand1, operand2):
args = []
if self is object_desc:
args.append("%s->tp_name" % operand1)
if other is object_desc:
args.append("%s->tp_name" % operand2)
if args:
args = ", " + ", ".join(args)
else:
args = ""
return """\
PyErr_Format(PyExc_TypeError, "unsupported operand type(s) for %s: '%s' and '%s'"%s);
return NULL;""" % (
operation,
"%s" if self is object_desc else self.type_name,
"%s" if other is object_desc else other.type_name,
args,
)
def getSameTypeSpecializationCode(
self, other, nb_slot, sq_slot, operand1, operand2
):
cand = self if self is not object_desc else other
if cand is object_desc:
assert cand is not int_desc
return ""
helper_name = cand.getHelperCodeName()
# Special case for sequence concats/repeats.
if sq_slot is not None and not cand.hasSlot(nb_slot) and cand.hasSlot(sq_slot):
slot = sq_slot
else:
slot = nb_slot
if slot == "sq_repeat":
if cand in (list_desc, tuple_desc, unicode_desc, str_desc, bytes_desc):
return ""
return "return SLOT_%s_%s_%s(%s, %s);" % (
slot,
helper_name,
helper_name,
operand1,
operand2,
)
def getTypeSpecializationCode(self, other, nb_slot, sq_slot, operand1, operand2):
if self is object_desc or other is object_desc:
return ""
if self is other:
return self.getSameTypeSpecializationCode(
other, nb_slot, sq_slot, operand1, operand2
)
return ""
@abstractmethod
def getSqConcatSlotSpecializationCode(self, other, slot, operand1, operand2):
pass
class ConcreteTypeBase(TypeDescBase):
type_decl = "PyObject *"
def _getSlotValueExpression(self, operand, slot):
if slot.startswith("nb_"):
return self.getTypeValueExpression(operand)[1:] + ".tp_as_number->" + slot
elif slot.startswith("sq_"):
return self.getTypeValueExpression(operand)[1:] + ".tp_as_sequence->" + slot
else:
assert False, slot
def getCheckValueCode(self, operand):
return """\
CHECK_OBJECT(%(operand)s);
assert(%(type_name)s_CheckExact(%(operand)s));
#if PYTHON_VERSION < 300
assert(%(is_newstyle)sNEW_STYLE_NUMBER(%(operand)s));
#endif""" % {
"operand": operand,
"type_name": self.getTypeValueExpression(operand)[1:].split("_")[0],
"is_newstyle": ""
if self.getNewStyleNumberTypeCheckExpression(operand) == "1"
else "!",
}
@abstractmethod
def getTypeValueExpression(self, operand):
pass
def getSqConcatSlotSpecializationCode(self, other, slot, operand1, operand2):
if not self.hasSlot(slot):
return ""
# TODO: Use second type eventually when we specialize those too.
return "return SLOT_%s_%s_%s(%s, %s);" % (
slot,
self.getHelperCodeName(),
other.getHelperCodeName(),
operand1,
operand2,
)
class IntDesc(ConcreteTypeBase):
type_name = "int"
type_desc = "Python2 'int'"
python_requirement = "PYTHON_VERSION < 300"
@classmethod
def getTypeValueExpression(cls, operand):
return "&PyInt_Type"
@classmethod
def getNewStyleNumberTypeCheckExpression(cls, operand):
return "1"
def hasSlot(self, slot):
if slot.startswith("nb_"):
return True
elif slot.startswith("sq_"):
return False
else:
assert False
@staticmethod
def needsIndexConversion():
return False
@staticmethod
def getAsLongValueExpression(operand):
return "PyInt_AS_LONG(%s)" % operand
int_desc = IntDesc()
class StrDesc(ConcreteTypeBase):
type_name = "str"
type_desc = "Python2 'str'"
python_requirement = "PYTHON_VERSION < 300"
@classmethod
def getTypeValueExpression(cls, operand):
return "&PyString_Type"
@classmethod
def getNewStyleNumberTypeCheckExpression(cls, operand):
return "1"
def hasSlot(self, slot):
if slot.startswith("nb_"):
return "slot" == "nb_remainder"
elif slot.startswith("sq_"):
return "ass" not in slot
else:
assert False, slot
str_desc = StrDesc()
class UnicodeDesc(ConcreteTypeBase):
type_name = "UNICODE"
type_desc = "Python2 'unicode', Python3 'str'"
@classmethod
def getTypeValueExpression(cls, operand):
return "&PyUnicode_Type"
@classmethod
def getCheckValueCode(cls, operand):
return """\
CHECK_OBJECT(%(operand)s);
assert(PyUnicode_CheckExact(%(operand)s));
assert(NEW_STYLE_NUMBER(%(operand)s));""" % {
"operand": operand
}
@classmethod
def getNewStyleNumberTypeCheckExpression(cls, operand):
return "1"
def hasSlot(self, slot):
if slot.startswith("nb_"):
return "slot" == "nb_remainder"
elif slot.startswith("sq_"):
return "ass" not in slot
else:
assert False, slot
unicode_desc = UnicodeDesc()
class FloatDesc(ConcreteTypeBase):
type_name = "float"
type_desc = "Python 'float'"
@classmethod
def getTypeValueExpression(cls, operand):
return "&PyFloat_Type"
def hasSlot(self, slot):
if slot.startswith("nb_"):
return True
elif slot.startswith("sq_"):
return False
else:
assert False, slot
@classmethod
def getNewStyleNumberTypeCheckExpression(cls, operand):
return "1"
float_desc = FloatDesc()
class TupleDesc(ConcreteTypeBase):
type_name = "tuple"
type_desc = "Python 'tuple'"
@classmethod
def getTypeValueExpression(cls, operand):
return "&PyTuple_Type"
def hasSlot(self, slot):
if slot.startswith("nb_"):
return False
elif slot.startswith("sq_"):
return "ass" not in slot
else:
assert False, slot
@classmethod
def getNewStyleNumberTypeCheckExpression(cls, operand):
return "0"
tuple_desc = TupleDesc()
class ListDesc(ConcreteTypeBase):
type_name = "list"
type_desc = "Python 'list'"
@classmethod
def getTypeValueExpression(cls, operand):
return "&PyList_Type"
def hasSlot(self, slot):
if slot.startswith("nb_"):
return False
elif slot.startswith("sq_"):
return True
else:
assert False, slot
@classmethod
def getNewStyleNumberTypeCheckExpression(cls, operand):
return "0"
list_desc = ListDesc()
class BytesDesc(ConcreteTypeBase):
type_name = "bytes"
type_desc = "Python3 'bytes'"
python_requirement = "PYTHON_VERSION >= 300"
@classmethod
def getTypeValueExpression(cls, operand):
return "&PyBytes_Type"
def hasSlot(self, slot):
if slot.startswith("nb_"):
return "slot" == "nb_remainder"
elif slot.startswith("sq_"):
return "ass" not in slot and slot != "sq_slice"
else:
assert False, slot
@classmethod
def getNewStyleNumberTypeCheckExpression(cls, operand):
return "0"
bytes_desc = BytesDesc()
class LongDesc(ConcreteTypeBase):
type_name = "long"
type_desc = "Python2 'long', Python3 'int'"
@classmethod
def getTypeValueExpression(cls, operand):
return "&PyLong_Type"
def hasSlot(self, slot):
if slot.startswith("nb_"):
return True
elif slot.startswith("sq_"):
return False
else:
assert False
@classmethod
def getNewStyleNumberTypeCheckExpression(cls, operand):
return "1"
@staticmethod
def needsIndexConversion():
return False
long_desc = LongDesc()
class ObjectDesc(TypeDescBase):
type_name = "object"
type_desc = "any Python object"
type_decl = "PyObject *"
def hasSlot(self, slot):
# Don't want to get asked, we cannot know.
assert False
def getIndexCheckExpression(self, operand):
return "PyIndex_Check(%s)" % operand
def getNewStyleNumberTypeCheckExpression(self, operand):
return "NEW_STYLE_NUMBER_TYPE(%s)" % operand
def getSlotValueExpression(self, operand, slot):
# Always check.
return self._getSlotValueExpression(operand, slot)
def getSlotValueCheckExpression(self, operand, slot):
return "(%s) != NULL" % self._getSlotValueExpression(operand, slot)
def getSqConcatSlotSpecializationCode(self, other, slot, operand1, operand2):
return ""
object_desc = ObjectDesc()
class CLongDesc(TypeDescBase):
type_name = "clong"
type_desc = "C platform long value"
type_decl = "long"
@classmethod
def getCheckValueCode(cls, operand):
return ""
@classmethod
def getTypeValueExpression(cls, operand):
return "NULL"
@classmethod
def getNewStyleNumberTypeCheckExpression(cls, operand):
return "0"
def hasSlot(self, slot):
return False
def getSqConcatSlotSpecializationCode(self, other, slot, operand1, operand2):
return ""
clong_desc = CLongDesc()
env = jinja2.Environment(
loader=jinja2.PackageLoader("nuitka.tools.specialize", "templates"),
trim_blocks=True,
lstrip_blocks=True,
)
env.undefined = jinja2.StrictUndefined
types = (
int_desc,
str_desc,
unicode_desc,
float_desc,
tuple_desc,
list_desc,
bytes_desc,
long_desc,
clong_desc,
object_desc,
)
def findTypeFromCodeName(code_name):
for candidate in types:
if candidate.getHelperCodeName() == code_name:
return candidate
add_codes = set()
def makeNbSlotCode(operand, op_code, left, emit):
key = operand, op_code, left
if key in add_codes:
return
if left == int_desc:
template = env.get_template("HelperOperationBinaryInt.c.j2")
elif left == long_desc:
template = env.get_template("HelperOperationBinaryLong.c.j2")
elif left == float_desc:
template = env.get_template("HelperOperationBinaryFloat.c.j2")
else:
return
code = template.render(
operand=operand,
left=left,
right=left,
nb_slot=_getNbSlotFromOperand(operand, op_code),
)
emit(code)
add_codes.add(key)
mul_repeats = set()
def makeMulRepeatCode(left, right, emit):
key = right, left
if key in mul_repeats:
return
template = env.get_template("HelperOperationMulRepeatSlot.c.j2")
code = template.render(left=left, right=right)
emit(code)
mul_repeats.add(key)
def _getNbSlotFromOperand(operand, op_code):
if operand == "+":
return "nb_add"
elif operand == "*":
return "nb_multiply"
elif operand == "-":
return "nb_subtract"
elif operand == "//":
return "nb_floor_divide"
elif operand == "/":
if op_code == "TRUEDIV":
return "nb_true_divide"
else:
return "nb_divide"
else:
assert False, operand
def makeHelperOperations(template, helpers_set, operand, op_code, emit_h, emit_c, emit):
emit(
'/* C helpers for type specialized "%s" (%s) operations */' % (operand, op_code)
)
emit()
for helper_name in helpers_set:
left = findTypeFromCodeName(helper_name.split("_")[3])
right = findTypeFromCodeName(helper_name.split("_")[4])
if left.python_requirement:
emit("#if %s" % left.python_requirement)
elif right.python_requirement:
emit("#if %s" % right.python_requirement)
code = left.getSameTypeSpecializationCode(
right, _getNbSlotFromOperand(operand, op_code), None, "operand1", "operand2"
)
if code:
makeNbSlotCode(
operand, op_code, left if left is not object_desc else right, emit_c
)
if operand == "*":
repeat = left.getSqConcatSlotSpecializationCode(
right, "sq_repeat", "operand2", "operand1"
)
if repeat:
makeMulRepeatCode(left, right, emit_c)
repeat = right.getSqConcatSlotSpecializationCode(
left, "sq_repeat", "operand2", "operand1"
)
if repeat:
makeMulRepeatCode(right, left, emit_c)
emit(
'/* Code referring to "%s" corresponds to %s and "%s" to %s. */'
% (
left.getHelperCodeName(),
left.type_desc,
right.getHelperCodeName(),
right.type_desc,
)
)
if operand == "+":
sq_slot = "sq_concat"
elif operand == "*":
sq_slot = "sq_repeat"
else:
sq_slot = None
code = template.render(
left=left,
right=right,
op_code=op_code,
operand=operand,
nb_slot=_getNbSlotFromOperand(operand, op_code),
sq_slot1=sq_slot,
)
emit_c(code)
emit_h("extern " + code.splitlines()[0].replace(" {", ";"))
if left.python_requirement or right.python_requirement:
emit("#endif")
emit()
def makeHelpersBinaryOperation(operand, op_code):
specialized_add_helpers_set = getattr(
nuitka.codegen.OperationCodes, "specialized_%s_helpers_set" % op_code.lower()
)
template = env.get_template("HelperOperationBinary.c.j2")
filename_c = "nuitka/build/static_src/HelpersOperationBinary%s.c" % op_code.title()
filename_h = (
"nuitka/build/include/nuitka/helper/operations_binary_%s.h" % op_code.lower()
)
with open(filename_c, "w") as output_c:
with open(filename_h, "w") as output_h:
def emit_h(*args):
writeline(output_h, *args)
def emit_c(*args):
writeline(output_c, *args)
def emit(*args):
emit_h(*args)
emit_c(*args)
def emitGenerationWarning(emit):
emit(
"/* WARNING, this code is GENERATED. Modify the template %s instead! */"
% template.name
)
emitGenerationWarning(emit_h)
emitGenerationWarning(emit_c)
filename_utils = filename_c[:-2] + "Utils.c"
if os.path.exists(filename_utils):
emit_c('#include "%s"' % os.path.basename(filename_utils))
makeHelperOperations(
template,
specialized_add_helpers_set,
operand,
op_code,
emit_h,
emit_c,
emit,
)
autoformat(filename_c, None, True)
autoformat(filename_h, None, True)
def writeline(output, *args):
if not args:
output.write("\n")
elif len(args) == 1:
output.write(args[0] + "\n")
else:
assert False, args
def main():
makeHelpersBinaryOperation("+", "ADD")
makeHelpersBinaryOperation("-", "SUB")
makeHelpersBinaryOperation("*", "MUL")
makeHelpersBinaryOperation("//", "FLOORDIV")
makeHelpersBinaryOperation("/", "TRUEDIV")
makeHelpersBinaryOperation("/", "OLDDIV")
if __name__ == "__main__":
main()
``` |
{
"source": "JorjMcKie/wxsimpleGUI",
"score": 2
} |
#### File: JorjMcKie/wxsimpleGUI/wxsimpleGUI.py
```python
import wx.lib.layoutf as layoutf
import traceback, wx, os
# TODO: Check whether this module-level wx.App() is still needed; probably not, since each function also creates its own wx.App().
app = wx.App()
def set_icon(dlg, icon):
if not icon:
return
if type(icon) is str:
ico = wx.Icon(icon)
dlg.SetIcon(ico)
return
if type(icon) == type(wx.Icon()):
        dlg.SetIcon(icon)
return
if hasattr(icon, "GetIcon"):
dlg.SetIcon(icon.GetIcon())
return
return
#-----------------------------------------------------------------------------#
# SelectOne #
#-----------------------------------------------------------------------------#
def SelectOne(title, msg, lst, size = (-1, -1), icon = None):
'''
Show a list of strings.
Arguments: title, message and a list of strings to be shown for selection.
Return will be the selected string.
'''
app = wx.App()
dlg = wx.SingleChoiceDialog(None, msg, title, lst, wx.CHOICEDLG_STYLE)
dlg.Size = size
if icon:
dlg.SetIcon(icon.GetIcon())
if dlg.ShowModal() == wx.ID_OK:
sel = dlg.GetStringSelection()
else:
sel = None
dlg.Destroy()
del app
return sel
#-----------------------------------------------------------------------------#
# SelectMult #
#-----------------------------------------------------------------------------#
def SelectMult(title, msg, lst, preselect=None, size = (-1, -1), icon = None):
'''
Show a list of strings with a check box each.
Args: title, message, list and an optional list of integers containing to
indicate which items should appear as preselected.
Return is a list of integers of the selected item index.
'''
app = wx.App()
dlg = wx.MultiChoiceDialog(None, msg, title, lst)
if icon:
dlg.SetIcon(icon.GetIcon())
if type(preselect) == type([]):
dlg.SetSelections(preselect)
dlg.Size = size
if (dlg.ShowModal() == wx.ID_OK):
selections = dlg.GetSelections()
else:
selections = None
dlg.Destroy()
del app
return selections
#-----------------------------------------------------------------------------#
# DirDlg #
#-----------------------------------------------------------------------------#
def DirDlg(title="Choose a directory:",
startdir = os.getcwd(), size =(-1, -1), icon = None):
app = wx.App()
dlg = wx.DirDialog(None, title, pos=(-1,-1),
style=wx.DD_DEFAULT_STYLE | wx.DD_DIR_MUST_EXIST | \
wx.DD_CHANGE_DIR)
if icon:
dlg.SetIcon(icon.GetIcon())
dlg.SetPath(startdir)
dlg.Size = size
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
else:
path = None
dlg.Destroy()
del app
return path
#-----------------------------------------------------------------------------#
# OpenDlg #
#-----------------------------------------------------------------------------#
def OpenDlg(title="Choose files", mult = True, icon = None,
startdir = os.getcwd(), wildcard = None, size = (-1, -1)):
'''
Returns a list of selected files.
'''
app = wx.App()
if wildcard is None:
wild = "Python Files (*.py*)|*.py*|" \
"All Files (*.*)|*.*"
else:
wild = wildcard
if mult:
dlg = wx.FileDialog(None, message = title,
defaultDir = startdir,
defaultFile = "",
wildcard=wild,
style = wx.FD_OPEN | wx.FD_MULTIPLE | wx.FD_CHANGE_DIR)
else:
dlg = wx.FileDialog(None, message = title,
defaultDir = startdir,
defaultFile = "",
wildcard = wild,
style = wx.FD_OPEN | wx.FD_CHANGE_DIR)
dlg.Size = size
if icon:
dlg.SetIcon(icon.GetIcon())
# Show the dialog and retrieve the user response.
# If OK process data.
if dlg.ShowModal() == wx.ID_OK:
# This returns a Python list of files that were selected.
paths = dlg.GetPaths()
else:
paths = None
dlg.Destroy()
del app
return paths
#-----------------------------------------------------------------------------#
# ExcBox #
#-----------------------------------------------------------------------------#
def ExcBox(title="Exception"):
'''
Return a message box with traceback content of the last exception.
'''
app = wx.App()
trc = traceback.format_exc()
wx.MessageBox(trc, title)
del app
return
#-----------------------------------------------------------------------------#
# YesNoBox #
#-----------------------------------------------------------------------------#
def YesNoBox(title, msg="", icon = None):
'''
Show a YES/NO box and return True or False.
'''
app = wx.App()
dlg = wx.MessageDialog(None, msg, title, wx.YES_NO | wx.ICON_QUESTION)
if icon:
dlg.SetIcon(icon.GetIcon())
result = dlg.ShowModal()
dlg.Destroy()
del app
if result == wx.ID_YES: return True
return False
#-----------------------------------------------------------------------------#
# InputBox #
#-----------------------------------------------------------------------------#
def InputBox(title, msg, default="", icon = None):
'''
Returns: user entered string.
None if user cancelled
'''
app = wx.App()
dlg = wx.TextEntryDialog(None, msg, title, default)
if icon:
dlg.SetIcon(icon.GetIcon())
if dlg.ShowModal() == wx.ID_OK:
rc = dlg.GetValue()
if not rc: rc = None
else:
rc = None
dlg.Destroy()
del app
return rc
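# Example (added for illustration; prompt text is hypothetical):
#   name = InputBox("Setup", "Please enter your name:", default="guest")
# returns the entered string, or None if the user cancelled the dialog.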
#-----------------------------------------------------------------------------#
# PasswordBox #
#-----------------------------------------------------------------------------#
def PasswordBox(title, msg, icon = None):
'''
Returns: user entered password.
None if user cancelled
'''
app = wx.App()
dlg = wx.PasswordEntryDialog(None, msg, title, defaultValue = wx.EmptyString)
if icon:
dlg.SetIcon(icon.GetIcon())
if dlg.ShowModal() == wx.ID_OK:
rc = dlg.GetValue()
if not rc: rc = None
else:
rc = None
dlg.Destroy()
del app
return rc
#-----------------------------------------------------------------------------#
# MultInputBox #
#-----------------------------------------------------------------------------#
def MultInputBox(title, msg_text, Label, Feld, icon = None):
'''
Show two lists: one with field labels and one with field contents. User
entries will change the field contents. Can be used for simple data entries.
'''
class MyDialog(wx.Dialog):
def __init__(self, parent=None, msg="", caption="",
pos=(-1,-1), size=(500,300),
style=wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER | \
wx.MAXIMIZE_BOX | wx.MINIMIZE_BOX | \
wx.FULL_REPAINT_ON_RESIZE):
wx.Dialog.__init__(self, parent, -1, caption, pos, size, style)
app = wx.App()
dlg = MyDialog()
dlg.Position = (-1, -1)
dlg.Title = title
msg = wx.StaticText(dlg, -1, msg_text.ljust(100," "))
okay = wx.Button(dlg, wx.ID_OK) # OK btn
okay.SetDefault()
cancel = wx.Button(dlg, wx.ID_CANCEL) # CANCEL btn
sizer = wx.BoxSizer(wx.VERTICAL) # Box Sizer
    sizer.Add(msg, 0, wx.ALL, 5)                          # row 1 = explanation text
sizer.Add(wx.StaticLine(dlg), 0, wx.EXPAND|wx.ALL, 5) # then a line
num_fields = len(Feld)
if num_fields != len(Label):
raise ValueError("unequal number of labels and fields")
field_lbl = list(range(num_fields))
field_cont = list(range(num_fields))
fgs = wx.FlexGridSizer(rows=num_fields, cols=2, hgap=5, vgap=5)
for i in range(num_fields):
field_lbl[i] = wx.StaticText(dlg, -1, Label[i]) # label
field_cont[i] = wx.TextCtrl(dlg) # content
field_cont[i].Value = Feld[i] # fill in supplied
fgs.Add(field_lbl[i], 0, wx.ALIGN_RIGHT) # label right aligned
fgs.Add(field_cont[i], 0, wx.EXPAND) # expand content
fgs.AddGrowableCol(1)
sizer.Add(fgs, 0, wx.EXPAND|wx.ALL, 5)
btns = wx.StdDialogButtonSizer() # define button sizer
btns.AddButton(okay)
btns.AddButton(cancel)
btns.Realize()
sizer.Add(btns, 0, wx.EXPAND|wx.ALL, 5) # add btn size
if icon:
dlg.SetIcon(icon.GetIcon())
dlg.SetSizer(sizer)
sizer.Fit(dlg)
dlg.Center()
rc = dlg.ShowModal()
if rc != wx.ID_OK: # do nothing
dlg.Destroy()
return None
for i in range(num_fields): # put inputs back
Feld[i] = field_cont[i].Value
dlg.Destroy()
del app
return True
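# Example (added for illustration; labels and values are hypothetical):
#   labels = ["First name", "Last name"]
#   fields = ["", ""]
#   if MultInputBox("Contact", "Please fill in:", labels, fields):
#       print(fields)  # `fields` now holds the user's entries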
#-----------------------------------------------------------------------------#
# MsgBox #
# TODO add option to play 'beep' sound. Currently ALWAYS beeping (annoying) #
#-----------------------------------------------------------------------------#
def MsgBox(title, msg):
app = wx.App()
wx.MessageBox(msg, title)
del app
return
#-----------------------------------------------------------------------------#
# BusyInfo #
#-----------------------------------------------------------------------------#
def BusyInfo(title, msg, image = None):
'''
Show a "busy" message. Will not block but return the busy-object.
Important: this will NEVER disappear - except when you delete this object!
E.g. by setting busy = None oder del busy.
'''
import wx.lib.agw.pybusyinfo as PBI
app = wx.App()
if not image:
img = wx.NullBitmap
elif type(image) == type(u""):
if image.endswith(".ico"):
icon = wx.Icon(image, wx.BITMAP_TYPE_ICO)
img = wx.BitmapFromIcon(icon)
else:
img = wx.Bitmap(image, wx.BITMAP_TYPE_ANY)
else:
img = image.GetBitmap()
busy = PBI.PyBusyInfo(msg, parent=None, title=title, icon=img)
wx.Yield()
return busy
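# Usage sketch (illustrative only): keep a reference to the returned object and
# delete it to dismiss the message.
#
#   busy = BusyInfo("Please wait", "Crunching numbers ...")
#   do_long_running_work()   # hypothetical placeholder for the real work
#   del busy                 # only now does the message disappear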
#-----------------------------------------------------------------------------#
# CodeBoxFF #
#-----------------------------------------------------------------------------#
class CodeBoxFF(wx.Dialog):
def __init__(self, parent, msg, caption, FF=True, fsize = 10, icon = None,
pos=(-1,-1) , size=(500,300),
style=wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER | \
wx.MAXIMIZE_BOX | wx.MINIMIZE_BOX | \
wx.FULL_REPAINT_ON_RESIZE):
wx.Dialog.__init__(self, parent, -1, caption, pos, size, style)
if icon:
self.SetIcon(icon.GetIcon())
# always center on screen
self.CenterOnScreen(wx.BOTH)
self.text = text = wx.TextCtrl(self, -1, msg,
style=wx.TE_MULTILINE | wx.TE_READONLY)
# default 10-point fixed font (DejaVu Sans Mono)
if FF:
self.text.SetFont(wx.Font(fsize, wx.MODERN, wx.NORMAL,
wx.NORMAL, 0, "DejaVu Sans Mono"))
else:
self.text.SetFont(wx.Font(fsize, wx.MODERN, wx.NORMAL,
wx.NORMAL, 0, "Calibri"))
ok = wx.Button(self, wx.ID_OK, "OK")
lc = layoutf.Layoutf('t=t5#1;b=t5#2;l=l5#1;r=r5#1', (self,ok))
text.SetConstraints(lc)
lc = layoutf.Layoutf('b=b5#1;x%w50#1;w!80;h*', (self,))
ok.SetConstraints(lc)
ok.SetDefault()
self.SetAutoLayout(1)
self.Layout()
#-----------------------------------------------------------------------------#
# CodeBox #
#-----------------------------------------------------------------------------#
def ScrollingTextbox(title, text = None, filename = None, size=(800,600), FF=True, icon = None):
'''
    Show the contents of a file, or arbitrary text, in a scrollable window.
    Pass exactly one of text (a string, or a list/tuple of strings) or
    filename (the file whose contents should be displayed).
    Use FF to choose between a monospaced (True) and a proportional font.
'''
app = wx.App()
    if bool(text) == bool(filename):
        raise ValueError("need exactly one of text or filename")
    if text and isinstance(text, (list, tuple)):
        msg_d = "\n".join(text)
    elif filename:  # display the contents of a file
        try:  # let's hope this goes well ...
            msg_d = open(filename).read()
        except Exception:  # I knew it!
            msg_d = filename + "\ndoes not exist!"
else:
msg_d = text
dlg = CodeBoxFF(None, msg_d, title, size=size, FF=FF, icon = icon)
dlg.ShowModal()
dlg.Destroy()
del app
return
# ------------------------------------------------------------------------- #
# ProgressMeter #
# ------------------------------------------------------------------------- #
class ProgessBar:
'''
    Display a progress meter without blocking.
    The dialog's Cancel button lets the user abort early.
'''
def __init__(self, title, msg, maxItems, icon = None):
self._app = wx.App()
self._meter = wx.GenericProgressDialog(title, msg, maxItems,
style=wx.PD_CAN_ABORT
| wx.PD_ELAPSED_TIME
| wx.PD_AUTO_HIDE
| wx.PD_REMAINING_TIME
| wx.PD_ESTIMATED_TIME)
self.maxitems = maxItems
self.lastitem = 0
set_icon(self._meter, icon)
def update(self, msg, currentItemNumber):
if self.lastitem >= self.maxitems: # we have already been closed
return False
if currentItemNumber > self.maxitems: # no exception if number too high
self.lastitem = self.maxitems
else:
self.lastitem = currentItemNumber
keepGoing, _ = self._meter.Update(self.lastitem, newmsg=msg)
return keepGoing
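# Usage sketch (illustrative only; the item count and loop body are made up):
#
#   bar = ProgessBar("Copying", "Copying files ...", maxItems=100)
#   for i in range(1, 101):
#       if not bar.update("Copying file %d" % i, i):
#           break   # user pressed Cancel (or the dialog has already closed)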
``` |
{
"source": "jorjun/grow-python",
"score": 2
} |
#### File: library/tests/test_lock.py
```python
import time
def test_pumps_actually_stop(GPIO, smbus):
from grow.pump import Pump, global_lock
ch1 = Pump(channel=1)
ch1.dose(speed=0.5, timeout=0.05, blocking=False)
time.sleep(0.1)
assert ch1.get_speed() == 0
def test_pumps_are_mutually_exclusive(GPIO, smbus):
from grow.pump import Pump, global_lock
ch1 = Pump(channel=1)
ch2 = Pump(channel=2)
ch3 = Pump(channel=3)
ch1.dose(speed=0.5, timeout=1.0, blocking=False)
assert global_lock.locked() is True
assert ch2.dose(speed=0.5) is False
assert ch2.dose(speed=0.5, blocking=False) is False
assert ch3.dose(speed=0.5) is False
assert ch3.dose(speed=0.5, blocking=False) is False
def test_pumps_run_sequentially(GPIO, smbus):
from grow.pump import Pump, global_lock
ch1 = Pump(channel=1)
ch2 = Pump(channel=2)
ch3 = Pump(channel=3)
assert ch1.dose(speed=0.5, timeout=0.1, blocking=False) is True
assert global_lock.locked() is True
time.sleep(0.3)
assert ch2.dose(speed=0.5, timeout=0.1, blocking=False) is True
assert global_lock.locked() is True
time.sleep(0.3)
assert ch3.dose(speed=0.5, timeout=0.1, blocking=False) is True
assert global_lock.locked() is True
time.sleep(0.3)
``` |
{
"source": "jorke11/wiegand_raspberry",
"score": 3
} |
#### File: jorke11/wiegand_raspberry/api_rest.py
```python
import requests
import base64
class API_REST:
def inputGate(self,card_id,gate):
url = f'http://18.213.76.34/output/{card_id}'
print("RESPONSE API INPUT")
print(f'INPUT[] URL {url} gate={gate} card_id={card_id}')
response = requests.put(url,json={"gate":gate},verify=False).json()
return response
def outputGate(self,card_id,gate):
url = f'http://18.213.76.34/output/{card_id}'
print(f'OUTPUT:[] URL {url} gate={gate} card_id={card_id}')
response = requests.put(url,json={"gate":gate},verify=False).json()
print("RESPONSE API")
print(response)
return response
#api = API_REST()
#api.inputGate("0008949774",2)
```
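
A minimal usage sketch for the class above; the card id is taken from the commented-out example, the gate numbers are made-up placeholders, and the behaviour of the calls depends entirely on the remote service behind the hard-coded URL:

```python
from api_rest import API_REST

api = API_REST()
# register an entry event for a card at gate 1, then an exit event at gate 2
entry = api.inputGate("0008949774", 1)
leave = api.outputGate("0008949774", 2)
print(entry, leave)
```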
#### File: jorke11/wiegand_raspberry/server.py
```python
from fastapi import FastAPI
from board import *
board = BOARD()
app = FastAPI()
@app.get("/")
async def root():
return {"message":"Server Raspberry PI"}
@app.get("/open")
async def openDoor():
board.activateRelay(7,4)
return {"open Gate":"Opening and Close Door"}
``` |
{
"source": "jorkro/wirecaml",
"score": 2
} |
#### File: wirecaml/tests/tools_data_tools_tests.py
```python
from wirecaml.tools.data_tools import slice_perc
def setup():
pass
def teardown():
pass
def test_slice_perc():
for n in range(0, 100):
x = list(range(0, n))
l1 = slice_perc(x, 0, 70)
l2 = slice_perc(x, 70, 80)
l3 = slice_perc(x, 80, 100)
assert(len(l1) + len(l2) + len(l3) == n)
```
#### File: wirecaml/extraction/definition_register.py
```python
from functools import reduce
import math
class DefinitionRegister:
defs_dict = dict()
defs_list = []
next_bit = 0
@staticmethod
def add_to_defs(assign, data):
if assign not in DefinitionRegister.defs_dict:
DefinitionRegister.defs_dict[assign] = []
DefinitionRegister.defs_dict[assign].append(data)
return DefinitionRegister.defs_dict[assign]
@staticmethod
def get_gen_kill(node, assign):
gen_bit = 1 << DefinitionRegister.next_bit
DefinitionRegister.defs_list.append(node)
d = DefinitionRegister.add_to_defs(assign, (gen_bit, node))
kill_bits = reduce(lambda x, y: x | y, [bit for bit, _ in d])
kill_bits &= ~gen_bit
DefinitionRegister.next_bit += 1
return gen_bit, kill_bits
@staticmethod
def get_def_bit(bit):
return DefinitionRegister.defs_list[int(math.log(bit, 2))]
@staticmethod
def get_def_int(i):
return DefinitionRegister.defs_list[i]
@staticmethod
def get_def_bitmask(assign_vars):
bitmask = 0
for assign in assign_vars:
if assign not in DefinitionRegister.defs_dict:
continue
for gen_bit, _ in DefinitionRegister.defs_dict[assign]:
bitmask |= gen_bit
return bitmask
@staticmethod
def reset():
DefinitionRegister.defs_dict = dict()
DefinitionRegister.defs_list = []
DefinitionRegister.next_bit = 0
```
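
A short sketch of how the register above hands out gen/kill bit masks for reaching-definitions bookkeeping; the node labels are arbitrary stand-ins for real AST nodes, and the import assumes the package layout implied by the file path:

```python
from wirecaml.extraction.definition_register import DefinitionRegister

DefinitionRegister.reset()

# Two successive definitions of the same variable "$a": the second definition
# kills the first, so its kill mask contains the first definition's gen bit.
gen1, kill1 = DefinitionRegister.get_gen_kill("node: $a = 1", "$a")
gen2, kill2 = DefinitionRegister.get_gen_kill("node: $a = 2", "$a")

print(bin(gen1), bin(kill1))                             # 0b1 0b0
print(bin(gen2), bin(kill2))                             # 0b10 0b1
print(DefinitionRegister.get_def_bit(gen2))              # node: $a = 2
print(bin(DefinitionRegister.get_def_bitmask(["$a"])))   # 0b11
```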
#### File: extraction/phptraverser/php_traverser.py
```python
from phply.phpast import *
from wirecaml.extraction.phptraverser.php_listener import PHPListener
def traverse(nodes, listener: PHPListener):
for x in nodes:
traverse_node(x, listener)
def traverse_node(node, listener: PHPListener):
if listener.is_traversed(node):
return
#
# $a = 1
#
if isinstance(node, Assignment):
listener.enter_assignment(node)
traverse_node(node.node, listener)
traverse_node(node.expr, listener)
listener.exit_assignment(node)
#
# $a += 1
#
if isinstance(node, AssignOp):
listener.enter_assign_op(node)
listener.exit_assign_op(node)
#
# $a == $b
#
if isinstance(node, BinaryOp):
listener.enter_binary_op(node)
traverse_node(node.left, listener)
traverse_node(node.right, listener)
listener.exit_binary_op(node)
#
# Unclear
#
if isinstance(node, Block):
listener.enter_block(node)
for x in node.nodes:
traverse_node(x, listener)
listener.exit_block(node)
#
# while ($a)
#
if isinstance(node, DoWhile):
listener.enter_do_while(node)
traverse_node(node.node, listener)
traverse_node(node.expr, listener)
listener.exit_do_while(node)
#
# echo "..."
#
if isinstance(node, Echo):
listener.enter_echo(node)
for x in node.nodes:
traverse_node(x, listener)
listener.exit_echo(node)
#
# for($i = 0; $i < $n; $i++)
#
if isinstance(node, For):
listener.enter_for(node)
traverse_node(node.node, listener)
listener.exit_for(node)
#
# foreach ($a as $b)
#
if isinstance(node, Foreach):
listener.enter_foreach(node)
traverse_node(node.expr, listener)
traverse_node(node.node, listener)
listener.exit_foreach(node)
#
# function foo($a, $b)
#
if isinstance(node, Function):
listener.enter_function_declaration(node)
for x in node.nodes:
traverse_node(x, listener)
listener.exit_function_declaration(node)
#
# foo($a, $b)
#
if isinstance(node, FunctionCall):
listener.enter_function_call(node)
for param in node.params:
traverse_node(param, listener)
listener.exit_function_call(node)
#
# if ($a)
#
if isinstance(node, If):
listener.enter_if(node)
traverse_node(node.expr, listener)
traverse_node(node.node, listener)
for elseif in node.elseifs:
listener.enter_if(node)
traverse_node(elseif.node, listener)
listener.exit_if(node)
if node.else_:
listener.enter_else(node)
traverse_node(node.else_.node, listener)
listener.exit_else(node)
listener.exit_if(node)
#
# $a->method()
#
if isinstance(node, MethodCall):
listener.enter_method_call(node)
for param in node.params:
traverse_node(param, listener)
listener.exit_method_call(node)
#
# return $a
#
if isinstance(node, Return):
listener.enter_return(node)
traverse_node(node.node, listener)
listener.exit_return(node)
#
# $a
#
if isinstance(node, Variable):
listener.enter_variable(node)
listener.exit_variable(node)
#
# while ($a)
#
if isinstance(node, While):
listener.enter_while(node)
traverse_node(node.expr, listener)
traverse_node(node.node, listener)
listener.exit_while(node)
```
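
The traverser above only drives callbacks; what happens at each node is up to the `PHPListener` implementation. Below is an illustrative listener that counts function calls. It assumes that `PHPListener` provides no-op defaults plus the `is_traversed` bookkeeping used by the traverser (its definition is not shown here), and that phply's `FunctionCall` nodes expose a `name` attribute:

```python
from wirecaml.extraction.phptraverser.php_listener import PHPListener
from wirecaml.extraction.phptraverser import php_traverser


class CallCounter(PHPListener):
    """Counts FunctionCall nodes seen during traversal (sketch only)."""

    def __init__(self):
        super().__init__()
        self.calls = {}

    def enter_function_call(self, node):
        self.calls[node.name] = self.calls.get(node.name, 0) + 1


# nodes would come from parsing a PHP file with phply (not shown here):
# listener = CallCounter()
# php_traverser.traverse(nodes, listener)
# print(listener.calls)
```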
#### File: wirecaml/preparation/dataset_custom.py
```python
import os
import pickle
from os import walk
from os.path import isdir, join, abspath
from wirecaml.preparation.dataset import Dataset
from wirecaml.tools import config
from wirecaml.tools.ascii import print_notice
class CustomDataset(Dataset):
def __init__(self):
super(CustomDataset, self).__init__(config.get_str('analysis', 'CustomPickle'))
@staticmethod
def add_to_list(lst, language, vuln_type, file):
if language not in lst:
lst[language] = {vuln_type: [file]}
elif vuln_type not in lst[language]:
lst[language][vuln_type] = [file]
else:
lst[language][vuln_type].append(file)
return lst
def create_list(self, app_path, languages, vuln_types):
vuln_list = {}
training_set = {}
tuning_set = {}
testing_set = {}
flaw_dict = {}
if isdir(app_path):
files = [abspath(join(dp, f)) for dp, dn, fn in walk(app_path) for f in fn if f.endswith('.php') or
f.endswith('.phar')]
else:
files = None
# Create list
for language in languages:
if language not in flaw_dict:
flaw_dict[language] = {}
for vuln_type in vuln_types:
if vuln_type not in flaw_dict:
flaw_dict[language][vuln_type] = {}
if isdir(app_path) and files is not None:
for file in files:
vuln_list = self.add_to_list(vuln_list, language, vuln_type, file)
for language in languages:
training_set[language] = {}
tuning_set[language] = {}
testing_set[language] = {}
for vuln_type in vuln_types:
if vuln_type not in vuln_list[language]:
continue
training_set[language][vuln_type] = []
tuning_set[language][vuln_type] = []
testing_set[language][vuln_type] = vuln_list[language][vuln_type]
return {'training_set': training_set, 'tuning_set': tuning_set, 'testing_set': testing_set,
'flaw_dict': flaw_dict}
def create_sets(self):
source_dir = config.get_str('analysis', 'CustomTestSet')
custom_pickle = config.get_str('analysis', 'CustomPickle')
languages = config.get_list('dataset', 'Languages')
vulnerabilities = config.get_list('dataset', 'Vulnerabilities')
if not os.path.isfile(custom_pickle):
dataset = self.create_list(source_dir, languages, vulnerabilities)
# Save to pickle file for future use
with open(custom_pickle, 'wb') as pickle_file:
pickle.dump(dataset, pickle_file)
else:
print_notice("Pickle file already created")
def get_sets(self):
sets = super(CustomDataset, self).get_sets()
return sets
```
#### File: wirecaml/preparation/dataset.py
```python
import os
import random
import pickle
from wirecaml.tools import config
from wirecaml.tools.ascii import print_notice, print_warning
class Dataset:
def __init__(self, pickle_path):
self.pickle_path = pickle_path
self.sampling_perc = dict()
self.sampling_perc['SQLi'] = config.get_float('dataset', 'SamplingPercentageSQLi')
self.sampling_perc['XSS'] = config.get_float('dataset', 'SamplingPercentageXSS')
def create_sets(self):
pass
def get_sets(self):
pkl = self.pickle_path
# Load the pickle file
print_notice("Loading pickle file")
with open(pkl, 'rb') as pickle_file:
sets = pickle.load(pickle_file)
if self.sampling_perc['SQLi'] < 1.0 or self.sampling_perc['XSS'] < 1.0:
return self.sample_set(sets)
return sets
def sample_set(self, sets):
filtered_set = dict()
filtered_set['flaw_dict'] = sets['flaw_dict']
for set_name in ['training_set', 'tuning_set', 'testing_set']:
filtered_set[set_name] = dict()
filtered_set[set_name]['PHP'] = dict()
for vuln_type in ['SQLi', 'XSS']:
filtered_set[set_name]['PHP'][vuln_type] = []
for file in sets[set_name]['PHP'][vuln_type]:
r = random.random()
if file in sets['flaw_dict']['PHP'][vuln_type] and len(sets['flaw_dict']['PHP'][vuln_type][file]) > 0:
filtered_set[set_name]['PHP'][vuln_type].append(file)
elif r < self.sampling_perc[vuln_type]:
filtered_set[set_name]['PHP'][vuln_type].append(file)
return filtered_set
def delete_sets(self):
pkl = self.pickle_path
if os.path.isfile(pkl):
print_notice("Removing %s" % pkl)
os.remove(pkl)
else:
print_warning("Unable to remove %s. File does not exist." % pkl)
``` |
{
"source": "jorlamd/noms",
"score": 3
} |
#### File: noms/client/main.py
```python
import requests
import json
import copy
import operator
from itertools import islice
from .dict_parse import search_parse, food_parse
from ..objects.nutrient_dict import nutrient_dict
class SearchResults():
"""
An object returned by Client.search_query which stores a Python dictionary
containing all of the search result information.
"""
def __init__(self, json):
self.json = json
def __str__(self, max_entries=None):
r_str = ""
if self.json == None:
r_str += "There are no search results for this query\n"
else:
r_str +="="*112 + "\n"
r_str +="Search results for \'{}\' on USDA Standard Reference Database".format(self.json["search_term"]) + "\n"
r_str +="="*112 + "\n"
print(self.json.keys())
if max_entries == None:
max_entries = len(self.json["items"]["foods"])
if max_entries < len(self.json["items"]["foods"]):
self.json["items"]["foods"] = self.json["items"]["foods"][:max_entries]
self.json["items"]["foods"].sort(key=operator.itemgetter("foodCategory"))
r_str +="{name:<72} {group:^30} {id:>8}".format(name="Name",group="Group",id="ID") + "\n"
for item in self.json["items"]["foods"]:
if len(item["description"]) > 70:
item["description"] = item["description"][:70] + ".."
if len(item["foodCategory"]) > 28:
item["foodCategory"] = item["foodCategory"][:28] + ".."
r_str +="{name:<72} {group:^30} {id:>8}".format(name=item["description"],group=item["foodCategory"],id=item["fdcId"]) + "\n"
r_str +="="*112 + "\n"
return r_str
class Client:
"""
The Client class is used to interface with the USDA Standard Reference Database
API. It must be initialized with an API key.
"""
url = 'https://api.nal.usda.gov/fdc/v1/foods'
def __init__(self, key):
"""
A Client instance must be initialized with a key from
data.gov. This is free to obtain, and you can request one
here: https://api.data.gov/signup/
"""
self.key = key
def call(self, params, url_suffix):
""" target_url could be:
https://api.nal.usda.gov/usda/ndb/V2/reports
https://api.nal.usda.gov/usda/ndb/search
depending on which service of the api is being used
"""
target_url = self.url + url_suffix
# add the key to the API call
call_params = dict(params, api_key=self.key)
response = json.loads(requests.get(url=target_url, params=call_params).text)
return response
def search_query(self, name):
params = dict(
query=name,
ds='Standard Reference',
format='json'
)
result = search_parse(self.call(params,'/search'))
if result == None:
return None
else:
return SearchResults(search_parse(self.call(params, '/search')))
def food_query(self, ids):
# allow for either a single id (ndbno) query, or a list of queries
if type(ids) == list:
if len(ids) > 25:
raise Exception("Too many Food ID arguments. API limits it to 25.")
params = dict(ndbno=ids)
params.update(dict(type='f', format='json'))
return_obj = self.call(params, '/V2/reports')
offset = 0
if 'foods' not in return_obj:
print("See the following error: {}".format(return_obj))
return None
for i in range(0, len(return_obj["foods"])):
if 'error' in return_obj["foods"][i-offset].keys():
del return_obj["foods"][i-offset]
offset += 1
return return_obj
def get_foods(self, id_value_dict):
# If more than 25 words are being queried, split it up
if len(id_value_dict.keys()) > 25:
print("Must call the database {} times, this may take a couple moments. Status: {leng}/{leng}".format(len(id_value_dict.keys())//25+1,leng=len(id_value_dict.keys())))
dict_copy = id_value_dict.copy()
food_obj = []
while len(dict_copy.keys()) > 25:
current_dict = {}
items = islice(dict_copy.items(), 25)
current_dict.update(items)
call = self.food_query(current_dict.keys())
food_obj += food_parse(call, nutrient_dict, list(current_dict.values()))
for key in current_dict.keys():
del dict_copy[key]
print("Status: {}/{}".format(len(dict_copy.keys()), len(id_value_dict.keys())))
call = self.food_query(dict_copy.keys())
food_obj += food_parse(call, nutrient_dict, list(dict_copy.values()))
print("Complete!")
else:
food_obj = self.food_query(id_value_dict.keys())
food_obj = food_parse(food_obj, nutrient_dict, list(id_value_dict.values()))
return food_obj
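# Usage sketch (illustrative only; the API key and FDC ids below are placeholders):
#
#   client = Client("MY_DATA_GOV_API_KEY")
#   results = client.search_query("cheddar cheese")
#   print(results)
#   # map food ids to gram amounts and fetch parsed Food objects for them
#   foods = client.get_foods({"173417": 100, "171287": 50})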
``` |
{
"source": "jorlamd/turbinia",
"score": 2
} |
#### File: turbinia/turbinia/client_test.py
```python
from __future__ import unicode_literals
from datetime import datetime
from datetime import timedelta
import json
import unittest
import os
import shutil
import tempfile
import textwrap
import mock
from turbinia import config
from turbinia.client import TurbiniaClient
from turbinia.client import TurbiniaServer
from turbinia.client import TurbiniaStats
from turbinia.client import TurbiniaPsqWorker
from turbinia.client import check_dependencies
from turbinia import TurbiniaException
SHORT_REPORT = textwrap.dedent(
"""\
# Turbinia report 0xFakeRequestId
* Processed 3 Tasks for user myuser
# High Priority Tasks
* TaskName2: This second fake task executed
# Successful Tasks
* TaskName: This fake task executed
# Failed Tasks
* TaskName3: Third Task Failed...
# Scheduled or Running Tasks
* None
""")
LONG_REPORT = textwrap.dedent(
"""\
# Turbinia report 0xFakeRequestId
* Processed 3 Tasks for user myuser
# High Priority Tasks
## TaskName2
* **Status:** This second fake task executed
* Task Id: 0xfakeTaskId2
* Executed on worker fake_worker2
### Task Reported Data
#### Fake High priority Report
* Fake Bullet
# Successful Tasks
* TaskName: This fake task executed
# Failed Tasks
* TaskName3: Third Task Failed...
# Scheduled or Running Tasks
* None
""")
LONG_REPORT_FILES = textwrap.dedent(
"""\
# Turbinia report 0xFakeRequestId
* Processed 3 Tasks for user myuser
# High Priority Tasks
## TaskName2
* **Status:** This second fake task executed
* Task Id: 0xfakeTaskId2
* Executed on worker fake_worker2
### Task Reported Data
#### Fake High priority Report
* Fake Bullet
### Saved Task Files:
* `/no/path/2`
* `/fake/path/2`
# Successful Tasks
* TaskName: This fake task executed
* `/no/path/`
* `/fake/path`
# Failed Tasks
* TaskName3: Third Task Failed...
* `/no/path/3`
* `/fake/path/3`
# Scheduled or Running Tasks
* None
""")
STATISTICS_REPORT = textwrap.dedent(
"""\
Execution time statistics for Turbinia:
All Tasks: Count: 3, Min: 0:01:00, Mean: 0:03:00, Max: 0:05:00
Successful Tasks: Count: 2, Min: 0:01:00, Mean: 0:05:00, Max: 0:05:00
Failed Tasks: Count: 1, Min: 0:03:00, Mean: 0:03:00, Max: 0:03:00
Total Request Time: Count: 2, Min: 0:03:00, Mean: 0:21:00, Max: 0:21:00
Task type TaskName: Count: 1, Min: 0:01:00, Mean: 0:01:00, Max: 0:01:00
Task type TaskName2: Count: 1, Min: 0:05:00, Mean: 0:05:00, Max: 0:05:00
Task type TaskName3: Count: 1, Min: 0:03:00, Mean: 0:03:00, Max: 0:03:00
Worker fake_worker: Count: 2, Min: 0:01:00, Mean: 0:03:00, Max: 0:03:00
Worker fake_worker2: Count: 1, Min: 0:05:00, Mean: 0:05:00, Max: 0:05:00
User myuser: Count: 2, Min: 0:01:00, Mean: 0:05:00, Max: 0:05:00
User myuser2: Count: 1, Min: 0:03:00, Mean: 0:03:00, Max: 0:03:00
""")
STATISTICS_REPORT_CSV = textwrap.dedent(
"""\
stat_type, count, min, mean, max
All Tasks, 3, 0:01:00, 0:03:00, 0:05:00
Successful Tasks, 2, 0:01:00, 0:05:00, 0:05:00
Failed Tasks, 1, 0:03:00, 0:03:00, 0:03:00
Total Request Time, 2, 0:03:00, 0:21:00, 0:21:00
Task type TaskName, 1, 0:01:00, 0:01:00, 0:01:00
Task type TaskName2, 1, 0:05:00, 0:05:00, 0:05:00
Task type TaskName3, 1, 0:03:00, 0:03:00, 0:03:00
Worker fake_worker, 2, 0:01:00, 0:03:00, 0:03:00
Worker fake_worker2, 1, 0:05:00, 0:05:00, 0:05:00
User myuser, 2, 0:01:00, 0:05:00, 0:05:00
User myuser2, 1, 0:03:00, 0:03:00, 0:03:00
""")
class TestTurbiniaClient(unittest.TestCase):
"""Test Turbinia client class."""
def setUp(self):
last_update = datetime.now()
self.task_data = [
{
'id': '0xfakeTaskId',
'instance': 'MyTurbiniaInstance',
'last_update': last_update,
'name': 'TaskName',
'report_data': '#### Fake Low priority Report\n* Fake Bullet',
'report_priority': 80,
'request_id': '0xFakeRequestId',
'run_time': timedelta(minutes=1),
'saved_paths': ['/no/path/', '/fake/path'],
'status': 'This fake task executed',
'successful': True,
'requester': 'myuser',
'worker_name': 'fake_worker'
}, {
'id': '0xfakeTaskId2',
'instance': 'MyTurbiniaInstance',
'last_update': last_update + timedelta(minutes=20),
'name': 'TaskName2',
'report_data': '#### Fake High priority Report\n* Fake Bullet',
'report_priority': 10,
'request_id': '0xFakeRequestId',
'run_time': timedelta(minutes=5),
'saved_paths': ['/no/path/2', '/fake/path/2'],
'status': 'This second fake task executed',
'successful': True,
'requester': 'myuser',
'worker_name': 'fake_worker2'
}, {
'id': '0xfakeTaskId3',
'instance': 'MyTurbiniaInstance',
'last_update': last_update,
'name': 'TaskName3',
'report_data': '',
'report_priority': 80,
'request_id': '0xFakeRequestId2',
'run_time': timedelta(minutes=3),
'saved_paths': ['/no/path/3', '/fake/path/3'],
'status': 'Third Task Failed...',
'successful': False,
'requester': 'myuser2',
'worker_name': 'fake_worker'
}
] # yapf: disable
@mock.patch('turbinia.client.task_manager.PSQTaskManager._backend_setup')
@mock.patch('turbinia.state_manager.get_state_manager')
def testTurbiniaClientInit(self, _, __):
"""Basic test for client."""
config.LoadConfig()
client = TurbiniaClient()
self.assertTrue(hasattr(client, 'task_manager'))
@mock.patch('turbinia.client.GoogleCloudFunction.ExecuteFunction')
@mock.patch('turbinia.client.task_manager.PSQTaskManager._backend_setup')
@mock.patch('turbinia.state_manager.get_state_manager')
def testTurbiniaClientGetTaskData(self, _, __, mock_cloud_function):
"""Basic test for client.get_task_data"""
# ExecuteFunction returns a dict with a 'result' key that has a json-encoded
# list. This contains our task data, which is a list of dicts.
run_time = timedelta(seconds=3)
test_task_data = [{'bar': 'bar2', 'run_time': run_time.total_seconds()}]
gcf_result = [test_task_data, 'Unused GCF data']
gcf_result = json.dumps(gcf_result)
function_return = {'result': gcf_result}
mock_cloud_function.return_value = function_return
client = TurbiniaClient()
task_data = client.get_task_data('inst', 'proj', 'reg')
# get_task_data() converts this back into a timedelta(). We returned it
# seconds from the GCF function call because that is what it is stored in
# Datastore as.
test_task_data[0]['run_time'] = run_time
self.assertEqual(task_data, test_task_data)
@mock.patch('turbinia.client.GoogleCloudFunction.ExecuteFunction')
@mock.patch('turbinia.client.task_manager.PSQTaskManager._backend_setup')
@mock.patch('turbinia.state_manager.get_state_manager')
def testTurbiniaClientGetTaskDataNoResults(self, _, __, mock_cloud_function):
"""Test for exception after empty results from cloud functions."""
mock_cloud_function.return_value = {}
client = TurbiniaClient()
self.assertRaises(
TurbiniaException, client.get_task_data, "inst", "proj", "reg")
@mock.patch('turbinia.client.GoogleCloudFunction.ExecuteFunction')
@mock.patch('turbinia.client.task_manager.PSQTaskManager._backend_setup')
@mock.patch('turbinia.state_manager.get_state_manager')
def testTurbiniaClientGetTaskDataInvalidJson(
self, _, __, mock_cloud_function):
"""Test for exception after bad json results from cloud functions."""
mock_cloud_function.return_value = {'result': None}
client = TurbiniaClient()
self.assertRaises(
TurbiniaException, client.get_task_data, "inst", "proj", "reg")
@mock.patch('turbinia.client.GoogleCloudFunction.ExecuteFunction')
@mock.patch('turbinia.client.task_manager.PSQTaskManager._backend_setup')
@mock.patch('turbinia.state_manager.get_state_manager')
def testClientFormatTaskStatistics(self, _, __, ___):
"""Tests format_task_statistics() report output."""
client = TurbiniaClient()
client.get_task_data = mock.MagicMock()
client.get_task_data.return_value = self.task_data
stats_report = client.format_task_statistics('inst', 'proj', 'reg')
self.maxDiff = None
self.assertEqual(stats_report, STATISTICS_REPORT)
@mock.patch('turbinia.client.GoogleCloudFunction.ExecuteFunction')
@mock.patch('turbinia.client.task_manager.PSQTaskManager._backend_setup')
@mock.patch('turbinia.state_manager.get_state_manager')
def testClientFormatTaskStatisticsCsv(self, _, __, ___):
"""Tests format_task_statistics() CSV report output."""
client = TurbiniaClient()
client.get_task_data = mock.MagicMock()
client.get_task_data.return_value = self.task_data
stats_report = client.format_task_statistics(
'inst', 'proj', 'reg', csv=True)
self.maxDiff = None
self.assertEqual(stats_report, STATISTICS_REPORT_CSV)
@mock.patch('turbinia.client.GoogleCloudFunction.ExecuteFunction')
@mock.patch('turbinia.client.task_manager.PSQTaskManager._backend_setup')
@mock.patch('turbinia.state_manager.get_state_manager')
def testClientGetTaskStatistics(self, _, __, ___):
"""Tests get_task_statistics() basic functionality."""
client = TurbiniaClient()
client.get_task_data = mock.MagicMock()
client.get_task_data.return_value = self.task_data
task_stats = client.get_task_statistics('inst', 'proj', 'reg')
# Make sure we have the right number of tasks for all sections
self.assertEqual(task_stats['all_tasks'].count, 3)
self.assertEqual(task_stats['successful_tasks'].count, 2)
self.assertEqual(task_stats['failed_tasks'].count, 1)
self.assertEqual(task_stats['requests'].count, 2)
self.assertEqual(len(task_stats['tasks_per_user']), 2)
self.assertEqual(len(task_stats['tasks_per_worker']), 2)
self.assertEqual(len(task_stats['tasks_per_type']), 3)
# Checking min/mean/max
self.assertEqual(task_stats['all_tasks'].min, timedelta(minutes=1))
self.assertEqual(task_stats['all_tasks'].mean, timedelta(minutes=3))
self.assertEqual(task_stats['all_tasks'].max, timedelta(minutes=5))
# Delta for this is 21 minutes because the last_update for 0xfakeTaskId2 is
# 20 minutes later than the first task, and the first task ran for 1 minute.
self.assertEqual(task_stats['requests'].max, timedelta(minutes=21))
self.assertEqual(
task_stats['tasks_per_user']['myuser'].max, timedelta(minutes=5))
self.assertEqual(
task_stats['tasks_per_worker']['fake_worker'].max, timedelta(minutes=3))
self.assertEqual(
task_stats['tasks_per_type']['TaskName2'].mean, timedelta(minutes=5))
@mock.patch('turbinia.client.GoogleCloudFunction.ExecuteFunction')
@mock.patch('turbinia.client.task_manager.PSQTaskManager._backend_setup')
@mock.patch('turbinia.state_manager.get_state_manager')
def testClientFormatTaskStatus(self, _, __, ___):
"""Tests format_task_status() with empty report_priority."""
client = TurbiniaClient()
client.get_task_data = mock.MagicMock()
self.task_data[0]['report_priority'] = None
self.task_data[1]['report_priority'] = ''
self.task_data[2].pop('report_priority')
client.get_task_data.return_value = self.task_data
result = client.format_task_status('inst', 'proj', 'reg')
self.assertIn('Processed 3 Tasks', result.strip())
@mock.patch('turbinia.client.GoogleCloudFunction.ExecuteFunction')
@mock.patch('turbinia.client.task_manager.PSQTaskManager._backend_setup')
@mock.patch('turbinia.state_manager.get_state_manager')
def testClientFormatTaskStatusShortReport(self, _, __, ___):
"""Tests format_task_status() has valid output with short report."""
client = TurbiniaClient()
client.get_task_data = mock.MagicMock()
client.get_task_data.return_value = self.task_data
result = client.format_task_status('inst', 'proj', 'reg')
self.assertEqual(result.strip(), SHORT_REPORT.strip())
@mock.patch('turbinia.client.GoogleCloudFunction.ExecuteFunction')
@mock.patch('turbinia.client.task_manager.PSQTaskManager._backend_setup')
@mock.patch('turbinia.state_manager.get_state_manager')
def testClientFormatTaskStatusFullReport(self, _, __, ___):
"""Tests format_task_status() has valid output with full report."""
client = TurbiniaClient()
client.get_task_data = mock.MagicMock()
client.get_task_data.return_value = self.task_data
result = client.format_task_status('inst', 'proj', 'reg', full_report=True)
self.assertEqual(result.strip(), LONG_REPORT.strip())
@mock.patch('turbinia.client.GoogleCloudFunction.ExecuteFunction')
@mock.patch('turbinia.client.task_manager.PSQTaskManager._backend_setup')
@mock.patch('turbinia.state_manager.get_state_manager')
def testClientFormatTaskStatusFiles(self, _, __, ___):
"""Tests format_task_status() has valid output with report and files."""
client = TurbiniaClient()
client.get_task_data = mock.MagicMock()
client.get_task_data.return_value = self.task_data
result = client.format_task_status(
'inst', 'proj', 'reg', all_fields=True, full_report=True)
self.assertEqual(result.strip(), LONG_REPORT_FILES.strip())
class TestTurbiniaStats(unittest.TestCase):
"""Test TurbiniaStats class."""
def testTurbiniaStatsAddTask(self):
"""Tests TurbiniaStats.add_task() method."""
test_task = {'run_time': None, 'last_update': None}
stats = TurbiniaStats()
stats.add_task(test_task)
self.assertIn(test_task, stats.tasks)
self.assertEqual(stats.count, 1)
def testTurbiniaStatsCalculateStats(self):
"""Tests TurbiniaStats.calculateStats() method."""
last_update = datetime.now()
test_task1 = {'run_time': timedelta(minutes=3), 'last_update': last_update}
test_task2 = {'run_time': timedelta(minutes=5), 'last_update': last_update}
test_task3 = {'run_time': timedelta(minutes=1), 'last_update': last_update}
stats = TurbiniaStats()
stats.add_task(test_task1)
stats.add_task(test_task2)
stats.add_task(test_task3)
stats.calculate_stats()
self.assertEqual(stats.min, timedelta(minutes=1))
self.assertEqual(stats.mean, timedelta(minutes=3))
self.assertEqual(stats.max, timedelta(minutes=5))
self.assertEqual(stats.count, 3)
def testTurbiniaStatsCalculateStatsEmpty(self):
"""Tests that calculate_stats() works when no tasks are added."""
stats = TurbiniaStats()
stats.calculate_stats()
self.assertEqual(stats.count, 0)
self.assertEqual(stats.min, None)
def testTurbiniaStatsFormatStats(self):
"""Tests TurbiniaStats.format_stats() returns valid output."""
test_output = (
'Test Task Results: Count: 1, Min: 0:03:00, Mean: 0:03:00, '
'Max: 0:03:00')
test_task1 = {
'run_time': timedelta(minutes=3),
'last_update': datetime.now()
}
stats = TurbiniaStats('Test Task Results')
stats.add_task(test_task1)
stats.calculate_stats()
report = stats.format_stats()
self.assertEqual(report, test_output)
def testTurbiniaStatsFormatStatsCsv(self):
"""Tests TurbiniaStats.format_stats() returns valid CSV output."""
test_output = ('Test Task Results, 1, 0:03:00, 0:03:00, 0:03:00')
test_task1 = {
'run_time': timedelta(minutes=3),
'last_update': datetime.now()
}
stats = TurbiniaStats('Test Task Results')
stats.add_task(test_task1)
stats.calculate_stats()
report = stats.format_stats_csv()
self.assertEqual(report, test_output)
class TestTurbiniaServer(unittest.TestCase):
"""Test Turbinia Server class."""
@mock.patch('turbinia.client.task_manager.PSQTaskManager._backend_setup')
@mock.patch('turbinia.state_manager.get_state_manager')
def testTurbiniaServerInit(self, _, __):
"""Basic test for Turbinia Server init."""
server = TurbiniaServer()
self.assertTrue(hasattr(server, 'task_manager'))
class TestTurbiniaPsqWorker(unittest.TestCase):
"""Test Turbinia PSQ Worker class."""
def setUp(self):
self.tmp_dir = tempfile.mkdtemp(prefix='turbinia-test')
config.LoadConfig()
config.OUTPUT_DIR = self.tmp_dir
config.MOUNT_DIR_PREFIX = self.tmp_dir
config.DEPENDENCIES = []
def tearDown(self):
if 'turbinia-test' in self.tmp_dir:
shutil.rmtree(self.tmp_dir)
@mock.patch('turbinia.client.pubsub')
@mock.patch('turbinia.client.datastore.Client')
@mock.patch('turbinia.client.psq.Worker')
def testTurbiniaPsqWorkerInit(self, _, __, ___):
"""Basic test for PSQ worker."""
worker = TurbiniaPsqWorker([], [])
self.assertTrue(hasattr(worker, 'worker'))
@mock.patch('turbinia.client.pubsub')
@mock.patch('turbinia.client.datastore.Client')
@mock.patch('turbinia.client.psq.Worker')
def testTurbiniaClientNoDir(self, _, __, ___):
"""Test that OUTPUT_DIR path is created."""
config.OUTPUT_DIR = os.path.join(self.tmp_dir, 'no_such_dir')
TurbiniaPsqWorker([], [])
self.assertTrue(os.path.exists(config.OUTPUT_DIR))
@mock.patch('turbinia.client.pubsub')
@mock.patch('turbinia.client.datastore.Client')
@mock.patch('turbinia.client.psq.Worker')
def testTurbiniaClientIsNonDir(self, _, __, ___):
"""Test that OUTPUT_DIR does not point to an existing non-directory."""
config.OUTPUT_DIR = os.path.join(self.tmp_dir, 'empty_file')
open(config.OUTPUT_DIR, 'a').close()
self.assertRaises(TurbiniaException, TurbiniaPsqWorker)
@mock.patch('turbinia.client.shutil')
@mock.patch('logging.Logger.warning')
def testDependencyCheck(self, mock_logger, mock_shutil):
"""Test system dependency check."""
dependencies = [{
'job': 'PlasoJob',
'programs': ['non_exist'],
'docker_image': None
}]
# Dependency not found.
mock_shutil.which.return_value = None
self.assertRaises(TurbiniaException, check_dependencies, dependencies)
# Normal run.
mock_shutil.which.return_value = True
check_dependencies(dependencies)
# Job not found.
dependencies[0]['job'] = 'non_exist'
check_dependencies(dependencies)
mock_logger.assert_called_with(
'The job: non_exist was not found or has been disabled. '
'Skipping dependency check...')
# Bad dependency config.
self.assertRaises(TurbiniaException, check_dependencies, [{'test': 'test'}])
```
#### File: turbinia/workers/finalize_request.py
```python
from __future__ import unicode_literals
import os
from turbinia import config
from turbinia.evidence import FinalReport
from turbinia.workers import TurbiniaTask
class FinalizeRequestTask(TurbiniaTask):
"""Task to finalize the Turbinia request."""
def run(self, evidence, result):
"""Main entry point for Task.
This generates a final report.
Args:
evidence (EvidenceCollection): All Evidence that has been generated as
part of this request.
result (TurbiniaTaskResult): The result to place task output into.
Returns:
TurbiniaTaskResult: Task execution results.
"""
# Doing a delayed import to avoid circular dependencies.
from turbinia.client import TurbiniaClient
client = TurbiniaClient()
report_file = os.path.join(
self.tmp_dir, 'final_turbinia_report_{0:s}.md'.format(self.id))
report = FinalReport(source_path=report_file)
report_data = client.format_task_status(
config.INSTANCE_ID, config.TURBINIA_PROJECT, config.TURBINIA_REGION,
request_id=evidence.request_id, full_report=True)
result.log('Writing report data to [{0:s}]'.format(report.local_path))
with open(report.local_path, 'wb') as file_handle:
file_handle.write(report_data.encode('utf-8'))
result.add_evidence(report, evidence.config)
result.close(self, True)
return result
``` |
{
"source": "Jorlejeu/podcast-transcriber",
"score": 3
} |
#### File: podcast-transcriber/podcast_transcriber/transcriber.py
```python
import base64
from googleapiclient import discovery
class Transcriber(object):
# the transcript chunks
transcript_chunks = []
def __init__(self, api_key):
self.api_key = api_key
def get_speech_service(self):
"""
Get the Google Speech service.
"""
return discovery.build('speech', 'v1', developerKey=self.api_key)
def transcribe(self, filepath):
"""
Transcribe the given audio file.
Params:
filepath (string): The name of the audio file.
"""
with open(filepath, 'rb') as speech:
# Base64 encode the binary audio file for inclusion in the JSON
# request.
speech_content = base64.b64encode(speech.read())
service = self.get_speech_service()
service_request = service.speech().recognize(
body={
'config': {
'encoding': 'LINEAR16', # raw 16-bit signed LE samples
'sampleRateHertz': 16000, # 16 khz
'languageCode': 'en-US', # a BCP-47 language tag
'enableAutomaticPunctuation': 'true',
},
'audio': {
'content': speech_content.decode('UTF-8')
}
})
response = service_request.execute()
return response
def transcribe_many(self, filepaths):
"""
Transcribe the given list of audio files.
Params:
filepaths (list[string]): The list of audio files.
"""
items = len(filepaths)
# loop through the files and transcribe them
for i, f in enumerate(filepaths, start=1):
print "Transcribing [ %d / %d ] %s ..." % (i, items, f)
response = self.transcribe(f)
# read the response and extract the transcript
for alternatives in response['results']:
self.transcript_chunks.append(
alternatives['alternatives'][0]['transcript'])
return self.get_transcript_str()
def get_transcript_str(self, glue=""):
"""
Returns a string representation of the transcript chunks.
Params:
glue (string): The glue to join the chunks. Default value is the
newline character (\n)
"""
if not glue:
glue = "\n"
return glue.join(self.transcript_chunks)
```
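
A minimal usage sketch for the class above; the API key and file names are placeholders, and the files are assumed to already contain 16 kHz LINEAR16 audio as declared in the recognition config:

```python
from podcast_transcriber.transcriber import Transcriber

transcriber = Transcriber("MY_GOOGLE_API_KEY")
transcript = transcriber.transcribe_many(["chunk_000.wav", "chunk_001.wav"])
print(transcript)
```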
#### File: podcast-transcriber/tests/test_transcriber.py
```python
import unittest
from podcast_transcriber.transcriber import Transcriber
class TranscriberTest(unittest.TestCase):
def setUp(self):
self.transc = Transcriber("")
def tearDown(self):
self.transc = None
def test_transcript_str_wrong_input(self):
with self.assertRaises(AttributeError):
self.transc.get_transcript_str(int(1))
def test_transcript_str_empty_chunks(self):
self.transc.transcript_chunks = []
self.assertEqual("", self.transc.get_transcript_str())
def test_transcript_str_correct_no_arg(self):
self.transc.transcript_chunks = [
"correctly", "join", "chunks", "together"]
self.assertEqual(
"correctly\njoin\nchunks\ntogether",
self.transc.get_transcript_str())
def test_transcript_str_correct_other_arg(self):
self.transc.transcript_chunks = [
"correctly", "join", "chunks", "together"]
self.assertEqual(
"correctly join chunks together",
self.transc.get_transcript_str(" "))
```
#### File: podcast-transcriber/tests/test_utilities.py
```python
import os
import unittest
from mock import patch
from podcast_transcriber.utilities import (
check_env_vars,
create_temporary_file_name,
create_temporary_folder
)
class UtilitiesTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
@patch("tempfile.mkdtemp")
def test_create_temporary_folder_correct(self, mock_mkdtemp):
mock_mkdtemp.return_value = '/path/to/tmp/dir'
return_dirpath = "/path/to/tmp/dir"
self.assertEqual(return_dirpath, create_temporary_folder())
@patch("tempfile.mkdtemp")
def test_create_temporary_folder_incorrect(self, mock_mkdtemp):
mock_mkdtemp.return_value = '/path/to/tmp/wrong_dir'
return_dirpath = "/path/to/tmp/dir"
self.assertNotEqual(return_dirpath, create_temporary_folder())
def test_create_temporary_file_name_correct(self):
temp_file_result = "/path/to/tmp/file.txt"
temp_file_dir = "/path/to/tmp"
temp_file_name = "file.txt"
self.assertEqual(
temp_file_result, create_temporary_file_name(
temp_file_dir, temp_file_name))
def test_create_temporary_file_name_incorrect(self):
temp_file_result = "/path/to/tmp/file.txt"
temp_file_dir = "/path/to/tmp"
temp_file_name = "wrong_file.txt"
self.assertNotEqual(
temp_file_result, create_temporary_file_name(
temp_file_dir, temp_file_name))
def test_check_env_vars_true(self):
os.environ["GOOGLE_API_KEY"] = "test"
self.assertEqual(True, check_env_vars())
def test_check_env_vars_false(self):
if "GOOGLE_API_KEY" in os.environ:
del os.environ["GOOGLE_API_KEY"]
self.assertEqual(False, check_env_vars())
``` |
{
"source": "jormacmoo/ah",
"score": 3
} |
#### File: ah/test/test_search.py
```python
import unittest
import numpy as np
from repytah.search import find_complete_list
from repytah.search import __find_add_rows as find_add_rows
from repytah.search import find_all_repeats
from repytah.search import find_complete_list_anno_only
class TestSearch(unittest.TestCase):
def test_find_complete_list(self):
"""
Tests if find_complete_list finds the correct smaller diagonals
(and the associated pairs of repeats).
"""
input_mat = np.array([[8, 8, 14, 14, 1],
[14, 14, 56, 56, 1],
[8, 8, 62, 62, 1],
[56, 56, 62, 62, 1],
[14, 14, 104, 104, 1],
[62, 62, 104, 104, 1],
[8, 8, 110, 110, 1],
[56, 56, 110, 110, 1],
[104, 104, 110, 110, 1],
[4, 14, 52, 62, 11],
[4, 14, 100, 110, 11],
[26, 71, 74, 119, 46],
[1, 119, 1, 119, 119]])
song_length = 119
output = find_complete_list(input_mat, song_length)
expect_output = np.array([[8, 8, 14, 14, 1, 1],
[8, 8, 56, 56, 1, 1],
[8, 8, 62, 62, 1, 1],
[8, 8, 104, 104, 1, 1],
[8, 8, 110, 110, 1, 1],
[14, 14, 56, 56, 1, 1],
[14, 14, 62, 62, 1, 1],
[14, 14, 104, 104, 1, 1],
[14, 14, 110, 110, 1, 1],
[56, 56, 62, 62, 1, 1],
[56, 56, 104, 104, 1, 1],
[56, 56, 110, 110, 1, 1],
[62, 62, 104, 104, 1, 1],
[62, 62, 110, 110, 1, 1],
[104, 104, 110, 110, 1, 1],
[4, 7, 52, 55, 4, 1],
[4, 7, 100, 103, 4, 1],
[9, 14, 57, 62, 6, 1],
[9, 14, 105, 110, 6, 1],
[63, 71, 111, 119, 9, 1],
[4, 13, 52, 61, 10, 1],
[4, 13, 100, 109, 10, 1],
[4, 14, 52, 62, 11, 1],
[4, 14, 100, 110, 11, 1],
[52, 62, 100, 110, 11, 1],
[57, 71, 105, 119, 15, 1],
[26, 51, 74, 99, 26, 1],
[26, 55, 74, 103, 30, 1],
[26, 61, 74, 109, 36, 1],
[26, 71, 74, 119, 46, 1]])
# Test output type
self.assertIs(type(output), np.ndarray)
# Test output size
self.assertEqual(np.size(output), np.size(expect_output))
# Test output result
self.assertEqual(output.tolist(), expect_output.tolist())
def test__find_add_rows(self):
"""
Tests if __find_add_rows finds the correct pairs of repeated
structures, represented as diagonals of a certain length, k.
"""
# Test for pairs of repeated structures that start at the same time
# step as previously found pairs of repeated structures of the same
# length
lst_no_anno_ep1 = np.array([[1, 15, 31, 45, 15],
[1, 10, 46, 55, 10],
[31, 40, 46, 55, 10],
[10, 20, 40, 50, 11]])
check_inds_ep1 = np.array([1, 31, 46])
k_ep1 = 10
output_ep1 = find_add_rows(lst_no_anno_ep1, check_inds_ep1, k_ep1)
expect_output_ep1 = np.array([[1, 10, 31, 40, 10],
[11, 15, 41, 45, 5],
[1, 10, 31, 40, 10],
[11, 15, 41, 45, 5]])
# Test output type
self.assertIs(type(output_ep1), np.ndarray)
# Test output size
self.assertEqual(np.size(output_ep1), np.size(expect_output_ep1))
# Test output result
self.assertEqual(output_ep1.tolist(), expect_output_ep1.tolist())
# Test for pairs of repeated structures that end at the same time step
# as previously found pairs of repeated structures of the same length
lst_no_anno_ep2 = np.array([[4, 4, 14, 14, 1],
[4, 4, 56, 56, 1],
[4, 4, 110, 110, 1],
[14, 14, 56, 56, 1],
[14, 14, 110, 110, 1],
[56, 56, 110, 110, 1],
[4, 14, 52, 62, 11]])
check_inds_ep2 = np.array([4, 14, 56, 110])
k_ep2 = 1
output_ep2 = find_add_rows(lst_no_anno_ep2, check_inds_ep2, k_ep2)
expect_output_ep2 = np.array([[4, 4, 52, 52, 1],
[5, 14, 53, 62, 10],
[4, 13, 52, 61, 10],
[14, 14, 62, 62, 1],
[4, 7, 52, 55, 4],
[8, 8, 56, 56, 1],
[9, 14, 57, 62, 6]])
self.assertIs(type(output_ep2), np.ndarray)
# Test output size
self.assertEqual(np.size(output_ep2), np.size(expect_output_ep2))
# Test output result
self.assertEqual(output_ep2.tolist(), expect_output_ep2.tolist())
# Test for pairs of repeated structures that neither start nor end at
# the same time step as previously found pairs of repeated structures
# of the same length
lst_no_anno_ep3 = np.array([[8, 8, 14, 14, 1],
[14, 14, 56, 56, 1],
[8, 8, 62, 62, 1],
[56, 56, 62, 62, 1],
[14, 14, 104, 104, 1],
[62, 62, 104, 104, 1],
[8, 8, 110, 110, 1],
[56, 56, 110, 110, 1],
[104, 104, 110, 110, 1],
[4, 14, 52, 62, 11],
[4, 14, 100, 110, 11],
[26, 71, 74, 119, 46]])
check_inds_ep3 = np.array([4, 52, 100])
k = 11
output_ep3 = find_add_rows(lst_no_anno_ep3, check_inds_ep3, k)
expect_output_ep3 = np.array([[26, 51, 74, 99, 26],
[52, 62, 100, 110, 11],
[63, 71, 111, 119, 9],
[26, 51, 74, 99, 26],
[52, 62, 100, 110, 11],
[63, 71, 111, 119, 9]])
# Test output type
self.assertIs(type(output_ep3), np.ndarray)
# Test output size
self.assertEqual(np.size(output_ep3), np.size(expect_output_ep3))
# Test output result
self.assertEqual(output_ep3.tolist(), expect_output_ep3.tolist())
def test_find_all_repeats(self):
"""
Tests if find_all_repeats finds all the correct diagonals present
in thresh_mat.
"""
thresh_temp = np.array([[1, 0, 1, 0, 0],
[0, 1, 0, 1, 0],
[1, 0, 1, 0, 1],
[0, 1, 0, 1, 0],
[0, 0, 1, 0, 1]])
band_width_vec = np.array([1, 2, 3, 4, 5])
output = find_all_repeats(thresh_temp, band_width_vec)
expect_output = np.array([[1, 1, 3, 3, 1],
[2, 2, 4, 4, 1],
[3, 3, 5, 5, 1],
[1, 2, 3, 4, 2],
[2, 3, 4, 5, 2],
[1, 2, 3, 4, 2],
[2, 3, 4, 5, 2]])
# Test output type
self.assertIs(type(output), np.ndarray)
# Test output size
self.assertEqual(np.size(output), np.size(expect_output))
# Test output result
self.assertEqual(output.tolist(), expect_output.tolist())
def test_find_complete_list_anno_only(self):
"""
Tests if find_complete_list_anno_only finds all the correct annotations
for all pairs of repeats found in find_all_repeats.
"""
pair_list = np.array([[3, 3, 5, 5, 1],
[2, 2, 8, 8, 1],
[3, 3, 9, 9, 1],
[2, 2, 15, 15, 1],
[8, 8, 15, 15, 1],
[4, 4, 17, 17, 1],
[2, 3, 8, 9, 2],
[3, 4, 9, 10, 2],
[2, 3, 15, 16, 2],
[8, 9, 15, 16, 2],
[3, 4, 16, 17, 2],
[2, 4, 8, 10, 3],
[3, 5, 9, 11, 3],
[7, 9, 14, 16, 3],
[2, 4, 15, 17, 3],
[3, 5, 16, 18, 3],
[9, 11, 16, 18, 3],
[7, 10, 14, 17, 4],
[7, 11, 14, 18, 5],
[8, 12, 15, 19, 5],
[7, 12, 14, 19, 6]])
song_length = 19
output = find_complete_list_anno_only(pair_list, song_length)
expect_output = np.array([[2, 2, 8, 8, 1, 1],
[2, 2, 15, 15, 1, 1],
[8, 8, 15, 15, 1, 1],
[3, 3, 5, 5, 1, 2],
[3, 3, 9, 9, 1, 2],
[4, 4, 17, 17, 1, 3],
[2, 3, 8, 9, 2, 1],
[2, 3, 15, 16, 2, 1],
[8, 9, 15, 16, 2, 1],
[3, 4, 9, 10, 2, 2],
[3, 4, 16, 17, 2, 2],
[2, 4, 8, 10, 3, 1],
[2, 4, 15, 17, 3, 1],
[3, 5, 9, 11, 3, 2],
[3, 5, 16, 18, 3, 2],
[9, 11, 16, 18, 3, 2],
[7, 9, 14, 16, 3, 3],
[7, 10, 14, 17, 4, 1],
[7, 11, 14, 18, 5, 1],
[8, 12, 15, 19, 5, 2],
[7, 12, 14, 19, 6, 1]])
# Test output type
self.assertIs(type(output), np.ndarray)
# Test output size
self.assertEqual(np.size(output), np.size(expect_output))
# Test output result
self.assertEqual(output.tolist(), expect_output.tolist())
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jormanfernandez/crud",
"score": 4
} |
#### File: data/database/NoSQL.py
```python
import json
import os
class DB:
"""
Handles the connection with the .json file that will be used as database
"""
def __init__(self):
self.path = os.path.join(os.getcwd(), "data/database/storage.json")
def read(self, fromDoc = None):
"""
        Opens the .json file and parses its content into a Python object.
        If a document is indicated, only the rows stored under that key are returned.
Args:
fromDoc (None, str): The document to read data from
Returns:
list: If the fromDoc parameter is send, it will return a list of all the rows stored
dict: If the fromDOc is None, it will return a dict with the entire database object
"""
f = open(self.path, "r")
data = json.loads(f.read())
f.close()
if fromDoc is None:
return data
else:
return data[fromDoc] if fromDoc in data else []
def write(self, onDoc, value = []):
"""
        Opens the .json file, reads its content and stores the given value under the indicated document key.
Args:
onDoc (str): The document that will be written. This is the JSON key in the dict.
value (list): The data that will be stored on the document or key of the JSON.
Returns:
None
"""
data = self.read()
        if not isinstance(value, list):
value = []
data[onDoc] = value
f = open(self.path, "w+")
f.write(json.dumps(data))
f.close()
```
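
A short sketch of how this file-backed store is meant to be used; the document name and row contents are made-up examples, the import assumes the package layout implied by the file path, and `data/database/storage.json` must already exist with valid JSON for `read()` to succeed:

```python
from data.database.NoSQL import DB

db = DB()

# append a row to the "persons" document and write everything back
persons = db.read("persons")
persons.append({"name": "Jane", "phone": "555-0100"})
db.write("persons", persons)

print(db.read("persons"))
```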
#### File: api/handler/PersonSearch.py
```python
import falcon
import json
from data.operators.PersonOperator import PersonOperator
from utils.logger import Log
from utils.form.fields import Fields
fields = Fields()
class PersonSearchHandler(object):
"""
    Handles search requests that are not bound to a specific person.
Args:
object (object): No info
"""
def on_get(self, req, resp, mode, value):
"""
Handles the GET for search of persons and gives a list of people that match the search
Args:
req (Falcon HTTP Request): Request received by the server
resp (Falcon HTTP Response): Response constructed by the server
mode (str): The search method to be used (name, lastname, phone, email)
value (str, int): The value to be search
Returns:
None
"""
Log(f"GET Request Received", req=req)
resp.content_type = "application/json"
if mode not in getattr(fields, "field"):
resp.status = falcon.HTTP_400
resp.body = json.dumps({
"msg": "Search method not allowed"
})
else:
resp.status = falcon.HTTP_200
resp.body = json.dumps(PersonOperator.searchBy(key=mode, value=value), ensure_ascii=False)
resp.body += "\n"
```
#### File: api/utils/system.py
```python
def isSQL():
"""
Determine if the program should run with the SQL or NoSQL database.
Args:
None
Returns:
boolean: True if is SQL false with NoSQL
"""
import sys
return False if len(sys.argv) > 1 and "nosql" in sys.argv else True
```
#### File: utils/form/userInput.py
```python
from src.utils.form.validate import Validate
validator = Validate()
def get(field, message=""):
"""
    Looks up the validation rules for the given field and applies them to the user's input.
    If the input fails validation, the prompt is shown again until all validations pass.
Args:
field (str): The field to be validated in the form
message (str): The message that will be displayed in the prompt to the user
Returns:
str: The input of the user already validated
"""
fieldProps = validator.getConfig(field)
text = fieldProps["label"] if message == "" else message
data = str(input(text)).strip()
validation = validator.runValidations(data, fieldProps)
if validation["ok"] is False:
print(validation["msg"])
return get(field, text)
return data
```
#### File: views/pages/Home.py
```python
from src.utils.common.clear import clear
from src.utils.system import closeApp
from src.utils.form.userInput import get
from time import sleep
from datetime import date
def Home():
clear()
today = date.today().strftime("%d/%m/%Y")
print(f"Today is: {today}")
print("""
Hello, welcome to your main agenda...
To navigate in the basic system, the menu would be:
A .- Add new register to your agenda
S .- Search for a specific person
V .- View the entire list
C .- Close
""")
option = str(input("Please, tell me where do you want to go: "))
option = option.strip().lower()
goTo(option)
def goTo(place):
if place == "a":
from src.views.pages.Register import Register
Register()
elif place == "s":
from src.views.pages.Search import PersonSearch
PersonSearch()
elif place == "v":
from src.views.pages.AllPersons import AllPersons
AllPersons()
elif place == "c":
closeApp()
else:
print("Wrong option....")
sleep(2)
Home()
```
#### File: views/pages/Search.py
```python
from time import sleep
from src.utils.common.clear import clear
from src.views.common.PersonDetail import printPerson
from src.views.common.exitOrHome import exitOrHome
from src.views.components.SearchBy import searchByEmail
from src.views.components.SearchBy import searchByLastname
from src.views.components.SearchBy import searchByName
from src.views.components.SearchBy import searchByPhone
from src.views.components.SelectEdit import SelectEdit
switch = {
"n": searchByName,
"l": searchByLastname,
"e": searchByEmail,
"p": searchByPhone,
"b": exitOrHome
}
def PersonSearch():
clear()
print("""
> Searching a person
So we are going to help you look for someone in detail
and maybe edit some of their data (or delete them if you want to)
Here is how you can search them:
N .- Name
L .- Last Name
E .- Email (if it has one)
P .- Phone
B .- Back
""")
how = str(input("What's your move?: ")).strip().lower()
    if how not in switch:
        print("Wrong option")
        sleep(1)
        PersonSearch()
        return
if how == "b":
switch[how]()
return
persons = switch[how](PersonSearch)
SelectEdit(persons)
``` |
{
"source": "jormanfernandez/wsgi-server",
"score": 3
} |
#### File: wsgi-server/src/Application.py
```python
from types import FunctionType
from src.managers.RequestManager import RequestManager
class Application:
def __call__(self, environ: dict, start_response: FunctionType) -> tuple:
"""
        Called by the WSGI server for each incoming request.
Args:
environ (dict): Enviroment variable with all the request data
start_response (function): Function to setup the status code and response headers
Returns:
bytes: Byte sequence to be handled in the main layer
"""
return RequestManager.process(environ, start_response)
```
#### File: src/managers/PathManager.py
```python
from re import search
class PathManager:
def __init__(self, paths):
self.paths = paths
def extractHandler(self, requestPath: str) -> tuple:
"""
        Extracts which handler from the registered paths should be used
        and the keyword arguments to be sent to that handler according to the route structure.
Returns:
tuple: handler (function), kwargs (dict)
"""
index, kwargs = self.getMatchs(requestPath)
handler = self.paths[index]() if index is not None else None
return handler, kwargs
def getMatchs(self, path: str) -> tuple:
"""
        It gets the index of the matching path and the keyword arguments to be passed, based on the requested url.
Args:
path (str): request path to be compared
Returns:
tuple: index(str), kwargs(dict) The index is the key index in the paths attribute of the instance
"""
index = None
kwargs = {}
requestPathSplited = path.rstrip("/").split("?")[0].split("/")[1:]
for route in self.paths:
routeSplited = route.rstrip("/").split("/")[1:]
if len(requestPathSplited) != len(routeSplited):
continue
if ":" in route:
isValid, matchs = self.getKwargs(routeSplited, requestPathSplited)
if isValid:
kwargs.update(matchs)
index = route
break
elif requestPathSplited == routeSplited:
index = route
break
return index, kwargs
def getKwargs(self, route: list, requestPath: list) -> tuple:
"""
        If the route has variables defined as /<str:name>, their values are extracted from the request
        path into a dict. If the values in the url are invalid, False is returned as the first value of the tuple.
Args:
route (list): Route to be compared against the request
requestPath (list): URI from the Request to be compared
Returns:
tuple: isValid(bool), matchs(dict)
"""
matchs = {}
isValid = True
regex = "(\<[a-zA-Z]{3}:[a-zA-Z]{1,}\>)"
for part in range(len(route)):
if search(regex, route[part]) is not None:
"""
                If this part of the route matches the regex, it is a variable and its value should be extracted
"""
varType = route[part][1:4]
"""
First three letters are the variable's type to be compared
"""
varName = route[part][5:-1]
"""
The rest is the variable's name
"""
varData = self.getData(requestPath[part], varType)
if varData is None:
matchs = {}
isValid = False
break
matchs[varName] = varData
elif requestPath[part] != route[part]:
matchs = {}
isValid = False
break
return isValid, matchs
def getData(self, varData: any, dataType: str) -> any:
"""
Depending on the dataType passed, it will check if it matches with the data. If it doesn't it will return None
Args:
varData (any): Data to be checked
            dataType (str): Only "int" and "str" are supported for now; anything else returns None
        Returns:
            any: None if the data does not match the type, else the data sent in varData
"""
data = None
try:
if dataType == "int" and search("[1-9]{1,}", varData) is not None:
data = int(varData)
            elif dataType == "str" and search(r"[\w]{1,}", varData) is not None:
data = str(varData)
except BaseException as e:
print(e)
return data
```
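A small sketch of how `extractHandler` resolves a parameterized route; the route string and `DummyView` class are made up for illustration and are not part of the repository:
```python
# Illustrative only: the paths dict and DummyView are assumptions.
from src.managers.PathManager import PathManager

class DummyView:
    def handle(self, request, **kwargs):
        return kwargs

manager = PathManager({"/users/<int:id>": DummyView})

handler, kwargs = manager.extractHandler("/users/42")
print(type(handler).__name__, kwargs)  # DummyView {'id': 42}

handler, kwargs = manager.extractHandler("/users/abc")
print(handler, kwargs)                 # None {} -- the int check rejects 'abc'
```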
#### File: src/views/ErrorView.py
```python
from src.views.BaseView import BaseView
from src.util.Request import Request
class ErrorView(BaseView):
def handle(self, request: Request, errors: any) -> tuple:
self.statusCode = self.HTTPStatusCodes.BAD_REQUEST
self.body = {
"message": "There has been an error in your request"
}
self.body.update(self.extractErrors(errors))
return self.serialize()
    def extractErrors(self, errors: any) -> dict:
        return {"error": errors}
```
#### File: src/views/NotFoundView.py
```python
from src.views.BaseView import BaseView
from src.util.Request import Request
class NotFoundView(BaseView):
def handle(self, request: Request, **kwargs) -> tuple:
self.statusCode = self.HTTPStatusCodes.NOT_FOUND
self.body = {
"error": "Not Found"
}
return self.serialize()
``` |
{
"source": "jormaral/aifh",
"score": 3
} |
#### File: lib/aifh/rbf_network.py
```python
__author__ = 'jheaton'
import numpy as np
from rbf import RbfGaussian
class RbfNetwork(object):
""" A RBF network is an advanced machine learning algorithm that uses a series of RBF functions to perform
regression. It can also perform classification by means of one-of-n encoding.
The long term memory of a RBF network is made up of the widths and centers of the RBF functions, as well as
input and output weighting.
http://en.wikipedia.org/wiki/RBF_network
"""
def __init__(self, input_count, rbf_count, output_count):
""" Create an RBF network with the specified shape.
@param input_count: The input count.
@param rbf_count: The RBF function count.
@param output_count: The output count.
"""
self.input_count = input_count
self.output_count = output_count
# calculate input and output weight counts
# add 1 to output to account for an extra bias node
input_weight_count = input_count * rbf_count
output_weight_count = (rbf_count + 1) * output_count
rbf_params = (input_count + 1) * rbf_count
self.long_term_memory = np.zeros((input_weight_count + output_weight_count + rbf_params), dtype=float)
self.index_input_weights = 0
self.index_output_weights = input_weight_count + rbf_params
self.rbf = {}
# default the Rbf's to gaussian
for i in xrange(0, rbf_count):
rbf_index = input_weight_count + ((input_count + 1) * i)
self.rbf[i] = RbfGaussian(input_count, self.long_term_memory, rbf_index)
def compute_regression(self, input):
""" Compute the output for the network.
@param input: The input pattern.
@return: The output pattern.
"""
# first, compute the output values of each of the RBFs
# Add in one additional RBF output for bias (always set to one).
rbf_output = [0] * (len(self.rbf) + 1)
# bias
rbf_output[len(rbf_output) - 1] = 1.0
for rbfIndex in xrange(0, len(self.rbf)):
# weight the input
weighted_input = [0] * len(input)
for inputIndex in xrange(0, len(input)):
memory_index = self.index_input_weights + (rbfIndex * self.input_count) + inputIndex
weighted_input[inputIndex] = input[inputIndex] * self.long_term_memory[memory_index]
# calculate the rbf
rbf_output[rbfIndex] = self.rbf[rbfIndex].evaluate(weighted_input)
# Second, calculate the output, which is the result of the weighted result of the RBF's.
result = [0] * self.output_count
for outputIndex in xrange(0, len(result)):
sum_value = 0
for rbfIndex in xrange(0, len(rbf_output)):
# add 1 to rbf length for bias
memory_index = self.index_output_weights + (outputIndex * (len(self.rbf) + 1)) + rbfIndex
sum_value += rbf_output[rbfIndex] * self.long_term_memory[memory_index]
result[outputIndex] = sum_value
# finally, return the result.
return result
def reset(self):
"""
Reset the network to a random state.
"""
for i in xrange(0, len(self.long_term_memory)):
self.long_term_memory[i] = np.random.uniform(0, 1)
    def compute_classification(self, input):
""" Compute the output and return the index of the output with the largest value. This is the class that
the network recognized.
@param input: The input pattern.
@return:
"""
output = self.compute_regression(input)
return output.index(max(output))
def copy_memory(self, source):
""" Copy the specified vector into the long term memory of the network.
@param source: The source vector.
"""
for i in xrange(0, len(source)):
self.long_term_memory[i] = source[i]
```
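A quick usage sketch of the class above; the shape and input values are arbitrary, and the import path assumes the module file name `rbf_network.py` (Python 2, matching the `xrange` calls):
```python
# Toy usage of RbfNetwork; the numbers are arbitrary.
from rbf_network import RbfNetwork

network = RbfNetwork(input_count=2, rbf_count=5, output_count=1)
network.reset()  # randomize the long-term memory
output = network.compute_regression([0.25, 0.75])
print(output)  # a single-element list; the value depends on the random state
```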
#### File: examples/capstone_alife/alife_milestone2.py
```python
from alife_milestone1 import *
class PlantGrowth:
# Transformations to move from a cell to the 9 neighboring cells.
# These are the column values.
col_transform = [0, 0, -1, 1, -1, 1, 1, -1]
# Transformations to move from a cell to the 9 neighboring cells.
# These are the row values.
row_transform = [-1, 1, 0, 0, -1, 1, -1, 1]
def __init__(self):
# Used to hold the new cells that have grown.
self.new_composition = [[False for j in range(PlantUniverse.UNIVERSE_HEIGHT)]
for i in range(PlantUniverse.UNIVERSE_HEIGHT)]
def calc_distance(self, v1, v1_start, v2, v2_start, l):
sum = 0
for i in range(0, l):
d = v1[v1_start + i] - v2[v2_start + i]
sum = sum + (d * d)
return math.sqrt(sum)
def get_growth_potential(self, universe, row, col, genome):
"""
Calculate the growth potential for a candidate cell. Evaluates the distance between the candidate cell's info
vector and the two growth vectors in the genome. The minimum of these two vectors will be returned if
it is below a specified minimum threshold.
@param universe The universe to evaluate.
@param row The row to evaluate.
@param col The column to evaluate.
@param genome The genome.
@return The minimum distance.
"""
cellVec = universe.get_cell_info_vector(row, col)
d1 = self.calc_distance(cellVec, 0, genome, PlantUniverse.CELL_VECTOR_LENGTH * 2,
PlantUniverse.CELL_VECTOR_LENGTH)
d2 = self.calc_distance(cellVec, 0, genome, PlantUniverse.CELL_VECTOR_LENGTH * 3,
PlantUniverse.CELL_VECTOR_LENGTH)
result = min(d1, d2)
if result > PlantUniverse.MIN_GROWTH_DIST:
result = -1
return result
def evaluate_neighbors(self, universe, row, col, genome, allow_root, allow_surface):
"""
Evaluate neighbors to see where to grow into.
@param universe The universe.
@param row The row.
@param col The column.
@param genome The genome.
        @param allow_root Are roots allowed?
        @param allow_surface Is surface growth allowed?
"""
growth_target_row = row
growth_target_col = col
growth_target_score = float("inf")
for i in range(0, len(PlantGrowth.col_transform)):
eval_col = col + PlantGrowth.col_transform[i]
eval_row = row + PlantGrowth.row_transform[i]
if not allow_root and eval_row >= PlantUniverse.GROUND_LINE:
continue
if not allow_surface and eval_row < PlantUniverse.GROUND_LINE:
continue
if universe.is_valid(eval_row, eval_col):
p = self.get_growth_potential(universe, eval_row, eval_col, genome)
if p > 0:
if p < growth_target_score:
growth_target_score = p
growth_target_row = eval_row
growth_target_col = eval_col
# Grow new cell, if requested, did we ever set target row & col to anything?
        if growth_target_row != row or growth_target_col != col:
self.new_composition[growth_target_row][growth_target_col] = True
def run_growth(self, universe, genome):
"""
Run a growth cycle for the universe.
@param universe The universe.
@param genome The genome.
"""
# Does this plant have enough roots to grow?
if universe.surface_count == 0:
return
# The amount of leafy material per root nourishment. A higher number indicates
# more root nourishment than leafs.
root_ratio = universe.root_count / universe.surface_count
allow_root = root_ratio < 0.5
allow_surface = root_ratio > 0.5
# Reset the new composition to be the composition of the current universe
for row in range(0, PlantUniverse.UNIVERSE_HEIGHT):
for col in range(0, PlantUniverse.UNIVERSE_WIDTH):
self.new_composition[row][col] = False
for row in range(0, PlantUniverse.UNIVERSE_HEIGHT):
for col in range(0, PlantUniverse.UNIVERSE_WIDTH):
cell = universe.grid[row][col]
# see if we want to change the composition
if row < PlantUniverse.GROUND_LINE:
cell_vec = universe.get_cell_info_vector(row, col)
d1 = self.calc_distance(cell_vec, 0, genome, 0, PlantUniverse.CELL_VECTOR_LENGTH)
d2 = self.calc_distance(cell_vec, 0, genome, PlantUniverse.CELL_VECTOR_LENGTH,
PlantUniverse.CELL_VECTOR_LENGTH)
if d1 < d2:
cell.leafyness = cell.leafyness * PlantUniverse.STEM_TRANSITION
# Evaluate growth into each neighbor cell
if universe.can_grow(row, col):
self.evaluate_neighbors(universe, row, col, genome, allow_root, allow_surface)
# Copy the new composition back to the universe
for row in range(0, PlantUniverse.UNIVERSE_HEIGHT):
for col in range(0, PlantUniverse.UNIVERSE_WIDTH):
cell = universe.grid[row][col]
if self.new_composition[row][col]:
if row >= PlantUniverse.GROUND_LINE:
# Roots are always 100% stem for transfer.
cell.leafyness = 0
else:
cell.leafyness = 1.0
cell.energy = 1.0
cell.nourishment = 1.0
class PlantPhysics:
def distribute_energy(self, universe):
"""
Distribute the sunlight energy in the universe.
@param universe The universe.
"""
# Distribute sun energy downward
sunlight = [0] * PlantUniverse.UNIVERSE_WIDTH
for i in range(0, len(sunlight)):
sunlight[i] = 1.0
for row in range(0, PlantUniverse.UNIVERSE_HEIGHT):
for col in range(0, PlantUniverse.UNIVERSE_WIDTH):
# no sun underground
if row >= PlantUniverse.GROUND_LINE:
# blocked
decay = 0
else:
# no decay until otherwise calculated
decay = 1
cell = universe.grid[row][col]
cell.calculated_sunlight = sunlight[col]
# Collect resources for live cells
if cell.is_alive():
# Live cells cause the sunlight to decay (shade)
decay *= PlantUniverse.DECAY * cell.leafyness
# Set the energy based on sunlight level and composition of the live cell
my_energy = cell.calculated_sunlight * cell.leafyness
trans_energy = universe.calculate_transfer_energy(row, col) * (1.0 - cell.leafyness)
e = max(my_energy, trans_energy)
e = max(PlantUniverse.MIN_LIVING_ENERGY, e)
cell.energy = e
sunlight[col] = sunlight[col] * decay
def distribute_nourishment(self, universe):
"""
Distribute nourishment in the universe.
@param universe The universe.
"""
root_count = 0
surface_count = 0
# Distribute sun energy downward
water_table = [1.0] * PlantUniverse.UNIVERSE_WIDTH
for row in range(PlantUniverse.UNIVERSE_HEIGHT - 1, -1, -1):
for col in range(0, PlantUniverse.UNIVERSE_WIDTH):
# no water above ground
if row < PlantUniverse.GROUND_LINE:
# blocked
decay = 0
else:
# no decay until otherwise calculated
decay = 1
cell = universe.grid[row][col]
cell.calculated_water = water_table[col]
# Collect resources for live cells
if cell.is_alive():
# Live cells cause the water to decay (roots collect)
decay *= PlantUniverse.DECAY
# Set the energy based on sunlight level and composition of the live cell
my_water = cell.calculated_water * cell.leafyness
trans_water = universe.calculate_transfer_nourishment(row, col) * (1.0 - cell.leafyness)
n = max(my_water, trans_water)
n = max(PlantUniverse.MIN_LIVING_ENERGY, n)
cell.nourishment = n
# update the root and surface counts
if row >= PlantUniverse.GROUND_LINE:
root_count += cell.nourishment
else:
surface_count += cell.leafyness
water_table[col] = water_table[col] * decay
universe.root_count = root_count
universe.surface_count = surface_count
def run_physics(self, universe):
self.distribute_energy(universe)
self.distribute_nourishment(universe)
class PlantBoxMilestone2:
SAMPLE_PLANT = [
0.08414097456375995, 0.11845586131703176, 0.1868971940834313, 0.4346911204161327,
0.024190631402031804, 0.5773526701833149, 0.8997253827355136, 0.9267311086327318,
0.04639229538493471, 0.8190692654645835, 0.06531672676605614, 0.026431639742068264,
0.31497914852215286, 1.0276526539348398, 0.03303133293309127, 0.35946010922382937]
def __init__(self):
# Setup the seed.
self.universe = PlantUniverse()
self.universe.reset()
self.physics = PlantPhysics()
self.growth = PlantGrowth()
self.cycle = 0
# Init TK
self.root = Tk()
# A sample plant that we will animate.
self.display = DisplayPlant(self.root, self.universe)
self.display.update()
self.update_clock()
self.root.mainloop()
def update_clock(self):
self.physics.run_physics(self.universe)
self.growth.run_growth(self.universe, PlantBoxMilestone2.SAMPLE_PLANT)
self.display.update()
self.cycle = self.cycle + 1
if self.cycle < PlantUniverse.EVALUATION_CYCLES:
self.root.after(100, self.update_clock)
```
#### File: vol2-python-examples/examples/example_flock.py
```python
__author__ = 'jheaton'
# for python 3.x use 'tkinter' rather than 'Tkinter'
from Tkinter import *
import time
import sys
from random import *
import math
import Tkinter
# The number of particles.
PARTICLE_COUNT = 25
# The size of each particle.
PARTICLE_SIZE = 10
# The constant for cohesion.
COHESION = 0.01
# The constant for alignment.
ALIGNMENT = 0.5
# The constant for separation.
SEPARATION = 0.25
CANVAS_HEIGHT = 400
CANVAS_WIDTH = 400
class Particle:
def __init__(self):
self.location = [0] * 2
self.velocity = [0] * 2
self.poly = None
class App():
"""
Flocking.
"""
def __init__(self):
self.root = Tk()
self.c = Canvas(self.root,width=CANVAS_WIDTH, height=CANVAS_HEIGHT)
self.c.pack()
self.particles = []
self.c.create_rectangle(0, 0, CANVAS_WIDTH, CANVAS_HEIGHT, outline="black", fill="black")
for i in range(0, PARTICLE_COUNT) :
p = Particle()
p.location = [0] * 2
p.velocity = [0] * 2
p.location[0] = randint(0,CANVAS_WIDTH)
p.location[1] = randint(0,CANVAS_HEIGHT)
p.velocity[0] = 3
p.velocity[1] = uniform(0,2.0*math.pi)
p.poly = self.c.create_polygon([0,0,0,0,0,0],fill='white')
self.particles.append(p)
self.update_clock()
self.root.mainloop()
def max_index(self,data):
result = -1
for i in range(0,len(data)):
if result==-1 or data[i] > data[result]:
result = i
return result
def particle_location_mean(self,particles,dimension):
sum = 0
count = 0
for p in particles:
sum = sum + p.location[dimension]
count = count + 1
return sum / count
def particle_velocity_mean(self,particles,dimension):
sum = 0
count = 0
for p in particles:
sum = sum + p.velocity[dimension]
count = count + 1
return sum / count
def find_nearest(self,target,particles,k,max_dist):
result = []
temp_dist = [0] * k
worst_index = -1
for particle in particles:
if particle!=target:
# Euclidean distance
d = math.sqrt(
math.pow(particle.location[0] - target.location[0],2) +
math.pow(particle.location[1] - target.location[1],2) )
if d<=max_dist:
if len(result) < k:
temp_dist[len(result)] = d
result.append(particle)
worst_index = self.max_index(temp_dist)
                    elif d<temp_dist[worst_index]:
                        # replace the current worst neighbor with this closer particle
                        temp_dist[worst_index] = d
                        result[worst_index] = particle
                        worst_index = self.max_index(temp_dist)
return result
def flock(self):
for particle in self.particles:
###############################################################
## Begin implementation of three very basic laws of flocking.
###############################################################
neighbors = self.find_nearest(particle, self.particles, 5, sys.float_info.max)
nearest = self.find_nearest(particle, self.particles, 5, 10)
# 1. Separation - avoid crowding neighbors (short range repulsion)
separation = 0
if len(nearest) > 0:
meanX = self.particle_location_mean(nearest, 0)
meanY = self.particle_location_mean(nearest, 1)
dx = meanX - particle.location[0]
dy = meanY - particle.location[1]
separation = math.atan2(dx, dy) - particle.velocity[1]
separation += math.pi
# 2. Alignment - steer towards average heading of neighbors
alignment = 0
if len(neighbors) > 0:
alignment = self.particle_velocity_mean(neighbors, 1) - particle.velocity[1]
# 3. Cohesion - steer towards average position of neighbors (long range attraction)
cohesion = 0
if len(neighbors):
meanX = self.particle_location_mean(self.particles, 0)
meanY = self.particle_location_mean(self.particles, 1)
dx = meanX - particle.location[0]
dy = meanY - particle.location[1]
cohesion = math.atan2(dx, dy) - particle.velocity[1]
# perform the turn
# The degree to which each of the three laws is applied is configurable.
# The three default ratios that I provide work well.
turnAmount = (cohesion * COHESION) + (alignment * ALIGNMENT) + (separation * SEPARATION)
particle.velocity[1] += turnAmount
###############################################################
## End implementation of three very basic laws of flocking.
###############################################################
def update_clock(self):
# render the particles
points = [0] * 6
for p in self.particles:
points[0] = p.location[0]
points[1] = p.location[1]
r = p.velocity[1] + (math.pi * 5.0) / 12.0
points[2] = points[0] - (int) (math.cos(r) * PARTICLE_SIZE)
points[3] = points[1] - (int) (math.sin(r) * PARTICLE_SIZE)
r2 = p.velocity[1] + (math.pi * 7.0) / 12.0
points[4] = points[0] - (int) (math.cos(r2) * PARTICLE_SIZE)
points[5] = points[1] - (int) (math.sin(r2) * PARTICLE_SIZE)
self.c.coords(p.poly,Tkinter._flatten(points))
# move the particle
dx = math.cos(r)
dy = math.sin(r)
p.location[0] = p.location[0] + (dx * p.velocity[0])
p.location[1] = p.location[1] + (dy * p.velocity[0])
# handle wraps
if p.location[0] < 0:
p.location[0] = CANVAS_WIDTH
if p.location[1] < 0:
p.location[1] = CANVAS_HEIGHT
if p.location[0] > CANVAS_WIDTH:
p.location[0] = 0
if p.location[1] > CANVAS_HEIGHT:
p.location[1] = 0
self.flock()
# Next frame.
self.root.after(100, self.update_clock)
app=App()
``` |
{
"source": "jormaster3k/nest-datagraph",
"score": 3
} |
#### File: nest-datagraph/backend/poller.py
```python
import configparser
import datetime
import mysql.connector
from nest import Nest
import os
import pyowm
import sys
def to_f(temp):
return temp*1.8 + 32.0
def polling(c, n, w, d):
nstat = n.show_status()
if c['common']['units'] == "F":
owmTemp = to_f(w.get_temperature('celsius')['temp'])
nestCurrent = to_f(nstat['current_temperature'])
nestTarget = to_f(nstat['target_temperature'])
else:
owmTemp = w.get_temperature('celsius')['temp']
nestCurrent = nstat['current_temperature']
nestTarget = nstat['target_temperature']
query = "INSERT INTO status(date,city_curr_temp,city_curr_hum, \
nest_curr_temp,nest_targ_temp,nest_curr_hum,nest_targ_hum, \
nest_heat_state, current_schedule_mode, leaf,auto_away, \
time_to_target) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
args = (datetime.datetime.now(),
owmTemp,
w.get_humidity(),
nestCurrent,
nestTarget,
nstat['current_humidity'],
nstat['target_humidity'],
int(nstat['hvac_heater_state']),
nstat['current_schedule_mode'],
int(nstat['leaf']),
int(nstat['auto_away']),
nstat['time_to_target'])
d.execute(query, args)
def main():
try:
c = configparser.ConfigParser()
c.read(os.path.join(os.path.abspath(os.path.dirname(__file__)),
'../frontend/conf',
'settings.ini'))
# Setup Nest account
n = Nest(c['nest']['nest_username'],
c['nest']['nest_password'],
c['nest']['nest_sn'],
c['nest']['nest_index'],
units=c['common']['units'])
n.login()
n.get_status()
# Setup OpenWeatherMap account
owm = pyowm.OWM(c['owm']['owm_id'])
observation = owm.weather_at_id(int(c['owm']['owm_city_id']))
w = observation.get_weather()
# Connect to DB
cnx = mysql.connector.connect(user=c['mysql']['mysql_username'],
                                      password=c['mysql']['mysql_password'],
host=c['mysql']['mysql_hostname'],
database=c['mysql']['mysql_database'])
d = cnx.cursor()
polling(c, n, w, d)
cnx.commit()
d.close()
except Exception:
print(sys.exc_info()[1])
if __name__ == "__main__":
main()
``` |
{
"source": "JormaWuorio/Painonhallinta",
"score": 4
} |
#### File: JormaWuorio/Painonhallinta/test_suttu_luokat.py
```python
stock = {
'football': 4,
'boardgame': 10,
'leggos': 1,
'doll': 5,
}
def fillable(stock, merch, n):
    try:
        # True when the item is stocked in sufficient quantity, False otherwise
        return stock[merch] >= n
    except KeyError:
        return False
fillable(stock, 'action figure', 1)
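# A few extra illustrative checks (not part of the original snippet):
print(fillable(stock, 'football', 3))       # True  -- 4 in stock
print(fillable(stock, 'leggos', 2))         # False -- only 1 in stock
print(fillable(stock, 'action figure', 1))  # False -- not stocked at all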
``` |
{
"source": "jormono/Vinyl_Inventory",
"score": 3
} |
#### File: jormono/Vinyl_Inventory/Vinyl_inventory_6.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
import sqlite3
# TODO: Error Handling on integer inputs
conn = sqlite3.connect('vinyl_inventory.db')
c = conn.cursor()
c.execute("CREATE TABLE IF NOT EXISTS vinyl(id INTEGER, rack INTEGER, shelf INTEGER, box INTEGER, album TEXT, artist TEXT, year INTEGER, revisions INTEGER)")
def assign_address():
# pulls address of highest address item from db
c = conn.cursor()
c.execute('SELECT MAX(rack), MAX(shelf), MAX(box) FROM vinyl')
address = list(c.fetchall())
rack = address[0][0]
# first itteration will yield None while db is empty
if rack == None:
rack = 1
shelf = address[0][1]
if shelf == None:
shelf = 1
box = address[0][2]
if box == None:
box = 1
address_output = [str(int(rack)), str(int(shelf)), str(int(box))]
c.close()
return address_output
def retrieve_info(item_num):
c = conn.cursor()
    c.execute('SELECT * FROM vinyl WHERE id = ?', (item_num,))
from_db = c.fetchall()
return from_db
def item_max():
c = conn.cursor()
c.execute('SELECT MAX(id) FROM vinyl')
max_id = c.fetchone()
    # guard against an empty table, where MAX(id) is NULL
    return int(max_id[0]) if max_id[0] is not None else 0
def item_min():
c = conn.cursor()
c.execute('SELECT MIN(id) FROM vinyl')
min_id = c.fetchone()
    # guard against an empty table, where MIN(id) is NULL
    return int(min_id[0]) if min_id[0] is not None else 0
class Ui_Vinyl_Inventory_Main(object):
def setupUi(self, Vinyl_Inventory_Main):
Vinyl_Inventory_Main.setObjectName("Vinyl_Inventory_Main")
Vinyl_Inventory_Main.resize(803, 619)
self.centralwidget = QtWidgets.QWidget(Vinyl_Inventory_Main)
self.centralwidget.setObjectName("centralwidget")
self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
self.tabWidget.setGeometry(QtCore.QRect(-4, -1, 801, 581))
self.tabWidget.setObjectName("tabWidget")
current_address = assign_address()
# add inventory tab start
self.Inventory_Add = QtWidgets.QWidget()
self.Inventory_Add.setObjectName("Inventory_Add")
self.gridLayoutWidget_3 = QtWidgets.QWidget(self.Inventory_Add)
self.gridLayoutWidget_3.setGeometry(QtCore.QRect(0, 0, 801, 551))
self.gridLayoutWidget_3.setObjectName("gridLayoutWidget_3")
self.gridLayout_Add = QtWidgets.QGridLayout(self.gridLayoutWidget_3)
self.gridLayout_Add.setContentsMargins(0, 0, 0, 0)
self.gridLayout_Add.setObjectName("gridLayout_Add")
self.Box_Label_Add = QtWidgets.QLabel(self.gridLayoutWidget_3)
font = QtGui.QFont()
font.setPointSize(14)
self.Box_Label_Add.setFont(font)
self.Box_Label_Add.setObjectName("Box_Label_Add")
self.gridLayout_Add.addWidget(self.Box_Label_Add, 2, 3, 1, 1, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignBottom)
self.Rack_Label_Add = QtWidgets.QLabel(self.gridLayoutWidget_3)
font = QtGui.QFont()
font.setPointSize(14)
self.Rack_Label_Add.setFont(font)
self.Rack_Label_Add.setObjectName("Rack_Label_Add")
self.gridLayout_Add.addWidget(self.Rack_Label_Add, 2, 1, 1, 1, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignBottom)
self.Year_Input_Add = QtWidgets.QLineEdit(self.gridLayoutWidget_3)
self.Year_Input_Add.setObjectName("Year_Input_Add") # year input (add tab)
self.gridLayout_Add.addWidget(self.Year_Input_Add, 5, 3, 1, 1)
self.Artist_Label_Add = QtWidgets.QLabel(self.gridLayoutWidget_3)
font = QtGui.QFont()
font.setPointSize(14)
self.Artist_Label_Add.setFont(font)
self.Artist_Label_Add.setObjectName("Artist_Label_Add")
self.gridLayout_Add.addWidget(self.Artist_Label_Add, 4, 2, 1, 1, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignBottom)
self.Rack_Input_Add = QtWidgets.QLineEdit(self.gridLayoutWidget_3)
self.Rack_Input_Add.setObjectName("Rack_Input_Add") # rack input (add tab)
self.Rack_Input_Add.setText(str(current_address[0]))
self.gridLayout_Add.addWidget(self.Rack_Input_Add, 3, 1, 1, 1)
self.Submit_Data_Add = QtWidgets.QPushButton(self.gridLayoutWidget_3) # submit data button (add tab)
font = QtGui.QFont() # submit data button (add tab)
font.setPointSize(14) # submit data button (add tab)
self.Submit_Data_Add.setFont(font) # submit data button (add tab)
self.Submit_Data_Add.setObjectName("Submit_Data_Add") # submit data button (add tab)
self.gridLayout_Add.addWidget(self.Submit_Data_Add, 8, 1, 3, 3) # submit data button (add tab)
self.Box_Input_Add = QtWidgets.QLineEdit(self.gridLayoutWidget_3)
self.Box_Input_Add.setObjectName("Box_Input_Add") # box input (add tab)
self.Box_Input_Add.setText(str(current_address[2]))
self.gridLayout_Add.addWidget(self.Box_Input_Add, 3, 3, 1, 1)
self.Year_Label_Add = QtWidgets.QLabel(self.gridLayoutWidget_3)
font = QtGui.QFont()
font.setPointSize(14)
self.Year_Label_Add.setFont(font)
self.Year_Label_Add.setObjectName("Year_Label_Add")
self.gridLayout_Add.addWidget(self.Year_Label_Add, 4, 3, 1, 1, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignBottom)
self.Item_Num_Label_Add = QtWidgets.QLabel(self.gridLayoutWidget_3)
self.Item_Num_Label_Add.setObjectName("Item_Num_Label_Add")
self.gridLayout_Add.addWidget(self.Item_Num_Label_Add, 1, 2, 1, 1, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignBottom)
self.Shelf_Input_Add = QtWidgets.QLineEdit(self.gridLayoutWidget_3)
self.Shelf_Input_Add.setObjectName("Shelf_Input_Add") # shelf input (add tab)
self.Shelf_Input_Add.setText(str(current_address[1]))
self.gridLayout_Add.addWidget(self.Shelf_Input_Add, 3, 2, 1, 1)
self.Album_Label_Add = QtWidgets.QLabel(self.gridLayoutWidget_3)
font = QtGui.QFont()
font.setPointSize(14)
self.Album_Label_Add.setFont(font)
self.Album_Label_Add.setObjectName("Album_Label_Add")
self.gridLayout_Add.addWidget(self.Album_Label_Add, 4, 1, 1, 1, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignBottom)
self.Album_Input_Add = QtWidgets.QLineEdit(self.gridLayoutWidget_3)
self.Album_Input_Add.setObjectName("Album_Input_Add") # album input (add tab)
self.gridLayout_Add.addWidget(self.Album_Input_Add, 5, 1, 1, 1)
self.Artist_Input_Add = QtWidgets.QLineEdit(self.gridLayoutWidget_3)
self.Artist_Input_Add.setObjectName("Artist_Input_Add") # artist input (add tab)
self.gridLayout_Add.addWidget(self.Artist_Input_Add, 5, 2, 1, 1)
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_Add.addItem(spacerItem, 1, 4, 10, 1)
self.Shelf_Label_Add = QtWidgets.QLabel(self.gridLayoutWidget_3)
font = QtGui.QFont()
font.setPointSize(14)
self.Shelf_Label_Add.setFont(font)
self.Shelf_Label_Add.setObjectName("Shelf_Label_Add")
self.gridLayout_Add.addWidget(self.Shelf_Label_Add, 2, 2, 1, 1, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignBottom)
spacerItem1 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_Add.addItem(spacerItem1, 1, 0, 10, 1)
self.Warning_Label_Add = QtWidgets.QLabel(self.gridLayoutWidget_3)
self.Warning_Label_Add.setObjectName("Warning_Label_Add")
self.gridLayout_Add.addWidget(self.Warning_Label_Add, 7, 1, 1, 3, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignTop)
self.Warning_Label_Add_2 = QtWidgets.QLabel(self.gridLayoutWidget_3)
self.Warning_Label_Add_2.setObjectName("Warning_Label_Add_2")
self.gridLayout_Add.addWidget(self.Warning_Label_Add_2, 6, 1, 1, 3, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignBottom)
self.tabWidget.addTab(self.Inventory_Add, "")
# edit inventory tab start
self.Inventory_Edit = QtWidgets.QWidget()
self.Inventory_Edit.setObjectName("Inventory_Edit")
self.gridLayoutWidget_2 = QtWidgets.QWidget(self.Inventory_Edit)
self.gridLayoutWidget_2.setGeometry(QtCore.QRect(0, 0, 801, 551))
self.gridLayoutWidget_2.setObjectName("gridLayoutWidget_2")
self.gridLayout_Edit = QtWidgets.QGridLayout(self.gridLayoutWidget_2)
self.gridLayout_Edit.setContentsMargins(0, 0, 0, 0)
self.gridLayout_Edit.setObjectName("gridLayout_Edit")
self.Submit_Data_Edit = QtWidgets.QPushButton(self.gridLayoutWidget_2) # submit data button (edit tab)
font = QtGui.QFont() # submit data button (edit tab)
font.setPointSize(14) # submit data button (edit tab)
self.Submit_Data_Edit.setFont(font) # submit data button (edit tab)
        self.Submit_Data_Edit.setObjectName("Submit_Data_Edit") # submit data button (edit tab)
self.gridLayout_Edit.addWidget(self.Submit_Data_Edit, 8, 1, 3, 3) # submit data button (edit tab)
spacerItem2 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_Edit.addItem(spacerItem2, 1, 0, 10, 1)
self.Rack_Input_Edit = QtWidgets.QLineEdit(self.gridLayoutWidget_2)
self.Rack_Input_Edit.setObjectName("Rack_Input_Edit")
self.gridLayout_Edit.addWidget(self.Rack_Input_Edit, 3, 1, 1, 1)
self.Album_Label_Edit = QtWidgets.QLabel(self.gridLayoutWidget_2)
font = QtGui.QFont()
font.setPointSize(14)
self.Album_Label_Edit.setFont(font)
self.Album_Label_Edit.setObjectName("Album_Label_Edit")
self.gridLayout_Edit.addWidget(self.Album_Label_Edit, 4, 1, 1, 1, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignBottom)
self.Artist_Label_Edit = QtWidgets.QLabel(self.gridLayoutWidget_2)
font = QtGui.QFont()
font.setPointSize(14)
self.Artist_Label_Edit.setFont(font)
self.Artist_Label_Edit.setObjectName("Artist_Label_Edit")
self.gridLayout_Edit.addWidget(self.Artist_Label_Edit, 4, 2, 1, 1, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignBottom)
self.Shelf_Input_Edit = QtWidgets.QLineEdit(self.gridLayoutWidget_2)
self.Shelf_Input_Edit.setObjectName("Shelf_Input_Edit")
self.gridLayout_Edit.addWidget(self.Shelf_Input_Edit, 3, 2, 1, 1)
self.Shelf_Label_Edit = QtWidgets.QLabel(self.gridLayoutWidget_2)
font = QtGui.QFont()
font.setPointSize(14)
self.Shelf_Label_Edit.setFont(font)
self.Shelf_Label_Edit.setObjectName("Shelf_Label_Edit")
self.gridLayout_Edit.addWidget(self.Shelf_Label_Edit, 2, 2, 1, 1, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignBottom)
self.Box_Input_Edit = QtWidgets.QLineEdit(self.gridLayoutWidget_2)
self.Box_Input_Edit.setObjectName("Box_Input_Edit")
self.gridLayout_Edit.addWidget(self.Box_Input_Edit, 3, 3, 1, 1)
self.Year_Input_Edit = QtWidgets.QLineEdit(self.gridLayoutWidget_2)
self.Year_Input_Edit.setObjectName("Year_Input_Edit")
self.gridLayout_Edit.addWidget(self.Year_Input_Edit, 5, 3, 1, 1)
self.Year_Label_Edit = QtWidgets.QLabel(self.gridLayoutWidget_2)
font = QtGui.QFont()
font.setPointSize(14)
self.Year_Label_Edit.setFont(font)
self.Year_Label_Edit.setObjectName("Year_Label_Edit")
self.gridLayout_Edit.addWidget(self.Year_Label_Edit, 4, 3, 1, 1, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignBottom)
self.Album_Input_Edit = QtWidgets.QLineEdit(self.gridLayoutWidget_2)
self.Album_Input_Edit.setObjectName("Album_Input_Edit")
self.gridLayout_Edit.addWidget(self.Album_Input_Edit, 5, 1, 1, 1)
self.Artist_Input_Edit = QtWidgets.QLineEdit(self.gridLayoutWidget_2)
self.Artist_Input_Edit.setObjectName("Artist_Input_Edit")
self.gridLayout_Edit.addWidget(self.Artist_Input_Edit, 5, 2, 1, 1)
self.Rack_Label_Edit = QtWidgets.QLabel(self.gridLayoutWidget_2)
font = QtGui.QFont()
font.setPointSize(14)
self.Rack_Label_Edit.setFont(font)
self.Rack_Label_Edit.setObjectName("Rack_Label_Edit")
self.gridLayout_Edit.addWidget(self.Rack_Label_Edit, 2, 1, 1, 1, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignBottom)
self.Box_Label_Edit = QtWidgets.QLabel(self.gridLayoutWidget_2)
font = QtGui.QFont()
font.setPointSize(14)
self.Box_Label_Edit.setFont(font)
self.Box_Label_Edit.setObjectName("Box_Label_Edit")
self.gridLayout_Edit.addWidget(self.Box_Label_Edit, 2, 3, 1, 1, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignBottom)
spacerItem3 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_Edit.addItem(spacerItem3, 1, 4, 10, 1)
self.Warning_Label_Edit = QtWidgets.QLabel(self.gridLayoutWidget_2)
self.Warning_Label_Edit.setObjectName("Warning_Label_Edit")
self.gridLayout_Edit.addWidget(self.Warning_Label_Edit, 6, 1, 1, 3, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignBottom)
self.Warning_Label_2_Edit = QtWidgets.QLabel(self.gridLayoutWidget_2)
self.Warning_Label_2_Edit.setObjectName("Warning_Label_2_Edit")
self.gridLayout_Edit.addWidget(self.Warning_Label_2_Edit, 7, 1, 1, 3, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignTop)
self.Item_Num_Label_Edit = QtWidgets.QLabel(self.gridLayoutWidget_2)
self.Item_Num_Label_Edit.setObjectName("Item_Num_Label_Edit")
self.gridLayout_Edit.addWidget(self.Item_Num_Label_Edit, 1, 1, 1, 1, QtCore.Qt.AlignRight)
self.Item_Num_Input_Edit = QtWidgets.QLineEdit(self.gridLayoutWidget_2)
self.Item_Num_Input_Edit.setObjectName("Item_Num_Input_Edit")
self.gridLayout_Edit.addWidget(self.Item_Num_Input_Edit, 1, 2, 1, 1) # item number view button (edit tab)
self.Item_Num_View_Edit = QtWidgets.QPushButton(self.gridLayoutWidget_2) # item number view button (edit tab)
self.Item_Num_View_Edit.setObjectName("Item_Num_View_Edit") # item number view button (edit tab)
self.gridLayout_Edit.addWidget(self.Item_Num_View_Edit, 1, 3, 1, 1) # item number view button (edit tab)
self.tabWidget.addTab(self.Inventory_Edit, "")
# View inventory tab start
input_range = 10
self.Inventory_View = QtWidgets.QWidget()
self.Inventory_View.setObjectName("Inventory_View")
self.tableWidget = QtWidgets.QTableWidget(self.Inventory_View)
self.tableWidget.setGeometry(QtCore.QRect(20, 50, 761, 491))
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.setColumnCount(8)
self.tableWidget.setRowCount(input_range)
self.tableWidget.verticalHeader().setVisible(False)
self.horizontalLayoutWidget = QtWidgets.QWidget(self.Inventory_View)
self.horizontalLayoutWidget.setGeometry(QtCore.QRect(19, 0, 761, 51))
self.horizontalLayoutWidget.setObjectName("horizontalLayoutWidget")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.Viewing_item_num_label = QtWidgets.QLabel(self.horizontalLayoutWidget)
self.Viewing_item_num_label.setObjectName("Viewing_item_num_label")
self.horizontalLayout.addWidget(self.Viewing_item_num_label)
self.item_num_input_1_view = QtWidgets.QLineEdit(self.horizontalLayoutWidget)
self.item_num_input_1_view.setObjectName("item_num_input_1_view")
self.horizontalLayout.addWidget(self.item_num_input_1_view)
self.Viewing_item_num_label_2 = QtWidgets.QLabel(self.horizontalLayoutWidget)
self.Viewing_item_num_label_2.setObjectName("Viewing_item_num_label_2")
self.horizontalLayout.addWidget(self.Viewing_item_num_label_2)
self.item_num_input_2_view = QtWidgets.QLineEdit(self.horizontalLayoutWidget)
self.item_num_input_2_view.setObjectName("item_num_input_2_view")
input_high = item_max()
if input_high - input_range < item_min():
input_low = item_min()
else:
input_low = input_high - input_range
self.item_num_input_1_view.setText(str(input_low))
self.item_num_input_2_view.setText(str(input_high))
header_labels = ['ID', 'Rack', 'Shelf', 'Box', 'Album', 'Artist', 'Year', 'Revisions']
self.tableWidget.setHorizontalHeaderLabels(header_labels)
self.tableWidget.resizeColumnsToContents()
self.tableWidget.setColumnWidth(4, 250)
self.tableWidget.setColumnWidth(5, 250)
self.update_view_data()
#self.tableWidget.setEditTriggers(
self.horizontalLayout.addWidget(self.item_num_input_2_view)
self.update_view = QtWidgets.QPushButton(self.horizontalLayoutWidget)
self.update_view.setObjectName("update_view")
self.horizontalLayout.addWidget(self.update_view)
self.tabWidget.addTab(self.Inventory_View, "")
Vinyl_Inventory_Main.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(Vinyl_Inventory_Main)
self.menubar.setGeometry(QtCore.QRect(0, 0, 803, 21))
self.menubar.setObjectName("menubar")
Vinyl_Inventory_Main.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(Vinyl_Inventory_Main)
self.statusbar.setObjectName("statusbar")
Vinyl_Inventory_Main.setStatusBar(self.statusbar)
# button calls
self.Submit_Data_Add.clicked.connect(self.submit_data_add)
self.Submit_Data_Edit.clicked.connect(self.submit_data_edit)
self.Item_Num_View_Edit.clicked.connect(self.lookup_item)
self.update_view.clicked.connect(self.update_view_data)
# GUI calls
self.retranslateUi(Vinyl_Inventory_Main)
self.tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(Vinyl_Inventory_Main)
def retranslateUi(self, Vinyl_Inventory_Main):
_translate = QtCore.QCoreApplication.translate
Vinyl_Inventory_Main.setWindowTitle(_translate("Vinyl_Inventory_Main", "Vinyl Inventory"))
self.Box_Label_Add.setText(_translate("Vinyl_Inventory_Main", "Box Number"))
self.Rack_Label_Add.setText(_translate("Vinyl_Inventory_Main", "Rack Number"))
self.Artist_Label_Add.setText(_translate("Vinyl_Inventory_Main", "Artist Name"))
self.Submit_Data_Add.setText(_translate("Vinyl_Inventory_Main", "Submit Data"))
self.Year_Label_Add.setText(_translate("Vinyl_Inventory_Main", "Year of Album"))
item_num_label_add = "Item Number: " + str(item_max()+1)
self.Item_Num_Label_Add.setText(_translate("Vinyl_Inventory_Main", item_num_label_add)) ### text label, insert item num variable here (add tab)
self.Album_Label_Add.setText(_translate("Vinyl_Inventory_Main", "Album Name"))
self.Shelf_Label_Add.setText(_translate("Vinyl_Inventory_Main", "Shelf Number"))
self.Warning_Label_Add.setText(_translate("Vinyl_Inventory_Main", "All Submitted Data is Final! There is no \"Undo\" Functionality!"))
self.Warning_Label_Add_2.setText(_translate("Vinyl_Inventory_Main", "WARNING!"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.Inventory_Add), _translate("Vinyl_Inventory_Main", "Add Inventory"))
self.Submit_Data_Edit.setText(_translate("Vinyl_Inventory_Main", "Submit Data"))
self.Album_Label_Edit.setText(_translate("Vinyl_Inventory_Main", "Album Name"))
self.Artist_Label_Edit.setText(_translate("Vinyl_Inventory_Main", "Artist Name"))
self.Shelf_Label_Edit.setText(_translate("Vinyl_Inventory_Main", "Shelf Number"))
self.Year_Label_Edit.setText(_translate("Vinyl_Inventory_Main", "Year of Album"))
self.Rack_Label_Edit.setText(_translate("Vinyl_Inventory_Main", "Rack Number"))
self.Box_Label_Edit.setText(_translate("Vinyl_Inventory_Main", "Box Number"))
self.Warning_Label_Edit.setText(_translate("Vinyl_Inventory_Main", "WARNING!"))
self.Warning_Label_2_Edit.setText(_translate("Vinyl_Inventory_Main", "All Submitted Data is Final! There is no \"Undo\" Functionality!"))
self.Item_Num_Label_Edit.setText(_translate("Vinyl_Inventory_Main", "Viewing Item Number:"))
self.Item_Num_View_Edit.setText(_translate("Vinyl_Inventory_Main", "View Item"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.Inventory_Edit), _translate("Vinyl_Inventory_Main", "Edit Inventory"))
self.Viewing_item_num_label.setText(_translate("Vinyl_Inventory_Main", "Viewing Item Numbers:"))
self.Viewing_item_num_label_2.setText(_translate("Vinyl_Inventory_Main", "to"))
self.update_view.setText(_translate("Vinyl_Inventory_Main", "Update"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.Inventory_View), _translate("Vinyl_Inventory_Main", "View Inventory"))
def submit_data_add(self, data_add):
_translate = QtCore.QCoreApplication.translate
rack_value = int(float(self.Rack_Input_Add.text()))
shelf_value = int(self.Shelf_Input_Add.text())
box_value = int(self.Box_Input_Add.text())
item_value = item_max() + 1
album_value = self.Album_Input_Add.text()
artist_value = self.Artist_Input_Add.text()
year_value = int(self.Year_Input_Add.text())
c.execute("INSERT INTO vinyl (id, rack, shelf, box, album, artist, year, revisions) values (?, ?, ?, ?, ?, ?, ?, 0)", (item_value, rack_value, shelf_value, box_value, album_value, artist_value, year_value))
conn.commit()
        new_item_num_label_add = "Item Number: " + str(item_max() + 1)
self.Item_Num_Label_Add.setText(_translate("Vinyl_Inventory_Main", new_item_num_label_add))
self.Album_Input_Add.clear()
self.Artist_Input_Add.clear()
self.Year_Input_Add.clear()
def submit_data_edit(self, data_edit):
item_value_edit = self.Item_Num_Input_Edit.text()
rack_value_edit = int(float(self.Rack_Input_Edit.text()))
shelf_value_edit = int(float(self.Shelf_Input_Edit.text()))
box_value_edit = int(float(self.Box_Input_Edit.text()))
album_value_edit = self.Album_Input_Edit.text()
artist_value_edit = self.Artist_Input_Edit.text()
year_value_edit = int(float(self.Year_Input_Edit.text()))
edit_item_lookup = retrieve_info(item_value_edit)
revision_num_edit = 1 + edit_item_lookup[0][7]
c.execute("UPDATE vinyl SET rack = ?, shelf = ?, box = ?, album = ?, artist = ?, year = ?, revisions = ? WHERE id = ?",
(rack_value_edit, shelf_value_edit, box_value_edit, album_value_edit, artist_value_edit, year_value_edit, revision_num_edit, item_value_edit))
conn.commit()
self.Rack_Input_Edit.clear()
self.Shelf_Input_Edit.clear()
self.Box_Input_Edit.clear()
self.Album_Input_Edit.clear()
self.Artist_Input_Edit.clear()
self.Year_Input_Edit.clear()
def lookup_item(self):
item_selection_edit = int(self.Item_Num_Input_Edit.text())
edit_item_lookup = retrieve_info(item_selection_edit)
rack_lookup = edit_item_lookup[0][1]
shelf_lookup = edit_item_lookup[0][2]
box_lookup = edit_item_lookup[0][3]
album_lookup = edit_item_lookup[0][4]
artist_lookup = edit_item_lookup[0][5]
year_lookup = edit_item_lookup[0][6]
self.Rack_Input_Edit.setText(str(rack_lookup))
self.Shelf_Input_Edit.setText(str(shelf_lookup))
self.Box_Input_Edit.setText(str(box_lookup))
self.Album_Input_Edit.setText(str(album_lookup))
self.Artist_Input_Edit.setText(str(artist_lookup))
self.Year_Input_Edit.setText(str(year_lookup))
def update_view_data(self):
while self.tableWidget.rowCount() > 0:
self.tableWidget.removeRow(0)
self.tableWidget.hideRow(0)
list_min = int(self.item_num_input_1_view.text())
list_max = int(self.item_num_input_2_view.text())
# makes sure data is within database
min_id = item_min()
max_id = item_max()
if max_id == None:
return
elif list_max > max_id:
list_max = max_id
if min_id > list_min:
list_min = min_id
# populates view field
list_range = []
item_id = list_min
if list_max == list_min:
list_range.append(list_min)
else:
while item_id < list_max + 1:
list_range.append(item_id)
item_id += 1
self.tableWidget.setRowCount(len(list_range))
        for row in list_range:
            item_view_data = retrieve_info(row)
            if not item_view_data:  # skip ids with no matching record (e.g. empty table)
                continue
            self.tableWidget.insertRow(row)
self.tableWidget.setItem(row,0, QtWidgets.QTableWidgetItem(str(item_view_data[0][0])))
self.tableWidget.setItem(row,1, QtWidgets.QTableWidgetItem(str(item_view_data[0][1])))
self.tableWidget.setItem(row,2, QtWidgets.QTableWidgetItem(str(item_view_data[0][2])))
self.tableWidget.setItem(row,3, QtWidgets.QTableWidgetItem(str(item_view_data[0][3])))
self.tableWidget.setItem(row,4, QtWidgets.QTableWidgetItem(str(item_view_data[0][4])))
self.tableWidget.setItem(row,5, QtWidgets.QTableWidgetItem(str(item_view_data[0][5])))
self.tableWidget.setItem(row,6, QtWidgets.QTableWidgetItem(str(item_view_data[0][6])))
self.tableWidget.setItem(row,7, QtWidgets.QTableWidgetItem(str(item_view_data[0][7])))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Vinyl_Inventory_Main = QtWidgets.QMainWindow()
ui = Ui_Vinyl_Inventory_Main()
ui.setupUi(Vinyl_Inventory_Main)
Vinyl_Inventory_Main.show()
sys.exit(app.exec_())
# I moved these here, because if I include them where they were, they closed the cursor and connection too soon(probably)
c.close()
conn.close()
``` |
{
"source": "Jormunganders/Daedalus",
"score": 4
} |
#### File: data_structure/list/ArrayList.py
```python
class ArrayList(object):
    """Array-backed linear list, modeled after Java's ArrayList."""
__size = 0
def __init__(self, initial_capacity=10):
        if initial_capacity < 0:
            raise Exception("Illegal capacity")  # raise an exception
        self.__max_size = initial_capacity  # maximum data capacity
        self.__element_data = [None] * self.__max_size  # internal array that stores the data
    def add(self, element):
        """Append an element at the tail."""
if self.__size >= self.__max_size:
raise Exception("ArrayList is full!")
self.__element_data[self.__size] = element
self.__size += 1
    def insert(self, index, element):
        """Insert an element at an arbitrary position."""
        if index < 0 or index > self.__size:
            raise Exception("Illegal index")  # raise an exception
        if self.__size >= self.__max_size:
            raise Exception("ArrayList is full!")
        for temp_index in range(self.__size, index, -1):  # shift elements right, starting from the tail
            self.__element_data[temp_index] = self.__element_data[temp_index - 1]
self.__element_data[index] = element
self.__size += 1
def update(self, index, new_element):
self.__check_index(index)
self.__element_data[index] = new_element
def get(self, index):
self.__check_index(index)
return self.__element_data[index]
def size(self):
return self.__size
def remove(self, index):
self.__check_index(index)
        self.__element_data.pop(index)
        self.__element_data.append(None)  # keep the backing array at full capacity
        self.__size -= 1
def clear(self):
        self.__element_data = [None] * self.__max_size  # reset the backing array to full capacity
        self.__size = 0
    def __check_index(self, index):
        """Check that the index is in range; negative indices are not supported."""
        if index < 0 or index >= self.__size:
            raise Exception("Illegal index")  # raise an exception
def __str__(self):
return "size is " + str(self.__size) + ", data is " + str(self.__element_data)
```
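A brief usage sketch of the list above (the import path is an assumption; adjust it to wherever `ArrayList.py` lives):
```python
# Illustrative usage of ArrayList; the values are arbitrary.
from ArrayList import ArrayList

items = ArrayList(initial_capacity=5)
items.add("a")
items.add("c")
items.insert(1, "b")   # a, b, c
items.update(2, "C")   # a, b, C
print(items.get(1))    # b
print(items.size())    # 3
items.remove(0)
print(items)           # size is 2, data is ['b', 'C', None, None, None]
```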
#### File: data_structure/list/Queue.py
```python
class Node(object):
value = None
next = None
def __init__(self, value=None, next=None):
self.value = value
self.next = next
class Queue:
    __head = None  # head node
    __tail = None  # tail node
__size = 0
def __init__(self, initial_capacity=10):
if initial_capacity < 0:
raise Exception("Illegal capacity")
self.__max_size = initial_capacity
def append(self, value):
if self.__size >= self.__max_size:
raise Exception("Queue is full!")
new_node = Node(value)
if self.__head is None:
self.__head = new_node
self.__tail = new_node
else:
self.__tail.next = new_node
self.__tail = self.__tail.next
self.__size += 1
def remove(self):
if self.__size == 0:
return None
temp_node = self.__head
self.__head = self.__head.next
        if self.__head is None:  # the queue is now empty
self.__tail = None
self.__size -= 1
return temp_node.value
def index(self, index):
        if index < 0 or index >= self.__size:
            raise Exception("Illegal index")  # raise an exception
if index == 0:
return self.__head.value
if index == self.__size - 1:
return self.__tail.value
temp_node = self.__head
temp_index = 0
while temp_node is not None and temp_index < index:
temp_node = temp_node.next
temp_index += 1
if temp_node is None:
return None
else:
return temp_node.value
def size(self):
return self.__size
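# --- Illustrative usage (not part of the original file): a quick smoke test ---
if __name__ == "__main__":
    queue = Queue(initial_capacity=3)
    queue.append("a")
    queue.append("b")
    queue.append("c")
    print(queue.index(1))  # b
    print(queue.remove())  # a (FIFO order)
    print(queue.size())    # 2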
``` |
{
"source": "Jormungandr1105/Ascii_Art",
"score": 3
} |
#### File: Jormungandr1105/Ascii_Art/main.py
```python
from PIL import Image, ImageFont, ImageDraw
import csv
import os
def load_image(image_name):
global image_types
for image_type in image_types:
try:
image = Image.open(r"Photos\to_ascii-tize\{}{}".format(image_name, image_type))
image = image.convert("L")
return image
        except (FileNotFoundError, AttributeError):
pass
def get_chars(char_file):
    with open(char_file, "r") as f:
        in_text = f.read()
    for line in in_text.split("\n"):
        values = line.split("\t")
        if len(values) >= 2:  # skip blank or malformed lines
            char_intensities[values[0]] = float(values[1])
def process_image(image, spacing=1):
global darkest_pixel
global lightest_pixel
y_spacing = spacing * 2
x, y = image.size
px = image.load()
total_pix = 0
for b in range(0, y, y_spacing):
line = []
for a in range(0, x, spacing):
pix_total = 0
num_pix = 0
for d in range(b, min(b + y_spacing, y)):
for c in range(a, min(a + spacing, x)):
pix_total += (px[c, d])
num_pix += 1
avg_pix_val = int(pix_total/num_pix)
if avg_pix_val < darkest_pixel:
darkest_pixel = avg_pix_val
if avg_pix_val > lightest_pixel:
lightest_pixel = avg_pix_val
total_pix += pix_total
line.append(avg_pix_val)
canvas.append(line)
return total_pix/(x*y)
def convert_to_text():
global text
global canvas
global char_intensities
global darkest_pixel
global lightest_pixel
darkest_char = 255
for char in char_intensities:
if char_intensities[char] < darkest_char:
darkest_char = char_intensities[char]
for line in canvas:
line_text = ""
for number in line:
old_number = number
new_number = 255 - ((lightest_pixel - old_number) * ((255-darkest_char)/(lightest_pixel-darkest_pixel)))
lowest = 0
next_char = "@"
for char in char_intensities:
if new_number >= char_intensities[char] >= lowest:
lowest = char_intensities[char]
next_char = char
line_text += next_char
line_text += "\n"
text += line_text
def save_text(filename):
global text
global path
with open("TSV\\{}.tsv".format(filename.split(".")[0]), 'wt') as output:
tsv_writer = csv.writer(output, delimiter='\t', lineterminator='\n')
for line in canvas:
tsv_writer.writerow(line)
output.close()
x1, y1 = len(canvas[0]), len(canvas)
try:
f = open("Ascii\\{0}\\{0}_{1}x{2}.txt".format(filename.split(".")[0], x1, y1), "wt")
except FileNotFoundError:
os.mkdir(path + filename.split(".")[0])
f = open("Ascii\\{0}\\{0}_{1}x{2}.txt".format(filename.split(".")[0], x1, y1), "wt")
f.write(text)
f.close()
def save_png(filename):
global text
global path
global font_name
mult = 2
x1, y1 = len(canvas[0]), len(canvas)
image = Image.new("RGB", (mult*3 * x1, mult*6 * y1), (255, 255, 255))
draw = ImageDraw.Draw(image)
font = ImageFont.truetype("c:/Windows/Fonts\\{}.ttf".format(font_name), size=mult*5)
draw.text((0, 0), text, fill=(0, 0, 0), font=font)
try:
image.save(".\\Photos\\outfiles\\{0}\\{0}_{1}x{2}_{3}.png".format(filename, x1, y1, font_name))
except FileNotFoundError:
os.mkdir(".\\Photos\\outfiles\\{}".format(filename))
image.save(".\\Photos\\outfiles\\{0}\\{0}_{1}x{2}_{3}.png".format(filename, x1, y1, font_name))
path = "./Ascii/"
canvas = []
text = ""
darkest_pixel = 255
lightest_pixel = 0
# char intensities for consolas
char_intensities = {}
# Image Types
image_types = [".jpg", ".png"]
if __name__ == '__main__':
filename = input("Name of Photo to ASCII-tize ==> ")
n = int(input("How Many Pixels for Width of Character ==> "))
font_name = input("Choose Font ==> ")
get_chars(".\\fonts\\{}.tsv".format(font_name))
c_im = load_image(filename)
pix_val_avg = process_image(c_im, n)
convert_to_text()
save_text(filename)
save_png(filename)
print(pix_val_avg)
print(darkest_pixel)
print(lightest_pixel)
``` |
{
"source": "Jormungandr1105/COTS-Star-Tracker",
"score": 3
} |
#### File: star_tracker/star_tracker/test_array_transformations.py
```python
from unittest import TestCase
import array_transformations as xforms
class Test(TestCase):
# def test_rx_passive(self):
# self.fail()
#
# def test_ry_passive(self):
# self.fail()
#
# def test_rz_passive(self):
# self.fail()
#
# def test_attitude_matrix2quat(self):
# self.fail()
#
# def test_quat2attitude_matrix(self):
# self.fail()
#
# def test_vector_dot(self):
# self.fail()
#
# def test_vector_norm(self):
# self.fail()
#
# def test_normalize_vector_array(self):
# self.fail()
#
def test_camera2homogeneous(self):
self.fail()
# def test_camera2vector(self):
# self.fail()
#
# def test_pixel2vector(self):
# self.fail()
#
# def test_vector2pixel(self):
# self.fail()
#
# def test_vector2homogeneous(self):
# self.fail()
#
# def test_homogeneous2vector(self):
# self.fail()
#
# def test_vector_array_transform(self):
# self.fail()
#
# def test_matrix_multiplication(self):
# self.fail()
#
# def test_sub_ind_format(self):
# self.fail()
#
# def test_sub2ind(self):
# self.fail()
#
# def test_ind2sub(self):
# self.fail()
def test_check_axis(self):
import numpy as np
reqd_dim = 2
output_axis = 1
nrows, ncols = (reqd_dim, 1)
a = np.arange(nrows*ncols)
self.assertEqual(
xforms.check_axis(a, reqd_dim, axis=None), output_axis)
self.assertEqual(
xforms.check_axis(a, reqd_dim, axis=output_axis), output_axis)
with self.assertRaises(ValueError):
xforms.check_axis(a, reqd_dim, axis=0)
nrows, ncols = (1, reqd_dim)
output_axis = 1
a = np.arange(nrows*ncols)
self.assertEqual(
xforms.check_axis(a, reqd_dim, axis=None), output_axis)
self.assertEqual(
xforms.check_axis(a, reqd_dim, axis=output_axis), output_axis)
with self.assertRaises(ValueError):
xforms.check_axis(a, reqd_dim, axis=0)
reqd_dim = 3
nrows, ncols = (reqd_dim, 4)
output_axis = 0
a = np.arange(nrows*ncols).reshape((nrows, ncols))
self.assertEqual(
xforms.check_axis(a, reqd_dim, axis=None), output_axis)
self.assertEqual(
xforms.check_axis(a, reqd_dim, axis=output_axis), output_axis)
with self.assertRaises(ValueError):
xforms.check_axis(a, reqd_dim, axis=1)
nrows, ncols = (4, reqd_dim)
output_axis = 1
a = np.arange(nrows*ncols).reshape((nrows, ncols))
self.assertEqual(
xforms.check_axis(a, reqd_dim, axis=None), output_axis)
self.assertEqual(
xforms.check_axis(a, reqd_dim, axis=output_axis), output_axis)
with self.assertRaises(ValueError):
xforms.check_axis(a, reqd_dim, axis=0)
nrows, ncols = (0, 0)
a = np.arange(nrows*ncols).reshape((nrows, ncols))
with self.assertRaises(ValueError):
xforms.check_axis(a, reqd_dim, axis=None)
xforms.check_axis(a, reqd_dim, axis=0)
xforms.check_axis(a, reqd_dim, axis=1)
with self.assertRaises(ValueError):
# dimensions exceed 2
a = np.arange(nrows*ncols).reshape((nrows, ncols, 1))
xforms.check_axis(a, reqd_dim, axis=None)
xforms.check_axis(a, 0, axis=None)
with self.assertRaises(TypeError):
# not supplied with numpy array as arr
xforms.check_axis((0, 0), reqd_dim, axis=None)
################################
#MAIN CODE
################################
if __name__ == '__main__':
import unittest
unittest.main()
``` |
{
"source": "Jormungandr1105/Project-Atlas",
"score": 3
} |
#### File: Code/plane_shift/PlaneShift.py
```python
import math
class PlaneShift():
    '''
    Maps 2D coordinates expressed in plane 1 to the same physical points expressed in plane 2,
    given two reference points whose coordinates are known in both planes
    (a rotation about the shared Z axis plus a translation).
    '''
def __init__(self, pl1_p1, pl2_p1, pl1_p2, pl2_p2):
self.min_error = 0.1
self.point_1 = Point(pl1_p1, pl2_p1)
self.point_2 = Point(pl1_p2, pl2_p2)
self.theta = self.calculate_theta() #RADIANS
self.calculate_offset()
# Find the angle theta between the axis of the two
# 2D planes with shared Z axis
def calculate_theta(self):
x_diff = self.point_2.pl1.x - self.point_1.pl1.x
y_diff = self.point_2.pl1.y - self.point_1.pl1.y
x_p_diff = self.point_2.pl2.x - self.point_1.pl2.x
y_p_diff = self.point_2.pl2.y - self.point_1.pl2.y
r1 = math.hypot(x_diff, y_diff)
r2 = math.hypot(x_p_diff, y_p_diff)
#print(r1)
#print(r2)
assert(abs(r2-r1) < self.min_error)
theta_0 = math.acos(x_diff/r1)
theta_p = math.acos(x_p_diff/r2)
return theta_0 - theta_p
# Takes a point from plane 1 and outputs the coords
# for the same point from the POV of plane 2
def calculate_shifted(self, point0):
pi_2 = float(math.pi/2.0) # PI/2
point = Vec2(0,0) # Just initializing
# Follows the format: p' = x*cos(theta) + y*sin(theta)
point.x = (point0.x)*math.cos(self.theta) + \
(point0.y)*math.sin(self.theta) + self.offset.x
point.y = (point0.x)*math.cos(self.theta+pi_2) + \
(point0.y)*math.sin(self.theta+pi_2)+self.offset.y
return point
# We have the slope, but require an offset in the x and y dimensions
# to give us some kind of reference for the locations of the centers
# of the planes relative to each other
def calculate_offset(self):
self.offset = Vec2(0,0)
point_1_calc = self.calculate_shifted(self.point_1.pl1)
self.offset.x = self.point_1.pl2.x - point_1_calc.x
self.offset.y = self.point_1.pl2.y - point_1_calc.y
print(self.offset.x, self.offset.y)
# Dummy Struct to hold x y coords
class Vec2():
def __init__(self, x, y):
self.x = x
self.y = y
# Dummy Struct to hold same points from both planes
class Point():
def __init__(self, pl1, pl2):
self.pl1 = pl1
self.pl2 = pl2
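# --- Illustrative usage (not part of the original file) ---
# Two reference points known in both frames; here plane 2 is plane 1 translated by (1, 2).
if __name__ == "__main__":
    shift = PlaneShift(Vec2(0, 0), Vec2(1, 2),   # point 1 in plane 1 / plane 2
                       Vec2(1, 0), Vec2(2, 2))   # point 2 in plane 1 / plane 2
    moved = shift.calculate_shifted(Vec2(3, 4))
    print(moved.x, moved.y)  # approximately 4.0 6.0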
``` |
{
"source": "Jormungandr1105/wordle_solver",
"score": 3
} |
#### File: wordle_solver/src/Dictionary.py
```python
class Dictionary:
def __init__(self,file,subdir):
self.file = file
self.conversions = {}
self.add_conversions()
if not subdir:
self.create_dict()
def add_conversions(self):
# I figured any way I did it would
# involve me writing at least the
# letters out, so I just did the
# whole thing
self.conversions["a"] = 2
self.conversions["b"] = 3
self.conversions["c"] = 5
self.conversions["d"] = 7
self.conversions["e"] = 11
self.conversions["f"] = 13
self.conversions["g"] = 17
self.conversions["h"] = 19
self.conversions["i"] = 23
self.conversions["j"] = 29
self.conversions["k"] = 31
self.conversions["l"] = 37
self.conversions["m"] = 41
self.conversions["n"] = 43
self.conversions["o"] = 47
self.conversions["p"] = 53
self.conversions["q"] = 59
self.conversions["r"] = 61
self.conversions["s"] = 67
self.conversions["t"] = 71
self.conversions["u"] = 73
self.conversions["v"] = 79
self.conversions["w"] = 83
self.conversions["x"] = 89
self.conversions["y"] = 97
self.conversions["z"] = 101
def create_dict(self):
self.words = {}
f = open(self.file,"r")
text = f.read()
f.close()
words = text.split("\n")
for word in words:
hash_val = 1
for letter in word:
hash_val *= self.conversions[letter]
self.words[word] = hash_val
def create_subdictionary(self):
newdict = Dictionary(self.file,True)
newdict.words = self.words
return newdict
def remove(self,letter,contains,count):
new_words = {}
char_val = self.conversions[letter]**count
for word in self.words:
if contains:
if self.words[word] % char_val != 0:
new_words[word] = self.words[word]
else:
if self.words[word] % char_val == 0:
new_words[word] = self.words[word]
self.words = new_words
def remove_position(self,letter,position,contains):
# If contains, removes words where letter not at postion
# If not, removes words where letter is at positions
new_words = {}
for word in self.words:
if word[position] == letter and not contains:
new_words[word] = self.words[word]
elif word[position] != letter and contains:
new_words[word] = self.words[word]
self.words = new_words
def word_list(self):
word_list = []
for word in self.words:
word_list.append(word)
return word_list
def print(self):
for word in self.words:
print("[{}]".format(word))
def check_contains(letter,word,count):
for x in range(5):
if letter == word[x]:
count-=1
return count < 1
def check_location(letter,word,position):
# Checks if "letter" at position "index"
return letter == word[position]
```
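Because every word hashes to the product of its letters' primes, the filters above reduce to divisibility tests. A short sketch of how a solver might apply guess feedback (the word-list filename is an assumption):
```python
# Illustrative only: the word-list path is an assumption.
from Dictionary import Dictionary

words = Dictionary("five_letter_words.txt", False)
# Example feedback for a guess: 'c' is absent, 'r' is present, 'a' is correct at index 2.
words.remove("c", contains=True, count=1)      # drop words that contain 'c'
words.remove("r", contains=False, count=1)     # drop words that do not contain 'r'
words.remove_position("a", 2, contains=True)   # keep only words with 'a' at index 2
print(len(words.word_list()))
```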
#### File: wordle_solver/src/Pidgeon.py
```python
from Dictionary import Dictionary
import random as rand
class Pidgeon:
def __init__(self,dict_file):
self.dictionary = Dictionary(dict_file,False)
def generate_word(self):
return rand.choice(self.dictionary.word_list())
def get_letter_pop(self):
# Letter popularity: count, for each letter, how many remaining words
# contain it (their hash is divisible by the letter's prime).
chars = []
for letter in self.dictionary.conversions:
char_val = self.dictionary.conversions[letter]
count = 0
for word in self.dictionary.words:
if self.dictionary.words[word] % char_val == 0:
count += 1
chars.append((letter, count))
return chars
``` |
{
"source": "jorn86/adventofcode2019",
"score": 3
} |
#### File: adventofcode2019/day11/day11.py
```python
from typing import List
from IntCoder import IntCoder
class Ship:
def __init__(self, height, width) -> None:
super().__init__()
self.moves = {
0: (1, 0), # up
1: (0, -1), # left
2: (-1, 0), # down
3: (0, 1), # right
}
self.grid = list([0] * height for _ in range(width))
self.direction = 0
self.position = (width // 2, height // 2)
def read(self):
return self.grid[self.position[0]][self.position[1]]
def paint(self, color):
self.grid[self.position[0]][self.position[1]] = color
def turn(self, relative_direction):
self.direction += relative_direction + 4
self.direction %= 4
move = self.moves[self.direction]
self.position = (self.position[0] + move[0], self.position[1] + move[1])
class Day11Coder(IntCoder):
def __init__(self, memory: List[int], ship):
super().__init__(memory)
self._next_is_color = True
self.ship = ship
def get_input(self) -> int:
return self.ship.read()
def handle_output(self, value: int):
if self._next_is_color:
self._next_is_color = False
self.ship.paint(value)
return True
else:
self._next_is_color = True
self.ship.turn(1 if value == 1 else -1)
return False
@staticmethod
def _halt(mode1, mode2, mode3):
raise RuntimeError('finished')
def step(coder):
try:
coder.run()
return True
except RuntimeError:
return False
def part1():
ship = Ship(100, 100)
coder = Day11Coder(IntCoder.extended_memory(IntCoder.read_file('./input.txt'), 1107), ship)
covered = set()
while step(coder):
covered.add(ship.position)
print(len(covered))
def part2():
ship = Ship(80, 10)
ship.paint(1)
coder = Day11Coder(IntCoder.extended_memory(IntCoder.read_file('./input.txt'), 851), ship)
while step(coder):
pass
ship.grid.reverse()
for line in ship.grid:
chars = ['\u2588' if c == 1 else ' ' for c in line]
chars.reverse()
print(''.join(chars))
part1()
part2()
```
#### File: adventofcode2019/day17/day17.py
```python
from IntCoder import IntCoder
coder = IntCoder(IntCoder.extended_memory(IntCoder.read_file('./input.txt'), 4000))
coder.run()
width = coder.output.index(10) + 1
lines = [coder.output[i:i + width] for i in range(0, len(coder.output), width)]
def is_intersection(x, y):
return lines[x][y] == 35 and lines[x-1][y] == 35 and lines[x+1][y] == 35 and lines[x][y-1] == 35 and lines[x][y+1] == 35
result = 0
for x in range(1, len(lines) - 2):
for y in range(1, len(lines[x]) - 1):
if is_intersection(x, y):
result += (x * y)
print(result)
```
#### File: adventofcode2019/day23/day23.py
```python
from collections import deque
from typing import List, Deque, Dict
from IntCoder import IntCoder
class Day23Coder(IntCoder):
nat = (0, 0)
def __init__(self, program: List[int], address, queues: Dict[int, Deque[int]]):
super().__init__(IntCoder.extended_memory(program, 10000))
self.address = address
self.queues = queues
self.out_cache = []
self.first_input = True
def get_input(self) -> int:
if self.first_input:
self.first_input = False
print(f'init {self.address}')
return self.address
own_queue = self.queues[self.address]
if len(own_queue) == 0:
return -1
return own_queue.popleft()
def handle_output(self, value: int):
self.out_cache.append(value)
if len(self.out_cache) == 3:
address = self.out_cache[0]
x = self.out_cache[1]
y = self.out_cache[2]
if address == 255:
print(f'to nat: ({x}, {y})') # first print of this is part 1
Day23Coder.nat = (x, y)
else:
target_queue = self.queues[address]
target_queue.append(x)
target_queue.append(y)
self.out_cache = []
def run(self) -> None:
super().run()
print(f'{self.address} finished')
io = {i: deque() for i in range(50)}
coders = [Day23Coder(IntCoder.read_file('./input.txt'), i, io) for i in range(50)]
while True:
all_empty = True
for coder in coders:
coder._step()
all_empty &= len(io[coder.address]) == 0
if all_empty and Day23Coder.nat != (0, 0):
print(f'sending nat {Day23Coder.nat}') # last print of this is part 2
io[0].append(Day23Coder.nat[0])
io[0].append(Day23Coder.nat[1])
Day23Coder.nat = (0, 0)
```
#### File: adventofcode2019/day2/day2.py
```python
from IntCoder import IntCoder
def run(input1, input2):
memory = [int(i) for i in open('./input.txt', 'r').read().split(',')]
memory[1] = input1
memory[2] = input2
IntCoder(memory).run()
return memory[0]
print(run(12, 2)) # part 1
for in1 in range(0, 100):
for in2 in range(0, 100):
if run(in1, in2) == 19690720:
print(100 * in1 + in2) # part 2: the answer is 100 * noun + verb
```
#### File: adventofcode2019/day8/day8.py
```python
size = 25 * 6
with open('./input.txt', 'r') as f:
inputs = [int(d) for d in f.read()]
layers = [inputs[i:i + size] for i in range(0, len(inputs), size)]
_min = 999
for layer in layers:
zeroes = layer.count(0)
if zeroes < _min:
_min = zeroes
ones = layer.count(1)
twos = layer.count(2)
print(f'{zeroes}: {ones}x{twos}={ones * twos}')
def paint(index):
for layer in layers:
if layer[index] != 2:
return layer[index]
chars = ['\u2588' if paint(i) == 1 else ' ' for i in range(0, 150)]
for line in [''.join(chars[i:i + 25]) for i in range(0, 150, 25)]:
print(line)
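# Illustrative note (not part of the original script): paint() composites layers
# by taking, for each pixel, the first layer value that is not 2 (transparent);
# for example, stacked values [2, 2, 1, 0] render as 1.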
``` |
{
"source": "jornada-im/pyEDIutils",
"score": 3
} |
#### File: jornada-im/pyEDIutils/audit_rpts.py
```python
import pyEDIutils.pasta_api_requests as rq
import pandas as pd
from datetime import date
import os
def auditroot_to_df(ediroot):
"""
Convert an ElementTree object to a dataframe. Each element has
scope, identifier, revision, resource type, and read counts
extracted into a row in the dataframe.
"""
# Iterate over each element in ediroot and extract the variables
print(ediroot.text)
df = pd.DataFrame({'scope':[scope.text for scope in ediroot.iter('scope')],
'identifier':[int(ident.text) for ident in ediroot.iter('identifier')],
'revision':[int(rev.text) for rev in ediroot.iter('revision')],
'resource':[rtype.text for rtype in ediroot.iter('resourceType')],
'total_reads':[int(tot.text) for tot in ediroot.iter('totalReads')],
'non_robot_reads':[int(nrr.text) for nrr
in ediroot.iter('nonRobotReads')]}
)
return(df)
def auditreport_to_df(ediroot):
"""
Convert an Element Tree object to a dataframe. Each element has
entry time, service method, resource id, and user extracted into a row in the
dataframe.
"""
# Iterate over each element in ediroot and extract the variables
print(ediroot.text)
df = pd.DataFrame({
'entry_dt':[etime.text for etime in ediroot.iter('entryTime')],
'method':[meth.text for meth in ediroot.iter('serviceMethod')],
'resource_id':[rid.text for rid in ediroot.iter('resourceId')],
'user':[user.text for user in ediroot.iter('user')]}
)
return(df)
def request_audit(identifier, rev=None, scope='knb-lter-jrn'):
"""Generate an audit report for a document or data package
Parameters
----------
identifier : [type]
[description]
rev : [type], optional
[description], by default None
scope : str, optional
[description], by default 'knb-lter-jrn'
"""
# An element tree will be returned from the api request
if rev is not None:
print('Requesting access data for {0}.{1}.{2}'.format(scope,
identifier, rev))
root = rq.aud_package(identifier, rev=rev, scope=scope)
else:
print('Requesting access data for {0}.{1}'.format(
scope, identifier))
root = rq.aud_document(identifier, scope=scope)
# Convert elements to rows in dataframe
df_out = auditroot_to_df(root)
return(df_out)
def request_aud_report(servmethod, dn, pw, user=None, group=None,
resid='knb-lter-jrn', fromdt=date.today(), todt=None,
lim=10000):
"""Get an audit report from PASTA+
Parameters
----------
servmethod : [type]
[description]
dn : [type]
[description]
pw : [type]
[description]
user : [type], optional
[description], by default None
group : [type], optional
[description], by default None
resid : str, optional
[description], by default 'knb-lter-jrn'
fromdt : [type], optional
[description], by default date.today()
todt : [type], optional
[description], by default None
lim : int, optional
[description], by default 10000
"""
# An element tree will be returned from the api request
print('Requesting audit report for {0} starting {1}'.format(resid, fromdt))
root = rq.aud_report_dpm(servmethod, user, group, resid, fromdt, todt, lim,
dn, pw)
# Convert elements to rows in dataframe
df_out = auditreport_to_df(root)
return(df_out)
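# --- Illustrative sketch, not part of the original module ---
# A possible way to call the helpers above; the identifier, revision and
# output file name are hypothetical placeholders.
if __name__ == "__main__":
    df = request_audit(210001, rev=1, scope="knb-lter-jrn")
    print(df.head())
    df.to_csv(os.path.join(".", "audit_210001.csv"), index=False)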
``` |
{
"source": "Jornason/RoboND-Kinematics-Project",
"score": 3
} |
#### File: Jornason/RoboND-Kinematics-Project/kinematics.py
```python
import rospy
import tf
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
from geometry_msgs.msg import Pose
from mpmath import *
from sympy import *
import csv
class Kinematics:
#Compute and save in memory the transformation matrices when the object is created
#to speed-up run-time computations
def __init__(self):
# Create symbols
q1, q2, q3, q4, q5, q6, q7 = symbols('q1:8')
d1, d2, d3, d4, d5, d6, d7 = symbols('d1:8')
a0, a1, a2, a3, a4, a5, a6 = symbols('a0:7')
alpha0, alpha1, alpha2, alpha3, alpha4, alpha5, alpha6 = symbols('alpha0:7')
# Create Modified DH parameters
self.s = {alpha0: 0, a0: 0, d1: 0.75, q1: q1,
alpha1: rad(-90), a1: 0.35, d2: 0, q2: q2-rad(90),
alpha2: 0, a2: 1.25, d3: 0, q3: q3,
alpha3: rad(-90), a3: -0.054, d4: 1.50, q4: q4,
alpha4: rad(90), a4: 0, d5: 0, q5: q5,
alpha5: rad(-90), a5: 0, d6: 0, q6: q6,
alpha6: 0, a6: 0, d7: 0.303, q7: 0
}
# Create individual transformation matrices
self.T0_1 = self.transformation_matrix(alpha0, a0, d1, q1).subs(self.s)
self.T1_2 = self.transformation_matrix(alpha1, a1, d2, q2).subs(self.s)
self.T2_3 = self.transformation_matrix(alpha2, a2, d3, q3).subs(self.s)
self.T3_4 = self.transformation_matrix(alpha3, a3, d4, q4).subs(self.s)
self.T4_5 = self.transformation_matrix(alpha4, a4, d5, q5).subs(self.s)
self.T5_6 = self.transformation_matrix(alpha5, a5, d6, q6).subs(self.s)
self.T6_EE = self.transformation_matrix(alpha6, a6, d7, q7).subs(self.s)
#Create complete transformation matrix
self.T0_EE = self.T0_1 * self.T1_2 * self.T2_3 * self.T3_4 * self.T4_5 * self.T5_6 * self.T6_EE
# Compensate for rotation discrepancy between DH parameters and Gazebo
self.Rot_err = self.rot_z(rad(180)) * self.rot_y(rad(-90))
#Generic computation part of R0_3 (only needs to be done once)
self.R0_3_gen = self.T0_1[0:3,0:3] * self.T1_2[0:3,0:3] * self.T2_3[0:3,0:3]
#Compute inverse kinematics
def compute_IK(self, px, py, pz, roll, pitch, yaw):
# Create symbols
q1, q2, q3, q4, q5, q6, q7 = symbols('q1:8')
d1, d2, d3, d4, d5, d6, d7 = symbols('d1:8')
a0, a1, a2, a3, a4, a5, a6 = symbols('a0:7')
alpha0, alpha1, alpha2, alpha3, alpha4, alpha5, alpha6 = symbols('alpha0:7')
r, p, y = symbols('r p y')
#Create rotation matrices for x,y,z
R_x = self.rot_x(r)
R_y = self.rot_y(p)
R_z = self.rot_z(y)
# Create rotation matrix of the end effector
R_EE = R_z * R_y * R_x
# Compensate for rotation discrepancy between DH parameters and Gazebo
R_EE = R_EE * self.Rot_err
R_EE = R_EE.subs({'r': roll, 'p': pitch, 'y': yaw})
#Position of the end effector
EE = Matrix([[px], [py], [pz]])
#Position of the wrist center
WC = EE - (0.303) * R_EE[:, 2]
#Computation of joint angles using geometric inverse kinematics method
theta1 = atan2(WC[1], WC[0])
a = 1.501
b = sqrt(pow((sqrt(WC[0] * WC[0] + WC[1] * WC[1]) - 0.35), 2) + \
pow((WC[2] - 0.75), 2))
c = 1.25
angle_a = acos((b*b + c*c - a*a) / (2*b*c))
angle_b = acos((a*a + c*c - b*b) / (2*a*c))
delta = atan2(WC[2] - 0.75, sqrt(WC[0]*WC[0] + WC[1]*WC[1]) - 0.35)
theta2 = pi/2 - angle_a - delta
theta3 = pi/2 - (angle_b + 0.036)
R0_3 = self.R0_3_gen.evalf(subs={q1:theta1, q2:theta2, q3:theta3})
R3_6 = R0_3.inv("LU") * R_EE
theta6 = atan2(-R3_6[1,1], R3_6[1,0])
theta5 = atan2(-R3_6[1,1]/sin(theta6), R3_6[1,2])
theta4 = atan2(R3_6[2,2]/sin(theta5), -R3_6[0,2]/sin(theta5))
print("Theta1: %04.8f"% theta1)
print("Theta2: %04.8f"% theta2)
print("Theta3: %04.8f"% theta3)
print("Theta4: %04.8f"% theta4)
print("Theta5: %04.8f"% theta5)
print("Theta6: %04.8f"% theta6)
print("\n")
#simplify angles of theta4 and theta6, to try to prevent large rotations
theta4 = self.simplify_angle(theta4)
theta6 = self.simplify_angle(theta6)
return theta1, theta2, theta3, theta4, theta5, theta6
#Substitute the joint angles to return the transformation matrix
def compute_FK(self, theta1, theta2, theta3, theta4, theta5, theta6):
q1, q2, q3, q4, q5, q6, q7 = symbols('q1:8')
d1, d2, d3, d4, d5, d6, d7 = symbols('d1:8')
a0, a1, a2, a3, a4, a5, a6 = symbols('a0:7')
alpha0, alpha1, alpha2, alpha3, alpha4, alpha5, alpha6 = symbols('alpha0:7')
return self.T0_EE.evalf(subs={q1: theta1, q2: theta2, q3: theta3, q4: theta4, q5: theta5, q6: theta6})
#Prevent large rotations by clipping between -pi and pi
def simplify_angle(self,angle):
angle = abs(angle) % (2*pi) * sign(angle)
if angle > pi:
angle = angle - 2*pi
return angle
#Rotation matrix for x
def rot_x(self,q):
R_x = Matrix([[ 1, 0, 0],
[ 0, cos(q), -sin(q)],
[ 0, sin(q), cos(q)]])
return R_x
#Rotation matrix for y
def rot_y(self,q):
R_y = Matrix([[ cos(q), 0, sin(q)],
[ 0, 1, 0],
[-sin(q), 0, cos(q)]])
return R_y
#Rotation matrix for z
def rot_z(self,q):
R_z = Matrix([[ cos(q), -sin(q), 0],
[ sin(q), cos(q), 0],
[ 0, 0, 1]])
return R_z
#Generic transformation matrix
def transformation_matrix(self,alpha, a, d, q):
T = Matrix([[ cos(q), -sin(q), 0, a],
[ sin(q)*cos(alpha), cos(q)*cos(alpha), -sin(alpha), -sin(alpha)*d],
[ sin(q)*sin(alpha), cos(q)*sin(alpha), cos(alpha), cos(alpha)*d],
[ 0, 0, 0, 1]])
return T
``` |
{
"source": "JorneyJoy/tapi-yandex-metrika",
"score": 2
} |
#### File: tapi-yandex-metrika/tapi_yandex_metrika/tapi_yandex_metrika.py
```python
import json
import logging
import re
import time
import simplejson
from tapi import TapiAdapter, generate_wrapper_from_adapter, JSONAdapterMixin
from tapi.exceptions import ResponseProcessException, ClientError
from tapi_yandex_metrika import exceptions
from .resource_mapping import (
STATS_RESOURCE_MAPPING,
LOGSAPI_RESOURCE_MAPPING,
MANAGEMENT_RESOURCE_MAPPING,
)
logging.basicConfig(level=logging.INFO)
class YandexMetrikaManagementClientAdapter(JSONAdapterMixin, TapiAdapter):
resource_mapping = MANAGEMENT_RESOURCE_MAPPING # resource map
def get_api_root(self, api_params):
return "https://api-metrika.yandex.net/"
def get_request_kwargs(self, api_params, *args, **kwargs):
"""
Enrich the request with additional parameters.
:param api_params: dict
:return: dict
"""
params = super().get_request_kwargs(api_params, *args, **kwargs)
params["headers"]["Authorization"] = "OAuth {}".format(
api_params["access_token"]
)
return params
def get_error_message(self, data, response=None):
"""Извлечение комментария к ошибке запроса."""
try:
if not data and response.content.strip():
data = json.loads(response.content.decode("utf-8"))
if data:
return data.get("message")
except (json.JSONDecodeError, simplejson.JSONDecodeError):
return response.text
def process_response(self, response, **request_kwargs):
"""Обработка ответа запроса."""
data = self.response_to_native(response)
if isinstance(data, dict) and data.get("errors"):
raise ResponseProcessException(ClientError, data)
else:
# Additional processing happens in the parent class method.
data = super().process_response(response)
return data
def response_to_native(self, response):
"""Преобразование ответа."""
if response.content.strip():
try:
return response.json()
except (json.JSONDecodeError, simplejson.JSONDecodeError):
return response.text
def wrapper_call_exception(
self, response, tapi_exception, api_params, *args, **kwargs
):
"""
For raising custom exceptions,
e.g. when the server responds with 200
but errors are reported inside the JSON body.
"""
try:
jdata = response.json()
except (json.JSONDecodeError, simplejson.JSONDecodeError):
raise exceptions.YandexMetrikaApiError(response)
else:
error_code = int(jdata.get("code", 0))
message = jdata.get("message")
if error_code == 429:
raise exceptions.YandexMetrikaLimitError(response)
elif error_code == 403:
raise exceptions.YandexMetrikaTokenError(response)
elif message == "Incorrect part number" and api_params.get(
"receive_all_data", False
):
# Triggered when trying to download a report part that does not exist.
# When all parts of a report are fetched automatically, there is
# always an attempt to fetch the next part, so this is expected.
pass
else:
raise exceptions.YandexMetrikaClientError(response)
def transform_results(self, results, requests_kwargs, responses, api_params):
"""
Transforms the data after all responses have been received.
:param results: list : data of all requests
:param requests_kwargs: list : parameters of all requests
:param responses: list : responses of all requests
:param api_params: dict : parameters passed to the client
:return: list
"""
return results[0] if isinstance(results, list) and results else results
class YandexMetrikaLogsapiClientAdapter(YandexMetrikaManagementClientAdapter):
resource_mapping = LOGSAPI_RESOURCE_MAPPING
def transform_results(self, results, requests_kwargs, responses, api_params):
"""
Transforms the data after all responses have been received.
:param results: list : data of all requests
:param requests_kwargs: list : parameters of all requests
:param responses: list : responses of all requests
:param api_params: dict : parameters passed to the client
:return: list
"""
if (
api_params.get("receive_all_data", False)
and responses[0].url.find("download") > -1
):
# Merges all parts of the report into one.
data, cols = "", ""
for i in results:
cols = i[:i.find("\n")] # header row with the column names
# Data without the header row.
data += i[i.find("\n") + 1:]
return "{}\n{}".format(cols, data)
else:
return results[0] if isinstance(results, list) and results else results
def transform(self, data, request_kwargs, response, api_params, *args, **kwargs):
"""Преобразование данных."""
if response.url.find("download") > -1:
json_data = [
i.split("\t") for i in data.split("\n")
if i != "" # удаляется пустая последняя строка
]
return json_data
else:
raise NotImplementedError(
"Преобразование в JSON доступно только для ответов ресурса download"
)
def retry_request(
self,
response,
tapi_exception,
api_params,
count_request_error,
*args,
**kwargs
):
"""
Conditions for retrying the request.
If this returns True, the request will be repeated.
response = tapi_exception.client().response
status_code = tapi_exception.client().status_code
response_data = tapi_exception.client().data
"""
status_code = tapi_exception.client().status_code
response_data = tapi_exception.client().data or {}
error_code = int((response_data).get("code", 0))
message = response_data.get("message")
if (
message == "Only log of requests in status 'processed' can be downloaded"
and api_params.get("wait_report", False)
and response.url.find("download") > -1
):
# This error occurs when trying to download a report that is not ready yet.
sleep_time = count_request_error * 20
logging.info(
"Включен режим ожидания готовности отчета. "
"Проверка готовности отчета через {} сек.".format(sleep_time)
)
time.sleep(sleep_time)
return True
return False
def extra_request(
self,
api_params,
current_request_kwargs,
request_kwargs_list,
response,
current_result,
):
"""
Generates parameters for new API requests so that
all parts of the report can be fetched.
Additional requests are built here: they will be made
if a non-empty list of request parameter sets
is returned from this method.
:param api_params: dict : parameters passed to the client
:param current_request_kwargs: dict : {headers, data, url, params} : parameters of the current request
:param request_kwargs_list: list :
Parameter sets for the requests that are still to be made.
Extra parameter sets can be appended to it to trigger additional requests.
:param response: request_object : current response
:param current_result: json : current result
:return: list : request_kwargs_list
"""
# request_kwargs_list may contain parameter sets for requests that have not been made yet,
# so new sets must be appended to it rather than replacing it.
if (
api_params.get("receive_all_data", False)
and response.url.find("download") > -1
):
url = current_request_kwargs["url"]
part = int(re.findall(r"part/([0-9]*)/", url)[0])
new_part = part + 1
logging.info("Включен режим получения всех данных. "
f"Запрашиваю следующую часть отчета: {new_part}")
new_url = re.sub(r"part/[0-9]*/", "part/{}/".format(new_part), url)
new_request_kwargs = {**current_request_kwargs, "url": new_url}
request_kwargs_list.append(new_request_kwargs)
return request_kwargs_list
def fill_resource_template_url(self, template, params):
"""
Fills the resource address template, from which the URL is built, with parameters.
:param template: str : resource template
:param params: dict : parameters
:return:
"""
if template.find("/part/") > -1 and not params.get("partNumber"):
# Forcibly adds the partNumber parameter if it is missing.
params.update(partNumber=0)
return template.format(**params)
class YandexMetrikaStatsClientAdapter(YandexMetrikaManagementClientAdapter):
resource_mapping = STATS_RESOURCE_MAPPING
def retry_request(
self,
response,
tapi_exception,
api_params,
count_request_error,
*args,
**kwargs
):
"""
Conditions for retrying the request.
If this returns True, the request will be repeated.
response = tapi_exception.client().response
status_code = tapi_exception.client().status_code
response_data = tapi_exception.client().data
"""
status_code = tapi_exception.client().status_code
response_data = tapi_exception.client().data
error_code = int((response_data or {}).get("code", 0))
if error_code == 429:
logging.error("Превышен лимит запросов")
elif error_code == 503:
if count_request_error < api_params.get("retries_if_server_error", 3):
logging.warning("Серверная ошибка. Повторный запрос через 3 секунды")
time.sleep(3)
return True
return False
def extra_request(
self,
api_params,
current_request_kwargs,
request_kwargs_list,
response,
current_result,
):
"""
Generates parameters for new API requests so that
all rows of the report can be fetched.
Additional requests are built here: they will be made
if a non-empty list of request parameter sets
is returned from this method.
:param api_params: dict : parameters passed to the client
:param current_request_kwargs: dict : {headers, data, url, params} : parameters of the current request
:param request_kwargs_list: list :
Parameter sets for the requests that are still to be made.
Extra parameter sets can be appended to it to trigger additional requests.
:param response: request_object : current response
:param current_result: json : current result
:return: list : request_kwargs_list
"""
# request_kwargs_list may contain parameter sets for requests that have not been made yet,
# so new sets must be appended to it rather than replacing it.
total_rows = int(current_result["total_rows"])
sampled = current_result["sampled"]
logging.info("Наличие семплирования: " + str(sampled))
limit = current_request_kwargs["params"].get("limit", 10000)
offset = current_result["query"]["offset"] + limit
if offset <= total_rows:
logging.info(
"Получено строк {}. Всего строк {}".format(offset-1, total_rows)
)
if api_params.get("receive_all_data", False):
logging.info("Включен режим получения всех данных. "
"Запрашиваю следующие части отчета.")
new_request_kwargs = current_request_kwargs.copy()
new_request_kwargs["params"]["offset"] = offset
request_kwargs_list.append(new_request_kwargs)
return request_kwargs_list
def transform(self, data, request_kwargs, response, api_params, *args, **kwargs):
"""Преобразование данных."""
new_data = []
columns = data[0]["query"]["dimensions"] + data[0]["query"]["metrics"]
for result in data:
data = result.pop("data")
for row in data:
dimensions = [i["name"] for i in row["dimensions"]]
metrics = row["metrics"]
new_data.append(dimensions + metrics)
return [columns] + new_data
def transform_results(self, results, requests_kwargs, responses, api_params):
"""
Transforms the data after all responses have been received.
:param results: list : data of all requests
:param requests_kwargs: list : parameters of all requests
:param responses: list : responses of all requests
:param api_params: dict : parameters passed to the client
:return: list
"""
return results
YandexMetrikaStats = generate_wrapper_from_adapter(YandexMetrikaStatsClientAdapter)
YandexMetrikaLogsapi = generate_wrapper_from_adapter(YandexMetrikaLogsapiClientAdapter)
YandexMetrikaManagement = generate_wrapper_from_adapter(YandexMetrikaManagementClientAdapter)
```
#### File: tapi-yandex-metrika/tests/stats_tests.py
```python
import logging
from pprint import pprint
import yaml
from tapi_yandex_metrika import YandexMetrikaStats
logging.basicConfig(level=logging.DEBUG)
with open("../config.yml", "r") as stream:
data_loaded = yaml.safe_load(stream)
ACCESS_TOKEN = data_loaded["token"]
api = YandexMetrikaStats(
access_token=ACCESS_TOKEN,
receive_all_data=False
)
def test_info():
api.stats().info()
def test_get_stats():
params = dict(
ids="178620",
metrics="ym:s:visits,ym:s:bounces",
dimensions="ym:s:date,ym:s:startOfMonth",
sort="ym:s:date",
limit=3
)
r = api.stats().get(params=params)
print()
print(r)
print()
print(r().data)
print()
pprint(r().transform())
def test_get_stats2():
params = dict(
direct_client_logins="ya-ozon-travel1",
ids="178620",
metrics="ym:ad:clicks,ym:ad:RUBAdCost",
dimensions="ym:ad:startOfHour,ym:ad:<attribution>DirectID,ym:ad:<attribution>DirectPhraseOrCond",
sort="ym:ad:startOfHour",
filters="ym:ad:hour==10",
date1="today",
date2="today",
group="hour",
accuracy="full",
attribution="lastsign",
limit=10
)
r = api.stats().get(params=params)
import datetime as dt
import re
data = r().transform()
pprint(data)
def t(i):
i[0] = dt.datetime.strptime(i[0], "%Y-%m-%d %H:%M:%S")
i[1] = int(str(i[1] or 0).replace("N-", ""))
i[2] = re.sub(r" -.*", "", i[2] or "")
i[3] = int(i[3] or 0)
i.insert(1, i[0].hour)
return i
data = list(map(t, data))
pprint(data)
``` |
{
"source": "jornh/amundsensearchlibrary",
"score": 3
} |
#### File: search_service/models/table.py
```python
from typing import Iterable
class Table:
def __init__(self, *,
name: str,
key: str,
description: str,
cluster: str,
database: str,
schema_name: str,
column_names: Iterable[str],
tags: Iterable[str],
last_updated_epoch: int) -> None:
self.name = name
self.key = key
self.description = description
self.cluster = cluster
self.database = database
self.schema_name = schema_name
self.column_names = column_names
self.tags = tags
self.last_updated_epoch = last_updated_epoch
def __repr__(self) -> str:
return 'Table(name={!r}, key={!r}, description={!r}, ' \
'cluster={!r} database={!r}, schema_name={!r}, column_names={!r}, ' \
'tags={!r}, last_updated={!r})'.format(self.name,
self.key,
self.description,
self.cluster,
self.database,
self.schema_name,
self.column_names,
self.tags,
self.last_updated_epoch)
```
#### File: unit/proxy/test_elasticsearch.py
```python
import unittest
from unittest.mock import patch, MagicMock
from typing import Iterable
from search_service import create_app
from search_service.proxy.elasticsearch import ElasticsearchProxy
from search_service.models.search_result import SearchResult
from search_service.models.table import Table
class MockSearchResult:
def __init__(self, *,
table_name: str,
table_key: str,
table_description: str,
cluster: str,
database: str,
schema_name: str,
column_names: Iterable[str],
tag_names: Iterable[str],
table_last_updated_epoch: int) -> None:
self.table_name = table_name
self.table_key = table_key
self.table_description = table_description
self.cluster = cluster
self.database = database
self.schema_name = schema_name
self.column_names = column_names
self.tag_names = tag_names
self.table_last_updated_epoch = table_last_updated_epoch
class TestElasticsearchProxy(unittest.TestCase):
def setUp(self) -> None:
self.app = create_app(config_module_class='search_service.config.LocalConfig')
self.app_context = self.app.app_context()
self.app_context.push()
mock_elasticsearch_client = MagicMock()
self.es_proxy = ElasticsearchProxy(elasticsearch_client=mock_elasticsearch_client)
self.mock_result1 = MockSearchResult(table_name='test_table',
table_key='test_key',
table_description='test_description',
cluster='gold',
database='test_db',
schema_name='test_schema',
column_names=['test_col1', 'test_col2'],
tag_names=[],
table_last_updated_epoch=1527283287)
self.mock_result2 = MockSearchResult(table_name='test_table2',
table_key='test_key2',
table_description='test_description2',
cluster='gold',
database='test_db2',
schema_name='test_schema2',
column_names=['test_col1', 'test_col2'],
tag_names=[],
table_last_updated_epoch=1527283287)
self.mock_result3 = Table(name='test_table3',
key='test_key3',
description='test_description3',
cluster='gold',
database='test_db3',
schema_name='test_schema3',
column_names=['test_col1', 'test_col2'],
tags=['match'],
last_updated_epoch=1527283287)
@patch('elasticsearch_dsl.Search.execute')
def test_search_with_empty_query_string(self, mock_search: MagicMock) -> None:
expected = SearchResult(total_results=0, results=[])
result = self.es_proxy.fetch_search_results(query_term='')
# check the output was empty list
self.assertDictEqual(vars(result), vars(expected),
"Received non-empty search results!")
# ensure elasticsearch_dsl Search endpoint was not called
# assert_not_called doesn't work. See here: http://engineroom.trackmaven.com/blog/mocking-mistakes/
self.assertTrue(mock_search.call_count == 0)
@patch('elasticsearch_dsl.Search.execute')
def test_search_with_empty_result(self,
mock_search: MagicMock) -> None:
mock_results = MagicMock()
mock_results.hits.total = 0
mock_search.return_value = mock_results
expected = SearchResult(total_results=0, results=[])
result = self.es_proxy.fetch_search_results(query_term='test_query_term')
self.assertDictEqual(vars(result), vars(expected),
"Received non-empty search results!")
@patch('elasticsearch_dsl.Search.execute')
def test_search_with_one_result(self,
mock_search: MagicMock) -> None:
mock_results = MagicMock()
mock_results.hits.total = 1
mock_results.__iter__.return_value = [self.mock_result1]
mock_search.return_value = mock_results
expected = SearchResult(total_results=1,
results=[Table(name='test_table',
key='test_key',
description='test_description',
cluster='gold',
database='test_db',
schema_name='test_schema',
column_names=['test_col1', 'test_col2'],
tags=[],
last_updated_epoch=1527283287)])
resp = self.es_proxy.fetch_search_results(query_term='test_query_term')
self.assertEquals(resp.total_results, expected.total_results,
"search result is not of length 1")
self.assertIsInstance(resp.results[0],
Table,
"Search result received is not of 'Table' type!")
self.assertDictEqual(vars(resp.results[0]), vars(expected.results[0]),
"Search Result doesn't match with expected result!")
@patch('elasticsearch_dsl.Search.execute')
def test_search_with_multiple_result(self,
mock_search: MagicMock) -> None:
mock_results = MagicMock()
mock_results.hits.total = 2
mock_results.__iter__.return_value = [self.mock_result1, self.mock_result2]
mock_search.return_value = mock_results
expected = SearchResult(total_results=2,
results=[Table(name='test_table',
key='test_key',
description='test_description',
cluster='gold',
database='test_db',
schema_name='test_schema',
column_names=['test_col1', 'test_col2'],
tags=[],
last_updated_epoch=1527283287),
Table(name='test_table2',
key='test_key2',
description='test_description2',
cluster='gold',
database='test_db2',
schema_name='test_schema2',
column_names=['test_col1', 'test_col2'],
tags=[],
last_updated_epoch=1527283287)])
resp = self.es_proxy.fetch_search_results(query_term='test_query_term')
self.assertEquals(resp.total_results, expected.total_results,
"search result is not of length 2")
for i in range(2):
self.assertIsInstance(resp.results[i],
Table,
"Search result received is not of 'Table' type!")
self.assertDictEqual(vars(resp.results[i]),
vars(expected.results[i]),
"Search result doesn't match with expected result!")
@patch('search_service.proxy.elasticsearch.ElasticsearchProxy._search_helper')
def test_search_match_with_field(self,
mock_search: MagicMock) -> None:
mock_search.return_value = SearchResult(total_results=1,
results=[self.mock_result3])
expected = SearchResult(total_results=1,
results=[Table(name='test_table3',
key='test_key3',
description='test_description3',
cluster='gold',
database='test_db3',
schema_name='test_schema3',
column_names=['test_col1', 'test_col2'],
tags=['match'],
last_updated_epoch=1527283287)])
resp = self.es_proxy.fetch_search_results_with_field(query_term='test_query_term',
field_name='tag_names',
field_value='match')
self.assertEquals(resp.total_results, expected.total_results)
self.assertDictEqual(vars(resp.results[0]),
vars(expected.results[0]),
"Search result doesn't match with expected result!")
@patch('search_service.proxy.elasticsearch.ElasticsearchProxy._search_helper')
def test_search_not_match_with_field(self,
mock_search: MagicMock) -> None:
mock_search.return_value = SearchResult(total_results=0,
results=[])
resp = self.es_proxy.fetch_search_results_with_field(query_term='test_query_term',
field_name='tag_names',
field_value='match')
self.assertEquals(resp.total_results, 0)
@patch('search_service.proxy.elasticsearch.ElasticsearchProxy._search_wildcard_helper')
def test_search_regex_match_field(self,
mock_search: MagicMock) -> None:
mock_search.return_value = SearchResult(total_results=1,
results=[self.mock_result3])
expected = SearchResult(total_results=1,
results=[Table(name='test_table3',
key='test_key3',
description='test_description3',
cluster='gold',
database='test_db3',
schema_name='test_schema3',
column_names=['test_col1', 'test_col2'],
tags=['match'],
last_updated_epoch=1527283287)])
resp = self.es_proxy.fetch_search_results_with_field(query_term='test_query_term',
field_name='tag_names',
field_value='*match')
self.assertEquals(resp.total_results, expected.total_results)
self.assertDictEqual(vars(resp.results[0]),
vars(expected.results[0]),
"Search result doesn't match with expected result!")
``` |
{
"source": "JornoBG/lihtsamad-rakendused",
"score": 3
} |
#### File: JornoBG/lihtsamad-rakendused/LaborinthPL.py
```python
map = [
[12, 0, 1, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 24]
]
VABA_TEE = 0
SEIN = 1
start_x = 0
start_y = 0
def saab_liikuda_paremale(map, praegused_kordinaadid):
# "Can move right": the tile to the right of the current coordinates
# must exist and be a free path.
x, y = praegused_kordinaadid
if y + 1 >= len(map[x]):
return False
return map[x][y + 1] == VABA_TEE
print(saab_liikuda_paremale(map, [start_x, start_y]))
print(saab_liikuda_paremale(map, [1, 1]))
def print_kaart():
print(map[0])
print(map[1])
print(map[2])
print(map[3])
print(map[4])
print_kaart()
``` |
{
"source": "jornpeters/PyLaTeX",
"score": 3
} |
#### File: PyLaTeX/examples/basic.py
```python
from pylatex import Document, Section, Subsection
from pylatex.utils import italic, escape_latex
def fill_document(doc):
"""Adds a section, a subsection and some text to the document.
:param doc: the document
:type doc: :class:`pylatex.Document` instance
"""
with doc.create(Section('A section')):
doc.append('Some regular text and some ' + italic('italic text. '))
with doc.create(Subsection('A subsection')):
doc.append(escape_latex('Also some crazy characters: $&#{}'))
if __name__ == '__main__':
# Basic document
doc = Document('basic')
fill_document(doc)
doc.generate_pdf()
doc.generate_tex()
# Document with `\maketitle` command activated
doc = Document(author='Author', date='01/01/01', title='Title',
maketitle=True)
fill_document(doc)
doc.generate_pdf('basic_maketitle', clean=False)
# Add stuff to the document
doc.append(Section('A second section'))
doc.append('Some text.')
doc.generate_pdf('basic_maketitle2')
tex = doc.dumps() # The document as string in LaTeX syntax
```
#### File: PyLaTeX/pylatex/math.py
```python
from .base_classes import BaseLaTeXContainer
class Math(BaseLaTeXContainer):
def __init__(self, data=None, inline=False):
"""
:param data:
:param inline:
:type data: list
:type inline: bool
"""
self.inline = inline
super().__init__(data)
def dumps(self):
"""
:rtype: str
"""
if self.inline:
string = '$' + super().dumps(token=' ') + '$'
else:
string = '$$' + super().dumps(token=' ') + '$$\n'
super().dumps()
return string
```
#### File: PyLaTeX/pylatex/utils.py
```python
_latex_special_chars = {
'&': r'\&',
'%': r'\%',
'$': r'\$',
'#': r'\#',
'_': r'\_',
'{': r'\{',
'}': r'\}',
'~': r'\lettertilde{}',
'^': r'\letterhat{}',
'\\': r'\letterbackslash{}',
'\n': r'\\\\',
}
def escape_latex(s):
"""Escape characters that are special in latex.
Sources:
* http://tex.stackexchange.com/a/34586/43228
* http://stackoverflow.com/a/16264094/2570866
:param s:
:type s: str
:return:
:rtype: str
"""
return ''.join(_latex_special_chars.get(c, c) for c in s)
def fix_filename(filename):
"""Latex has problems if there are one or more points in the filename,
thus 'abc.def.jpg' will be changed to '{abc.def}.jpg
:param filename:
:type filename: str
:return:
:rtype: str
"""
parts = filename.split('.')
return '{' + '.'.join(parts[0:-1]) + '}.' + parts[-1]
def dumps_list(l, escape=False, token='\n'):
"""Dumps a list that can contain anything.
:param l:
:param escape:
:param token:
:type l: list
:type escape: bool
:type token: str
:return:
:rtype: str
"""
return token.join(_latex_item_to_string(i, escape) for i in l)
def _latex_item_to_string(i, escape=False):
"""Uses the render method when possible, otherwise uses str.
:param i:
:param escape:
:type i: object
:type escape: bool
:return:
:rtype: str
"""
if hasattr(i, 'dumps'):
return i.dumps()
elif escape:
return str(escape_latex(i))
return str(i)
def bold(s):
"""Returns the string bold.
Source: http://stackoverflow.com/a/16264094/2570866
:param s:
:type s: str
:return:
:rtype: str
"""
return r'\textbf{' + s + '}'
def italic(s):
"""Returns the string italicized.
Source: http://stackoverflow.com/a/16264094/2570866
:param s:
:type s: str
:return:
:rtype: str
"""
return r'\textit{' + s + '}'
def verbatim(s, delimiter='|'):
"""Returns the string verbatim.
:param s:
:param delimiter:
:type s: str
:type delimiter: str
:return:
:rtype: str
"""
return r'\verb' + delimiter + s + delimiter
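# --- Illustrative sketch, not part of the original module ---
# Quick sanity checks for the helpers above.
if __name__ == "__main__":
    print(escape_latex('5% of $10 & more'))      # 5\% of \$10 \& more
    print(fix_filename('abc.def.jpg'))           # {abc.def}.jpg
    print(dumps_list(['a', 'b'], token=', '))    # a, b
    print(bold('hi'), italic('there'), verbatim('x|y', delimiter='!'))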
``` |
{
"source": "JornWildt/PyGameWorld",
"score": 3
} |
#### File: PyGameWorld/Blueprint/BallMovementSystem.py
```python
import random
from .BallMovementComponent import BallMovementComponent
from Core.Physics.PhysicsComponent import PhysicsComponent
from Core.Physics.BodyComponent import BodyComponent
class BallMovementSystem:
def __init__(self, message_bus):
message_bus.subscribe('tile_collision', BallMovementSystem.on_tile_collision)
def on_tile_collision(game_environment, p):
entity = p[0]
tile = p[1]
ball = entity.get_component_of_type(BallMovementComponent)
if ball == None:
return
body = entity.get_component_of_type(BodyComponent)
phys = entity.get_component_of_type(PhysicsComponent)
if tile.tile_type.is_blocking:
xdir = random.randint(0,1)*2-1
ydir = random.randint(0,1)*2-1
if (tile.position[0] < body.position[0]):
xdir = -1
#ydir = 1
elif (tile.position[0] > body.position[0]):
xdir = -1
#ydir = 1
elif (tile.position[1] < body.position[1]):
#xdir = 1
ydir = -1
elif (tile.position[1] > body.position[1]):
#xdir = 1
ydir = -1
body.position = body.previous_position
body.position = (
body.previous_position[0] - 2*phys.velocity[0],
body.previous_position[1] - 2*phys.velocity[1],
body.previous_position[2] - 2*phys.velocity[2])
phys.velocity = (xdir * phys.velocity[0]/2, ydir * phys.velocity[1]/2, 0)
phys.acceleration = (
xdir * phys.acceleration[0],
ydir * phys.acceleration[1],
phys.acceleration[2])
def update(self, game_environment):
maxv = 0.2
for (body,phys,mov) in game_environment.entities_repository.get_components_of_types(BodyComponent, PhysicsComponent, BallMovementComponent):
if phys.velocity[0] > maxv or phys.velocity[0] < -maxv or phys.velocity[1] > maxv or phys.velocity[1] < -maxv or phys.velocity[2] > maxv or phys.velocity[2] < -maxv:
phys.acceleration = (0,0,0)
phys.velocity = (min(maxv, phys.velocity[0]), min(maxv, phys.velocity[1]), min(maxv, phys.velocity[2]))
elif random.randint(0,50) == 0:
phys.acceleration = (
random.randint(0,100)/10000 - 0.0050,
random.randint(0,100)/10000 - 0.0050,
0)
```
#### File: PyGameWorld/Blueprint/PlatformMovementComponent.py
```python
class PlatformMovementComponent:
def __init__(self, route, pos):
super().__init__()
self.route = route
self.start_position = pos
self.leg = 0
self.count = 0
```
#### File: PyGameWorld/Blueprint/PlatformMovementSystem.py
```python
from Core.Physics.PhysicsComponent import PhysicsComponent
from Core.Physics.BodyComponent import BodyComponent
from .PlatformMovementComponent import PlatformMovementComponent
from Blueprint.Constants import Constants
class PlatformMovementSystem:
def __init__(self, message_bus):
pass
def update(self, game_environment):
for (body,phys,plat) in game_environment.entities_repository.get_components_of_types(BodyComponent, PhysicsComponent, PlatformMovementComponent):
plat.count += 1
if plat.count > plat.route[plat.leg][2]:
plat.count = 0
plat.leg = plat.leg + 1
if plat.leg >= len(plat.route):
plat.leg = 0
body.position = plat.start_position
leg = plat.route[plat.leg]
vector = Constants.direction_vectors[leg[0]]
phys.velocity = (vector[0] * leg[1], vector[1] * leg[1], vector[2] * leg[1])
```
#### File: PyGameWorld/Blueprint/PlayerFactory.py
```python
from Core.ECS.Entity import Entity
from Core.SimpleComponents.NameComponent import NameComponent
from Core.Physics.BodyComponent import BodyComponent
from Core.Physics.PhysicsComponent import PhysicsComponent
from Core.Rendering.SpriteComponent import SpriteComponent
from .PlayerMovementComponent import PlayerMovementComponent
def build_a_player(name, x,y):
player = Entity([
NameComponent(name),
BodyComponent((x,y,1), (0.5,0.5,2)),
PhysicsComponent((0,0,0), (0,0,0)),
PlayerMovementComponent(),
SpriteComponent('player_3')
], False)
return player
```
#### File: PyGameWorld/Blueprint/PlayerMovementComponent.py
```python
class PlayerMovementComponent:
def __init__(self):
self.hit_tile = None
self.prev_direction = None
self.speed = 1
```
#### File: Blueprint/Scenes/Scene_A3_Builder.py
```python
from Core.Scene.Tile import Tile
from Core.Scene.TileType import *
from .Scene_A_Builder import Scene_A_Builder
import Blueprint.PlatformFactory as PlatformFactory
from Core.Messages.NewSceneMessage import NewSceneMessage
class Scene_A3_Builder(Scene_A_Builder):
def build_teleport_end(self, symbol, pos):
self.scene.place_cube(pos[0],pos[1],0, TileType.Floor, self.floor_wall_sprite)
self.scene.place_animated_cube(pos[0],pos[1],3, TileType.Space, self.teleport_sprite)
self.place_location_event_trigger((pos[0],pos[1],3), (0.5,0.5,0.5), 'new_scene', NewSceneMessage('Scene_A1', (10,5,1)))
def build_platform(self, symbol, pos):
if symbol[1] == '1':
self.scene.place_animated_cube(pos[0],pos[1],0, TileType.Space, self.lava_sprite)
self.place_location_event_trigger((pos[0],pos[1],0), (1,1,0.5), 'new_scene', NewSceneMessage('Scene_A3', self.player_start_pos))
route = [(4, 0.03, 2.5), (0, 0.03, 2.5)]
platform = PlatformFactory.build_a_platform('Platform', (pos[0],pos[1],2), route)
if symbol[1] == '2':
self.scene.place_animated_cube(pos[0],pos[1],0, TileType.Space, self.lava_sprite)
self.place_location_event_trigger((pos[0],pos[1],0), (1,1,0.5), 'new_scene', NewSceneMessage('Scene_A3', self.player_start_pos))
route = [(0, 0.03, 2.5), (4, 0.03, 2.5)]
platform = PlatformFactory.build_a_platform('Platform', (pos[0],pos[1],2), route)
if symbol[1] == '3':
self.scene.place_animated_cube(pos[0],pos[1],0, TileType.Space, self.lava_sprite)
self.place_location_event_trigger((pos[0],pos[1],0), (1,1,0.5), 'new_scene', NewSceneMessage('Scene_A3', self.player_start_pos))
route = [(4, 0.02, 4), (2, 0.04, 3), (0,0.02,4), (6,0.04,3)]
platform = PlatformFactory.build_a_platform('Platform', (pos[0],pos[1],1), route)
if symbol[1] == '4':
self.scene.place_animated_cube(pos[0],pos[1],0, TileType.Space, self.lava_sprite)
self.place_location_event_trigger((pos[0],pos[1],0), (1,1,0.5), 'new_scene', NewSceneMessage('Scene_A3', self.player_start_pos))
route = [(2,0.04,3), (0,0.02,4), (6, 0.04, 3), (4, 0.02, 4)]
platform = PlatformFactory.build_a_platform('Platform', (pos[0],pos[1],1), route)
if symbol[1] == '5':
self.scene.place_cube(pos[0],pos[1],0, TileType.Floor, self.floor_wall_sprite)
route = [(4,0.02,2), (0,0.02,2)]
platform = PlatformFactory.build_a_platform('Platform', (pos[0],pos[1],1), route)
self.game_environment.entities_repository.add_entity(platform)
def __init__(self, game_environment):
super().__init__(game_environment)
self.define_tile_builder('E', self.build_teleport_end)
self.player_start_pos = (3,3,2)
self.scene_map = '''
x X X X X X X X X X X X X X X X X X X X X X X X X X X X X X X X X X X X X X
x B * * * * * * . . . X * * * * * * * * * * * X P3* * * * * * * * B . . . X
x B B . P5. . * * * B X * * P1P1* * * * P1P1* X * * * * * * * * B B B . E X
x B B B . . . * . . B B B B B B B B B B B B B B * * * * * * * * B B B B # X
x B B . . . . * * * B X * * * * * P2P2* * * * B * * * * * * * * B B B . . X
x B * * * * * * . . . X * * * * * * * * * * * X * * * * P4* * * * B . . . X
x x x x x x x x x x x X x x x x x x x x x x x X x x x x x x x x x x x x x X
'''
```
#### File: PyGameWorld/Core/AssetsManager.py
```python
import pyganim
import json
import os
from .Rendering.SpriteSheet import SpriteSheet
from .Rendering.ExtPygAnimation import ExtPygAnimation
class AssetsManager:
def __init__(self, settings):
self.settings = settings
def __getitem__(self, name):
return self.assets[name]
def get(self, name):
return self.assets[name]
def declare(self, name, asset):
self.assets[name] = asset
def load_from_directory(self, dir):
self.assets = {}
self.assets_dir = dir
with open(dir + "/assets.json", encoding='utf-8-sig') as assets_file:
assets_json = json.load(assets_file)
for name in assets_json:
asset = assets_json[name]
if asset["type"] == "animation":
animation = self.load_animation(asset["file"], asset["rows"], asset["cols"])
self.assets[name] = animation
if asset["type"] == "spritesheet":
self.load_spritesheet(asset["file"], asset["content"])
def load_animation(self, file, rows, cols):
images = pyganim.getImagesFromSpriteSheet(os.path.join(self.assets_dir, file), rows=rows, cols=cols, rects=[])
# TODO: customizable delay
frames = list(zip(images, [100] * len(images)))
anim = pyganim.PygAnimation(frames)
anim.play()
return anim
# def load_spritesheets(self, assets_json):
# for name in assets_json:
# asset = assets_json[name]
# if asset["type"] == "spritesheet":
# self.load_spritesheet(asset["file"], asset["content"])
def load_spritesheet(self, file, content):
sheet = SpriteSheet(os.path.join(self.assets_dir, file))
for sprite_name in content:
if sprite_name in self.assets:
raise NameError("Repeated sprite name: " + sprite_name)
sprite = self.load_sprite_from_sheet(sheet, content[sprite_name])
self.assets[sprite_name] = sprite
# pyganim.getImagesFromSpriteSheet("Assets/Random/Ghost3D.png", rows=1, cols=1, rects=[])
def load_sprite_from_sheet(self, sheet, sprite_content):
if sprite_content["type"] == "image":
position = sprite_content["position"]
image = sheet.image_at(position)
return image
if sprite_content["type"] == "animation":
rectangle = sprite_content["rectangle"]
rows = sprite_content["rows"]
cols = sprite_content["cols"]
volume = sprite_content["volume"] if "volume" in sprite_content else None
return self.load_animation_from_sheet(sheet, rectangle, rows, cols, volume)
else:
raise NameError("Undefined asset type: " + sprite_content["type"])
def load_animation_from_sheet(self, sheet, rectangle, rows, cols, volume):
images =[]
for r in range(rows):
for c in range(cols):
sub_rectangle = (rectangle[0] + c * rectangle[2], rectangle[1] + r * rectangle[3], rectangle[2], rectangle[3])
image = sheet.image_at(sub_rectangle)
images.append(image)
frames = list(zip(images, [100] * len(images)))
anim = ExtPygAnimation(self.settings, frames, volume)
anim.play()
return anim
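# --- Illustrative sketch, not part of the original module ---
# A hypothetical assets.json matching what load_from_directory() parses;
# the file names and sprite names are placeholders.
# {
#   "ghost_walk": {"type": "animation", "file": "Ghost3D.png", "rows": 1, "cols": 4},
#   "main_sheet": {
#     "type": "spritesheet", "file": "tiles.png",
#     "content": {
#       "floor_tile": {"type": "image", "position": [0, 0, 64, 64]},
#       "lava": {"type": "animation", "rectangle": [0, 64, 64, 64], "rows": 1, "cols": 8}
#     }
#   }
# }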
```
#### File: Core/Rendering/DisplayComponent.py
```python
class DisplayComponent:
def __init__(self, display):
self.display = display
```
#### File: Core/Rendering/SpriteSheet.py
```python
import pygame;
class SpriteSheet:
def __init__(self, filename):
"""Load the image sprite sheet from file."""
try:
self.sheet = pygame.image.load(filename).convert_alpha()
except pygame.error as e:
print(f"Unable to load sprite sheet image: {filename}")
raise SystemExit(e)
self.sprites = {}
def image_at(self, rectangle, colorkey = None):
"""Load a specific image from a specific rectangle."""
# Loads image from x, y, x+offset, y+offset.
rect = pygame.Rect(rectangle)
image = pygame.Surface(rect.size, pygame.SRCALPHA)
image.blit(self.sheet, (0, 0), rect)
if colorkey is not None:
if colorkey == -1:
colorkey = image.get_at((0,0))
image.set_colorkey(colorkey, pygame.RLEACCEL)
return image
def define(self, name, sprite_rect):
r = pygame.Rect(sprite_rect)
image = self.image_at(r)
self.sprites[name] = image
def get(self, name):
return self.sprites[name]
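# --- Illustrative sketch, not part of the original module ---
# convert_alpha() requires an initialised display; the sheet path and
# rectangle below are hypothetical.
if __name__ == "__main__":
    pygame.init()
    pygame.display.set_mode((320, 240))
    sheet = SpriteSheet("Assets/tiles.png")
    sheet.define("floor", (0, 0, 64, 64))   # cache a named sub-image
    print(sheet.get("floor").get_size())    # (64, 64)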
```
#### File: Core/Scene/Scene.py
```python
import pygame as pygame
from .Tile import Tile
from .TileType import *
from ..Physics.BodyComponent import BodyComponent
class Scene:
def __init__(self, settings):
self.settings = settings
# Pixel multiplies for x/y/z directions
self.xmult = int(self.settings.map_tile_pixels/2)
self.ymult = int(self.settings.map_tile_pixels/4)
self.zmult = int(self.settings.map_tile_pixels/2)
# Viewport size - area (volume) of tiles (cubes) to render
self.xviewsize = settings.map_viewport_xsize
self.yviewsize = settings.map_viewport_ysize
self.zviewsize = settings.map_viewport_zsize
# Half viewport sized
self.xviewsize_2 = int(self.xviewsize/2)
self.yviewsize_2 = int(self.yviewsize/2)
self.zviewsize_2 = int(self.zviewsize/2) * 2
# These constants define size which part is cut out of the corners
# Top-left
self.corner_size_1 = int((self.xviewsize + self.yviewsize)/4) - 1
# Top-right and bottom-left
self.corner_size_2 = int((self.xviewsize + self.yviewsize)/2) - int(self.corner_size_1)/2
# Bottom-right
self.corner_size_3 = self.xviewsize + self.yviewsize - int((self.xviewsize + self.yviewsize)/4) - 1
# Center of window
self.window_x = int(self.settings.window_width/2)
self.window_y = int(self.settings.window_height/2)
def initialize(self, width, height, depth):
self.width = width
self.height = height
self.depth = depth
self.size = (self.width, self.height, self.depth)
# Tile map has optimized Z-first index
self.tile_map = [[[None for y in range(self.height)] for x in range(self.width)] for z in range(self.depth)]
self.trigger_map = [[[None for z in range(self.depth)] for y in range(self.height)] for x in range(self.width)]
def place_cube(self, x,y,z, tile_type, image):
self.tile_map[z][x][y] = Tile((x,y,z), tile_type, None, image)
def place_animated_cube(self, x,y,z, tile_type, sprite):
self.tile_map[z][x][y] = Tile((x,y,z), tile_type, sprite, None)
def start_frame(self):
#pass
self.items_index = [[[None for y in range(self.height)] for x in range(self.width)] for z in range(self.depth)]
def register_item(self, pos, size_box, sprite):
# x0 = int(pos[0] - size_box[0]/2)
# x1 = int(x0 + size_box[0]) + 1
# y0 = int(pos[1] - size_box[1]/2)
# y1 = int(y0 + size_box[1]) + 1
# z0 = int(pos[2] - size_box[2]/2)
# z1 = int(z0 + size_box[2]) + 1
x0 = int(pos[0] + 0.5)
x1 = int(x0+size_box[0] + 0.5)
y0 = int(pos[1] + 0.5)
y1 = int(y0+size_box[1] + 0.5)
z0 = int(pos[2])
z1 = int(pos[2]+size_box[2] + 0.5)
for x in range(x0,x1):
for y in range(y0,y1):
for z in range(z0,z1):
if x >= 0 and x < self.width and y >= 0 and y < self.height and z >= 0 and z < self.depth:
if self.items_index[z][x][y] == None:
self.items_index[z][x][y] = []
self.items_index[z][x][y].append(SceneItem(pos,(x-x0,y-y0,z-z0),sprite))
def get_tile_at(self, pos):
return self.tile_map[int(pos[2])][int(pos[0])][int(pos[1])]
def render(self, game_environment):
screen = game_environment.screen
center = game_environment.player_entity.get_component_of_type(BodyComponent).position
# Subtract 32 and 48 to get center of cube at (0,0,0) right at screen position 0,0
# (since sprites are offset at their top left corner)
xoffset = self.window_x - (center[0]+center[1]) * self.xmult - 32
yoffset = self.window_y -(center[1]-center[0]) * self.ymult + center[2] * self.zmult - 48
for z in range(max(0,int(center[2]-self.zviewsize_2)), min(int(center[2]+self.zviewsize_2),self.depth)):
# Take a reference to the z-index here (avoid looking it up too many times)
tile_map_z = self.tile_map[z]
for xx in range(self.xviewsize-1,-1,-1):
x = int(center[0]) - self.xviewsize_2 + xx
if x >= 0 and x < self.width:
# Take a reference to the z,x index here
tile_map_x = tile_map_z[x]
for yy in range(self.yviewsize):
if xx + yy > self.corner_size_1 and xx + yy < self.corner_size_3 and xx - yy > -self.corner_size_2 and yy - xx > -self.corner_size_2:
y = int(center[1]) - self.yviewsize_2 + yy
if y >= 0 and y < self.height:
tile = tile_map_x[y]
if tile != None:
xpos = (x+y) * self.xmult + xoffset# - 32
ypos = (y-x) * self.ymult - z * self.zmult + yoffset# - 48
if tile.image != None:
screen.blit(tile.image, (xpos,ypos))
elif tile.sprite != None:
tile.sprite.blit(screen, (xpos,ypos))
items = self.items_index[z][x][y]
for item in items if items != None else []:
# Items are supposed to be slices into cubes/tiles matching the map tiles, and
# each item is registered at a cube/tile location together with that cube's
# offset relative to the item's origin position.
ix = item.pos[0] + item.offset[0]
iy = item.pos[1] + item.offset[1]
iz = item.pos[2] + item.offset[2]
xpos = (ix+iy) * self.xmult + xoffset# - 32
ypos = (iy-ix) * self.ymult - iz * self.zmult + yoffset# - 48
item.sprite.blit(screen, (xpos,ypos), item.offset)
# Indicate tile (0,0,0) position with a 3x3 square
pygame.draw.rect(screen, (0,128,0,128), (xoffset-1+32,yoffset+48-1,3,3), 1)
# Indicate window (0,0,0) position with a 3x3 square
# pygame.draw.rect(screen, (0,128,0,128), (self.window_x-1,self.window_y-1,3,3), 1)
class SceneItem:
def __init__(self, pos, offset, sprite):
self.pos = pos
self.offset = offset
self.sprite = sprite
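# Illustrative note (not part of the original module): render() projects a grid
# cell (x, y, z) to screen space as
#   xpos = (x + y) * xmult + xoffset
#   ypos = (y - x) * ymult - z * zmult + yoffset
# e.g. with 64-pixel tiles (xmult=32, ymult=16, zmult=32) and zero offsets, the
# cube at (2, 3, 1) lands at xpos = (2+3)*32 = 160, ypos = (3-2)*16 - 32 = -16,
# i.e. up and to the right of the cube at the origin.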
```
#### File: Core/Scene/TileType.py
```python
class TileType:
def __init__(self, name, is_blocking):
self.name = name
self.is_blocking = is_blocking
TileType.Sea = TileType('sea', True)
TileType.Grass = TileType('grass', False)
TileType.Floor = TileType('floor', True)
TileType.Wall = TileType('wall', True)
TileType.Space = TileType('space', False)
```
#### File: Core/Utility/Numbers.py
```python
def clamp(minimum, value, maximum):
# Constrain value to the inclusive range [minimum, maximum].
return min(maximum, max(minimum, value))
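# --- Illustrative sketch, not part of the original module ---
if __name__ == "__main__":
    print(clamp(0, 5, 10))    # 5  (already inside the range)
    print(clamp(0, -3, 10))   # 0  (clamped up to the minimum)
    print(clamp(0, 42, 10))   # 10 (clamped down to the maximum)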
``` |
{
"source": "joro2404/ApacheDeployment",
"score": 4
} |
#### File: joro2404/ApacheDeployment/database.py
```python
import sqlite3
DB_NAME = 'calc.db'
conn = sqlite3.connect(DB_NAME)
conn.cursor().execute('''
CREATE TABLE IF NOT EXISTS calc
(
id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT NOT NULL,
num1 DOUBLE(1, 2) NOT NULL,
num2 DOUBLE(1, 2) NOT NULL,
result DOUBLE(1, 2) NOT NULL
)
''')
conn.commit()
class DB:
def __enter__(self):
self.conn = sqlite3.connect(DB_NAME)
return self.conn.cursor()
def __exit__(self, type, value, traceback):
self.conn.commit()
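# --- Illustrative sketch, not part of the original module ---
# The context manager commits on exit, so the insert below persists.
if __name__ == "__main__":
    with DB() as cursor:
        cursor.execute(
            'INSERT INTO calc(name, num1, num2, result) VALUES (?, ?, ?, ?)',
            ('add', 1.5, 2.5, 4.0))
    with DB() as cursor:
        print(cursor.execute('SELECT * FROM calc').fetchall())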
``` |
{
"source": "joro2404/DriveAI",
"score": 3
} |
#### File: DriveAI/app/auth.py
```python
from flask import Blueprint, render_template, request, redirect, url_for, flash
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import login_user, logout_user, login_required, current_user
from .models import User
from . import db, secret_key
from itsdangerous import URLSafeTimedSerializer, SignatureExpired
auth = Blueprint('auth', __name__)
serializer = URLSafeTimedSerializer(secret_key)
@auth.route('/login', methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
flash('You are already logged in!', 'danger')
return redirect(url_for('main.index'))
else:
if request.method == 'GET':
return render_template('login.html')
elif request.method == 'POST':
email = request.form.get('email')
password = request.form.get('password')
remember = True if request.form.get('remember') else False
user = User.query.filter_by(email=email).first()
if not user or not check_password_hash(user.password, password):
flash('Please check your login credentials!', 'danger')
return redirect(url_for('auth.login'))
if not user.is_confirmed:
flash('Please confirm your email!', 'warning')
return redirect(url_for('auth.login'))
login_user(user, remember=remember)
return redirect(url_for('main.index'))
@auth.route('/register', methods=['GET', 'POST'])
def register():
if current_user.is_authenticated:
flash('You are logged in!', 'danger')
return redirect(url_for('main.index'))
else:
if request.method == 'GET':
return render_template('register.html')
elif request.method == 'POST':
email = request.form.get('email')
username = request.form.get('username')
first_name = request.form.get('first_name')
last_name = request.form.get('last_name')
password = request.form.get('password')
confirm_password = request.form.get('password_confirm')
user = User.query.filter_by(email=email).first()
if user:
flash('Email address already registered!', 'danger')
return redirect(url_for('auth.register'))
if password != confirm_password:
                flash('Password mismatch!', 'danger')
return redirect(url_for('auth.register'))
new_user = User(id=None, username=username, first_name=first_name, last_name=last_name, email=email, password=generate_password_hash(password, method='sha256'), is_admin=False, is_confirmed=False, phone_number=None)
db.session.add(new_user)
db.session.commit()
return redirect(url_for('auth.login'))
@auth.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for('main.index'))
```
#### File: DriveAI/app/main.py
```python
from flask import Flask, render_template, request, redirect, url_for, Blueprint
main = Blueprint('main', __name__)
@main.route('/')
def index():
return render_template('index.html')
@main.route('/drivers')
def drivers():
return render_template('drivers.html')
@main.route('/statistics')
def statistics():
return render_template('statistics.html')
if __name__ == '__main__':
main.app.run()
``` |
{
"source": "joro2404/Programming_problems",
"score": 3
} |
#### File: Flask/intro/post.py
```python
from database import DB
class Post:
def __init__(self, id, name, author, content):
self.id = id
self.name = name
self.author = author
self.content = content
@staticmethod
def all():
with DB() as db:
rows = db.execute('SELECT * FROM posts').fetchall()
return [Post(*row) for row in rows]
@staticmethod
    def fid(id):
        with DB() as db:
            row = db.execute('SELECT * FROM posts WHERE id = ?', (id,)).fetchone()
            return Post(*row)
def create(self):
with DB() as db:
values = (self.name, self.author, self.content)
rows = db.execute('INSERT INTO posts(name, author, content) VALUES (?, ?, ?)', values)
return self
```
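A minimal usage sketch for the `Post` model above (illustrative only; it assumes the accompanying `database` module provides the `DB` context manager imported at the top, and that a `posts` table with `id`, `name`, `author` and `content` columns already exists):
```python
from post import Post

# Insert a post, then read every post back; Post.create() relies on the DB
# context manager committing on exit.
Post(None, 'Hello', 'joro', 'First post').create()
for post in Post.all():
    print(post.id, post.name, post.author)
```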
#### File: python/OOP/multiple_inheritance.py
```python
class TeamMember(object):
def __init__(self, name, uid):
self.name = name
self.uid = uid
class Leader(object):
def __init__(self, skill, jobtitle):
self.skill = skill
self.jobtitle = jobtitle
class TeamLeader(TeamMember, Leader):
def __init__(self, name, uid, skill, jobtitle, exp):
self.exp = exp
TeamMember.__init__(self, name, uid)
Leader.__init__(self, skill, jobtitle)
print("Name: {}, Skill: {}, Exp: {}".format(self.name, self.skill, self.exp))
TL = TeamLeader('Jake', 1001, "Coaching", 'Scrum Master', 5)
```
#### File: python/OOP/override.py
```python
class base(object):
def base_func(self):
print('Method of base class')
class child(base):
def base_func1(self):
print('Method of child class')
super(child, self).base_func()
class next_child(child):
def base_func(self):
print('Method of next_child class')
super(next_child, self).base_func()
obj = next_child()
obj.base_func()
```
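Because `child` never overrides `base_func`, the `super()` call inside `next_child.base_func` resolves straight to `base.base_func`; a one-line check (illustrative, meant to be appended to the script above) makes the lookup order explicit:
```python
# Method resolution order: next_child -> child -> base -> object
print(next_child.__mro__)
```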
#### File: python/OOP/students.py
```python
class Person :
def __init__(self, address):
self.address = address
class Student(Person) :
def __init__(self, id_in_class, marks, computer):
self.id_in_class = id_in_class
self.marks = marks
self.computer = computer
    def addMark(self, mark):
        self.marks.append(mark)
    def getAverage(self):
        return sum(self.marks) / len(self.marks)
p = Person("bul. Kopenhagen 38")
marks = [3, 6, 6, 5]
s = Student(10, marks, "wtf")
s.addMark(6)
print(s.getAverage())
```
#### File: python/OOP/vector.py
```python
import math
class Vector:
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
def _coords(self):
return (self.x, self.y, self.z)
def length(self):
return sum(_ ** 2 for _ in self._coords()) ** 0.5
v1 = Vector(1.0, 2.0, 3.0)
v2 = Vector(4.0, 5.0, 6.0)
v3 = Vector(7.0, 8.0, 9.0)
print(*list(map(Vector.length, [v1, v2, v3])), sep = '\n')
```
#### File: python/zadachi/hello.py
```python
class Human:
    def __init__(self, personal_id, height, color_of_eyes, name, age, interest):
        self.personal_id = personal_id
        self.height = height
        self.color_of_eyes = color_of_eyes
        self.name = name
        self.age = age
        self.interest = interest
    def getOlder(self):
        self.age += 1
        return self.age
    def meetLove(self, humans):
        for other in humans:
            if self.interest == other.interest:
                return "Love of life met!\n"
class Profession:
    def __init__(self, type_of_profession, skill):
        self.type_of_profession = type_of_profession
        self.skill = skill
    def getExp(self):
        self.skill += 1 / 40
``` |
{
"source": "joro75/mytoyota",
"score": 2
} |
#### File: mytoyota/mytoyota/controller.py
```python
from __future__ import annotations
from datetime import datetime
from http import HTTPStatus
import logging
from typing import Any
import httpx
from mytoyota.const import (
BASE_HEADERS,
CUSTOMERPROFILE,
ENDPOINT_AUTH,
SUPPORTED_REGIONS,
TIMEOUT,
TOKEN,
TOKEN_DURATION,
TOKEN_VALID_URL,
UUID,
)
from mytoyota.exceptions import ToyotaApiError, ToyotaInternalError, ToyotaLoginError
from mytoyota.utils.logs import censor_dict
from mytoyota.utils.token import is_valid_token
_LOGGER: logging.Logger = logging.getLogger(__package__)
class Controller:
"""Controller class."""
_token: str | None = None
_token_expiration: datetime | None = None
def __init__(
self,
locale: str,
region: str,
username: str,
password: str,
uuid: str | None = None,
) -> None:
self._locale = locale
self._region = region
self._username = username
self._password = password
self._uuid = uuid
@property
def _auth_endpoint(self) -> str:
"""Returns auth endpoint."""
return SUPPORTED_REGIONS[self._region].get(ENDPOINT_AUTH)
@property
def _auth_valid_endpoint(self) -> str:
"""Returns token is valid endpoint."""
return SUPPORTED_REGIONS[self._region].get(TOKEN_VALID_URL)
@property
def uuid(self) -> str | None:
"""Return uuid."""
return self._uuid
async def first_login(self) -> None:
"""Perform first login."""
await self._update_token()
@staticmethod
def _has_expired(creation_dt: datetime, duration: int) -> bool:
"""Checks if an specified token/object has expired"""
_LOGGER.debug("Checking if token has expired...")
return datetime.now().timestamp() - creation_dt.timestamp() > duration
async def _update_token(self, retry: bool = True) -> None:
"""Performs login to toyota servers and retrieves token and uuid for the account."""
# Cannot authenticate with aiohttp (returns 415),
# but it works with httpx.
_LOGGER.debug("Getting new token...")
async with httpx.AsyncClient() as client:
response = await client.post(
self._auth_endpoint,
headers={"X-TME-LC": self._locale},
json={"username": self._username, "password": self._password},
)
if response.status_code == HTTPStatus.OK:
result: dict[str, Any] = response.json()
if TOKEN not in result or UUID not in result[CUSTOMERPROFILE]:
raise ToyotaLoginError("Could not get token or UUID from result")
_LOGGER.debug("Extracting token from result")
token = result.get(TOKEN)
if is_valid_token(token):
_LOGGER.debug("Token is the correct format")
self._uuid = result[CUSTOMERPROFILE].get(UUID)
self._token = token
_LOGGER.debug("Saving token and uuid")
self._token_expiration = datetime.now()
elif response.status_code == HTTPStatus.BAD_GATEWAY:
if retry:
await self._update_token(retry=False)
return
raise ToyotaApiError("Servers are overloaded, try again later")
else:
raise ToyotaLoginError(
f"Login failed, check your credentials! {response.text}"
)
async def _is_token_valid(self, retry: bool = True) -> bool:
"""Checks if token is valid"""
_LOGGER.debug("Checking if token is still valid...")
async with httpx.AsyncClient() as client:
response = await client.post(
self._auth_valid_endpoint,
json={TOKEN: self._token},
)
if response.status_code == HTTPStatus.OK: # pylint: disable=no-else-return
result: dict[str, Any] = response.json()
if result.get("valid") is True:
_LOGGER.debug("Token is still valid")
return True
_LOGGER.debug("Token is not valid anymore")
return False
elif response.status_code == HTTPStatus.BAD_GATEWAY:
if retry:
return await self._is_token_valid(retry=False)
raise ToyotaApiError("Servers are overloaded, try again later")
else:
raise ToyotaLoginError(
f"Error when trying to check token: {response.text}"
)
async def request( # pylint: disable=too-many-branches
self,
method: str,
endpoint: str,
base_url: str | None = None,
body: dict[str, Any] | None = None,
params: dict[str, Any] | None = None,
headers: dict[str, Any] | None = None,
) -> dict[str, Any] | list[Any] | None:
"""Shared request method"""
if headers is None:
headers = {}
if method not in ("GET", "POST", "PUT", "DELETE"):
raise ToyotaInternalError("Invalid request method provided")
if not self._token or self._has_expired(self._token_expiration, TOKEN_DURATION):
if not await self._is_token_valid():
await self._update_token()
if base_url:
url = SUPPORTED_REGIONS[self._region].get(base_url) + endpoint
else:
url = endpoint
_LOGGER.debug("Constructing additional headers...")
headers.update(
{
"X-TME-LC": self._locale,
"X-TME-LOCALE": self._locale,
"X-TME-TOKEN": self._token,
"X-TME-APP-VERSION": "4.10.0",
}
)
if method in ("GET", "POST"):
headers.update(
{
"Cookie": f"iPlanetDirectoryPro={self._token}",
"uuid": self.uuid,
}
)
_LOGGER.debug(f"Additional headers: {censor_dict(headers.copy())}")
# Cannot authenticate with aiohttp (returns 415),
# but it works with httpx.
_LOGGER.debug("Creating client...")
_LOGGER.debug(f"Base headers: {BASE_HEADERS} - Timeout: {TIMEOUT}")
async with httpx.AsyncClient(headers=BASE_HEADERS, timeout=TIMEOUT) as client:
_LOGGER.debug(
f"Body: {censor_dict(body) if body else body} - Parameters: {params}"
)
response = await client.request(
method, url, headers=headers, json=body, params=params
)
if response.status_code == HTTPStatus.OK:
result = response.json()
elif response.status_code == HTTPStatus.NO_CONTENT:
# This prevents raising or logging an error
# if the user have not setup Connected Services
result = None
_LOGGER.debug("Connected services is disabled")
elif response.status_code == HTTPStatus.INTERNAL_SERVER_ERROR:
response = response.json()
if "code" in response:
error = ToyotaApiError(
"Internal server error occurred! Code: "
+ response.get("code")
+ " - "
+ response.get("message"),
)
else:
error = ToyotaApiError(
"Internal server error occurred! - " + response
)
raise error
elif response.status_code == HTTPStatus.BAD_GATEWAY:
raise ToyotaApiError("Servers are overloaded, try again later")
elif response.status_code == HTTPStatus.SERVICE_UNAVAILABLE:
raise ToyotaApiError("Servers are temporarily unavailable")
else:
raise ToyotaApiError(
"HTTP: " + str(response.status_code) + " - " + response.text
)
return result
```
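A rough sketch of how the `Controller` above could be driven (illustrative only; the locale, region and credentials below are placeholders, and real values depend on the MyT account and the regions listed in `SUPPORTED_REGIONS`):
```python
import asyncio

from mytoyota.controller import Controller


async def demo() -> None:
    # Placeholder locale/region/credentials -- substitute real account values.
    controller = Controller(
        locale="da-dk",
        region="europe",
        username="[email protected]",
        password="********",
    )
    await controller.first_login()  # fetches and stores token + uuid
    print(controller.uuid)


asyncio.run(demo())
```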
#### File: mytoyota/models/hvac.py
```python
from __future__ import annotations
from typing import Any
from mytoyota.models.data import VehicleData
def get_attr_in_dict(data: dict[str, float], attr: str) -> float | None:
"""Get a specific attribute from a dict"""
return data.get(attr)
class Hvac(VehicleData):
"""HVAC data model."""
def __init__(self, data: dict[str, Any], legacy: bool = False) -> None:
        # Support the legacy method. Toyota seems to be changing their API for
        # newer cars, though not many cars seem to use the new method yet.
        # This option enables support for older cars.
super().__init__(data)
self.legacy = legacy
@property
def current_temperature(self) -> float | None:
"""Current temperature."""
if self.legacy:
return self._data.get("InsideTemperature")
return get_attr_in_dict(
self._data.get("currentTemperatureIndication", {}), "value"
)
@property
def target_temperature(self) -> float | None:
"""Target temperature."""
if self.legacy:
return self._data.get("SettingTemperature")
return get_attr_in_dict(self._data.get("targetTemperature", {}), "value")
@property
def started_at(self) -> str | None:
"""Hvac started at."""
if self.legacy:
return None
return self._data.get("startedAt")
@property
def status(self) -> str | None:
"""Hvac status."""
if self.legacy:
return None
return self._data.get("status")
@property
def type(self) -> str | None:
"""Hvac type."""
if self.legacy:
return None
return self._data.get("type")
@property
def duration(self) -> str | None:
"""Hvac duration."""
if self.legacy:
return None
return self._data.get("duration")
@property
def options(self) -> dict | list | None:
"""Hvac options."""
if self.legacy:
return None
return self._data.get("options")
@property
def command_id(self) -> str | int | None:
"""Hvac command id."""
if self.legacy:
return None
return self._data.get("commandId")
@property
def front_defogger_is_on(self) -> bool | None:
"""If the front defogger is on."""
if self.legacy:
return self._data.get("FrontDefoggerStatus") == 1
return None
@property
def rear_defogger_is_on(self) -> bool | None:
"""If the rear defogger is on."""
if self.legacy:
return self._data.get("RearDefoggerStatus") == 1
return None
@property
def blower_on(self) -> int | None:
"""Hvac blower setting."""
if self.legacy:
return self._data.get("BlowerStatus")
return None
@property
def last_updated(self) -> str | None:
"""Hvac last updated."""
if self.legacy:
return None
return get_attr_in_dict(
self._data.get("currentTemperatureIndication", {}), "timestamp"
)
```
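The `Hvac` wrapper above just reads keys out of the raw payload; a small sketch (mirroring the fixtures used in the tests later in this dump) shows both the current and the legacy formats being consumed:
```python
from mytoyota.models.hvac import Hvac

hvac = Hvac(
    {
        "currentTemperatureIndication": {"value": 22},
        "targetTemperature": {"value": 21},
    }
)
print(hvac.current_temperature, hvac.target_temperature)  # 22 21

legacy = Hvac({"InsideTemperature": 22, "SettingTemperature": 21}, legacy=True)
print(legacy.current_temperature, legacy.target_temperature)  # 22 21
```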
#### File: mytoyota/models/location.py
```python
from __future__ import annotations
from mytoyota.models.data import VehicleData
class ParkingLocation(VehicleData):
"""Parking Location data model."""
@property
def latitude(self) -> float:
"""Latitude."""
return float(self._data.get("lat", 0.0))
@property
def longitude(self) -> float:
"""Longitude."""
return float(self._data.get("lon", 0.0))
@property
def timestamp(self) -> int | None:
"""Timestamp."""
return self._data.get("timestamp")
```
#### File: mytoyota/tests/test_hvac.py
```python
from mytoyota.models.hvac import Hvac
# pylint: disable=no-self-use
class TestHvac:
"""pytest functions to test Hvac"""
@staticmethod
def _create_example_data():
"""Create hvac with predefined data"""
return Hvac(
{
"currentTemperatureIndication": {
"timestamp": "2020-10-16T03:50:15Z",
"unit": "string",
"value": 22,
},
"targetTemperature": {
"timestamp": "2020-10-16T03:50:15Z",
"unit": "string",
"value": 21,
},
"startedAt": "",
"status": "",
"type": "",
"duration": 1,
"options": {
"frontDefogger": "",
"frontDriverSeatHeater": "",
"frontPassengerSeatHeater": "",
"mirrorHeater": "",
"rearDefogger": "",
"rearDriverSeatHeater": "",
"rearPassengerSeatHeater": "",
"steeringHeater": "",
},
"commandId": "",
}
)
@staticmethod
def _create_example_legacy_data():
"""Create legacy hvac with predefined data"""
return Hvac(
{
"BlowerStatus": 0,
"FrontDefoggerStatus": 0,
"InsideTemperature": 22,
"LatestAcStartTime": "2020-10-16T03:50:15Z",
"RearDefoggerStatus": 0,
"RemoteHvacMode": 0,
"RemoteHvacProhibitionSignal": 1,
"SettingTemperature": 21,
"TemperatureDisplayFlag": 0,
"Temperaturelevel": 29,
},
legacy=True,
)
def test_hvac(self):
"""Test Hvac"""
hvac = self._create_example_data()
assert hvac.legacy is False
assert hvac.current_temperature == 22
assert hvac.target_temperature == 21
assert hvac.started_at == ""
assert hvac.status == ""
assert hvac.type == ""
assert hvac.duration == 1
assert hvac.command_id == ""
assert isinstance(hvac.options, dict)
assert hvac.options == {
"frontDefogger": "",
"frontDriverSeatHeater": "",
"frontPassengerSeatHeater": "",
"mirrorHeater": "",
"rearDefogger": "",
"rearDriverSeatHeater": "",
"rearPassengerSeatHeater": "",
"steeringHeater": "",
}
assert hvac.last_updated == "2020-10-16T03:50:15Z"
assert hvac.front_defogger_is_on is None
assert hvac.rear_defogger_is_on is None
assert hvac.blower_on is None
def test_hvac_legacy(self):
"""Test legacy Hvac"""
hvac = self._create_example_legacy_data()
assert hvac.legacy is True
assert hvac.current_temperature == 22
assert hvac.target_temperature == 21
assert hvac.blower_on == 0
assert hvac.front_defogger_is_on is False
assert hvac.rear_defogger_is_on is False
assert hvac.last_updated is None
assert hvac.started_at is None
assert hvac.status is None
assert hvac.type is None
assert hvac.duration is None
assert hvac.options is None
assert hvac.command_id is None
def test_hvac_no_data(self):
"""Test Hvac with no initialization data"""
hvac = Hvac({})
assert hvac.legacy is False
assert hvac.current_temperature is None
assert hvac.target_temperature is None
assert hvac.started_at is None
assert hvac.status is None
assert hvac.type is None
assert hvac.duration is None
assert hvac.command_id is None
assert hvac.options is None
assert hvac.last_updated is None
```
#### File: mytoyota/tests/test_sensors.py
```python
import json
import os
from mytoyota.models.sensors import (
Door,
Doors,
Key,
Light,
Lights,
Sensors,
Window,
Windows,
)
# pylint: disable=no-self-use
class TestSensors: # pylint: disable=too-many-public-methods
"""pytest functions to test Sensors"""
@staticmethod
def _load_from_file(filename: str):
"""Load a data structure from the specified JSON filename, and
return it."""
with open(filename, encoding="UTF-8") as json_file:
return json.load(json_file)
def test_hood(self):
"""Test hood"""
hood = Door({"warning": False, "closed": True})
assert hood.warning is False
assert hood.closed is True
assert hood.locked is None
def test_hood_no_data(self):
"""Test hood with no initialization data"""
hood = Door({})
assert hood.warning is None
assert hood.closed is None
assert hood.locked is None
@staticmethod
def _create_example_door():
"""Create a door with predefined data"""
return Door({"warning": False, "closed": True, "locked": False})
def test_door(self):
"""Test door"""
door = self._create_example_door()
assert door.warning is False
assert door.closed is True
assert door.locked is False
def test_door_no_data(self):
"""Test door with no initialization data"""
door = Door({})
assert door.warning is None
assert door.closed is None
assert door.locked is None
def test_doors(self):
"""Test Doors"""
doors = {
"warning": False,
"driverSeatDoor": {"warning": False, "closed": True, "locked": False},
"passengerSeatDoor": {"warning": False, "closed": True, "locked": False},
"rearRightSeatDoor": {"warning": False, "closed": True, "locked": False},
"rearLeftSeatDoor": {"warning": False, "closed": True, "locked": False},
"backDoor": {"warning": False, "closed": True, "locked": False},
}
doors = Doors(doors)
assert doors.warning is False
assert isinstance(doors.driver_seat, Door)
assert isinstance(doors.passenger_seat, Door)
assert isinstance(doors.leftrear_seat, Door)
assert isinstance(doors.rightrear_seat, Door)
assert isinstance(doors.trunk, Door)
def test_doors_no_data(self):
"""Test Windows with no initialization data"""
doors = Doors({})
assert doors.warning is None
assert isinstance(doors.driver_seat, Door)
assert isinstance(doors.passenger_seat, Door)
assert isinstance(doors.leftrear_seat, Door)
assert isinstance(doors.rightrear_seat, Door)
assert isinstance(doors.trunk, Door)
@staticmethod
def _create_example_window():
"""Create a window with predefined data"""
return Window({"warning": False, "state": "close"})
def test_window(self):
"""Test window"""
window = self._create_example_window()
assert window.warning is False
assert window.state == "close"
def test_window_no_data(self):
"""Test window with no initialization data"""
window = Window({})
assert window.warning is None
assert window.state is None
def test_windows(self):
"""Test Windows"""
windows = {
"warning": False,
"driverSeatWindow": {"warning": False, "state": "close"},
"passengerSeatWindow": {"warning": False, "state": "close"},
"rearRightSeatWindow": {"warning": False, "state": "close"},
"rearLeftSeatWindow": {"warning": False, "state": "close"},
}
windows = Windows(windows)
assert windows.warning is False
assert isinstance(windows.driver_seat, Window)
assert isinstance(windows.passenger_seat, Window)
assert isinstance(windows.rightrear_seat, Window)
assert isinstance(windows.leftrear_seat, Window)
def test_windows_no_data(self):
"""Test Windows with no initialization data"""
windows = Windows({})
assert windows.warning is None
assert isinstance(windows.driver_seat, Window)
assert isinstance(windows.passenger_seat, Window)
assert isinstance(windows.rightrear_seat, Window)
assert isinstance(windows.leftrear_seat, Window)
@staticmethod
def _create_example_light():
"""Create a example light with predefined data."""
return Light({"warning": False, "off": True})
def test_light(self):
"""Test light"""
light = self._create_example_light()
assert light.warning is False
assert light.off is True
def test_light_no_data(self):
"""Test light with no initialization data"""
light = Light({})
assert light.warning is None
assert light.off is None
def test_lights(self):
"""Test ligts"""
lights = {
"warning": False,
"headLamp": {"warning": False, "off": True},
"tailLamp": {"warning": False, "off": True},
"hazardLamp": {"warning": False, "off": True},
}
lights = Lights(lights)
assert lights.warning is False
assert isinstance(lights.headlights, Light)
assert isinstance(lights.taillights, Light)
assert isinstance(lights.hazardlights, Light)
def test_lights_no_data(self):
"""Test Lights with no initialization data"""
lights = Lights({})
assert lights.warning is None
assert isinstance(lights.headlights, Light)
assert isinstance(lights.taillights, Light)
assert isinstance(lights.hazardlights, Light)
def test_key(self):
"""Test key"""
key = Key({"warning": False, "inCar": True})
assert key.warning is False
assert key.in_car is True
def test_key_no_data(self):
"""Test key with no initialization data"""
key = Key({})
assert key.warning is None
assert key.in_car is None
def test_sensors(self):
"""Test sensors"""
data_files = os.path.join(os.path.curdir, "tests", "data")
fixture = self._load_from_file(
os.path.join(data_files, "vehicle_JTMW1234565432109_status.json")
)
sensors = Sensors(fixture.get("protectionState"))
assert sensors.overallstatus == "OK"
assert sensors.last_updated == "2021-10-12T15:22:53Z"
assert isinstance(sensors.doors, Doors)
assert sensors.doors.driver_seat.warning is False
assert sensors.doors.driver_seat.closed is True
assert sensors.doors.driver_seat.locked is True
assert isinstance(sensors.hood, Door)
assert sensors.hood.warning is False
assert sensors.hood.closed is True
assert sensors.hood.locked is None
assert isinstance(sensors.lights, Lights)
assert sensors.lights.headlights.warning is False
assert sensors.lights.headlights.off is True
assert isinstance(sensors.windows, Windows)
assert sensors.windows.passenger_seat.warning is False
assert sensors.windows.passenger_seat.state == "close"
assert isinstance(sensors.key, Key)
assert isinstance(sensors.raw_json, dict)
assert sensors.raw_json == fixture.get("protectionState")
``` |
{
"source": "joro75/pre-commit-hooks",
"score": 3
} |
#### File: pre-commit-hooks/pre_commit_hooks/check_successful_c_msbuild.py
```python
import argparse
import datetime
import xml.etree.ElementTree as ET
from pathlib import Path
from typing import Dict
from typing import List
from typing import Optional
from typing import Sequence
from typing import Set
from typing import Tuple
class DetectedProblem:
"""Class that contains a single instance of a detected problem"""
def __init__(
self, buildtype: str, project: str,
build: bool = True, outdated: bool = False,
):
self.buildtype = buildtype
self.project = project
self.build = build
self.outdated = outdated
def __hash__(self) -> int:
return hash((self.buildtype, self.project, self.build, self.outdated))
def __eq__(self, other: object) -> bool:
if not isinstance(other, type(self)):
return NotImplemented
return (
self.buildtype == other.buildtype and
self.project == other.project and
self.build == other.build and
self.outdated == other.outdated
)
def report(self) -> None:
"""Reports the problem that is detected"""
if not self.build:
print(
f'Unsuccessful build for {self.buildtype} '
f'in {self.project}.',
)
if self.outdated:
print(
f'Build for {self.buildtype} in {self.project} '
f'is older than the file.',
)
def get_file_modified_time(filename: Path) -> datetime.datetime:
"""Determine the file modified time of the passed file."""
return datetime.datetime.fromtimestamp(filename.stat().st_mtime)
def get_included_files_from_project(project_file: Path) -> List[Path]:
"""Gets a list of all the files that are included by the passed
projectfile."""
files = []
# Load the file
tree = ET.parse(str(project_file))
root = tree.getroot()
if root:
namespace = 'http://schemas.microsoft.com/developer/msbuild/2003'
ns = {'msbuild': namespace}
# Use the XPath expression to find all nodes
# with an 'Include' attribute
items = root.findall(
'./msbuild:ItemGroup/'
'*[@Include]', ns,
)
for item in items:
if item.tag != f'{{{namespace}}}None':
include_file = item.attrib['Include']
if include_file:
files.append(project_file.parent.joinpath(include_file))
return files
def build_directory_check_list(files: List[Path]) -> Dict[
Path, List[Tuple[Path, datetime.datetime]],
]:
"""Builds the list of directories that should be checked based on
the passed list of files."""
dirs: Dict[Path, List[Tuple[Path, datetime.datetime]]] = {}
for file in files:
if file.exists():
file_date = get_file_modified_time(file)
for checkdir in file.parents:
# Retrieve the directory from the dictionary
# Which is a list of file/change-date pairs
dir_data = dirs.get(checkdir, [])
dir_data.append((file, file_date))
dirs[checkdir] = dir_data
return dirs
def build_project_check_list(
dirs: Dict[
Path, List[
Tuple[
Path,
datetime.datetime,
]
],
],
) -> Dict[
Path, datetime.datetime,
]:
"""Builds the list of the MS VS project files that should be checked
based on the passed list of files."""
projects: Dict[Path, datetime.datetime] = {}
for dir in dirs:
if dirs[dir]:
for project_file in dir.glob('*.vcxproj'):
included_files = get_included_files_from_project(project_file)
if included_files:
for filename, change_date in dirs[dir]:
if filename in included_files:
date_check = projects.get(
project_file,
                                datetime.datetime(1900, 1, 1),
)
if change_date > date_check:
projects[project_file] = change_date
return projects
def check_if_projects_build(
projects: Dict[Path, datetime.datetime],
buildtypes: List[str],
) -> Set[DetectedProblem]:
"""Checks for the passed list of projects and build types if
the output is build."""
problems: Set[DetectedProblem] = set()
for project_file in projects:
file_change_date = projects[project_file]
for build in buildtypes:
dir = project_file.parent
failed_files = dir.glob(f'**/{build}/*.tlog/unsuccessfulbuild')
for buildfile in failed_files:
# The project name is the stem (without the .tlog)
# of the parent directory
project = buildfile.parent.stem
problems.add(
DetectedProblem(
build, project, build=False,
),
)
latest_files = dir.glob(f'**/{build}/*.tlog/*.lastbuildstate')
for latest_file in latest_files:
build_date = get_file_modified_time(latest_file)
if build_date <= file_change_date:
# The project name is the stem (without the .tlog)
# of the parent directory
project = latest_file.parent.stem
problems.add(
DetectedProblem(
build, project, outdated=True,
),
)
return problems
def check_builds_for_files(files: List[Path], buildtypes: List[str]) -> int:
"""Check if for the passed files the passed buildtypes are
successfully build."""
dirs = build_directory_check_list(files)
projects = build_project_check_list(dirs)
problems = check_if_projects_build(projects, buildtypes)
retval = 0
for problem in problems:
problem.report()
retval += 1
return retval
def main(argv: Optional[Sequence[str]] = None) -> int:
parser = argparse.ArgumentParser()
parser.add_argument('filenames', nargs='*', help='Filenames to check.')
parser.add_argument(
'--buildtype', action='append',
help='Build types that should be checked.',
)
args = parser.parse_args(argv)
buildtypes = list(args.buildtype or ('Release',))
files = []
curdir = Path()
for filename in args.filenames:
files.append(curdir.joinpath(filename))
problem_count = check_builds_for_files(
files,
buildtypes,
)
return problem_count
if __name__ == '__main__': # pragma: no cover
exit(main())
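# Example invocation (illustrative): check a couple of source files against their
# Visual Studio Release and Debug build outputs:
#   python check_successful_c_msbuild.py --buildtype Release --buildtype Debug src/foo.cpp
# The exit code is the number of detected problems, so a pre-commit run fails
# whenever any project is unbuilt or out of date.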
``` |
{
"source": "jorobledo/StratoPy",
"score": 3
} |
#### File: StratoPy/stratopy/core.py
```python
import math
import numpy as np
def geo2grid(lat, lon, nc):
# Apply scale and offset
xscale, xoffset = nc["x"].scale_factor, nc["x"].add_offset
yscale, yoffset = nc["y"].scale_factor, nc["y"].add_offset
x, y = latlon2xy(lat, lon)
col = (x - xoffset) / xscale
lin = (y - yoffset) / yscale
return int(lin), int(col)
def latlon2xy(lat, lon):
# goes_imagery_projection:semi_major_axis
req = 6378137 # meters
# goes_imagery_projection:inverse_flattening
# invf = 298.257222096
# goes_imagery_projection:semi_minor_axis
rpol = 6356752.31414 # meters
e = 0.0818191910435
# goes_imagery_projection:perspective_point_height
# + goes_imagery_projection:semi_major_axis
H = 42164160 # meters
# goes_imagery_projection: longitude_of_projection_origin
lambda0 = -1.308996939
# Convert to radians
latRad = lat * (math.pi / 180)
lonRad = lon * (math.pi / 180)
# (1) geocentric latitude
Phi_c = math.atan(((rpol ** 2) / (req ** 2)) * math.tan(latRad))
# (2) geocentric distance to the point on the ellipsoid
rc = rpol / (
math.sqrt(1 - ((e ** 2) * (math.cos(Phi_c) * math.cos(Phi_c))))
)
# (3) sx
sx = H - (rc * math.cos(Phi_c) * math.cos(lonRad - lambda0))
# (4) sy
sy = -rc * math.cos(Phi_c) * math.sin(lonRad - lambda0)
# (5)
sz = rc * math.sin(Phi_c)
# x,y
x = math.asin((-sy) / math.sqrt((sx * sx) + (sy * sy) + (sz * sz)))
y = math.atan(sz / sx)
return x, y
def scan2sat(x, y, lon0=-75.0, Re=6378000.0, Rp=6356000.0, h=3600000.0):
"""
    Transforms geostationary scan coordinates x, y into Cartesian
    coordinates sx, sy, sz with origin at the satellite.
    Based on section 5.2.8.1 of PUG3.
    Parameters
    ----------
    x : float, float arr numpy.ma.core.MaskedArray
        horizontal coordinate, in radians
    y : float, float arr numpy.ma.core.MaskedArray
        vertical coordinate, in radians. Parallel to the Earth's axis
    lon0 : float
        satellite longitude and origin of the planar coordinate system
    Re: float
        equatorial radius, in m
    Rp: float
        polar radius, in m
    h: float
        satellite height above the surface, in m
    Returns
    -------
    sx : float, float arr
        coordinate towards the centre of the Earth
    sy : float, float arr
        horizontal coordinate
    sz : float, float arr
        vertical coordinate
"""
    if not (
        isinstance(x, np.ma.MaskedArray) and isinstance(y, np.ma.MaskedArray)
    ):
        x = np.ma.MaskedArray(x)
        y = np.ma.MaskedArray(y)
        # print("input converted to masked arrays")
mask = x.mask
    H = Re + h  # orbital radius of the satellite
a = np.sin(x) ** 2 + np.cos(x) ** 2 * (
np.cos(y) ** 2 + (np.sin(y) * Re / Rp) ** 2
)
b = -2 * H * np.cos(x) * np.cos(y)
c = H ** 2 - Re ** 2
aux = b ** 2 - 4 * a * c
rs = np.zeros(aux.shape)
sx = np.ma.MaskedArray(np.zeros(aux.shape), mask)
sy = np.ma.MaskedArray(np.zeros(aux.shape), mask)
sz = np.ma.MaskedArray(np.zeros(aux.shape), mask)
rs[aux >= 0] = -(b[aux >= 0] + np.sqrt(aux[aux >= 0])) / (2 * a[aux >= 0])
sx[aux >= 0] = rs[aux >= 0] * np.cos(x[aux >= 0]) * np.cos(y[aux >= 0])
sy[aux >= 0] = -rs[aux >= 0] * np.sin(x[aux >= 0])
sz[aux >= 0] = rs[aux >= 0] * np.cos(x[aux >= 0]) * np.sin(y[aux >= 0])
return sx, sy, sz
def sat2latlon(
sx, sy, sz, lon0=-75.0, Re=6378000.0, Rp=6356000.0, h=3600000.0
):
"""
    Transforms Cartesian coordinates sx, sy, sz with origin at the
    satellite into latitude/longitude coordinates.
    Based on section 5.2.8.1 of PUG3.
    Parameters
    ----------
    sx : float, float arr
        coordinate towards the centre of the Earth
    sy : float, float arr
        horizontal coordinate
    sz : float, float arr
        vertical coordinate
    lon0 : float
        satellite longitude and origin of the planar coordinate system
    Re: float
        equatorial radius, in m
    Rp: float
        polar radius, in m
    h: float
        satellite height above the surface, in m
    Returns
    -------
    lat : float, float arr
        latitude
    lon : float, float arr
        longitude
"""
    H = Re + h  # orbital radius of the satellite
gr2rad = np.pi / 180
lat = (
np.arctan((Re / Rp) ** 2 * sz / np.sqrt((H - sx) ** 2 + sy ** 2))
/ gr2rad
)
lon = lon0 - np.arctan(sy / (H - sx)) / gr2rad
return lat, lon
def latlon2scan(
lat, lon, lon0=-75.0, Re=6378000.0, Rp=6356000.0, h=36000000.0
):
"""
    Transforms latitude/longitude coordinates into x/y in the
    geostationary projection.
    Based on section 5.2.8.2 of PUG3.
    Parameters
    ----------
    lat: float, float arr
        latitude
    lon: float, float arr
        longitude
    lon0: float, float arr
        satellite longitude and origin of the planar coordinate system
    Re: float
        equatorial radius, in m
    Rp: float
        polar radius, in m
    h: float
        satellite height above the surface, in m
    Returns
    -------
    y : float, float arr
        vertical coordinate, in radians. Parallel to the Earth's axis
    x : float, float arr
        horizontal coordinate, in radians
"""
    H = Re + h  # orbital radius of the satellite
    e = (1 - (Rp / Re) ** 2) ** 0.5  # eccentricity, approx. 0.0818191910435
gr2rad = np.pi / 180
latc = np.arctan((Rp / Re) ** 2 * np.tan(lat * gr2rad))
rc = Rp / (1 - (e * np.cos(latc)) ** 2) ** 0.5
sx = H - rc * np.cos(latc) * np.cos((lon - lon0) * gr2rad)
sy = -rc * np.cos(latc) * np.sin((lon - lon0) * gr2rad)
sz = rc * np.sin(latc)
s_norm = np.sqrt(sx ** 2 + sy ** 2 + sz ** 2)
x = np.arcsin(-sy / s_norm)
y = np.arctan(sz / sx)
return y, x
def colfil2scan(col, fil, x0, y0, scale):
"""
    Transforms image rows/columns into x/y in the geostationary projection.
    Based on section 5.2.8.2 of PUG3.
    Parameters
    ----------
    col : int, int arr
        column
    fil : int, int arr
        row
    x0 : float
        position of x[0], in radians
    y0 : float
        vertical coordinate of the first point, in radians.
        Parallel to the Earth's axis
    scale : float
        pixel size, in radians
    Returns
    -------
    x : float, float arr
        horizontal coordinate, in radians
    y : float, float arr
        vertical coordinate, in radians. Parallel to the Earth's axis
"""
x = col * scale + x0
y = -fil * scale + y0
return x, y
def scan2colfil(x, y, x0, y0, scale, tipo=0):
"""
    Transforms x/y in the geostationary projection into image column/row.
    Based on section 5.2.8.2 of PUG3.
    Parameters
    ----------
    x : float, float arr
        horizontal coordinate, in radians
    y : float, float arr
        vertical coordinate, in radians. Parallel to the Earth's axis
    x0 : float
        position of x[0], in radians
    y0 : float
        vertical coordinate of the first point, in radians.
        Parallel to the Earth's axis
    scale : float
        pixel size, in radians
    tipo : int, optional
        output type. The default is 0 for float, 1 for int.
    Returns
    -------
    col : column
    fil : row
"""
col = (x - x0) / scale
fil = -(y - y0) / scale
if tipo == 0:
return col, fil
elif tipo == 1:
return round(col), round(fil)
else:
raise TypeError("Type must be 0 (float) or 1 (int)")
```
#### File: StratoPy/stratopy/goes.py
```python
import datetime
import os
import attr
from netCDF4 import Dataset
import numpy as np
from pyorbital import astronomy
from pyspectral.near_infrared_reflectance import Calculator
from scipy import interpolate
from . import core
PATH = os.path.abspath(os.path.dirname(__file__))
def read_nc(file_path):
"""
Reads netCDF files through the netCDF4 library.
Parameters
----------
file_path: ``str tuple``
Contains a file path of one or all three paths of
channels 3, 7 and 13 of the CMIPF GOES-16 product.
Returns
-------
result: ``netCDF4.Dataset``
File variables.
"""
if len(file_path) == 3:
# Check for date and product consistency
files_date = [
band_path.split("s20", 1)[1].split("_", 1)[0]
for band_path in file_path
]
# Create boolean for consistency evaluation
eq_dates = all(date == files_date[0] for date in files_date)
eq_product = all("L2-CMIPF" in path for path in file_path)
if not eq_dates:
raise ValueError("Start date's from all files should be the same.")
elif not eq_product:
raise ValueError("Files must be from the same product.")
elif len(file_path) != 1 and len(file_path) != 3:
raise ValueError(
"File path must be a tuple of length 1 or 3 (in case of RGB)."
)
data = dict()
for paths in file_path:
channel = paths.split("-")[3].split("_")[0]
data[channel] = Dataset(paths, "r").variables
return Goes(data)
@attr.s(frozen=True, repr=False)
class Goes:
"""Generates an object containing de Day Microphysics state
according to GOES-16 manual.
Parameters
----------
data: ``netCDF4.Dataset.variables dict``
Dictionary with variables data from each channel of the
GOES Day Microphysics product.
coordinates: ``tuple`` (default: cut will be south hemisphere)
(lat_inf, lat_sup, lon_east, lon_west) where:
lat_inf, latitude of minimal position
lat_sup, latitude of maximal position
        lon_east, easternmost longitude of the cut
        lon_west, westernmost longitude of the cut
"""
_data = attr.ib(validator=attr.validators.instance_of(dict))
coordinates = attr.ib(default=(-40.0, 10.0, -37.0, -80.0))
_trim_coord = attr.ib(init=False)
RGB = attr.ib(init=False)
_img_date = attr.ib(init=False)
def __repr__(self):
_img_date = self._img_date.strftime("%d/%m/%y-%H:%M")
bands = [int(band.split("C")[1]) for band in self._data]
if len(bands) == 1:
return f"GOES Object -- {_img_date}, CH={bands[0]}"
else:
return (
f"GOES Object -- {_img_date}, "
f"CH={bands[0]}, {bands[1]} and {bands[2]}"
)
def _repr_html_(self):
_img_date = self._img_date.strftime("%d/%m/%y-%H:%M")
bands = [int(band.split("C")[1]) for band in self._data]
footer = "<b>-- Goes Object</b>"
if len(bands) == 1:
return f"<div>{_img_date}, , CH={bands[0]} {footer}</div>"
else:
return (
f"<div>{_img_date}, , "
f"CH={bands[0]}, {bands[1]} and {bands[2]} {footer}</div>"
)
@_img_date.default
def _img_date_default(self):
# Using existing channel date (same for all)
channel_data = list(self._data.values())[0]
# Img date in sec
time_delta = datetime.timedelta(seconds=int(channel_data["t"][:].data))
date_0 = datetime.datetime(year=2000, month=1, day=1, hour=12)
return date_0 + time_delta
@_trim_coord.default
def _trim_coord_default(self):
        # Coordinates in degrees
lat_inf, lat_sup, lon_east, lon_west = self.coordinates
trim_coordinates = dict()
for ch_id, dataset in self._data.items():
# Extract all the variables
metadata = dataset
# satellite height
h = metadata["goes_imager_projection"].perspective_point_height
semieje_may = metadata["goes_imager_projection"].semi_major_axis
semieje_men = metadata["goes_imager_projection"].semi_minor_axis
lon_cen = metadata[
"goes_imager_projection"
].longitude_of_projection_origin
scale_factor = metadata["x"].scale_factor
offset = np.array(
[metadata["x"].add_offset, metadata["y"].add_offset]
)
pto_sup_izq = core.latlon2scan(
lat_sup,
lon_west,
lon_cen,
Re=semieje_may,
Rp=semieje_men,
h=h,
)
pto_inf_der = core.latlon2scan(
lat_inf,
lon_east,
lon_cen,
Re=semieje_may,
Rp=semieje_men,
h=h,
)
c0, r0 = core.scan2colfil(
pto_sup_izq[1],
pto_sup_izq[0],
offset[0],
offset[1],
scale_factor,
1,
)
c1, r1 = core.scan2colfil(
pto_inf_der[1],
pto_inf_der[0],
offset[0],
offset[1],
scale_factor,
1,
)
trim_coordinates[ch_id] = (r0, r1, c0, c1)
return trim_coordinates
def trim(self, for_RGB=True):
"""
This function trims a GOES CMI image according to the width, height
max west longitude and upper latitude specified on the parameters.
Default parameters are set to return a South America image.
Parameters
----------
Returns
-------
trim_img: ``numpy.array`` containing the trimmed image.
"""
trim_img = dict()
N = 5424 # Image size for psize = 2000 [m]
for ch_id, dataset in self._data.items():
image = np.array(dataset["CMI"][:].data)
esc = N / image.shape[0]
r0, r1, c0, c1 = self._trim_coord[ch_id]
trim_img[ch_id] = image[r0:r1, c0:c1]
# Rescale channels with psize = 1000 [m]
if for_RGB and ch_id == "M3C03":
x = range(trim_img[ch_id][:].shape[1])
y = range(trim_img[ch_id][:].shape[0])
f = interpolate.interp2d(x, y, trim_img[ch_id], kind="cubic")
xnew = np.arange(x[0], x[-1] + 1, (x[1] - x[0]) / esc)
ynew = np.arange(y[0], y[-1], (y[1] - y[0]) / esc)
trim_img[ch_id] = f(xnew, ynew)
return trim_img
@RGB.default
def _RGB_default(self, masked=False):
"""
This function creates an RGB image that represents the day microphysics
according to the GOES webpage manual.
        goes_obj.RGB returns the trimmed, corrected image as a numpy array.
Parameters
----------
rec03: ``numpy.array``
Processed image of channel 3.
rec07b: ``numpy.array``
Processed image of channel 7.
rec13: ``numpy.array``
Processed image of channel 13.
masked: bool
If True, returns a masked RGB
according to day MP quick guide
Returns
-------
RGB: ``numpy.array``
RGB day microphysics image.
"""
# Starts with all channels trimmed images
trimmed_img = self.trim()
if len(trimmed_img) == 1:
return np.array(list(trimmed_img.values()))
else:
            # Assign color to bands and apply the zenith correction to band 7.
R = trimmed_img["M3C03"]
G = solar7(
self._trim_coord["M3C07"],
trimmed_img["M3C07"],
trimmed_img["M3C13"],
)
B = trimmed_img["M3C13"]
            # Minimums and maximums
Rmin = 0
Rmax = 1
Gmin = 0
Gmax = 0.6
Bmin = 203
Bmax = 323
            # Normalize the data and work on copies
R = (R - Rmin) / (Rmax - Rmin)
with np.errstate(invalid="ignore"):
G = ((G - Gmin) / (Gmax - Gmin)) ** 0.4
B = (B - Bmin) / (Bmax - Bmin)
RR = np.copy(R)
BB = np.copy(B)
GG = np.copy(G)
RR[RR < 0] = 0.0
RR[RR > 1] = 1.0
BB[BB < 0] = 0.0
BB[BB > 1] = 1.0
GG[GG < 0] = 0.0
GG[GG > 1] = 1.0
# Create the norm RGB
RRGB = np.stack([RR, GG, BB], axis=2)
if masked is True:
RRGB = mask(RRGB)
return RRGB
def solar7(trim_coord_ch7, ch7, ch13):
"""
This function does a zenith angle correction to channel 7.
This correction is needed for daylight images. It is used
in RGB method of Goes class.
Parameters
----------
trim_coord_ch7: ``tuple``
(r0, r1, c0, c1) where:
        r0, r1, first and last row indices of the trimmed region
        c0, c1, first and last column indices of the trimmed region
ch7: ``numpy.array``
Trimmed image of channel 7.
ch13: ``numpy.array``
        Trimmed image of channel 13.
Returns
-------
``numpy.array``
Zenith calculation for every pixel for channel 7.
"""
# Construct paths
latitude_path = os.path.join(PATH, "lat_vec.npy")
longitude_path = os.path.join(PATH, "lon_vec.npy")
# Trimmed coordinates
r0, r1, c0, c1 = trim_coord_ch7
lat = np.load(latitude_path)[r0:r1]
lon = np.load(longitude_path)[c0:c1]
# Calculate the solar zenith angle
utc_time = datetime.datetime(2019, 1, 2, 18, 00)
LON, LAT = np.meshgrid(lon, lat)
zenith = astronomy.sun_zenith_angle(utc_time, LON, LAT)
refl39 = Calculator(platform_name="GOES-16", instrument="abi", band="ch7")
return refl39.reflectance_from_tbs(zenith, ch7, ch13)
def mask(rgb):
"""This function returns a labeled-by-color image according to
the interpretation of the product Day Microphysics
(https://weather.msfc.nasa.gov/sport/training/quickGuides/
rgb/QuickGuide_DtMicroRGB_NASA_SPoRT.pdf)
Parameters:
-----------
rgb: numpy array
Numpy Array object containig the Day Microphysics RGB product
Returns:
-------
img_mask: numpy array
Masked RGB
"""
img_mask = np.zeros(rgb.shape)
# Large drops, Low clouds-> pink/magenta
lc_rfilter = rgb[:, :, 0] > 0.7 # R>0.8
lc_gfilter = rgb[:, :, 1] < 0.4 # G
lc_bfilter = rgb[:, :, 2] > 0.6 # B
lc_filter = lc_rfilter * lc_gfilter * lc_bfilter
# Mask= magenta
img_mask[lc_filter, 0] = 1.0
img_mask[lc_filter, 1] = 0.0
img_mask[lc_filter, 2] = 1.0
# Stratus/Stratoculumus (small drops, low clouds) -> bright green/blue
st_rfilter = (rgb[:, :, 0] > 0.3) * (rgb[:, :, 0] < 0.45) # R
st_gfilter = (rgb[:, :, 1] > 0.5) * (rgb[:, :, 1] < 0.8) # G
st_bfilter = rgb[:, :, 2] < 0.7
st_filter = st_rfilter * st_gfilter * st_bfilter
# Mask=Light blue
img_mask[st_filter, 0] = 0.0
img_mask[st_filter, 1] = 1.0
img_mask[st_filter, 2] = 1.0
# CumuloNimbis (high clouds) -> red, dark orange
cb_rfilter = rgb[:, :, 0] > 0.7 # R
cb_gfilter = rgb[:, :, 1] < 0.3 # G
cb_bfilter = rgb[:, :, 2] < 0.3 # B
cb_filter = cb_rfilter * cb_gfilter * cb_bfilter
# Mask=Red
img_mask[cb_filter, 0] = 1.0
img_mask[cb_filter, 1] = 0.0
img_mask[cb_filter, 2] = 0.0
# Cirrus (high clouds)-> green, dark green
cr_rfilter = rgb[:, :, 0] < 0.3 # R
cr_gfilter = rgb[:, :, 1] > 0.7 # G
cr_bfilter = rgb[:, :, 2] < 0.3 # B
cr_filter = cr_rfilter * cr_gfilter * cr_bfilter
# Mask= Green
img_mask[cr_filter, 0] = 0.0
img_mask[cr_filter, 1] = 1.0
img_mask[cr_filter, 2] = 0.0
    # Supercooled clouds (thick, small drops, medium clouds) -> yellow
    super_rfilter = rgb[:, :, 0] > 0.8
    super_gfilter = rgb[:, :, 1] > 0.8
    super_bfilter = rgb[:, :, 2] < 0.2
super_filter = super_rfilter * super_gfilter * super_bfilter
# Mask=Yellow
img_mask[super_filter, 0] = 1.0
img_mask[super_filter, 1] = 1.0
img_mask[super_filter, 2] = 0.0
return img_mask[:, :, [0, 1, 2]]
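# Minimal smoke test for mask() (illustrative, not part of the original module):
# a synthetic pixel matching the CumuloNimbus filter (high R, low G, low B)
# should come back labelled pure red.
if __name__ == "__main__":
    sample = np.zeros((1, 1, 3))
    sample[0, 0] = [0.9, 0.1, 0.1]
    print(mask(sample)[0, 0])  # expected: [1. 0. 0.]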
``` |
{
"source": "joroGER/schafkopf-ml",
"score": 3
} |
#### File: joroGER/schafkopf-ml/game_round.py
```python
from rules import Rules
from utils import Utils
class Game_round:
def __init__(self,
game,
starting_position):
self.game = game
self.starting_position = starting_position
self.played_cards = []
self.winner = None
self.round_points = 0
def start(self):
#print("New round! Player {0} starts.".format(self.starting_position))
#Utils.print_players_information(self.game.match.players)
pass
def run(self):
for i in range(self.game.match.num_players):
current_position = (self.starting_position + i) % self.game.match.num_players
player = self.game.match.players[current_position]
picked_card = player.decide_on_card(self)
self.game.log_msgs.append("Player {0} picked card {1}.".format(current_position, str(picked_card)))
self.played_cards.append(picked_card)
Rules.set_playable_cards(self, False)
def end(self):
self.winner = Rules.calc_round_winner(self)
self.round_points = Rules.calc_round_points(self)
self.game.match.players[self.winner].game_points += self.round_points
self.game.log_msgs.append("Player {0} won this round. Points: {1}. Played cards: {2}".format(self.winner, list(map(lambda player: player.game_points, self.game.match.players)), " ".join(list(map(str, self.played_cards)))))
self.game.starting_position = self.winner
Rules.set_playable_cards(self, True)
self.game.played_cards += self.played_cards
if len(self.game.match.players[0].cards) == 0:
self.game.playing = False
```
#### File: joroGER/schafkopf-ml/player.py
```python
import random
import numpy as np
from rules import Rules
from utils import Utils
class Player:
def __init__(self, position, is_human, rl_agent):
self.position = position
self.is_human = is_human
self.cards = []
self.game_points = 0
self.coins = 10000
self.rl_agent = rl_agent
# is used to remember transition
self.old_state = None
def decide_on_game(self, game_obj):
game = ''
possible_games = Rules.get_possible_games()
if self.is_human:
while game == '':
game_obj.log_msgs.append(str(self))
console_input = input("What game do you want to play?")
color = None
if console_input == 'solo':
color = input("What color do you want to play?")
try:
if console_input not in possible_games:
raise Exception()
game = {
"game": console_input,
"color": color,
"player_id": self.position
}
except:
print("Please pick a valid game.")
features = Utils.features_from_game(game_obj, self)
game_index = self.rl_agent.predict_game(features)
self.rl_agent.update_game_memory(self.position, features, game_index)
else:
if self.rl_agent:
features = Utils.features_from_game(game_obj, self)
game_index = self.rl_agent.predict_game(features)
if game_index == 0:
game_type = 'no_game'
color = None
elif game_index == 1:
game_type = 'wenz'
color = None
else:
# means its a solo, now determine color
color = Rules.get_color_ordering()[game_index - 2]
game_type = 'solo'
self.rl_agent.update_game_memory(self.position, features, game_index)
else:
game_type = random.choice(Rules.get_possible_games())
color = random.choice(Rules.get_color_ordering())
game = {
"game": game_type,
"color": color,
"player_id": self.position
}
return game
def decide_on_card(self, game_round):
card_index = -1
playable_cards_indices = [i for i in range(len(self.cards)) if self.cards[i].playable]
#print("Playable cards:", playable_cards_indices)
if self.is_human:
while card_index == -1:
console_input = input("What card index do you want to pick?")
try:
console_int_input = int(console_input)
if console_int_input not in playable_cards_indices:
raise Exception()
card_index = console_int_input
except:
print("Please pick a valid card.")
features = Utils.features_from_round(game_round, self)
if self.old_state:
self.rl_agent.update_card_memory_with_next_state(self.position, features, False)
self.old_state = features
playing = int(self.position == game_round.game.game_type["player_id"])
self.rl_agent.update_card_memory(self.position, features, card_index, playing)
else:
if self.rl_agent:
features = Utils.features_from_round(game_round, self)
if self.old_state:
self.rl_agent.update_card_memory_with_next_state(self.position, features, False)
self.old_state = features
# every 10 rounds do not explore on cards but do on game
explore = game_round.game.game_no % 10 != 0
playing = int(self.position == game_round.game.game_type["player_id"])
card_index = self.rl_agent.predict_action(features, playable_cards_indices, game_round.game.game_type["game"], explore, playing)
self.rl_agent.update_card_memory(self.position, features, card_index, playing)
else:
# pick highest card and play it
highest_index = playable_cards_indices[0]
game = game_round.game.game_type
for i in playable_cards_indices[1:]:
if Rules.get_card_ordering(game).index(self.cards[i].value) > Rules.get_card_ordering(game).index(self.cards[highest_index].value):
highest_index = i
elif (Rules.get_card_ordering(game).index(self.cards[i].value) == Rules.get_card_ordering(game).index(self.cards[highest_index].value) and Rules.get_color_ordering().index(self.cards[i].color) > Rules.get_color_ordering().index(self.cards[highest_index].color)):
highest_index = i
card_index = highest_index
picked_card = self.cards[card_index]
self.cards.pop(card_index)
return picked_card
def __str__(self):
cards_str = " ".join(list(map(str, self.cards)))
return "[Player: {0}. Cards: {1}. Coins: {2}]".format(self.position, cards_str, self.coins)
```
#### File: joroGER/schafkopf-ml/rules.py
```python
import random
import numpy as np
import json
cards = None
with open('cards.json') as f:
cards = json.load(f)
class Rules:
@staticmethod
def get_cards():
return cards
@staticmethod
def calc_highest_game(games_called):
ordering = Rules.get_possible_games()
highest_game = games_called[0]
for game in games_called:
if ordering.index(game["game"]) > ordering.index(highest_game["game"]):
highest_game = game
return highest_game
@staticmethod
def calc_round_winner(game_round):
first_card = game_round.played_cards[0]
highest_card = first_card
winner = 0
card_ordering = Rules.get_card_ordering(game_round.game.game_type)
color_ordering = Rules.get_color_ordering()
for i in range(1, len(game_round.played_cards)):
# trump vs no trump, wenz Jd vs Ad
if not highest_card.is_trump and game_round.played_cards[i].is_trump:
highest_card = game_round.played_cards[i]
winner = i
continue
# same color and higher, eg Ac vs 10c
if first_card.color == game_round.played_cards[i].color and \
card_ordering.index(game_round.played_cards[i].value) > card_ordering.index(highest_card.value):
highest_card = game_round.played_cards[i]
winner = i
continue
# if cards are both trump but one is higher, eg. solo Q vs J
if highest_card.is_trump and game_round.played_cards[i].is_trump and \
card_ordering.index(game_round.played_cards[i].value) > card_ordering.index(highest_card.value):
highest_card = game_round.played_cards[i]
winner = i
continue
# if cards are the same value and trump, check color ordering, eg wenz Jc vs Jd
if highest_card.is_trump and game_round.played_cards[i].is_trump and \
game_round.played_cards[i].value == highest_card.value and \
color_ordering.index(game_round.played_cards[i].color) > color_ordering.index(highest_card.color):
highest_card = game_round.played_cards[i]
winner = i
continue
# move from nth card played to correct played based on relative position
return (winner + game_round.starting_position) % game_round.game.match.num_players
@staticmethod
def calc_round_points(game_round):
points = 0
points_map = Rules.get_points_map()
for card in game_round.played_cards:
points += points_map[card.value]
return points
@staticmethod
def calc_game_winner(game):
if game.game_type['game'] != 'no_game':
player_index = game.game_type['player_id']
playing_points = game.match.players[player_index].game_points
if playing_points > 60:
return [game.match.players[player_index]]
else:
return [game.match.players[i] for i in range(game.match.num_players) if i != player_index]
else:
loser = game.match.players[0]
for player in game.match.players:
                # Taking all the points (120) wins the ramsch
#if player.game_points == 120:
# return [player]
if player.game_points > loser.game_points:
loser = player
return [game.match.players[i] for i in range(game.match.num_players) if i != loser.position]
@staticmethod # the amount all losing players have to pay to winning players
def calc_game_payout(game):
payout_map = {
"solo": 20,
"wenz": 10,
"no_game": 10,
"black_factor": 2,
"schneider": 10,
"per_running": 10,
"per_virgin_factor": 2
}
payout = 0
payout += payout_map[game.game_type['game']]
running_cards = Rules.get_running_cards(game.game_type['game'])
winning_cards = [item for sublist in list(map(lambda player: player.cards, game.winners)) for item in sublist]
winning_cards_ids = list(map(lambda card: card.id, winning_cards))
runnings = 0
for card_id in running_cards:
if card_id in winning_cards_ids:
runnings += 1
payout += runnings * payout_map["per_running"]
winning_game_points = sum(list(map(lambda player: player.game_points, game.winners)))
if winning_game_points == 120:
payout *= payout_map["black_factor"]
elif winning_game_points > 90 \
or winning_game_points < 30:
payout += payout_map["schneider"]
if game.game_type["game"] == "no_game":
virgins = 0
for player in game.match.players:
if player.game_points == 0:
virgins += 1
if virgins == game.match.num_players - 1:
payout = 100
else:
payout *= (virgins + 1)
return payout
@staticmethod
def get_running_cards(game_type):
if game_type == 'wenz':
return ['Jc', 'Js', 'Jh', 'Jd']
if game_type == 'solo' or game_type == 'no_game':
return ['Qc', 'Qs', 'Qh', 'Qd', 'Jc', 'Js', 'Jh', 'Jd']
@staticmethod
def get_points_map():
return {
'9': 0,
'J': 2,
'Q': 3,
'K': 4,
'10': 10,
'A': 11
}
@staticmethod
def get_possible_games():
return ['no_game', 'wenz', 'solo']
@staticmethod
def get_color_ordering():
return ['d', 'h', 's', 'c']
@staticmethod
def get_card_ordering(game_type):
if game_type['game'] == 'wenz':
return ['9', 'Q', 'K', '10', 'A', 'J']
if game_type['game'] == 'solo' or game_type['game'] == 'no_game':
return ['9', 'K', '10', 'A', 'J', 'Q']
@staticmethod
def order_cards(cards, game):
card_ordering = Rules.get_card_ordering(game.game_type)
color_ordering = Rules.get_color_ordering()
for _ in range(len(cards)):
for i in range(0, len(cards)-1):
card1_idx = card_ordering.index(cards[i].value)
card2_idx = card_ordering.index(cards[i+1].value)
color1 = color_ordering.index(cards[i].color)
color2 = color_ordering.index(cards[i+1].color)
card_is_higher = (
(cards[i].is_trump == cards[i+1].is_trump and card1_idx == card2_idx and color1 > color2) or
(cards[i].is_trump == cards[i+1].is_trump and card1_idx > card2_idx) or
(cards[i].is_trump and not cards[i+1].is_trump)
)
if card_is_higher:
tmp = cards[i+1]
cards[i+1] = cards[i]
cards[i] = tmp
@staticmethod
def is_card_trump(card, game_type):
if game_type['game'] == 'wenz':
# only J are trump
return card.value == 'J'
if game_type['game'] == 'solo':
# all Q, J and color are trump
return card.value == 'Q' or card.value == 'J' or card.color == game_type['color']
if game_type['game'] == 'no_game':
# all Q, J and heart are trump
return card.value == 'Q' or card.value == 'J' or card.color == 'h'
@staticmethod
def _is_card_playable(card, game_type, played_cards, player_cards):
# played_cards should never be empty here, if it is just fail!
first_card = played_cards[0]
if first_card.is_trump:
if card.is_trump:
return True
else:
# playable if he does not have any other trump card
return len(list(filter(lambda c: c.is_trump, player_cards))) == 0
else:
# playable if card is no trump and has same color
if not card.is_trump and card.color == first_card.color:
return True
else:
# playable if he does not have any other card of same color that is not trump
# acceptable would be if first_card = 'As' and player has 'Qs' in a solo.
return len(list(filter(lambda c: c.color == first_card.color and not c.is_trump, player_cards))) == 0
@staticmethod
def set_playable_cards(game_round, end_of_round):
for player in game_round.game.match.players:
for card in player.cards:
if end_of_round:
card.playable = True
else:
card.playable = Rules._is_card_playable(card,
game_round.game.game_type,
game_round.played_cards,
player.cards)
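# Quick illustration of a couple of the static helpers above (not part of the
# original file):
if __name__ == "__main__":
    print(Rules.get_points_map()["A"])      # an ace is worth 11 points
    print(Rules.get_running_cards("wenz"))  # ['Jc', 'Js', 'Jh', 'Jd']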
``` |
{
"source": "jorotenev/depdag",
"score": 3
} |
#### File: depdag/tests/test_integration.py
```python
import unittest
from depdag import DepDag
class TestIntegration(unittest.TestCase):
def test_case_1(self):
vert = DepDag().vertices
vert.a.depends_on('b', 'c')
vert.c.depends_on('e')
vert.d.depends_on('e')
vert.e.depends_on('b')
for v in [vert.a, vert.b, vert.c, vert.d, vert.e]:
self.assertFalse(v.provided)
self.assertFalse(v.is_resolved())
vert.b.payload = 'some_payload'
vert.d.payload = 'some_payload'
self.assertFalse(vert.a.provided)
self.assertFalse(vert.a.is_resolved())
self.assertTrue(vert.b.provided)
self.assertTrue(vert.b.is_resolved())
self.assertFalse(vert.c.provided)
self.assertFalse(vert.c.is_resolved())
self.assertTrue(vert.d.provided)
self.assertFalse(vert.d.is_resolved())
self.assertFalse(vert.e.provided)
self.assertFalse(vert.e.is_resolved())
vert.a.payload = 'some_payload'
vert.e.payload = 'some_payload'
self.assertTrue(vert.a.provided)
self.assertFalse(vert.a.is_resolved())
self.assertTrue(vert.b.provided)
self.assertTrue(vert.b.is_resolved())
self.assertFalse(vert.c.provided)
self.assertFalse(vert.c.is_resolved())
self.assertTrue(vert.d.provided)
self.assertTrue(vert.d.is_resolved())
self.assertTrue(vert.e.provided)
self.assertTrue(vert.e.is_resolved())
vert.c.payload = 'some_payload'
self.assertTrue(vert.a.provided)
self.assertTrue(vert.a.is_resolved())
self.assertTrue(vert.b.provided)
self.assertTrue(vert.b.is_resolved())
self.assertTrue(vert.c.provided)
self.assertTrue(vert.c.is_resolved())
self.assertTrue(vert.d.provided)
self.assertTrue(vert.d.is_resolved())
self.assertTrue(vert.e.provided)
self.assertTrue(vert.e.is_resolved())
def test_case_2(self):
vert = DepDag().vertices
vert.a.depends_on('b')
vert.b.depends_on('c')
vert.c.depends_on('d')
vert.d.depends_on('e', 'f')
vert.g.depends_on('b')
vert.h.depends_on('g')
vert.i.depends_on('d')
for v in [vert.a, vert.b, vert.c, vert.d, vert.e, vert.f, vert.g, vert.h, vert.i]:
self.assertFalse(v.provided)
self.assertFalse(v.is_resolved())
vert.d.payload = 'some_payload'
vert.e.payload = 'some_payload'
vert.g.payload = 'some_payload'
self.assertFalse(vert.a.provided)
self.assertFalse(vert.a.is_resolved())
self.assertFalse(vert.b.provided)
self.assertFalse(vert.b.is_resolved())
self.assertFalse(vert.c.provided)
self.assertFalse(vert.c.is_resolved())
self.assertTrue(vert.d.provided)
self.assertFalse(vert.d.is_resolved())
self.assertTrue(vert.e.provided)
self.assertTrue(vert.e.is_resolved())
self.assertFalse(vert.f.provided)
self.assertFalse(vert.f.is_resolved())
self.assertTrue(vert.g.provided)
self.assertFalse(vert.g.is_resolved())
self.assertFalse(vert.h.provided)
self.assertFalse(vert.h.is_resolved())
self.assertFalse(vert.i.provided)
self.assertFalse(vert.i.is_resolved())
vert.b.payload = 'some_payload'
vert.c.payload = 'some_payload'
vert.f.payload = 'some_payload'
self.assertFalse(vert.a.provided)
self.assertFalse(vert.a.is_resolved())
self.assertTrue(vert.b.provided)
self.assertTrue(vert.b.is_resolved())
self.assertTrue(vert.c.provided)
self.assertTrue(vert.c.is_resolved())
self.assertTrue(vert.d.provided)
self.assertTrue(vert.d.is_resolved())
self.assertTrue(vert.e.provided)
self.assertTrue(vert.e.is_resolved())
self.assertTrue(vert.f.provided)
self.assertTrue(vert.f.is_resolved())
self.assertTrue(vert.g.provided)
self.assertTrue(vert.g.is_resolved())
self.assertFalse(vert.h.provided)
self.assertFalse(vert.h.is_resolved())
self.assertFalse(vert.i.provided)
self.assertFalse(vert.i.is_resolved())
vert.a.payload = 'some_payload'
vert.h.payload = 'some_payload'
vert.i.payload = 'some_payload'
self.assertTrue(vert.a.provided)
self.assertTrue(vert.a.is_resolved())
self.assertTrue(vert.b.provided)
self.assertTrue(vert.b.is_resolved())
self.assertTrue(vert.c.provided)
self.assertTrue(vert.c.is_resolved())
self.assertTrue(vert.d.provided)
self.assertTrue(vert.d.is_resolved())
self.assertTrue(vert.e.provided)
self.assertTrue(vert.e.is_resolved())
self.assertTrue(vert.f.provided)
self.assertTrue(vert.f.is_resolved())
self.assertTrue(vert.g.provided)
self.assertTrue(vert.g.is_resolved())
self.assertTrue(vert.h.provided)
self.assertTrue(vert.h.is_resolved())
self.assertTrue(vert.i.provided)
self.assertTrue(vert.i.is_resolved())
``` |
{
"source": "jorpilo/PriorityGraph",
"score": 3
} |
#### File: jorpilo/PriorityGraph/PriorityGraph.py
```python
from sortedcontainers import SortedListWithKey
DEBUG = False
class Graph:
    # NOTE: nodes and links are class-level attributes, so every Graph
    # instance shares the same containers.
    nodes = dict()
    links = SortedListWithKey(key=lambda item: -item.priority)
def addNode(self, name):
self.nodes[name] = self.Node(name)
def addLink(self, base, to, priority):
__base = self.nodes[base]
__to = self.nodes[to]
link = self.Link(__base, __to, priority)
self.links.add(link)
__base.addTo(link)
__to.addBase(link)
def resetGraph(self):
clean = 0
deadLinks = 0
for link in self.links:
link.used = False
clean += 1
for node in self.nodes.values():
if len(node.base) == 0:
for link in node.to:
link.used = True
deadLinks += 1
if DEBUG:
print("Clean links: " + str(clean))
print("Dead links: " + str(deadLinks))
def SearchPriorityCicles(self):
self.resetGraph()
result = []
i = 0
for link in self.links:
if DEBUG:
if not link.used:
                    print("transfer # " + str(i) + " / " + str(link))
else:
print(" used # " + str(i) + " / " + str(link))
i += 1
if not link.used:
link.used = True
route = self.SearchCicleRecursive(link, link.base)
if route is not None:
if DEBUG:
                        print("Cycle found")
route.append(link)
result.append(route)
else:
if DEBUG:
                        print("Dead end")
link.used = False
return result
def SearchCicleRecursive(self, link, start):
newbase = link.to
options = filter(lambda element: element.used is False and start not in element.deadNodes, newbase.to)
deadlinks = []
for option in options:
option.used = True
if option.to == start:
return [option]
else:
route = self.SearchCicleRecursive(option, start)
if route is not None:
route.append(option)
for link in deadlinks:
link.used = False
return route
else:
option.deadNodes.add(start)
deadlinks.append(option)
for link in deadlinks:
link.used = False
return None
def __str__(self):
String = "Nodes: \n"
String += "-------------------\n"
for node in self.nodes.values():
String += node.str_full() + "\n"
String += "-------------------\n"
String += "Links: \n"
String += "-------------------\n"
for link in self.links:
String += str(link) + "\n"
return String
__repr__ = __str__
class Node:
name = None
to = None
base = None
def __init__(self, name):
self.name = name
self.to = SortedListWithKey(key=lambda item: -item.priority)
self.base = []
def addTo(self, link):
self.to.add(link)
def addBase(self, link):
self.base.append(link)
def __str__(self):
return self.name
def str_full(self):
String = str(self) + " #Links: " + str(len(self.to)) + "\n"
for link in self.to:
String += str(link) + "\n"
return String
__repr__ = __str__
class Link:
base = None
to = None
priority = None
used = None
deadNodes = None
def __init__(self, base, to, priority):
self.base = base
self.to = to
self.priority = priority
self.used = False
self.deadNodes = set()
def __str__(self):
return str(self.base) + " --> " + str(self.to) + " Priority: " + str(self.priority)
def __eq__(self, other):
return self.base.name == other.base.name and self.to.name == other.to.name and self.priority == other.priority
__repr__ = __str__
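

# Illustrative usage sketch (not part of the original module): build a small
# three-node priority cycle and print the cycles that are found.
if __name__ == "__main__":
    g = Graph()
    for name in ("a", "b", "c"):
        g.addNode(name)
    g.addLink("a", "b", priority=3)
    g.addLink("b", "c", priority=2)
    g.addLink("c", "a", priority=1)
    for cycle in g.SearchPriorityCicles():
        print(" -> ".join(str(link) for link in cycle))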
``` |
{
"source": "JorrandeWit/latex-production-tools",
"score": 3
} |
#### File: latex-production-tools/tests/test_latex_utils.py
```python
import unittest
from latex_utils import (get_relevant_warnings,
remove_accented_characters,
open_webpage)
class TestLaTeXUtils(unittest.TestCase):
def test_get_relevant_warnings(self):
"""Test that relevant warnings are extracted from a LaTeX log file."""
test_log = """
Overfull \hbox (24.00002pt too wide) in paragraph at lines 245--268
LaTeX Warning: Citation `Qhydo4' on page 2 undefined on input line 304.
Underfull \hbox (badness 1132) in paragraph at lines 1347--1348
"""
expected_warnings = ["Overfull \hbox (24.00002pt too wide) in paragraph at lines 245--268",
"LaTeX Warning: Citation `Qhydo4' on page 2 undefined on input line 304."]
self.assertEqual(get_relevant_warnings(test_log), expected_warnings)
def test_accented_character_removal(self):
"""Test that accented strings are correctly normalized."""
self.assertEqual(remove_accented_characters("<NAME>"), "<NAME>")
self.assertEqual(remove_accented_characters("Caux"), "Caux")
self.assertEqual(remove_accented_characters("Jérôme"), "Jerome")
def test_webpage_access(self):
"""Test that webpages are retrieved and errors handled correctly."""
# Test 404 error catching.
with self.assertRaises(SystemExit):
open_webpage("http://example.com/404")
# Test no system exit on error.
self.assertFalse(open_webpage("http://example.com/404", exit_on_error=False)[0])
        # Test successful connection to the site.
self.assertEqual(open_webpage("http://example.com/")[1].status_code, 200)
``` |
{
"source": "jorrete/django-cache-helpers",
"score": 2
} |
#### File: django-cache-helpers/cache_helpers/decorators.py
```python
import time
import inspect
from functools import wraps
from django.core.cache import caches
from django.utils.cache import add_never_cache_headers, patch_response_headers
from django.utils.http import http_date
from .utils import check_bust_header, func_to_string
from .settings import CACHE_HELPERS_ALIAS, logger
def _cache_page(timeout,
key_func,
cache_alias=None,
check_func=None,
patch_func=None):
def _cache(view_func):
@wraps(view_func)
def __cache(request, *args, **kwargs):
args = list(args)
for arg in inspect.getfullargspec(view_func).args:
if arg in ['self', 'request']:
continue
if arg in kwargs:
args.append(kwargs.pop(arg))
args = tuple(args)
_cache_alias = cache_alias if cache_alias is not None else CACHE_HELPERS_ALIAS
cache = caches[_cache_alias]
view_path = func_to_string(view_func)
cache_key = key_func(request, *args, view_path=view_path, **kwargs)
response = cache.get(cache_key)
do_cache = (
response is None
or (check_func is not None and check_func(request))
or getattr(request, '_bust_cache', False))
logger.debug('\n'.join([
'######## cache ########',
'cache_alias: {}'.format(_cache_alias),
'cache: {}'.format(cache),
'cache_key: {}'.format(cache_key),
'timeout: {}'.format(timeout),
'response: {}'.format(response),
'check_func: {}'.format((check_func is not None and check_func(request))),
'bust_cache: {}'.format(getattr(request, '_bust_cache', False)),
'args: {}'.format(args),
'kwargs: {}'.format(kwargs),
'view_path: {}'.format(view_path),
'SAVE: {}'.format(do_cache),
'#######################',
]))
if do_cache:
response = view_func(request, *args, **kwargs)
if response.status_code == 200:
patch_func(response, timeout)
if hasattr(response, 'render') and callable(response.render):
def set_cache(response):
cache.set(cache_key, response, timeout)
response.add_post_render_callback(set_cache)
else:
cache.set(cache_key, response, timeout)
setattr(request, '_cache_update_cache', False)
return response
return __cache
return _cache
def cache_page(timeout, key_func, cache=None):
return _cache_page(
timeout,
key_func,
cache_alias=cache,
patch_func=patch_response_headers)
def cache_page_forever(timeout, key_func, cache=None):
def patch_expires_header(response, *args):
if timeout is None or timeout == 0 or timeout < 0:
add_never_cache_headers(response)
else:
response['Expires'] = http_date(time.time() + timeout)
return _cache_page(
None,
key_func,
cache_alias=cache,
check_func=check_bust_header,
patch_func=patch_expires_header)
def cache_result(timeout, cache_alias=None):
def _cache(view_func):
@wraps(view_func)
def __cache(*args, **kwargs):
_cache_alias = cache_alias if cache_alias is not None else CACHE_HELPERS_ALIAS
cache = caches[_cache_alias]
func_path = func_to_string(view_func)
cache_key = '.'.join([str(c) for c in (func_path, args, kwargs,)])
result = cache.get(cache_key)
bust_cache = kwargs.pop('bust_cache', False)
do_cache = (result is None or bust_cache)
logger.debug('\n'.join([
'######## cache ########',
'cache_alias: {}'.format(_cache_alias),
'cache: {}'.format(cache),
'cache_key: {}'.format(cache_key),
'timeout: {}'.format(timeout),
'result: {}'.format(result),
'bust_cache: {}'.format(bust_cache),
'args: {}'.format(args),
'kwargs: {}'.format(kwargs),
'func_path: {}'.format(func_path),
'SAVE: {}'.format(do_cache),
'#######################',
]))
if do_cache:
result = view_func(*args, **kwargs)
cache.set(cache_key, result, timeout)
return result
return __cache
return _cache
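
# Illustrative usage sketch (comments only, not part of the original module);
# it assumes a configured Django project and a caller-supplied key function:
#
#   def path_key(request, *args, view_path=None, **kwargs):
#       return 'page:{}:{}'.format(view_path, request.get_full_path())
#
#   @cache_page(60 * 15, path_key)
#   def article_list(request):
#       ...
#
#   @cache_result(60 * 60)
#   def expensive_lookup(pk):
#       ...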
```
#### File: cache_helpers/request/real.py
```python
import requests
import uuid
from django.conf import settings
from ..utils import set_cache_bust_status
from ..settings import logger
from .helpers import BaseRequestMixin, BaseRequestCommand
def get_session(basic_auth=None, login=None):
session = requests.session()
kwargs = {}
if basic_auth:
kwargs['auth'] = (basic_auth['username'], basic_auth['password'])
if login is not None:
session.headers.update({'referer': login.get('referer')})
res = session.get(login['url'], **kwargs)
res = session.post(login['url'], allow_redirects=True, data={
'username': login['username'],
'password': login['password'],
'csrfmiddlewaretoken': res.cookies['csrftoken'],
'next': login['url'],
}, **kwargs)
# success has history of redirections
if not len(res.history):
raise Exception('Login failed')
else:
logger.info('Login success')
return session
def _make_request(url, session=None, bust_key=None, basic_auth=None, login=None, lang=None):
session = session if session is not None else get_session(basic_auth=basic_auth, login=login)
kwargs = {
'cookies': {},
'headers': {},
}
if lang:
kwargs['cookies'][settings.LANGUAGE_COOKIE_NAME] = lang
try:
kwargs['headers']['bust'] = bust_key if bust_key is not None else ''
if basic_auth:
kwargs['auth'] = (basic_auth['username'], basic_auth['password'])
response = session.get(url, **kwargs)
logger.info('Request success: {}{}{}'.format(
url,
' [lang: {}]'.format(lang) if lang is not None else '',
' [username: {}]'.format(login['username']) if login is not None else ''))
    except Exception:
        logger.error('Request error: {}'.format(url))
        return None  # avoid returning an unbound name when the request fails
return response
def make_request(url, session=None, bust_key=None, basic_auth=None, login=None, lang=None):
session = get_session(basic_auth=basic_auth, login=login)
try:
bust_key = str(uuid.uuid4())
set_cache_bust_status(bust_key)
return _make_request(
url, session, bust_key,
basic_auth=basic_auth, login=login, lang=lang)
except Exception as e:
raise e
finally:
set_cache_bust_status()
class RealRequestMixin(BaseRequestMixin):
def get_request_runner(self):
return _make_request
def _make_requests(self, threads=1, **extra):
session = get_session(
basic_auth=self.get_request_basic_auth(),
login=extra.get('login', None))
try:
bust_key = str(uuid.uuid4())
set_cache_bust_status(bust_key)
extra['bust_key'] = bust_key
extra['session'] = session
return super()._make_requests(threads=threads, **extra)
finally:
set_cache_bust_status()
class RealRequestCommand(RealRequestMixin, BaseRequestCommand):
pass
```
#### File: django-cache-helpers/cache_helpers/views.py
```python
from .decorators import cache_page, cache_page_forever
class CachePageMixin(object):
cache_alias = None
def get_cache_timeout(self, request, *args, **kwargs):
if not hasattr(self, 'cache_timeout'):
raise ValueError('Missing cache_timeout attribute')
return self.cache_timeout
def cache_key_func(self, request, *args, **kwargs):
raise NotImplementedError()
def get_cache_alias(self, request, *args, **kwargs):
return self.cache_alias
def dispatch(self, request, *args, **kwargs):
return cache_page(
self.get_cache_timeout(request),
self.cache_key_func,
cache=self.get_cache_alias(request),
)(super().dispatch)(request, *args, **kwargs)
class CachePageForeverMixin(object):
cache_alias = None
def get_cache_timeout(self, request, *args, **kwargs):
if not hasattr(self, 'cache_timeout'):
raise ValueError('Missing cache_timeout attribute')
return self.cache_timeout
def cache_key_func(self, request, *args, **kwargs):
raise NotImplementedError()
def get_cache_alias(self, request, *args, **kwargs):
return self.cache_alias
def dispatch(self, request, *args, **kwargs):
return cache_page_forever(
self.get_cache_timeout(request),
self.cache_key_func,
cache=self.get_cache_alias(request),
)(super().dispatch)(request, *args, **kwargs)
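
# Illustrative usage sketch (comments only, not part of the original module);
# it assumes a regular Django class-based view:
#
#   class ArticleListView(CachePageMixin, TemplateView):
#       template_name = 'articles.html'
#       cache_timeout = 60 * 15
#
#       def cache_key_func(self, request, *args, **kwargs):
#           return 'articles:{}'.format(request.get_full_path())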
``` |
{
"source": "Jorricks/python-degiro",
"score": 3
} |
#### File: python-degiro/degiroapi/product.py
```python
import datetime
from typing import Mapping, Optional
class Product:
"""A data class for a stock/product of DeGiro."""
def __init__(self, product: Mapping[str, str]):
self.__id = product["id"]
self.__name = product["name"]
self.__isin = product["isin"]
self.__symbol = product["symbol"]
self.__currency = product["currency"]
self.__product_type = product["productTypeId"]
self.__tradable = product["tradable"]
self.__close_price = product.get("closePrice")
cpd = product.get("closePriceDate")
self.__close_price_date = datetime.datetime.strptime(cpd, "%Y-%m-%d").date() if cpd else None
@property
def id(self) -> str:
return self.__id
@property
def name(self) -> str:
return self.__name
@property
def isin(self) -> str:
return self.__isin
@property
def symbol(self) -> str:
return self.__symbol
@property
def currency(self) -> str:
return self.__currency
@property
def product_type(self) -> str:
return self.__product_type
@property
def tradable(self) -> str:
return self.__tradable
@property
def close_price(self) -> Optional[str]:
return self.__close_price
@property
def close_price_date(self) -> Optional[datetime.date]:
return self.__close_price_date
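

# Illustrative usage sketch (not part of the original module); the field values
# below are made-up sample data, not real DeGiro output.
if __name__ == "__main__":
    sample = {
        "id": "123456",
        "name": "Example Corp",
        "isin": "US0000000000",
        "symbol": "EXMP",
        "currency": "EUR",
        "productTypeId": "1",
        "tradable": True,
        "closePrice": "10.50",
        "closePriceDate": "2021-01-04",
    }
    product = Product(sample)
    print(product.name, product.symbol, product.close_price, product.close_price_date)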
``` |
{
"source": "jorrinpollard/podcasts",
"score": 3
} |
#### File: podcasts/podcasts/lib.py
```python
import re
NO_HTML_RE = re.compile('<.*?>')
def get_clean_podcast_attr(podcast_attr):
if not podcast_attr:
return
clean_podcast_attr = str(podcast_attr).strip()
clean_podcast_attr = re.sub(NO_HTML_RE, '', clean_podcast_attr)
return clean_podcast_attr
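

# Illustrative usage sketch (not part of the original module).
if __name__ == "__main__":
    print(get_clean_podcast_attr("  <p>Weekly <b>tech</b> news</p> "))  # -> "Weekly tech news"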
```
#### File: podcasts/test/test_art19.py
```python
from __future__ import division, absolute_import, print_function
import os
import sys
import unittest
cwd = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, cwd)
from podcasts import Podcast
source = os.path.join(cwd, "test", "fixtures", "art19.xml")
_copyright = None
_type = "episodic"
author = "GaS Digital Network"
categories = ['Sports & Recreation']
description = "For the past two years, UFC middleweight Champion, <NAME>, and stand up comedian, <NAME>, have hosted a show together on satellite radio. Now they bring their brand of brash, comedic MMA commentary to the podcast world! Believe You Me is a weekly show that gives you a behind the scenes look at the career and life of a UFC champion. Bisping along with his co-host, Gomez, break down MMA news, pop culture stories, and talk parenting, philosophy, and life in general. Believe You Me is FOR ADULTS ONLY! Follow us on twitter and Instagram: @BYMPod. The newest 15 episodes are always free, but if you want access to all the archives, listen live, chat live, access to the forums, and get the show before it comes out everywhere else - you can subscribe now at gasdigitalnetwork.com and use the code BYM to save 15% on the entire network."
explicit = "yes"
generator = "ART19"
image_url = "https://content.production.cdn.art19.com/images/61/e2/04/17/61e20417-ab6f-43c6-a8fb-00d8d845d8e5/d7bea48af84622089cf34d61a6b2bd64691ac4bda05793a4ca463ea0afbb60bc8af9457ea259ebe8d79826bf2e48d510d6fc130edd74e0bb5454fc1ee30baf74.jpeg"
language = "en"
last_build_date = "Tue, 07 May 2019 04:44:41 -0000"
link = "http://GaSDigitalNetwork.com/believe"
managing_editor = "<EMAIL> (<NAME>, <NAME>)"
new_feed_url = "https://rss.art19.com/believe-you-me"
owner = "<NAME>, <NAME>"
pub_date = None
subtitle = None
summary = "For the past two years, UFC middleweight Champion, <NAME>, and stand up comedian, <NAME>, have hosted a show together on satellite radio. Now they bring their brand of brash, comedic MMA commentary to the podcast world! Believe You Me is a weekly show that gives you a behind the scenes look at the career and life of a UFC champion. Bisping along with his co-host, Gomez, break down MMA news, pop culture stories, and talk parenting, philosophy, and life in general. Believe You Me is FOR ADULTS ONLY! Follow us on twitter and Instagram: @BYMPod. The newest 15 episodes are always free, but if you want access to all the archives, listen live, chat live, access to the forums, and get the show before it comes out everywhere else - you can subscribe now at gasdigitalnetwork.com and use the code BYM to save 15% on the entire network."
title = "Believe You Me with Michael Bisping"
web_master = None
class TestArt19(unittest.TestCase):
def setUp(self):
self.podcast = Podcast(source)
    def test_attributes(self):
self.assertEqual(self.podcast.source, source)
self.assertEqual(self.podcast.author, author)
self.assertEqual(self.podcast.categories, categories)
self.assertEqual(self.podcast.copyright, _copyright)
self.assertEqual(self.podcast.description, description)
self.assertEqual(self.podcast.explicit, explicit)
self.assertEqual(self.podcast.generator, generator)
self.assertEqual(self.podcast.image_url, image_url)
self.assertEqual(self.podcast.language, language)
self.assertEqual(self.podcast.last_build_date, last_build_date)
self.assertEqual(self.podcast.link, link)
self.assertEqual(self.podcast.managing_editor, managing_editor)
self.assertEqual(self.podcast.new_feed_url, new_feed_url)
self.assertEqual(self.podcast.owner, owner)
self.assertEqual(self.podcast.pub_date, pub_date)
self.assertEqual(self.podcast.subtitle, subtitle)
self.assertEqual(self.podcast.summary, summary)
self.assertEqual(self.podcast.title, title)
self.assertEqual(self.podcast.type, _type)
self.assertEqual(self.podcast.web_master, web_master)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jorritfolmer/nessus2json",
"score": 3
} |
#### File: jorritfolmer/nessus2json/nessus2json.py
```python
import argparse
import json
import lxml.etree
from xmljson import yahoo
def xml2json(tree):
output = []
for node in tree.iter():
if node.tag =='ReportHost':
attr_name = node.xpath('@name')[0]
            # use a relative XPath so each ReportHost yields its own properties
            attr_ip = node.xpath('.//HostProperties/tag[@name="host-ip"]')[0].text
            attr_rdns = node.xpath('.//HostProperties/tag[@name="host-rdns"]')[0].text
for subnode in node.iter():
if subnode.tag == 'ReportItem':
rptitem = yahoo.data(subnode)
rptitem['host_name'] = attr_name
rptitem['host_ip'] = attr_ip
rptitem['host_rdns'] = attr_rdns
output.append(rptitem)
return(output)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('filename', type=str, help='Filename of Nessus XML export')
args = parser.parse_args()
filename = args.filename
tree = lxml.etree.parse(filename)
output = xml2json(tree)
print("{}".format(json.dumps(output, indent=2)))
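
# Example invocation (file names are illustrative):
#   python nessus2json.py scan_export.nessus > findings.json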
``` |
{
"source": "JorritHimself/anu-ecf-2050emissions-pathway-tool",
"score": 3
} |
#### File: JorritHimself/anu-ecf-2050emissions-pathway-tool/dashapp v0944.py
```python
import pandas as pd
import numpy as np
import xlrd # Required dependency for pd.read_excel
import re # for some string manipulation with regex
import ast
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import dash_bootstrap_components as dbc
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
### Import the prepped input data
df_full = pd.read_csv('./db/preppeddata.csv')
### Import the picker settings
df_picker_in = pd.read_excel('./db/inputpickerlist.xlsx', sheet_name="pickersettings")
# Sort sectors with LULUCF at the bottom, others on top
df_full['sectorsorted']=df_full['sector']
df_full.loc[(df_full['sector']=='LULUCF'), 'sectorsorted'] = '0 LULUCF'
df_full.loc[(df_full['sector']=='Electricity generation'), 'sectorsorted'] = '1 Electricity generation'
df_full.loc[(df_full['sector']=='Residential'), 'sectorsorted'] = '2 Residential'
df_full = df_full.sort_values(['sectorsorted', 'year'], ascending=[True, True])
# Define list of states
statelist = ['National', 'ACT', 'NSW', 'NT', 'QLD', 'SA', 'TAS', 'VIC', 'WA']
smallnumberstates = ['ACT', 'NT', 'TAS']
# Define list of picker names: this will be used in a loop to create a dictionary of values to set picker settings for each state
# We don't need picker names/values for 2031-2050 here as default values are the same for both periods
pickerlist = ['services_emis_picker1930','mining_emis_picker1930','manufacturing_emis_picker1930','gas_water_waste_emis_picker1930','construction_emis_picker1930',
'com_transp_emis_picker1930','agrifor_emis_picker1930','electricity_emis_picker1930','residential_emis_picker1930',
'lulucf_emis_pickerbase', 'lulucf_emis_pickergrow',
'services_valadd_picker','mining_valadd_picker','manufacturing_valadd_picker','gas_water_waste_valadd_picker',
'construction_valadd_picker','com_transp_valadd_picker','agrifor_valadd_picker','electricity_valadd_picker',
'electricity_growth_picker']
# Define list of picker settings: this will be used in a loop to create a dictionary of values to set picker settings for each state
pickersettinglist = ['value', 'steps']
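# The tab callback below combines these two lists into dictionary keys such as
# 'services_emis_picker1930_value' and 'services_emis_picker1930_steps'.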
# Define list of colors for the sectors
my_discrete_color_map={"LULUCF": '#8C564B',
"Residential": '#FF7F0E',
"Electricity generation":'#D62728',
"Agriculture & Forestry":'#2CA02C',
"Agriculture":'#2CA02C',
"Commercial Transport": '#9467BD',
"Construction": '#1F77B4',
"Gas, Water & Waste Services":'#E377C2',
"Manufacturing":'#BCBD22',
"Mining":'#7F7F7F',
"Services": '#17BECF'}
# Define all the notes
services_emis_note_text = "Emissions for ANZSIC industry divisions DIV F - H, J - S Commercial Services, as reported in the State and Territory Greenhouse Gas Inventories"
mining_emis_note_text = "Emissions for ANZSIC industry division DIV B Mining, as reported in the State and Territory Greenhouse Gas Inventories"
manufacturing_emis_note_text = "Emissions for ANZSIC industry division DIV C Manufacturing, as reported in the State and Territory Greenhouse Gas Inventories"
gas_water_waste_emis_note_text = "Emissions for ANZSIC industry division DIV D Electricity, Gas, Water and Waste Services, as reported in the State and Territory Greenhouse Gas Inventories, minus emissions reported under electricity generation"
construction_emis_note_text = "Emissions for ANZSIC industry division DIV E Construction, as reported in the State and Territory Greenhouse Gas Inventories"
com_transp_emis_note_text = "Emissions for ANZSIC industry division DIV I Transport, Postal and Warehousing, as reported in the State and Territory Greenhouse Gas Inventories"
agriculture_emis_note_text = "Emissions as reported in the National Greenhouse Gas Inventory – UNFCCC classifications - 3 Agriculture"
residential_emis_note_text = "Residential emissions including private transport, as reported in the State and Territory Greenhouse Gas Inventories"
electricity_emis_note_text = "Emissions as reported via NEMSight, or via the Energy Update 2020 for Australia, NT, and WA. Assumed zero for the ACT."
total_emis_note_text = "Total emissions from all the activities listed above"
gross_emis_note_text = "These are the remaining emissions from all economic activities. Small levels of remaining emissions by 2050 can be compensated with LULUCF or negative emission technologies."
lulucf_note_text = "LULUCF is short for land-use, land use change and forestry. Negative emission technologies include e.g., carbon capture and storage (CCS). These processes can extract carbon dioxide from the air and store them in a sink, for example increased vegetation. Data on historical LULUCF levels as reported in the National Greenhouse Gas Inventory – UNFCCC classifications - 4 LULUCF."
lulucf_emis_note_text = "Here you can set the expected baseline LULUCF emissions, as a constant number for the entire period 2019 to 2050."
lulucf_emis_growth_note_text = "Here you can set how rapidly you expect LULUCF and negative emission technologies to grow each year."
net_emis_note_text = "These are the gross emissions plus LULUCF & Negative emission technologies. Scientific consensus is that this number needs to get to zero by 2050 in order to limit global warming to 1.5 degrees."
services_valadd_note_text = "Value added for ANZSIC industry division Agriculture, forestry and fishing"
mining_valadd_note_text = "Value added for ANZSIC industry division Mining"
manufacturing_valadd_note_text = "Value added for ANZSIC industry division Manufacturing"
gas_water_waste_valadd_note_text = "Value added for ANZSIC industry sub-divisions 27 Gas supply, 28 Water supply, sewerage and drainage services"
construction_valadd_note_text = "Value added for ANZSIC industry division Construction"
com_transp_valadd_note_text = "Value added for ANZSIC industry division Transport, Postal and Warehousing"
agriculture_valadd_note_text = "Value added for ANZSIC industry division Agriculture, forestry and fishing. Note that emissions reported above are for Agriculture only. For the calculation of emission intensity, the emissions for the Agricultural sector only are divided by the total value added for the three sub-divisions Agriculture, forestry and fishing."
electricity_valadd_note_text = "Value added for ANZSIC industry sub-divisions 26 Electricity supply"
total_valadd_note_text = "Total value added for all sectors listed above"
emis_red_note_text = "Emission reductions are reported here as negative numbers. Positive numbers mean emissions increased compared to 2005 levels."
####################### HTML divs styles for the overall layout ######################
# #rgba(242, 241, 239, 1)
# #f8f9fa
# the style arguments for the header
my_header_style = {
"width": "100%",
"padding": "0 2% 0 2%",
"color": "rgba(0,0,139,1)",
"background-color": "#f8f9fa",
}
# the style arguments for the subheader with the tool explanation
my_subheader_style = {
"width": "100%",
"padding": "0 2% 0 2%",
"background-color": "#f8f9fa",
}
# the style arguments for the sticky tab list
my_tablist_style = {
"position": "sticky",
"top": 0,
"background-color": "#f8f9fa",
'zIndex': 9999,
}
# the style arguments for the sidebar. Sticky on top: scrolls until 50px from top
my_left_pane_style = {
"position": "sticky",
"top": 37,
"width": "55%",
"background-color": "#f8f9fa",
}
# the styles for the main content: position it to the right of the sidebar and
# add some padding.
my_right_pane_style = {
"position": "relative",
"top": -804,
"margin-left": "53.7%",
"background-color": "#ffffff",
}
# the style that fills the whole screen essentially
my_envelop_style = {
"width": "100%",
"max-width":"1536px",
"margin": "auto",
"background-color": "#f8f9fa"
}
my_background_style = {
"width": "100%",
"background-color": "#ffffff",
"height": "768px",
"position": "sticky",
}
### List of starting figures and other output
# These need to be deifned prior to the app layout
# But will be created during update in the callback, inlcuding layout
## Emissions figure
fig_emissions_total = go.Figure()
## Added value figure
fig_added_value_total = go.Figure()
## Emission intensity figure
fig_emis_int = go.Figure()
## Electricity generation and carbon intensity
# A bit special because of the dual axes
fig_elec_gen_int = make_subplots(specs=[[{"secondary_y": True}]])
## Population and per capita emissions
fig_pop_per_capita = make_subplots(specs=[[{"secondary_y": True}]])
## Emission intesnity index figure
fig_emis_int_index = make_subplots(specs=[[{"secondary_y": False}]])
### Define the app
# Note an additional stylesheet is loaded locally, see assets/bootstrap_modified.css
app = dash.Dash(__name__)
### App layout elements
header = html.Div(style=my_header_style, children=[
html.Div(html.H1('Net-zero 2050 emissions pathway tool for Australia'))
])
subheader = html.Div(style=my_subheader_style, className='no-print', children=[
html.Div(html.H3('With this tool you can develop pathways to reach net-zero emissions for Australia by 2050, a target considered necessary to keep global warming below 1.5 degrees.')),
html.Div(html.H3('In each of the tabs below, you can make such trajectories separately for each State or Territory.')),
html.Div(html.H3('You can make changes to the annual emissions growth, for both the near and long-term for each sector, and see how much closer this gets us to net-zero by 2050.')),
    html.Div(html.H3('Note that in the figures, you can click on the name of a sector in the legend to make it disappear from the results, or double click on the name to see the results for that sector only.')),
html.Div(html.H3("For more explanation on how to use this tool, and how it was developed, see the 'About' page.")),
html.Div(html.H3("For more information on ANU's research on energy transitions and long-term emissisons strategies, see the 'Reports' page."),style={"padding-bottom": "0.3rem"}),
html.Div(html.H3('')),
])
tabheader = html.Div(style=my_tablist_style, className='no-print', children=[ # backgroundColor here is for the whole webpage
dbc.Container([
dcc.Tabs(id='tabslist', value='National', children=[
dcc.Tab(label='Australia', value='National'),
dcc.Tab(label='ACT', value='ACT'),
dcc.Tab(label='NSW', value='NSW'),
dcc.Tab(label='NT', value='NT'),
dcc.Tab(label='QLD', value='QLD'),
dcc.Tab(label='SA', value='SA'),
dcc.Tab(label='TAS', value='TAS'),
dcc.Tab(label='VIC', value='VIC'),
dcc.Tab(label='WA', value='WA'),
dcc.Tab(label='About', value='about'),
dcc.Tab(label='Reports', value='reports')
]),
], fluid=True, style={"padding":"0px 0px 0px 0px"}),
])
left_pane_io = html.Div(style=my_left_pane_style, children=[
dbc.Container([
html.Div(id='left-pane-output')
], fluid=True),
])
right_pane_figs = html.Div(style=my_right_pane_style, children=[
dbc.Container([
html.Div(id='right-pane-output')
], fluid=True),
])
### Define the app layout with tabs: content 'right-pane-output' is generated based on the tab selection
#app.layout = html.Div([dcc.Location(id="url"), header, sidebar, content])
app.layout = html.Div([html.Div([header, subheader, tabheader, left_pane_io, right_pane_figs], style=my_envelop_style)], style=my_background_style)
### Define app content based on tab choice.
### The picker value selection is a separate callback, below this block
@app.callback(Output('left-pane-output', 'children'),
[Input('tabslist', 'value')])
def render_sidebar(tab):
if tab in statelist:
        ## get the rows of data for this geo: this will be used to dynamically fill the pathway result table
df_select = df_full[(df_full['geo']==tab) & (df_full['year']>=2005) & (df_full['sector']!="Overall")]
## Loop to get the right picker settings for each state and type of picker
df_pickerselect = df_picker_in[(df_picker_in['geo']==tab)]
df_pickerselect = df_pickerselect.set_index('picker')
pickersetting_dict = {}
for pickername in pickerlist:
for pickersetting in pickersettinglist:
pickersetting_dict[pickername + '_' + pickersetting] = df_pickerselect._get_value(pickername, pickersetting)
return html.Div([
dbc.Container([
dbc.Row([
dbc.Col((html.Div(html.H6(' '))), width=12),
],style={"background-color": "#f8f9fa"}),
dbc.Row([
dbc.Col((html.Div(html.Strong('Emissions'), style={"line-height": "1"})), width=3),
                    dbc.Col((html.Div(html.H4(['Annual emissions growth (Mt CO',html.Sub('2'),'-eq)']))), width=6, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4('Emissions reductions (% vs. 2005)', id='emis_red_note')),dbc.Tooltip(emis_red_note_text, target='emis_red_note',placement='right')), width=7, style={'text-align': 'center'}),
]),
dbc.Row([
dbc.Col((html.Div(html.H5(''))), width=3),
dbc.Col((html.Div(html.H5('Historical'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H5('Near-term'))), width=2, style={"text-align": "center"}),
dbc.Col((html.Div(html.H5('Long-term'))), width=2, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(['and total 2018 & 2050 emissions (Mt CO',html.Sub('2'),'-eq)']))), width=7, style={'text-align': 'center', "margin-top": "-0.3rem"}),
]),
dbc.Row([
dbc.Col((html.Div(html.H5(''))), width=3),
dbc.Col((html.Div(html.H5("2009 - 2018"))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H5('2019 - 2030'))), width=2, style={"text-align": "center"}),
dbc.Col((html.Div(html.H5('2031 - 2050'))), width=2, style={"text-align": "center"}),
dbc.Col((html.Div(html.H5('2018'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H5('2018'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H5('2030'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H5('2040'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H5('2050'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H5('2050 Mt'))), width=1, style={"text-align": "center"}),
]),
dbc.Row([
dbc.Col((html.Div(html.H4('Services', id='services_emis_note')),dbc.Tooltip(services_emis_note_text, target='services_emis_note',placement='right')), width=3),
dbc.Col((html.Div(html.H4([pickersetting_dict['services_emis_picker1930_value'],' Mt']))), width=1, style={'text-align': 'center'}),
dbc.Col((html.Div(dbc.Input(id='services_emis_picker1930', type="number", bs_size="sm", value=pickersetting_dict['services_emis_picker1930_value'], step=pickersetting_dict['services_emis_picker1930_steps']))), width=2, style={"text-align": "center", "padding-top":"0.15rem"}),
dbc.Col((html.Div(dbc.Input(id='services_emis_picker3150', type="number", bs_size="sm", value=pickersetting_dict['services_emis_picker1930_value'], step=pickersetting_dict['services_emis_picker1930_steps']))), width=2, style={"text-align": "center", "padding-top":"0.15rem"}),
dbc.Col((html.Div(html.H4(id='services_emis_2018'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='services_emisred_2018'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='services_emisred_2030'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='services_emisred_2040'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='services_emisred_2050'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='services_emis_2050'))), width=1, style={"text-align": "center"}),
],style={"background-color": "rgba(23,190,207,0.5)"}),
dbc.Row([
dbc.Col((html.Div(html.H4('Mining', id='mining_emis_note')),dbc.Tooltip(mining_emis_note_text, target='mining_emis_note',placement='right')), width=3),
dbc.Col((html.Div(html.H4([pickersetting_dict['mining_emis_picker1930_value'],' Mt']))), width=1, style={'text-align': 'center'}),
dbc.Col((html.Div(dbc.Input(id='mining_emis_picker1930', type="number", bs_size="sm", value=pickersetting_dict['mining_emis_picker1930_value'], step=pickersetting_dict['mining_emis_picker1930_steps']))), width=2, style={"text-align": "center", "padding-top":"0.15rem"}),
dbc.Col((html.Div(dbc.Input(id='mining_emis_picker3150', type="number", bs_size="sm", value=pickersetting_dict['mining_emis_picker1930_value'], step=pickersetting_dict['mining_emis_picker1930_steps']))), width=2, style={"text-align": "center", "padding-top":"0.15rem"}),
dbc.Col((html.Div(html.H4(id='mining_emis_2018'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='mining_emisred_2018'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='mining_emisred_2030'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='mining_emisred_2040'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='mining_emisred_2050'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='mining_emis_2050'))), width=1, style={"text-align": "center"}),
],style={"background-color": "rgba(127,127,127,0.5)"}),
dbc.Row([
dbc.Col((html.Div(html.H4('Manufacturing', id='manufacturing_emis_note')),dbc.Tooltip(manufacturing_emis_note_text, target='manufacturing_emis_note',placement='right')), width=3),
dbc.Col((html.Div(html.H4([pickersetting_dict['manufacturing_emis_picker1930_value'],' Mt']))), width=1, style={'text-align': 'center'}),
dbc.Col((html.Div(dbc.Input(id='manufacturing_emis_picker1930', type="number", bs_size="sm", value=pickersetting_dict['manufacturing_emis_picker1930_value'], step=pickersetting_dict['manufacturing_emis_picker1930_steps']))), width=2, style={"text-align": "center", "padding-top":"0.15rem"}),
dbc.Col((html.Div(dbc.Input(id='manufacturing_emis_picker3150', type="number", bs_size="sm", value=pickersetting_dict['manufacturing_emis_picker1930_value'], step=pickersetting_dict['manufacturing_emis_picker1930_steps']))), width=2, style={"text-align": "center", "padding-top":"0.15rem"}),
dbc.Col((html.Div(html.H4(id='manufacturing_emis_2018'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='manufacturing_emisred_2018'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='manufacturing_emisred_2030'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='manufacturing_emisred_2040'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='manufacturing_emisred_2050'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='manufacturing_emis_2050'))), width=1, style={"text-align": "center"}),
],style={"background-color": "rgba(188,189,34,0.5)"}),
dbc.Row([
dbc.Col((html.Div(html.H4('Gas, water & waste services', id='gas_water_waste_emis_note')),dbc.Tooltip(gas_water_waste_emis_note_text, target='gas_water_waste_emis_note',placement='right')), width=3),
dbc.Col((html.Div(html.H4([pickersetting_dict['gas_water_waste_emis_picker1930_value'],' Mt']))), width=1, style={'text-align': 'center'}),
dbc.Col((html.Div(dbc.Input(id='gas_water_waste_emis_picker1930', type="number", bs_size="sm", value=pickersetting_dict['gas_water_waste_emis_picker1930_value'], step=pickersetting_dict['gas_water_waste_emis_picker1930_steps']))), width=2, style={"text-align": "center", "padding-top":"0.15rem"}),
dbc.Col((html.Div(dbc.Input(id='gas_water_waste_emis_picker3150', type="number", bs_size="sm", value=pickersetting_dict['gas_water_waste_emis_picker1930_value'], step=pickersetting_dict['gas_water_waste_emis_picker1930_steps']))), width=2, style={"text-align": "center", "padding-top":"0.15rem"}),
dbc.Col((html.Div(html.H4(id='gas_water_waste_emis_2018'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='gas_water_waste_emisred_2018'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='gas_water_waste_emisred_2030'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='gas_water_waste_emisred_2040'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='gas_water_waste_emisred_2050'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='gas_water_waste_emis_2050'))), width=1, style={"text-align": "center"}),
],style={"background-color": "rgba(227,119,194,0.5)"}),
dbc.Row([
dbc.Col((html.Div(html.H4('Construction', id='construction_emis_note')),dbc.Tooltip(construction_emis_note_text, target='construction_emis_note',placement='right')), width=3),
dbc.Col((html.Div(html.H4([pickersetting_dict['construction_emis_picker1930_value'],' Mt']))), width=1, style={'text-align': 'center'}),
dbc.Col((html.Div(dbc.Input(id='construction_emis_picker1930', type="number", bs_size="sm", value=pickersetting_dict['construction_emis_picker1930_value'], step=pickersetting_dict['construction_emis_picker1930_steps']))), width=2, style={"text-align": "center", "padding-top":"0.15rem"}),
dbc.Col((html.Div(dbc.Input(id='construction_emis_picker3150', type="number", bs_size="sm", value=pickersetting_dict['construction_emis_picker1930_value'], step=pickersetting_dict['construction_emis_picker1930_steps']))), width=2, style={"text-align": "center", "padding-top":"0.15rem"}),
dbc.Col((html.Div(html.H4(id='construction_emis_2018'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='construction_emisred_2018'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='construction_emisred_2030'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='construction_emisred_2040'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='construction_emisred_2050'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='construction_emis_2050'))), width=1, style={"text-align": "center"}),
],style={"background-color": "rgba(31,119,180,0.5)"}),
dbc.Row([
dbc.Col((html.Div(html.H4('Commercial transport', id='com_transp_emis_note')),dbc.Tooltip(com_transp_emis_note_text, target='com_transp_emis_note',placement='right')), width=3),
dbc.Col((html.Div(html.H4([pickersetting_dict['com_transp_emis_picker1930_value'],' Mt']))), width=1, style={'text-align': 'center'}),
dbc.Col((html.Div(dbc.Input(id='com_transp_emis_picker1930', type="number", bs_size="sm", value=pickersetting_dict['com_transp_emis_picker1930_value'], step=pickersetting_dict['com_transp_emis_picker1930_steps']))), width=2, style={"text-align": "center", "padding-top":"0.15rem"}),
dbc.Col((html.Div(dbc.Input(id='com_transp_emis_picker3150', type="number", bs_size="sm", value=pickersetting_dict['com_transp_emis_picker1930_value'], step=pickersetting_dict['com_transp_emis_picker1930_steps']))), width=2, style={"text-align": "center", "padding-top":"0.15rem"}),
dbc.Col((html.Div(html.H4(id='com_transp_emis_2018'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='com_transp_emisred_2018'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='com_transp_emisred_2030'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='com_transp_emisred_2040'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='com_transp_emisred_2050'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='com_transp_emis_2050'))), width=1, style={"text-align": "center"}),
],style={"background-color": "rgba(148,103,189,0.5)"}),
dbc.Row([
dbc.Col((html.Div(html.H4('Agriculture', id='agriculture_emis_note')),dbc.Tooltip(agriculture_emis_note_text, target='agriculture_emis_note',placement='right')), width=3),
dbc.Col((html.Div(html.H4([pickersetting_dict['agrifor_emis_picker1930_value'],' Mt']))), width=1, style={'text-align': 'center'}),
dbc.Col((html.Div(dbc.Input(id='agrifor_emis_picker1930', type="number", bs_size="sm", value=pickersetting_dict['agrifor_emis_picker1930_value'], step=pickersetting_dict['agrifor_emis_picker1930_steps']))), width=2, style={"text-align": "center", "padding-top":"0.15rem"}),
dbc.Col((html.Div(dbc.Input(id='agrifor_emis_picker3150', type="number", bs_size="sm", value=pickersetting_dict['agrifor_emis_picker1930_value'], step=pickersetting_dict['agrifor_emis_picker1930_steps']))), width=2, style={"text-align": "center", "padding-top":"0.15rem"}),
dbc.Col((html.Div(html.H4(id='agrifor_emis_2018'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='agrifor_emisred_2018'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='agrifor_emisred_2030'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='agrifor_emisred_2040'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='agrifor_emisred_2050'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='agrifor_emis_2050'))), width=1, style={"text-align": "center"}),
],style={"background-color": "rgba(44,160,44,0.5)"}),
dbc.Row([
dbc.Col((html.Div(html.H4('Residential', id='residential_emis_note')),dbc.Tooltip(residential_emis_note_text, target='residential_emis_note',placement='right')), width=3),
dbc.Col((html.Div(html.H4([pickersetting_dict['residential_emis_picker1930_value'],' Mt']))), width=1, style={'text-align': 'center'}),
dbc.Col((html.Div(dbc.Input(id='residential_emis_picker1930', type="number", bs_size="sm", value=pickersetting_dict['residential_emis_picker1930_value'], step=pickersetting_dict['residential_emis_picker1930_steps']))), width=2, style={"text-align": "center", "padding-top":"0.15rem"}),
dbc.Col((html.Div(dbc.Input(id='residential_emis_picker3150', type="number", bs_size="sm", value=pickersetting_dict['residential_emis_picker1930_value'], step=pickersetting_dict['residential_emis_picker1930_steps']))), width=2, style={"text-align": "center", "padding-top":"0.15rem"}),
dbc.Col((html.Div(html.H4(id='residential_emis_2018'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='residential_emisred_2018'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='residential_emisred_2030'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='residential_emisred_2040'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='residential_emisred_2050'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='residential_emis_2050'))), width=1, style={"text-align": "center"}),
],style={"background-color": "rgba(255,127,14,0.5)"}),
dbc.Row([
dbc.Col((html.Div(html.H4('Electricity generation', id='electricity_emis_note')),dbc.Tooltip(electricity_emis_note_text, target='electricity_emis_note',placement='right')), width=3),
dbc.Col((html.Div(html.H4([pickersetting_dict['electricity_emis_picker1930_value'],' Mt']))), width=1, style={'text-align': 'center'}),
dbc.Col((html.Div(dbc.Input(id='electricity_emis_picker1930', type="number", bs_size="sm", value=pickersetting_dict['electricity_emis_picker1930_value'], step=pickersetting_dict['electricity_emis_picker1930_steps']))), width=2, style={"text-align": "center", "padding-top":"0.15rem"}),
dbc.Col((html.Div(dbc.Input(id='electricity_emis_picker3150', type="number", bs_size="sm", value=pickersetting_dict['electricity_emis_picker1930_value'], step=pickersetting_dict['electricity_emis_picker1930_steps']))), width=2, style={"text-align": "center", "padding-top":"0.15rem"}),
dbc.Col((html.Div(html.H4(id='electricity_emis_2018'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='electricity_emisred_2018'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='electricity_emisred_2030'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='electricity_emisred_2040'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='electricity_emisred_2050'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='electricity_emis_2050'))), width=1, style={"text-align": "center"}),
],style={"background-color": "rgba(214,39,40,0.5)"}),
dbc.Row([
dbc.Col((html.Div(html.H4('Total', id='total_emis_note')),dbc.Tooltip(total_emis_note_text, target='total_emis_note',placement='right')), width=3),
dbc.Col((html.Div(html.H4(id='total_emisred_Mt_hist'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='total_emisred_Mt_1930'))), width=2, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='total_emisred_Mt_3150'))), width=2, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='gross_emis_2018copy'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='total_emisred_2018'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='total_emisred_2030'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='total_emisred_2040'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='total_emisred_2050'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='gross_emis_2050copy'))), width=1, style={"text-align": "center"}),
],style={"background-color": "rgba(31, 119, 180, 0.8)"}),
dbc.Row([
dbc.Col((html.Div(html.H6(' '))), width=12),
],style={"background-color": "#f8f9fa"}),
dbc.Row([
dbc.Col((html.Div(html.H4('Gross emissions', id='gross_emis_note')),dbc.Tooltip(gross_emis_note_text, target='gross_emis_note',placement='right')), width=3),
dbc.Col((html.Div(html.H4(''))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(''))), width=2, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(''))), width=2, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='gross_emis_2018'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(''))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='gross_emis_2030'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='gross_emis_2040'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(''))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='gross_emis_2050'))), width=1, style={"text-align": "center"}),
],style={"background-color": "rgba(31, 119, 180, 0.6)"}),
dbc.Row([
dbc.Col((html.Div(html.H4('LULUCF & Negative emission technologies', id='lulucf_note')),dbc.Tooltip(lulucf_note_text, target='lulucf_note',placement='right')), width=8),
dbc.Col((html.Div(html.H4(''))), width=2, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(''))), width=2, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='LULUCF_2018'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(''))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='LULUCF_2030'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='LULUCF_2040'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(''))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='LULUCF_2050'))), width=1, style={"text-align": "center"}),
],style={"background-color": "rgba(140,86,75,0.5)"}),
dbc.Row([
dbc.Col((html.Div(html.H4('Baseline (Mt/y)', id='lulucf_base_note')),dbc.Tooltip(lulucf_emis_note_text, target='lulucf_base_note',placement='right')), width=3),
dbc.Col((html.Div(html.H4([pickersetting_dict['lulucf_emis_pickerbase_value'],' Mt']))), width=1, style={'text-align': 'center'}),
dbc.Col((html.Div(dbc.Input(id='lulucf_emis_pickerbase1930', type="number", bs_size="sm", value=0, step=pickersetting_dict['lulucf_emis_pickergrow_steps']))), width=2, style={"text-align": "center", "padding-top":"0.15rem"}),
dbc.Col((html.Div(dbc.Input(id='lulucf_emis_pickerbase3150', type="number", bs_size="sm", value=0, step=pickersetting_dict['lulucf_emis_pickergrow_steps']))), width=2, style={"text-align": "center", "padding-top":"0.15rem"}),
dbc.Col((html.Div(html.H4(''))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(''))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(''))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(''))), width=1, style={"text-align": "center"}),
],style={"background-color": "rgba(140,86,75,0.5)"}),
dbc.Row([
dbc.Col((html.Div(html.H4('Annual growth (Mt/y)', id='lulucf_growth_note')),dbc.Tooltip(lulucf_emis_growth_note_text, target='lulucf_growth_note',placement='right')), width=3),
dbc.Col((html.Div(html.H4([pickersetting_dict['lulucf_emis_pickergrow_value'],' Mt']))), width=1, style={'text-align': 'center'}),
dbc.Col((html.Div(dbc.Input(id='lulucf_emis_pickergrow1930', type="number", bs_size="sm", value=0, step=pickersetting_dict['lulucf_emis_pickergrow_steps']))), width=2, style={"text-align": "center", "padding-top":"0.15rem"}),
dbc.Col((html.Div(dbc.Input(id='lulucf_emis_pickergrow3150', type="number", bs_size="sm", value=0, step=pickersetting_dict['lulucf_emis_pickergrow_steps']))), width=2, style={"text-align": "center", "padding-top":"0.15rem"}),
dbc.Col((html.Div(html.H4(''))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(''))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(''))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(''))), width=1, style={"text-align": "center"}),
],style={"background-color": "rgba(140,86,75,0.5)"}),
dbc.Row([
dbc.Col((html.Div(html.H4(html.Strong('Net emissions', id='net_emis_note'))),dbc.Tooltip(net_emis_note_text, target='net_emis_note',placement='right')), width=3),
dbc.Col((html.Div(html.H4(''))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(''))), width=2, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(''))), width=2, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(html.Strong(id='net_emis_2018')))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(''))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(html.Strong(id='net_emis_2030')))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(html.Strong(id='net_emis_2040')))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(''))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(html.Strong(id='net_emis_2050')))), width=1, style={"text-align": "center"}),
],style={"background-color": "rgba(31, 119, 180, 0.8)"}),
dbc.Row([
                    dbc.Col((html.Div(html.H4(html.Strong('Net emission reductions (% vs. 2005)', id='net_emisred_note'))),dbc.Tooltip(net_emis_note_text, target='net_emisred_note',placement='right')), width=8),  # assumed distinct id to avoid duplicating 'net_emis_note'
dbc.Col((html.Div(html.H4(''))), width=2, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(''))), width=2, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(html.Strong(id='net_emisred_2018')))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(''))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(html.Strong(id='net_emisred_2030')))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(html.Strong(id='net_emisred_2040')))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(''))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(html.Strong(id='net_emisred_2050')))), width=1, style={"text-align": "center"}),
],style={"background-color": "rgba(31, 119, 180, 0.8)"}),
dbc.Row([
dbc.Col((html.Div(html.H4(''))), width=12),
],style={"background-color": "#f8f9fa"}),
dbc.Row([
dbc.Col((html.Div(html.H4(''))), width=12),
],style={"background-color": "#f8f9fa"}),
dbc.Row([
dbc.Col((html.Div(html.Strong('Emission intensity'))), width=3),
dbc.Col((html.Div(html.H4('Electricity generation growth (%/y)'))), width=6, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(['Carbon intensity of electricity generation (g CO',html.Sub('2'),'/kWh)']))), width=7, style={"text-align": "center"}),
]),
dbc.Row([
dbc.Col((html.Div(html.H5(''))), width=3),
dbc.Col((html.Div(html.H5("2009 - 2018"))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H5('2019 - 2030'))), width=2, style={"text-align": "center"}),
dbc.Col((html.Div(html.H5('2031 - 2050'))), width=2, style={"text-align": "center"}),
dbc.Col((html.Div(html.H5('2018'))), width=5, style={"text-align": "center"}),
dbc.Col((html.Div(html.H5('2030'))), width=5, style={"text-align": "center"}),
dbc.Col((html.Div(html.H5('2040'))), width=5, style={"text-align": "center"}),
dbc.Col((html.Div(html.H5('2050'))), width=5, style={"text-align": "center"}),
]),
dbc.Row([
dbc.Col((html.Div(html.H4('Electricity generation'))), width=3),
dbc.Col((html.Div(html.H4([pickersetting_dict['electricity_growth_picker_value'], '%']))), width=1, style={'text-align': 'center'}),
dbc.Col((html.Div(dbc.Input(id='electricity_growth_picker', type="number", bs_size="sm", value=pickersetting_dict['electricity_growth_picker_value'], step=pickersetting_dict['electricity_growth_picker_steps']))), width=2, style={"text-align": "center", "padding-top":"0.15rem"}),
dbc.Col((html.Div(dbc.Input(id='electricity_growth_picker', type="number", bs_size="sm", value=pickersetting_dict['electricity_growth_picker_value'], step=pickersetting_dict['electricity_growth_picker_steps']))), width=2, style={"text-align": "center", "padding-top":"0.15rem"}),
dbc.Col((html.Div(html.H4(id='elec_carb_int_2018'))), width=5, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='elec_carb_int_2030'))), width=5, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='elec_carb_int_2040'))), width=5, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='elec_carb_int_2050'))), width=5, style={"text-align": "center"}),
],style={"background-color": "rgba(214,39,40,0.5)"}),
dbc.Row([
dbc.Col((html.Div(html.H4(''))), width=12),
],style={"background-color": "#f8f9fa"}),
dbc.Row([
dbc.Col((html.Div(html.H4(''))), width=12),
],style={"background-color": "#f8f9fa"}),
dbc.Row([
dbc.Col((html.Div(html.Strong(''))), width=3),
dbc.Col((html.Div(html.H4('Value added growth (%/y)'))), width=6),
dbc.Col((html.Div(html.H4('Emission intensity changes (%/y)'))), width=7),
]),
dbc.Row([
dbc.Col((html.Div(html.H5(''))), width=3),
dbc.Col((html.Div(html.H5("2009 - 2018"))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H5('2019 - 2050'))), width=2, style={"text-align": "center"}),
dbc.Col((html.Div(html.H5(''))), width=2, style={"text-align": "center"}),
dbc.Col((html.Div(html.H5('2009 - 2018'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H5('2019 - 2030'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H5('2031 - 2050'))), width=1, style={"text-align": "center"}),
]),
dbc.Row([
dbc.Col((html.Div(html.H4('Services', id='services_valadd_note')),dbc.Tooltip(services_valadd_note_text, target='services_valadd_note',placement='right')), width=3),
dbc.Col((html.Div(html.H4([pickersetting_dict['services_valadd_picker_value'], '%']))), width=1, style={'text-align': 'center'}),
dbc.Col((html.Div(dbc.Input(id='services_valadd_picker', type="number", bs_size="sm", value=pickersetting_dict['services_valadd_picker_value'], step=pickersetting_dict['services_valadd_picker_steps']))), width=2, style={"text-align": "center", "padding-top":"0.15rem"}),
dbc.Col((html.Div(html.H4(''))), width=2, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='services_emisint_red_2018'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='services_emisint_red_2030'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='services_emisint_red_2050'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(''))), width=1, style={"background-color": "#f8f9fa"}),
dbc.Col((html.Div(html.H4(''))), width=1, style={"background-color": "#f8f9fa"}),
dbc.Col((html.Div(html.H4(''))), width=1, style={"background-color": "#f8f9fa"}),
],style={"background-color": "rgba(23,190,207,0.5)"}),
dbc.Row([
dbc.Col((html.Div(html.H4('Mining', id='mining_valadd_note')),dbc.Tooltip(mining_valadd_note_text, target='mining_valadd_note',placement='right')), width=3),
dbc.Col((html.Div(html.H4([pickersetting_dict['mining_valadd_picker_value'], '%']))), width=1, style={'text-align': 'center'}),
dbc.Col((html.Div(dbc.Input(id='mining_valadd_picker', type="number", bs_size="sm", value=pickersetting_dict['mining_valadd_picker_value'], step=pickersetting_dict['mining_valadd_picker_steps']))), width=2, style={"text-align": "center", "padding-top":"0.15rem"}),
dbc.Col((html.Div(html.H4(''))), width=2, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='mining_emisint_red_2018'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='mining_emisint_red_2030'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='mining_emisint_red_2050'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(''))), width=1, style={"background-color": "#f8f9fa"}),
dbc.Col((html.Div(html.H4(''))), width=1, style={"background-color": "#f8f9fa"}),
dbc.Col((html.Div(html.H4(''))), width=1, style={"background-color": "#f8f9fa"}),
],style={"background-color": "rgba(127,127,127,0.5)"}),
dbc.Row([
dbc.Col((html.Div(html.H4('Manufacturing', id='manufacturing_valadd_note')),dbc.Tooltip(manufacturing_valadd_note_text, target='manufacturing_valadd_note',placement='right')), width=3),
dbc.Col((html.Div(html.H4([pickersetting_dict['manufacturing_valadd_picker_value'], '%']))), width=1, style={'text-align': 'center'}),
dbc.Col((html.Div(dbc.Input(id='manufacturing_valadd_picker', type="number", bs_size="sm", value=pickersetting_dict['manufacturing_valadd_picker_value'], step=pickersetting_dict['manufacturing_valadd_picker_steps']))), width=2, style={"text-align": "center", "padding-top":"0.15rem"}),
dbc.Col((html.Div(html.H4(''))), width=2, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='manufacturing_emisint_red_2018'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='manufacturing_emisint_red_2030'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='manufacturing_emisint_red_2050'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(''))), width=1, style={"background-color": "#f8f9fa"}),
dbc.Col((html.Div(html.H4(''))), width=1, style={"background-color": "#f8f9fa"}),
dbc.Col((html.Div(html.H4(''))), width=1, style={"background-color": "#f8f9fa"}),
],style={"background-color": "rgba(188,189,34,0.5)"}),
dbc.Row([
dbc.Col((html.Div(html.H4('Gas, water & waste services', id='gas_water_waste_valadd_note')),dbc.Tooltip(gas_water_waste_valadd_note_text, target='gas_water_waste_valadd_note',placement='right')), width=3),
dbc.Col((html.Div(html.H4([pickersetting_dict['gas_water_waste_valadd_picker_value'], '%']))), width=1, style={'text-align': 'center'}),
dbc.Col((html.Div(dbc.Input(id='gas_water_waste_valadd_picker', type="number", bs_size="sm", value=pickersetting_dict['gas_water_waste_valadd_picker_value'], step=pickersetting_dict['gas_water_waste_valadd_picker_steps']))), width=2, style={"text-align": "center", "padding-top":"0.15rem"}),
dbc.Col((html.Div(html.H4(''))), width=2, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='gas_water_waste_emisint_red_2018'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='gas_water_waste_emisint_red_2030'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='gas_water_waste_emisint_red_2050'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(''))), width=1, style={"background-color": "#f8f9fa"}),
dbc.Col((html.Div(html.H4(''))), width=1, style={"background-color": "#f8f9fa"}),
dbc.Col((html.Div(html.H4(''))), width=1, style={"background-color": "#f8f9fa"}),
],style={"background-color": "rgba(227,119,194,0.5)"}),
dbc.Row([
dbc.Col((html.Div(html.H4('Construction', id='construction_valadd_note')),dbc.Tooltip(construction_valadd_note_text, target='construction_valadd_note',placement='right')), width=3),
dbc.Col((html.Div(html.H4([pickersetting_dict['construction_valadd_picker_value'], '%']))), width=1, style={'text-align': 'center'}),
dbc.Col((html.Div(dbc.Input(id='construction_valadd_picker', type="number", bs_size="sm", value=pickersetting_dict['construction_valadd_picker_value'], step=pickersetting_dict['construction_valadd_picker_steps']))), width=2, style={"text-align": "center", "padding-top":"0.15rem"}),
dbc.Col((html.Div(html.H4(''))), width=2, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='construction_emisint_red_2018'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='construction_emisint_red_2030'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='construction_emisint_red_2050'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(''))), width=1, style={"background-color": "#f8f9fa"}),
dbc.Col((html.Div(html.H4(''))), width=1, style={"background-color": "#f8f9fa"}),
dbc.Col((html.Div(html.H4(''))), width=1, style={"background-color": "#f8f9fa"}),
],style={"background-color": "rgba(31,119,180,0.5)"}),
dbc.Row([
dbc.Col((html.Div(html.H4('Commercial transport', id='com_transp_valadd_note')),dbc.Tooltip(com_transp_valadd_note_text, target='com_transp_valadd_note',placement='right')), width=3),
dbc.Col((html.Div(html.H4([pickersetting_dict['com_transp_valadd_picker_value'], '%']))), width=1, style={'text-align': 'center'}),
dbc.Col((html.Div(dbc.Input(id='com_transp_valadd_picker', type="number", bs_size="sm", value=pickersetting_dict['com_transp_valadd_picker_value'], step=pickersetting_dict['com_transp_valadd_picker_steps']))), width=2, style={"text-align": "center", "padding-top":"0.15rem"}),
dbc.Col((html.Div(html.H4(''))), width=2, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='com_transp_emisint_red_2018'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='com_transp_emisint_red_2030'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='com_transp_emisint_red_2050'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(''))), width=1, style={"background-color": "#f8f9fa"}),
dbc.Col((html.Div(html.H4(''))), width=1, style={"background-color": "#f8f9fa"}),
dbc.Col((html.Div(html.H4(''))), width=1, style={"background-color": "#f8f9fa"}),
],style={"background-color": "rgba(148,103,189,0.5)"}),
dbc.Row([
dbc.Col((html.Div(html.H4('Agriculture & Forestry', id='agrifor_valadd_note')),dbc.Tooltip(agriculture_valadd_note_text, target='agrifor_valadd_note',placement='right')), width=3),
dbc.Col((html.Div(html.H4([pickersetting_dict['agrifor_valadd_picker_value'], '%']))), width=1, style={'text-align': 'center'}),
dbc.Col((html.Div(dbc.Input(id='agrifor_valadd_picker', type="number", bs_size="sm", value=pickersetting_dict['agrifor_valadd_picker_value'], step=pickersetting_dict['agrifor_valadd_picker_steps']))), width=2, style={"text-align": "center", "padding-top":"0.15rem"}),
dbc.Col((html.Div(html.H4(''))), width=2),
dbc.Col((html.Div(html.H4(id='agrifor_emisint_red_2018'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='agrifor_emisint_red_2030'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='agrifor_emisint_red_2050'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(''))), width=1, style={"background-color": "#f8f9fa"}),
dbc.Col((html.Div(html.H4(''))), width=1, style={"background-color": "#f8f9fa"}),
dbc.Col((html.Div(html.H4(''))), width=1, style={"background-color": "#f8f9fa"}),
],style={"background-color": "rgba(44,160,44,0.5)"}),
dbc.Row([
dbc.Col((html.Div(html.H4('Electricity generation', id='electricity_valadd_note')),dbc.Tooltip(electricity_valadd_note_text, target='electricity_valadd_note',placement='right')), width=3),
dbc.Col((html.Div(html.H4([pickersetting_dict['electricity_valadd_picker_value'], '%']))), width=1, style={'text-align': 'center'}),
dbc.Col((html.Div(dbc.Input(id='electricity_valadd_picker', type="number", bs_size="sm", value=pickersetting_dict['electricity_valadd_picker_value'], step=pickersetting_dict['electricity_valadd_picker_steps']))), width=2, style={"text-align": "center", "padding-top":"0.15rem"}),
dbc.Col((html.Div(html.H4(''))), width=2, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='electricity_emisint_red_2018'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='electricity_emisint_red_2030'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='electricity_emisint_red_2050'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(''))), width=1, style={"background-color": "#f8f9fa"}),
dbc.Col((html.Div(html.H4(''))), width=1, style={"background-color": "#f8f9fa"}),
dbc.Col((html.Div(html.H4(''))), width=1, style={"background-color": "#f8f9fa"}),
],style={"background-color": "rgba(214,39,40,0.5)"}),
dbc.Row([
dbc.Col((html.Div(html.H4('Total', id='total_valadd_note')),dbc.Tooltip(total_valadd_note_text, target='total_valadd_note',placement='right')), width=3),
dbc.Col((html.Div(html.H4(id='total_val_add_hist'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='total_val_add_1950'))), width=2, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(''))), width=2, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='total_emis_int_red_hist'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='total_emis_int_red_1930'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(id='total_emis_int_red_3150'))), width=1, style={"text-align": "center"}),
dbc.Col((html.Div(html.H4(''))), width=1, style={"background-color": "#f8f9fa"}),
dbc.Col((html.Div(html.H4(''))), width=1, style={"background-color": "#f8f9fa"}),
dbc.Col((html.Div(html.H4(''))), width=1, style={"background-color": "#f8f9fa"}),
],style={"background-color": "rgba(31, 119, 180, 0.8)"}),
dbc.Row([
dbc.Col((html.Div(html.H6(' '))), width=12),
],style={"background-color": "#f8f9fa"}),
dbc.Row([
html.Div(html.H3('This tool is provided by the Centre for Climate and Energy Policy, Crawford School of Public Policy, The Australian National University, with funding support by the 2050 Pathways Platform. Contact for queries and comments: <EMAIL>')),
]),
dbc.Row([
html.Div(html.H3('Emission pathway tool concept and data compilation by <NAME>, <NAME>, <NAME>, <NAME>.')),
]),
dbc.Row([
html.Div(html.H3('Website & web application in Python/Dash/Plotly by <NAME>.')),
]),
], fluid=True, style={"padding": "5px 20px 20px 20px", 'backgroundColor': '#f8f9fa', "position": "sticky", "top": 60, 'zIndex': 9999}), ### This is for padding around the entire app: fill the entire screen, but keep padding top right bottom left at x pixels
])
elif tab == 'about':
return html.Div([
html.H2('About this tool'),
html.H3('This tool is provided by the Centre for Climate and Energy Policy, Crawford School of Public Policy, The Australian National University, with funding support by the 2050 Pathways Platform.'),
html.H3('Contact for queries and comments: <EMAIL>'),
html.H3('Emission pathway tool concept and data compilation by <NAME>, <NAME>, <NAME>, <NAME>.'),
html.H3(['This website & web application was built in Python/Dash/Plotly by <NAME>. Source code here: ', html.A("(link)", href='https://github.com/JorritHimself/Australian-Emission-Model-Dash', target="_blank"), '.']),
html.Div(html.H2('How to use this tool'),style={"margin-top": "1.5rem"}),
html.H3('Explanation todo'),
html.H3(['For a brief tutorial see this youtube clip here (actually also todo): ', html.A("(link)", href='http://youtube.com', target="_blank"), '.']),
html.Div(html.H2('Sources and methodological notes'),style={"margin-top": "1.5rem"}),
html.H2('Data used in this tool'),
html.H3(['The complete set of data as used in this tool can be downloaded here: ', html.A("(link)", href='http://aus2050emis.org/assets/ANU%20Australian%20emissions%20pathway%20tool%20-%20input%20data.xlsx', target="_blank"), '.']),
html.H2('Emissions'),
html.H3(['The primary source of emissions data is the State and Territory Greenhouse Gas Inventories, Department of Industry, Science, Energy and Resources: ', html.A("(link)", href='https://www.industry.gov.au/data-and-publications/state-and-territory-greenhouse-gas-inventories', target="_blank"), '.']),
html.H3(['For emissions from agriculture and LULUCF, we used data from the National Greenhouse Gas Inventory – UNFCCC classifications, also provided by the Department of Industry, Science, Energy and Resources: ', html.A("(link)", href='https://ageis.climatechange.gov.au/UNFCCC.aspx', target="_blank"), '.']),
html.H3('For the sub-national level, the State and Territory Greenhouse Gas Inventories do not split out emissions from electricity generation versus gas, water & waste services. We divided the two by subtracting emissions from electricity generation, as determined below.'),
html.H3('For emissions from electricity generation at the national level, we used the numbers from the State and Territory Greenhouse Gas Inventories, Department of Industry, Science, Energy and Resources.'),
html.H3(['For emissions from electricity generation in NSW, QLD, SA, TAS and VIC, we used AEMO data, via NEMSight: ', html.A("(link)", href='http://analytics.com.au/energy-analysis/nemsight-trading-tool', target="_blank"), '.']),
html.H3(['Emissions from electricity generation for NT and WA are estimated by using the emission intensity as reported in the Electricity sector emissions and generation data, Clean Energy Regulator: ', html.A("(link)", href='http://www.cleanenergyregulator.gov.au/NGER/National%20greenhouse%20and%20energy%20reporting%20data/electricity-sector-emissions-and-generation-data', target="_blank"), '. These emission intensity numbers were then multiplied by electricity generation numbers from the Energy Update 2020, Table O: ', html.A("(link)", href='https://www.energy.gov.au/publications/australian-energy-update-2020', target="_blank"), '.']),
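# Illustrative sketch of that estimate (hypothetical numbers): an emission intensity of 0.6 t CO2-eq/MWh multiplied by
# 15,000 GWh (i.e. 15,000,000 MWh) of generation corresponds to roughly 9,000,000 t, or 9 Mt CO2-eq, for that year.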
html.H3('We chose to use the electricity emissions (for NT and WA) from the Energy Update rather than from the Clean Energy Regulator, as 1) the former has a longer time series, and 2) the emissions data for electricity generation from the Clean Energy Regulator are in some years larger than the total emissions reported for electricity, gas, water and waste services combined in the State and Territory Greenhouse Gas Inventories.'),
html.H3('The emissions intensity of electricity generation for NT and WA for the period 2005-2013 is presumed equal to the intensity reported in 2014, the earliest year for which this data is available.'),
html.H3('Emissions from electricity generation for the ACT are set to zero; these emissions are instead shown as part of NSW emissions. The ACT is part of the NSW region in the National Electricity Market. The ACT has renewable energy generation under contract that equates to its total electricity use, and thus claims zero emissions from electricity generation.'),
html.H2('Value added'),
html.H3(['The data used are Industry Value Added by industry division or industry subdivision, provided by the Australian Bureau of Statistics: ', html.A("(link)", href='https://www.abs.gov.au/AUSSTATS/[email protected]/DetailsPage/8155.02017-18?OpenDocument', target="_blank"), '.']),
html.H3('Industry data was reported in financial years and has been averaged to convert to calendar years.'),
html.H3("For the State level, the industry division 'Electricity, Gas, Water & Waste Services' was split into 'Electricity generation' and 'Gas, Water & Waste Services' by using the same percentage split between the two reported for the national level."),
html.H2('Inflation'),
html.H3(['To recalculate value added as 2019 AUD, we used the RBA Inflation calculator: ', html.A("(link)", href='https://www.rba.gov.au/calculator/', target="_blank"), '.']),
html.H2('Population'),
html.H3(['Population statistics via the Australian Bureau of Statistics: ', html.A("(link)", href='https://www.abs.gov.au/statistics/people/population/historical-population/latest-release', target="_blank"), '.']),
html.Div(html.H3(['For future population numbers, we used the Series B projections from the Australian Bureau of Statistics: ', html.A("(link)", href='https://www.abs.gov.au/statistics/people/population/population-projections-australia/latest-release', target="_blank"), '.']),style={"padding-bottom": "10rem"}),
])
elif tab == 'reports':
return html.Div([
html.H3('Here will be some other reports and links to the CCEP website etc')
])
### Define the right-pane content (the figures) based on tab choice.
### The picker-driven updates of the figures and table values are handled in a separate callback, below this block
@app.callback(Output('right-pane-output', 'children'),
[Input('tabslist', 'value')])
def render_content(tab):
if tab in statelist:
return html.Div([
dbc.Container([
dbc.Row([html.Div(dcc.Graph(id='emissions_total', figure = fig_emissions_total))]),
dbc.Row([html.Div(dcc.Graph(id='emis_int_index', figure = fig_emis_int_index))]),
dbc.Row([html.Div(dcc.Graph(id='elec_gen_int', figure = fig_elec_gen_int))]),
dbc.Row([html.Div(dcc.Graph(id='pop_per_capita_emis', figure = fig_pop_per_capita))]),
dbc.Row([html.Div(dcc.Graph(id='value_added_total', figure = fig_added_value_total))]),
dbc.Row([html.Div(dcc.Graph(id='emis_int', figure = fig_emis_int))]),
]),
])
### Use picker input to update the figures and table contents all in one callback
@app.callback(
[Output('emissions_total', 'figure'),
Output('value_added_total', 'figure'),
Output('emis_int', 'figure'),
Output('elec_gen_int', 'figure'),
Output('pop_per_capita_emis', 'figure'),
Output('emis_int_index', 'figure'),
Output('services_emisred_2018', 'children'),
Output('services_emisred_2030', 'children'),
Output('services_emisred_2040', 'children'),
Output('services_emisred_2050', 'children'),
Output('mining_emisred_2018', 'children'),
Output('mining_emisred_2030', 'children'),
Output('mining_emisred_2040', 'children'),
Output('mining_emisred_2050', 'children'),
Output('manufacturing_emisred_2018', 'children'),
Output('manufacturing_emisred_2030', 'children'),
Output('manufacturing_emisred_2040', 'children'),
Output('manufacturing_emisred_2050', 'children'),
Output('gas_water_waste_emisred_2018', 'children'),
Output('gas_water_waste_emisred_2030', 'children'),
Output('gas_water_waste_emisred_2040', 'children'),
Output('gas_water_waste_emisred_2050', 'children'),
Output('construction_emisred_2018', 'children'),
Output('construction_emisred_2030', 'children'),
Output('construction_emisred_2040', 'children'),
Output('construction_emisred_2050', 'children'),
Output('com_transp_emisred_2018', 'children'),
Output('com_transp_emisred_2030', 'children'),
Output('com_transp_emisred_2040', 'children'),
Output('com_transp_emisred_2050', 'children'),
Output('agrifor_emisred_2018', 'children'),
Output('agrifor_emisred_2030', 'children'),
Output('agrifor_emisred_2040', 'children'),
Output('agrifor_emisred_2050', 'children'),
Output('residential_emisred_2018', 'children'),
Output('residential_emisred_2030', 'children'),
Output('residential_emisred_2040', 'children'),
Output('residential_emisred_2050', 'children'),
Output('electricity_emisred_2018', 'children'),
Output('electricity_emisred_2030', 'children'),
Output('electricity_emisred_2040', 'children'),
Output('electricity_emisred_2050', 'children'),
Output('services_emis_2018', 'children'),
Output('mining_emis_2018', 'children'),
Output('manufacturing_emis_2018', 'children'),
Output('gas_water_waste_emis_2018', 'children'),
Output('construction_emis_2018', 'children'),
Output('com_transp_emis_2018', 'children'),
Output('agrifor_emis_2018', 'children'),
Output('residential_emis_2018', 'children'),
Output('electricity_emis_2018', 'children'),
Output('services_emis_2050', 'children'),
Output('mining_emis_2050', 'children'),
Output('manufacturing_emis_2050', 'children'),
Output('gas_water_waste_emis_2050', 'children'),
Output('construction_emis_2050', 'children'),
Output('com_transp_emis_2050', 'children'),
Output('agrifor_emis_2050', 'children'),
Output('residential_emis_2050', 'children'),
Output('electricity_emis_2050', 'children'),
Output('total_emisred_Mt_hist', 'children'),
Output('total_emisred_Mt_1930', 'children'),
Output('total_emisred_Mt_3150', 'children'),
Output('total_emisred_2018', 'children'),
Output('total_emisred_2030', 'children'),
Output('total_emisred_2040', 'children'),
Output('total_emisred_2050', 'children'),
Output('net_emisred_2018', 'children'),
Output('net_emisred_2030', 'children'),
Output('net_emisred_2040', 'children'),
Output('net_emisred_2050', 'children'),
Output('gross_emis_2018', 'children'),
Output('gross_emis_2030', 'children'),
Output('gross_emis_2040', 'children'),
Output('gross_emis_2050', 'children'),
Output('gross_emis_2018copy', 'children'),
Output('gross_emis_2050copy', 'children'),
Output('LULUCF_2018', 'children'),
Output('LULUCF_2030', 'children'),
Output('LULUCF_2040', 'children'),
Output('LULUCF_2050', 'children'),
Output('net_emis_2018', 'children'),
Output('net_emis_2030', 'children'),
Output('net_emis_2040', 'children'),
Output('net_emis_2050', 'children'),
Output('total_val_add_hist', 'children'),
Output('total_val_add_1950', 'children'),
Output('elec_carb_int_2018', 'children'),
Output('elec_carb_int_2030', 'children'),
Output('elec_carb_int_2040', 'children'),
Output('elec_carb_int_2050', 'children'),
Output('services_emisint_red_2018', 'children'),
Output('services_emisint_red_2030', 'children'),
Output('services_emisint_red_2050', 'children'),
Output('mining_emisint_red_2018', 'children'),
Output('mining_emisint_red_2030', 'children'),
Output('mining_emisint_red_2050', 'children'),
Output('manufacturing_emisint_red_2018', 'children'),
Output('manufacturing_emisint_red_2030', 'children'),
Output('manufacturing_emisint_red_2050', 'children'),
Output('gas_water_waste_emisint_red_2018', 'children'),
Output('gas_water_waste_emisint_red_2030', 'children'),
Output('gas_water_waste_emisint_red_2050', 'children'),
Output('construction_emisint_red_2018', 'children'),
Output('construction_emisint_red_2030', 'children'),
Output('construction_emisint_red_2050', 'children'),
Output('com_transp_emisint_red_2018', 'children'),
Output('com_transp_emisint_red_2030', 'children'),
Output('com_transp_emisint_red_2050', 'children'),
Output('agrifor_emisint_red_2018', 'children'),
Output('agrifor_emisint_red_2030', 'children'),
Output('agrifor_emisint_red_2050', 'children'),
Output('electricity_emisint_red_2018', 'children'),
Output('electricity_emisint_red_2030', 'children'),
Output('electricity_emisint_red_2050', 'children'),
Output('total_emis_int_red_hist', 'children'),
Output('total_emis_int_red_1930', 'children'),
Output('total_emis_int_red_3150', 'children'),
],
[Input('agrifor_emis_picker1930', 'value'),
Input('com_transp_emis_picker1930', 'value'),
Input('construction_emis_picker1930', 'value'),
Input('electricity_emis_picker1930', 'value'),
Input('gas_water_waste_emis_picker1930', 'value'),
Input('manufacturing_emis_picker1930', 'value'),
Input('mining_emis_picker1930', 'value'),
Input('residential_emis_picker1930', 'value'),
Input('services_emis_picker1930', 'value'),
Input('agrifor_emis_picker3150', 'value'),
Input('com_transp_emis_picker3150', 'value'),
Input('construction_emis_picker3150', 'value'),
Input('electricity_emis_picker3150', 'value'),
Input('gas_water_waste_emis_picker3150', 'value'),
Input('manufacturing_emis_picker3150', 'value'),
Input('mining_emis_picker3150', 'value'),
Input('residential_emis_picker3150', 'value'),
Input('services_emis_picker3150', 'value'),
Input('agrifor_valadd_picker', 'value'),
Input('com_transp_valadd_picker', 'value'),
Input('construction_valadd_picker', 'value'),
Input('electricity_valadd_picker', 'value'),
Input('gas_water_waste_valadd_picker', 'value'),
Input('manufacturing_valadd_picker', 'value'),
Input('mining_valadd_picker', 'value'),
Input('services_valadd_picker', 'value'),
Input('lulucf_emis_pickerbase1930', 'value'),
Input('lulucf_emis_pickerbase3150', 'value'),
Input('lulucf_emis_pickergrow1930', 'value'),
Input('lulucf_emis_pickergrow3150', 'value'),
Input('electricity_growth_picker', 'value'),
Input('tabslist', 'value')
]
)
def update_all_outputs(agrifor_emis_trend1930, com_transp_emis_trend1930, construction_emis_trend1930, electricity_emis_trend1930,
gas_water_waste_emis_trend1930, manufacturing_emis_trend1930, mining_emis_trend1930, residential_emis_trend1930, services_emis_trend1930,
agrifor_emis_trend3150, com_transp_emis_trend3150, construction_emis_trend3150, electricity_emis_trend3150,
gas_water_waste_emis_trend3150, manufacturing_emis_trend3150, mining_emis_trend3150, residential_emis_trend3150, services_emis_trend3150,
agrifor_valadd_trend,com_transp_valadd_trend,construction_valadd_trend, electricity_valadd_trend,
gas_water_waste_valadd_trend, manufacturing_valadd_trend, mining_valadd_trend, services_valadd_trend,
lulucf_emis_base1930, lulucf_emis_base3150, lulucf_emis_growth1930, lulucf_emis_growth3150, electricity_growth_trend, tab):
df_select = df_full[(df_full['geo']==tab) & (df_full['year']>=2005) & (df_full['sector']!="Overall")]
### ### Emissions output per sector:
## Line 1: intermediate result: emissions level in the final observed year ('finaly') + number of years since then * annual emission change 2019-2030
## Line 2: for years beyond 2030 the 2019-2030 trend is capped at 12 years, so it only runs up to 2030
## Line 3: emissions cannot be less than zero, so we floor the intermediate result here, also because totals for 2030 are calculated on this value
## Line 4: move the corrected intermediate result to the output column
## Line 5: if emissions are already zero in 2030, we only apply the 2031-2050 annual change for the years since 2030
## Line 6: if emissions in 2030 are not yet zero, we take the 2030 level + number of years since 2030 * annual emission change 2031-2050
## Line 7: again floor the output at zero for each sector; the exception is LULUCF, which may go negative
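## Illustrative example (hypothetical numbers): with final-year emissions of 100 Mt, a 2019-2030 trend of -2 Mt/y and a 2031-2050 trend of -1 Mt/y,
## 2025 output = 100 + (-2 * 7) = 86 Mt, 2030 output = 100 + (-2 * 12) = 76 Mt, and 2040 output = 76 + (-1 * 10) = 66 Mt (floored at 0 if negative).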
### 'Agriculture & Forestry' emissions:
df_select.loc[(df_select['sector']=='Agriculture & Forestry') & (df_select['yrs_since_final_obs']>0),'emissions_MtCo2_base2030'] = df_select['emissions_MtCo2_finaly']+agrifor_emis_trend1930*df_select['yrs_since_final_obs']
df_select.loc[(df_select['sector']=='Agriculture & Forestry') & (df_select['yrs_since_final_obs']>12),'emissions_MtCo2_base2030'] = df_select['emissions_MtCo2_finaly']+agrifor_emis_trend1930*12
df_select.loc[(df_select['sector']=='Agriculture & Forestry') & (df_select['emissions_MtCo2_base2030']<0), 'emissions_MtCo2_base2030'] = 0
df_select.loc[(df_select['sector']=='Agriculture & Forestry') & (df_select['yrs_since_final_obs']>0), 'emissions_MtCo2_output'] = df_select['emissions_MtCo2_base2030']
df_select.loc[(df_select['sector']=='Agriculture & Forestry') & (df_select['yrs_since_final_obs']>12) & (df_select['emissions_MtCo2_base2030']<=0), 'emissions_MtCo2_output'] = agrifor_emis_trend3150*(df_select['yrs_since_final_obs']-12)
df_select.loc[(df_select['sector']=='Agriculture & Forestry') & (df_select['yrs_since_final_obs']>12) & (df_select['emissions_MtCo2_base2030']>0), 'emissions_MtCo2_output'] = df_select['emissions_MtCo2_base2030'] + agrifor_emis_trend3150*(df_select['yrs_since_final_obs']-12)
df_select.loc[(df_select['sector']=='Agriculture & Forestry') & (df_select['emissions_MtCo2_output']<0), 'emissions_MtCo2_output'] = 0
### 'Commercial Transport' emissions:
df_select.loc[(df_select['sector']=='Commercial Transport') & (df_select['yrs_since_final_obs']>0),'emissions_MtCo2_base2030'] = df_select['emissions_MtCo2_finaly']+com_transp_emis_trend1930*df_select['yrs_since_final_obs']
df_select.loc[(df_select['sector']=='Commercial Transport') & (df_select['yrs_since_final_obs']>12),'emissions_MtCo2_base2030'] = df_select['emissions_MtCo2_finaly']+com_transp_emis_trend1930*12
df_select.loc[(df_select['sector']=='Commercial Transport') & (df_select['emissions_MtCo2_base2030']<0), 'emissions_MtCo2_base2030'] = 0
df_select.loc[(df_select['sector']=='Commercial Transport') & (df_select['yrs_since_final_obs']>0), 'emissions_MtCo2_output'] = df_select['emissions_MtCo2_base2030']
df_select.loc[(df_select['sector']=='Commercial Transport') & (df_select['yrs_since_final_obs']>12) & (df_select['emissions_MtCo2_base2030']<=0), 'emissions_MtCo2_output'] = com_transp_emis_trend3150*(df_select['yrs_since_final_obs']-12)
df_select.loc[(df_select['sector']=='Commercial Transport') & (df_select['yrs_since_final_obs']>12) & (df_select['emissions_MtCo2_base2030']>0), 'emissions_MtCo2_output'] = df_select['emissions_MtCo2_base2030'] + com_transp_emis_trend3150*(df_select['yrs_since_final_obs']-12)
df_select.loc[(df_select['sector']=='Commercial Transport') & (df_select['emissions_MtCo2_output']<0), 'emissions_MtCo2_output'] = 0
### 'Construction' emissions:
df_select.loc[(df_select['sector']=='Construction') & (df_select['yrs_since_final_obs']>0),'emissions_MtCo2_base2030'] = df_select['emissions_MtCo2_finaly']+construction_emis_trend1930*df_select['yrs_since_final_obs']
df_select.loc[(df_select['sector']=='Construction') & (df_select['yrs_since_final_obs']>12),'emissions_MtCo2_base2030'] = df_select['emissions_MtCo2_finaly']+construction_emis_trend1930*12
df_select.loc[(df_select['sector']=='Construction') & (df_select['emissions_MtCo2_base2030']<0), 'emissions_MtCo2_base2030'] = 0
df_select.loc[(df_select['sector']=='Construction') & (df_select['yrs_since_final_obs']>0), 'emissions_MtCo2_output'] = df_select['emissions_MtCo2_base2030']
df_select.loc[(df_select['sector']=='Construction') & (df_select['yrs_since_final_obs']>12) & (df_select['emissions_MtCo2_base2030']<=0), 'emissions_MtCo2_output'] = construction_emis_trend3150*(df_select['yrs_since_final_obs']-12)
df_select.loc[(df_select['sector']=='Construction') & (df_select['yrs_since_final_obs']>12) & (df_select['emissions_MtCo2_base2030']>0), 'emissions_MtCo2_output'] = df_select['emissions_MtCo2_base2030'] + construction_emis_trend3150*(df_select['yrs_since_final_obs']-12)
df_select.loc[(df_select['sector']=='Construction') & (df_select['emissions_MtCo2_output']<0), 'emissions_MtCo2_output'] = 0
### 'Electricity generation' emissions:
df_select.loc[(df_select['sector']=='Electricity generation') & (df_select['yrs_since_final_obs']>0),'emissions_MtCo2_base2030'] = df_select['emissions_MtCo2_finaly']+electricity_emis_trend1930*df_select['yrs_since_final_obs']
df_select.loc[(df_select['sector']=='Electricity generation') & (df_select['yrs_since_final_obs']>12),'emissions_MtCo2_base2030'] = df_select['emissions_MtCo2_finaly']+electricity_emis_trend1930*12
df_select.loc[(df_select['sector']=='Electricity generation') & (df_select['emissions_MtCo2_base2030']<0), 'emissions_MtCo2_base2030'] = 0
df_select.loc[(df_select['sector']=='Electricity generation') & (df_select['yrs_since_final_obs']>0), 'emissions_MtCo2_output'] = df_select['emissions_MtCo2_base2030']
df_select.loc[(df_select['sector']=='Electricity generation') & (df_select['yrs_since_final_obs']>12) & (df_select['emissions_MtCo2_base2030']<=0), 'emissions_MtCo2_output'] = electricity_emis_trend3150 * (df_select['yrs_since_final_obs'] - 12)
df_select.loc[(df_select['sector']=='Electricity generation') & (df_select['yrs_since_final_obs']>12) & (df_select['emissions_MtCo2_base2030']>0), 'emissions_MtCo2_output'] = df_select['emissions_MtCo2_base2030'] + electricity_emis_trend3150 * (df_select['yrs_since_final_obs'] - 12)
df_select.loc[(df_select['sector']=='Electricity generation') & (df_select['emissions_MtCo2_output']<0), 'emissions_MtCo2_output'] = 0
### 'Gas, Water & Waste Services' emissions:
df_select.loc[(df_select['sector']=='Gas, Water & Waste Services') & (df_select['yrs_since_final_obs']>0),'emissions_MtCo2_base2030'] = df_select['emissions_MtCo2_finaly'] + gas_water_waste_emis_trend1930 * df_select['yrs_since_final_obs']
df_select.loc[(df_select['sector']=='Gas, Water & Waste Services') & (df_select['yrs_since_final_obs']>12),'emissions_MtCo2_base2030'] = df_select['emissions_MtCo2_finaly']+gas_water_waste_emis_trend1930 * 12
df_select.loc[(df_select['sector']=='Gas, Water & Waste Services') & (df_select['emissions_MtCo2_base2030']<0), 'emissions_MtCo2_base2030'] = 0
df_select.loc[(df_select['sector']=='Gas, Water & Waste Services') & (df_select['yrs_since_final_obs']>0), 'emissions_MtCo2_output'] = df_select['emissions_MtCo2_base2030']
df_select.loc[(df_select['sector']=='Gas, Water & Waste Services') & (df_select['yrs_since_final_obs']>12) & (df_select['emissions_MtCo2_base2030']<=0), 'emissions_MtCo2_output'] = gas_water_waste_emis_trend3150 * (df_select['yrs_since_final_obs'] - 12)
df_select.loc[(df_select['sector']=='Gas, Water & Waste Services') & (df_select['yrs_since_final_obs']>12) & (df_select['emissions_MtCo2_base2030']>0), 'emissions_MtCo2_output'] = df_select['emissions_MtCo2_base2030'] + gas_water_waste_emis_trend3150 * (df_select['yrs_since_final_obs']-12)
df_select.loc[(df_select['sector']=='Gas, Water & Waste Services') & (df_select['emissions_MtCo2_output']<0), 'emissions_MtCo2_output'] = 0
### 'Manufacturing' emissions:
df_select.loc[(df_select['sector']=='Manufacturing') & (df_select['yrs_since_final_obs']>0),'emissions_MtCo2_base2030'] = df_select['emissions_MtCo2_finaly'] + manufacturing_emis_trend1930 * df_select['yrs_since_final_obs']
df_select.loc[(df_select['sector']=='Manufacturing') & (df_select['yrs_since_final_obs']>12),'emissions_MtCo2_base2030'] = df_select['emissions_MtCo2_finaly'] + manufacturing_emis_trend1930 * 12
df_select.loc[(df_select['sector']=='Manufacturing') & (df_select['emissions_MtCo2_base2030']<0), 'emissions_MtCo2_base2030'] = 0
df_select.loc[(df_select['sector']=='Manufacturing') & (df_select['yrs_since_final_obs']>0), 'emissions_MtCo2_output'] = df_select['emissions_MtCo2_base2030']
df_select.loc[(df_select['sector']=='Manufacturing') & (df_select['yrs_since_final_obs']>12) & (df_select['emissions_MtCo2_base2030']<=0), 'emissions_MtCo2_output'] = manufacturing_emis_trend3150*(df_select['yrs_since_final_obs']-12)
df_select.loc[(df_select['sector']=='Manufacturing') & (df_select['yrs_since_final_obs']>12) & (df_select['emissions_MtCo2_base2030']>0), 'emissions_MtCo2_output'] = df_select['emissions_MtCo2_base2030'] + manufacturing_emis_trend3150*(df_select['yrs_since_final_obs']-12)
df_select.loc[(df_select['sector']=='Manufacturing') & (df_select['emissions_MtCo2_output']<0), 'emissions_MtCo2_output'] = 0
### 'Mining' emissions:
df_select.loc[(df_select['sector']=='Mining') & (df_select['yrs_since_final_obs']>0),'emissions_MtCo2_base2030'] = df_select['emissions_MtCo2_finaly']+mining_emis_trend1930*df_select['yrs_since_final_obs']
df_select.loc[(df_select['sector']=='Mining') & (df_select['yrs_since_final_obs']>12),'emissions_MtCo2_base2030'] = df_select['emissions_MtCo2_finaly'] + mining_emis_trend1930*12
df_select.loc[(df_select['sector']=='Mining') & (df_select['emissions_MtCo2_base2030']<0), 'emissions_MtCo2_base2030'] = 0
df_select.loc[(df_select['sector']=='Mining') & (df_select['yrs_since_final_obs']>0), 'emissions_MtCo2_output'] = df_select['emissions_MtCo2_base2030']
df_select.loc[(df_select['sector']=='Mining') & (df_select['yrs_since_final_obs']>12) & (df_select['emissions_MtCo2_base2030']<=0), 'emissions_MtCo2_output'] = mining_emis_trend3150*(df_select['yrs_since_final_obs']-12)
df_select.loc[(df_select['sector']=='Mining') & (df_select['yrs_since_final_obs']>12) & (df_select['emissions_MtCo2_base2030']>0), 'emissions_MtCo2_output'] = df_select['emissions_MtCo2_base2030'] + mining_emis_trend3150*(df_select['yrs_since_final_obs']-12)
df_select.loc[(df_select['sector']=='Mining') & (df_select['emissions_MtCo2_output']<0), 'emissions_MtCo2_output'] = 0
### 'Residential' emissions:
df_select.loc[(df_select['sector']=='Residential') & (df_select['yrs_since_final_obs']>0),'emissions_MtCo2_base2030'] = df_select['emissions_MtCo2_finaly']+residential_emis_trend1930*df_select['yrs_since_final_obs']
df_select.loc[(df_select['sector']=='Residential') & (df_select['yrs_since_final_obs']>12),'emissions_MtCo2_base2030'] = df_select['emissions_MtCo2_finaly']+residential_emis_trend1930*12
df_select.loc[(df_select['sector']=='Residential') & (df_select['emissions_MtCo2_base2030']<0), 'emissions_MtCo2_base2030'] = 0
df_select.loc[(df_select['sector']=='Residential') & (df_select['yrs_since_final_obs']>0), 'emissions_MtCo2_output'] = df_select['emissions_MtCo2_base2030']
df_select.loc[(df_select['sector']=='Residential') & (df_select['yrs_since_final_obs']>12) & (df_select['emissions_MtCo2_base2030']<=0), 'emissions_MtCo2_output'] = residential_emis_trend3150 * (df_select['yrs_since_final_obs'] - 12)
df_select.loc[(df_select['sector']=='Residential') & (df_select['yrs_since_final_obs']>12) & (df_select['emissions_MtCo2_base2030']>0), 'emissions_MtCo2_output'] = df_select['emissions_MtCo2_base2030'] + residential_emis_trend3150 * (df_select['yrs_since_final_obs'] - 12)
df_select.loc[(df_select['sector']=='Residential') & (df_select['emissions_MtCo2_output']<0), 'emissions_MtCo2_output'] = 0
### 'Services' emissions:
df_select.loc[(df_select['sector']=='Services') & (df_select['yrs_since_final_obs']>0),'emissions_MtCo2_base2030'] = df_select['emissions_MtCo2_finaly']+services_emis_trend1930*df_select['yrs_since_final_obs']
df_select.loc[(df_select['sector']=='Services') & (df_select['yrs_since_final_obs']>12),'emissions_MtCo2_base2030'] = df_select['emissions_MtCo2_finaly']+services_emis_trend1930*12
df_select.loc[(df_select['sector']=='Services') & (df_select['emissions_MtCo2_base2030']<0), 'emissions_MtCo2_base2030'] = 0
df_select.loc[(df_select['sector']=='Services') & (df_select['yrs_since_final_obs']>0), 'emissions_MtCo2_output'] = df_select['emissions_MtCo2_base2030']
df_select.loc[(df_select['sector']=='Services') & (df_select['yrs_since_final_obs']>12) & (df_select['emissions_MtCo2_base2030']<=0), 'emissions_MtCo2_output'] = services_emis_trend3150*(df_select['yrs_since_final_obs']-12)
df_select.loc[(df_select['sector']=='Services') & (df_select['yrs_since_final_obs']>12) & (df_select['emissions_MtCo2_base2030']>0), 'emissions_MtCo2_output'] = df_select['emissions_MtCo2_base2030'] + services_emis_trend3150*(df_select['yrs_since_final_obs']-12)
df_select.loc[(df_select['sector']=='Services') & (df_select['emissions_MtCo2_output']<0), 'emissions_MtCo2_output'] = 0
### LULUCF emissions:
### Note again these may go negative. The input and calculation are also much more straightforward: annual base level plus annual growth * years since the last observation
df_select.loc[(df_select['sector']=='LULUCF') & (df_select['yrs_since_final_obs']>0),'emissions_MtCo2_output'] = lulucf_emis_base1930 + lulucf_emis_growth1930 * df_select['yrs_since_final_obs']
df_select.loc[(df_select['sector']=='LULUCF') & (df_select['yrs_since_final_obs']>12),'emissions_MtCo2_output'] = lulucf_emis_base3150 + lulucf_emis_growth3150 * (df_select['yrs_since_final_obs'] - 12)
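### Illustrative example (hypothetical numbers): with a 2019-2030 base of -20 Mt and growth of -1 Mt/y, 2025 gives -20 + (-1 * 7) = -27 Mt;
### from 2031 onwards the 2031-2050 base and growth take over, e.g. a base of -30 Mt and growth of -0.5 Mt/y gives -30 + (-0.5 * 10) = -35 Mt in 2040.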
### ### Value added: only since 2009
### For value added trends, the annual growth factor is 1 plus the picker value (in %), compounded over the years since the final observation
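### Illustrative example (hypothetical numbers): with a final-year value added of 50 bln AUD and a 2%/y growth picker,
### the projection 10 years on is 50 * (1 + 0.02)**10, i.e. roughly 61 bln AUD.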
df_select.loc[(df_select['sector']=='Services') & (df_select['yrs_since_final_obs']>0),'ind_val_add_output'] = df_select['ind_val_add_2019_bln_finaly'] * np.power((1+(services_valadd_trend/100)),df_select['yrs_since_final_obs'])
df_select.loc[(df_select['sector']=='Mining') & (df_select['yrs_since_final_obs']>0),'ind_val_add_output'] = df_select['ind_val_add_2019_bln_finaly']*np.power((1+(mining_valadd_trend/100)),df_select['yrs_since_final_obs'])
df_select.loc[(df_select['sector']=='Manufacturing') & (df_select['yrs_since_final_obs']>0),'ind_val_add_output'] = df_select['ind_val_add_2019_bln_finaly']*np.power((1+(manufacturing_valadd_trend/100)),df_select['yrs_since_final_obs'])
df_select.loc[(df_select['sector']=='Gas, Water & Waste Services') & (df_select['yrs_since_final_obs']>0),'ind_val_add_output'] = df_select['ind_val_add_2019_bln_finaly']*np.power((1+(gas_water_waste_valadd_trend/100)),df_select['yrs_since_final_obs'])
df_select.loc[(df_select['sector']=='Construction') & (df_select['yrs_since_final_obs']>0),'ind_val_add_output'] = df_select['ind_val_add_2019_bln_finaly']*np.power((1+(construction_valadd_trend/100)),df_select['yrs_since_final_obs'])
df_select.loc[(df_select['sector']=='Commercial Transport') & (df_select['yrs_since_final_obs']>0),'ind_val_add_output'] = df_select['ind_val_add_2019_bln_finaly']*np.power((1+(com_transp_valadd_trend/100)),df_select['yrs_since_final_obs'])
df_select.loc[(df_select['sector']=='Agriculture & Forestry') & (df_select['yrs_since_final_obs']>0),'ind_val_add_output'] = df_select['ind_val_add_2019_bln_finaly']*np.power((1+(agrifor_valadd_trend/100)),df_select['yrs_since_final_obs'])
df_select.loc[(df_select['sector']=='Electricity generation') & (df_select['yrs_since_final_obs']>0),'ind_val_add_output'] = df_select['ind_val_add_2019_bln_finaly']*np.power((1+(electricity_valadd_trend/100)),df_select['yrs_since_final_obs'])
### ### Emission intensity calculation, also only since 2009:
df_select['emis_int_outp']=df_select['emissions_MtCo2_output']/df_select['ind_val_add_output']
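### Unit note (assuming value added is in billions of 2019 AUD, as the 'ind_val_add_2019_bln_*' column names suggest):
### Mt CO2-eq divided by billion AUD works out to kg CO2-eq per 2019 AUD, which matches the y-axis label of the emission intensity figure below.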
df_select_emis_int = df_select[df_select.sector != 'LULUCF']
df_select_emis_int = df_select_emis_int[df_select_emis_int.sector != 'Residential']
df_select_emis_int = df_select_emis_int[df_select_emis_int.year>= 2009]
### ### Electricity generation and emission intensity dynamically based on picker input
df_select_elec = df_select[(df_select['sector']=="Electricity generation")]
### Calculate growth of electricity output in GWh (same compound-growth approach as for value added):
df_select_elec.loc[(df_select_elec['yrs_since_final_obs']>0),'elec_gen_GWh_output'] = df_select_elec['elec_gen_GWh_finaly']*np.power((1+(electricity_growth_trend/100)),df_select_elec['yrs_since_final_obs'])
### Calculate emission intensity (note emissions have been calculated above):
df_select_elec['elec_carb_int_outp']=1000 * df_select_elec['emissions_MtCo2_output'] / df_select_elec['elec_gen_GWh_output']
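### Unit check: emissions_kg / generation_kWh = (Mt * 1e9) / (GWh * 1e6) = 1000 * Mt / GWh, so the factor of 1000 expresses
### the intensity in kg CO2-eq per kWh; e.g. (illustrative numbers) 170 Mt over 200,000 GWh gives 0.85 kg CO2-eq/kWh.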
### Round the carbon intensity and the generation output
df_select_elec['elec_carb_int_outp']=round(df_select_elec['elec_carb_int_outp'],2)
df_select_elec['elec_gen_GWh_output']=round(df_select_elec['elec_gen_GWh_output'],0)
### ### Round numbers to be displayed in graphs and tables
df_select['emissions_MtCo2_output_orig_decimals'] = df_select['emissions_MtCo2_output']
if tab in smallnumberstates:
df_select['emissions_MtCo2_output']=round(df_select['emissions_MtCo2_output'],2)
else:
df_select['emissions_MtCo2_output']=round(df_select['emissions_MtCo2_output'],1)
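### Presumably two decimals are used for the smaller jurisdictions because their sectoral emissions are often well below 1 Mt,
### so one decimal would hide most of the variation in the tables and figures.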
### # Round the other outputs in the same way
df_select['ind_val_add_output']=round(df_select['ind_val_add_output'],1)
df_select_emis_int['emis_int_outp']=round(df_select_emis_int['emis_int_outp'],2)
df_select['elec_carb_int_outp']=round(df_select['elec_carb_int_outp'],2)
df_select['elec_gen_GWh_output']=round(df_select['elec_gen_GWh_output'],0)
### ### Calculate the emission reductions
df_select['emis_reduc']= -100 * (1 - (df_select['emissions_MtCo2_output'] / df_select['emissions_MtCo2_baseyear']))
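### Illustrative example: with base-year (2005) emissions of 100 Mt and a pathway output of 60 Mt, emis_reduc = -100 * (1 - 60/100) = -40, i.e. 40% below 2005.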
### ### Sensible decimal numbers for the emission reductions, and add percentage symbol here
df_select['emis_reduc']=round(df_select['emis_reduc'],1)
df_select['emis_reduc']=df_select['emis_reduc'].apply(str)
df_select['emis_reduc']=df_select['emis_reduc'] + '%'
### ### Define emissions total figure with dynamic input
# Temporarily shorten the 'Agriculture & Forestry' sector label to 'Agriculture' (restored after the figure is built)
df_select['sector'] = df_select['sector'].str.replace(re.escape('Agriculture & Forestry'),'Agriculture')
fig_emissions_total = px.area(df_select, x="year", y="emissions_MtCo2_output", color="sector",
color_discrete_map=my_discrete_color_map,
labels={"year": "", "emissions_MtCo2_output": "CO<sub>2</sub> Emissions (Mt CO<sub>2</sub>-eq/y)"},
title="CO<sub>2</sub> Emissions by sector",
width=695, height=375)
fig_emissions_total.update_layout(transition_duration=350,
template="plotly_white",
legend_traceorder="reversed",
title_font_color="#1F77B4",
title_font_size=18,
title_font_family="Rockwell",
title_x=0.02,
margin=dict(t=40, r=0, b=0, l=65, pad=0))
fig_emissions_total.update_xaxes(showline=True, linewidth=1, linecolor='black', gridcolor='rgba(149, 165, 166, 0.6)', mirror=True)
fig_emissions_total.update_yaxes(showline=True, linewidth=1, linecolor='black', gridcolor='rgba(149, 165, 166, 0.6)', mirror=True)
# Restore the full 'Agriculture & Forestry' sector name
df_select['sector'] = df_select['sector'].str.replace(re.escape('Agriculture'),'Agriculture & Forestry')
### ### Define value added figure with dynamic input
df_select_val_add = df_select[df_select.sector != 'LULUCF']
df_select_val_add = df_select_val_add[df_select_val_add.sector != 'Residential']
df_select_val_add = df_select_val_add[df_select_val_add.year>= 2009]
fig_added_value_total = px.area(df_select_val_add, x="year", y="ind_val_add_output", color="sector",
color_discrete_map=my_discrete_color_map,
labels={"year": "", "ind_val_add_output": "Value added (billion 2019 AUD)<sub> </sub>"},
title="Value added by sector",
width=700, height=375)
fig_added_value_total.update_layout(transition_duration=350,
template="plotly_white",
legend_traceorder="reversed",
title_font_color="#1F77B4",
title_font_size=18,
title_font_family="Rockwell",
title_x=0.02,
margin=dict(t=40, r=0, b=0, l=65, pad=0))
fig_added_value_total.update_xaxes(showline=True, linewidth=1, linecolor='black', gridcolor='rgba(149, 165, 166, 0.6)', mirror=True)
fig_added_value_total.update_yaxes(showline=True, linewidth=1, linecolor='black', gridcolor='rgba(149, 165, 166, 0.6)', mirror=True)
### ### Emission intensity graph with dynamic input
fig_emis_int = px.line(df_select_emis_int, x="year", y="emis_int_outp", color="sector",
color_discrete_sequence=['#D62728', '#2CA02C', '#9467BD', '#8C564B', '#E377C2', '#BCBD22', '#7F7F7F', '#17BECF'],
labels={"year": "", "emis_int_outp": "Emission intensity (kg CO<sub>2</sub>-eq/2019 AUD)"},
title="Emission intensity by sector",
width=700, height=375)
fig_emis_int.update_layout(template="plotly_white",
legend_traceorder="reversed",
title_font_color="#1F77B4",
title_font_size=18,
title_font_family="Rockwell",
title_x=0.02,
margin=dict(t=40, r=0, b=0, l=65, pad=0))
fig_emis_int.update_xaxes(showline=True, linewidth=1, linecolor='black', gridcolor='rgba(149, 165, 166, 0.6)', mirror=True)
fig_emis_int.update_yaxes(showline=True, linewidth=1, linecolor='black', gridcolor='rgba(149, 165, 166, 0.6)', mirror=True)
### ### Redefine Electricity generation and carbon intensity figure again, but with dynamic input
# Convert the year, generation and carbon intensity columns to lists for the dual y-axis figure
year_dict = df_select_elec['year'].tolist()
gwh_dict = df_select_elec['elec_gen_GWh_output'].tolist()
elec_carb_int_dict = df_select_elec['elec_carb_int_outp'].tolist()
# Create the dual-axis subplot figure
fig_elec_gen_int = make_subplots(specs=[[{"secondary_y": True}]])
# Add traces
fig_elec_gen_int.add_scatter(x=year_dict, y=elec_carb_int_dict, name="Carbon intensity", mode="lines", line=dict(width=2, color="black"), secondary_y=False)
fig_elec_gen_int.add_scatter(x=year_dict, y=gwh_dict, name="Electricity generation", mode="lines", line=dict(width=2, color="rgba(214,39,40,1)"), secondary_y=True)
fig_elec_gen_int.update_layout(template="plotly_white",
legend_traceorder="reversed",
title_text="Electricity generation and carbon intensity",
title_font_color="#1F77B4",
title_font_size=18,
title_font_family="Rockwell",
title_x=0.02,
margin=dict(t=40, r=0, b=0, l=65, pad=0),
width=675, height=340)
fig_elec_gen_int.update_xaxes(showline=True, linewidth=1, linecolor='black', gridcolor='rgba(149, 165, 166, 0.6)', mirror=True)
fig_elec_gen_int.update_yaxes(showline=True, linewidth=1, linecolor='black', gridcolor='rgba(149, 165, 166, 0.6)', mirror=True)
# Set y-axes titles
fig_elec_gen_int.update_yaxes(title_text="Carbon intensity (kg CO<sub>2</sub>-eq/kWh)", secondary_y=False)
fig_elec_gen_int.update_yaxes(title_text="Electricity generation (GWh)<sub> </sub>", secondary_y=True)
# y-axis range
max_elec_gen_int = max(elec_carb_int_dict) + 0.2
fig_elec_gen_int.update_layout(yaxis=dict(range=[0,max_elec_gen_int]))
### ### Update all the pathway result outputs
# Emission reduction: services
services_emisred_2018 = df_select.loc[(df_select['sector']=='Services') & (df_select['year']==2018),'emis_reduc']
services_emisred_2030 = df_select.loc[(df_select['sector']=='Services') & (df_select['year']==2030),'emis_reduc']
services_emisred_2040 = df_select.loc[(df_select['sector']=='Services') & (df_select['year']==2040),'emis_reduc']
services_emisred_2050 = df_select.loc[(df_select['sector']=='Services') & (df_select['year']==2050),'emis_reduc']
# Emission reduction: mining
mining_emisred_2018 = df_select.loc[(df_select['sector']=='Mining') & (df_select['year']==2018),'emis_reduc']
mining_emisred_2030 = df_select.loc[(df_select['sector']=='Mining') & (df_select['year']==2030),'emis_reduc']
mining_emisred_2040 = df_select.loc[(df_select['sector']=='Mining') & (df_select['year']==2040),'emis_reduc']
mining_emisred_2050 = df_select.loc[(df_select['sector']=='Mining') & (df_select['year']==2050),'emis_reduc']
# Emission reduction: manufacturing
manufacturing_emisred_2018 = df_select.loc[(df_select['sector']=='Manufacturing') & (df_select['year']==2018),'emis_reduc']
manufacturing_emisred_2030 = df_select.loc[(df_select['sector']=='Manufacturing') & (df_select['year']==2030),'emis_reduc']
manufacturing_emisred_2040 = df_select.loc[(df_select['sector']=='Manufacturing') & (df_select['year']==2040),'emis_reduc']
manufacturing_emisred_2050 = df_select.loc[(df_select['sector']=='Manufacturing') & (df_select['year']==2050),'emis_reduc']
# Emission reduction: Gas, water & waste
gas_water_waste_emisred_2018 = df_select.loc[(df_select['sector']=='Gas, Water & Waste Services') & (df_select['year']==2018),'emis_reduc']
gas_water_waste_emisred_2030 = df_select.loc[(df_select['sector']=='Gas, Water & Waste Services') & (df_select['year']==2030),'emis_reduc']
gas_water_waste_emisred_2040 = df_select.loc[(df_select['sector']=='Gas, Water & Waste Services') & (df_select['year']==2040),'emis_reduc']
gas_water_waste_emisred_2050 = df_select.loc[(df_select['sector']=='Gas, Water & Waste Services') & (df_select['year']==2050),'emis_reduc']
# Emission reduction: Construction
construction_emisred_2018 = df_select.loc[(df_select['sector']=='Construction') & (df_select['year']==2018),'emis_reduc']
construction_emisred_2030 = df_select.loc[(df_select['sector']=='Construction') & (df_select['year']==2030),'emis_reduc']
construction_emisred_2040 = df_select.loc[(df_select['sector']=='Construction') & (df_select['year']==2040),'emis_reduc']
construction_emisred_2050 = df_select.loc[(df_select['sector']=='Construction') & (df_select['year']==2050),'emis_reduc']
# Emission reduction: Commercial transport
com_transp_emisred_2018 = df_select.loc[(df_select['sector']=='Commercial Transport') & (df_select['year']==2018),'emis_reduc']
com_transp_emisred_2030 = df_select.loc[(df_select['sector']=='Commercial Transport') & (df_select['year']==2030),'emis_reduc']
com_transp_emisred_2040 = df_select.loc[(df_select['sector']=='Commercial Transport') & (df_select['year']==2040),'emis_reduc']
com_transp_emisred_2050 = df_select.loc[(df_select['sector']=='Commercial Transport') & (df_select['year']==2050),'emis_reduc']
# Emission reduction: Agriculture & Forestry
agrifor_emisred_2018 = df_select.loc[(df_select['sector']=='Agriculture & Forestry') & (df_select['year']==2018),'emis_reduc']
agrifor_emisred_2030 = df_select.loc[(df_select['sector']=='Agriculture & Forestry') & (df_select['year']==2030),'emis_reduc']
agrifor_emisred_2040 = df_select.loc[(df_select['sector']=='Agriculture & Forestry') & (df_select['year']==2040),'emis_reduc']
agrifor_emisred_2050 = df_select.loc[(df_select['sector']=='Agriculture & Forestry') & (df_select['year']==2050),'emis_reduc']
# Emission reduction: Residential
residential_emisred_2018 = df_select.loc[(df_select['sector']=='Residential') & (df_select['year']==2018),'emis_reduc']
residential_emisred_2030 = df_select.loc[(df_select['sector']=='Residential') & (df_select['year']==2030),'emis_reduc']
residential_emisred_2040 = df_select.loc[(df_select['sector']=='Residential') & (df_select['year']==2040),'emis_reduc']
residential_emisred_2050 = df_select.loc[(df_select['sector']=='Residential') & (df_select['year']==2050),'emis_reduc']
# Emission reduction: Electricity
electricity_emisred_2018 = df_select.loc[(df_select['sector']=='Electricity generation') & (df_select['year']==2018),'emis_reduc']
electricity_emisred_2030 = df_select.loc[(df_select['sector']=='Electricity generation') & (df_select['year']==2030),'emis_reduc']
electricity_emisred_2040 = df_select.loc[(df_select['sector']=='Electricity generation') & (df_select['year']==2040),'emis_reduc']
electricity_emisred_2050 = df_select.loc[(df_select['sector']=='Electricity generation') & (df_select['year']==2050),'emis_reduc']
### # 2018 and 2050 emissions in Mt by sector
# For layout with Mt
df_select['emissions_MtCo2_output_Mt'] = df_select['emissions_MtCo2_output'].apply(str)
df_select['emissions_MtCo2_output_Mt'] = df_select['emissions_MtCo2_output_Mt'] + ' Mt'
services_emis_2018 = df_select.loc[(df_select['sector']=='Services') & (df_select['year']==2018),'emissions_MtCo2_output_Mt']
mining_emis_2018 = df_select.loc[(df_select['sector']=='Mining') & (df_select['year']==2018),'emissions_MtCo2_output_Mt']
manufacturing_emis_2018 = df_select.loc[(df_select['sector']=='Manufacturing') & (df_select['year']==2018),'emissions_MtCo2_output_Mt']
gas_water_waste_emis_2018 = df_select.loc[(df_select['sector']=='Gas, Water & Waste Services') & (df_select['year']==2018),'emissions_MtCo2_output_Mt']
construction_emis_2018 = df_select.loc[(df_select['sector']=='Construction') & (df_select['year']==2018),'emissions_MtCo2_output_Mt']
com_transp_emis_2018 = df_select.loc[(df_select['sector']=='Commercial Transport') & (df_select['year']==2018),'emissions_MtCo2_output_Mt']
agrifor_emis_2018 = df_select.loc[(df_select['sector']=='Agriculture & Forestry') & (df_select['year']==2018),'emissions_MtCo2_output_Mt']
residential_emis_2018 = df_select.loc[(df_select['sector']=='Residential') & (df_select['year']==2018),'emissions_MtCo2_output_Mt']
electricity_emis_2018 = df_select.loc[(df_select['sector']=='Electricity generation') & (df_select['year']==2018),'emissions_MtCo2_output_Mt']
services_emis_2050 = df_select.loc[(df_select['sector']=='Services') & (df_select['year']==2050),'emissions_MtCo2_output_Mt']
mining_emis_2050 = df_select.loc[(df_select['sector']=='Mining') & (df_select['year']==2050),'emissions_MtCo2_output_Mt']
manufacturing_emis_2050 = df_select.loc[(df_select['sector']=='Manufacturing') & (df_select['year']==2050),'emissions_MtCo2_output_Mt']
gas_water_waste_emis_2050 = df_select.loc[(df_select['sector']=='Gas, Water & Waste Services') & (df_select['year']==2050),'emissions_MtCo2_output_Mt']
construction_emis_2050 = df_select.loc[(df_select['sector']=='Construction') & (df_select['year']==2050),'emissions_MtCo2_output_Mt']
com_transp_emis_2050 = df_select.loc[(df_select['sector']=='Commercial Transport') & (df_select['year']==2050),'emissions_MtCo2_output_Mt']
agrifor_emis_2050 = df_select.loc[(df_select['sector']=='Agriculture & Forestry') & (df_select['year']==2050),'emissions_MtCo2_output_Mt']
residential_emis_2050 = df_select.loc[(df_select['sector']=='Residential') & (df_select['year']==2050),'emissions_MtCo2_output_Mt']
electricity_emis_2050 = df_select.loc[(df_select['sector']=='Electricity generation') & (df_select['year']==2050),'emissions_MtCo2_output_Mt']
### # Total emissions and emissions reductions
# Net emission reductions %
df_select_netpc = df_select
df_select_netpc = df_select_netpc.groupby(['geo', 'year'], as_index=False).agg({'emissions_MtCo2_output':'sum',
'emissions_MtCo2_output_orig_decimals': 'sum',
'ind_val_add_output': 'sum',
'emissions_MtCo2_baseyear': 'sum'})
df_select_netpc['emis_reduc']= -100 * (1 - (df_select_netpc['emissions_MtCo2_output'] / df_select_netpc['emissions_MtCo2_baseyear']))
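    # Sign convention (illustrative numbers): an output of 40 Mt against a
    # 100 Mt base year gives -100 * (1 - 0.4) = -60, i.e. a 60% reduction
    # reported as a negative value.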
# Net emission intensity index
df_select_netpc['emis_int_output'] = df_select_netpc['emissions_MtCo2_output_orig_decimals'] / df_select_netpc['ind_val_add_output']
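    # The np.where/.max() pair below broadcasts the 2010 intensity to every
    # row so it can serve as the index base (assumes a single geo is selected).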
df_select_netpc['emis_int_baseyear'] = np.where(df_select_netpc['year'] == 2010, df_select_netpc['emis_int_output'], 0)
df_select_netpc['emis_int_baseyear'] = df_select_netpc.emis_int_baseyear.max()
df_select_netpc['emis_int_index'] = 100 * df_select_netpc['emis_int_output'] / df_select_netpc['emis_int_baseyear']
# Gross emission reductions: stick lulucf in separate column. And get rid of LULUCF emissions for calculation of reduction %ages
df_select_summ = df_select
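    # Note: this assignment aliases df_select rather than copying it, so the
    # .loc updates below also modify df_select.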
df_select_summ['lulucf'] = df_select_summ['emissions_MtCo2_output']
df_select_summ.loc[df_select_summ['sector'] == 'LULUCF', 'emissions_MtCo2_output'] = 0
df_select_summ.loc[df_select_summ['sector'] == 'LULUCF', 'emissions_MtCo2_output_orig_decimals'] = 0
df_select_summ.loc[df_select_summ['sector'] == 'LULUCF', 'emissions_MtCo2_baseyear'] = 0
df_select_summ.loc[df_select_summ['sector'] != 'LULUCF', 'lulucf'] = 0
# Total emissions, value added, and lulucf
df_select_summ = df_select_summ.groupby(['geo', 'year'], as_index=False).agg({'emissions_MtCo2_output':'sum',
'emissions_MtCo2_output_orig_decimals': 'sum',
'ind_val_add_output': 'sum',
'emissions_MtCo2_baseyear': 'sum',
'lulucf': 'sum',
'population': 'min'})
df_select_summ['emis_reduc']= -100 * (1 - (df_select_summ['emissions_MtCo2_output'] / df_select_summ['emissions_MtCo2_baseyear']))
df_select_summ['net_emis']=df_select_summ['emissions_MtCo2_output'] + df_select_summ['lulucf']
    ### ### Emission reductions Mt
# For 2009-2018
df_select_summ['emissions_MtCo2_output_lag10'] = df_select_summ['emissions_MtCo2_output_orig_decimals'].shift(9)
df_select_summ['avg_annu_emis_grow_Mt'] = 0.1 * (df_select_summ['emissions_MtCo2_output_orig_decimals'] - df_select_summ['emissions_MtCo2_output_lag10'])
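    # shift(9) pulls the value from nine rows earlier; assuming one row per
    # year for the selected geo, the 0.1 factor turns the change since then
    # into the average annual change reported for 2009-2018.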
if tab in smallnumberstates:
df_select_summ['avg_annu_emis_grow_Mt']=round(df_select_summ['avg_annu_emis_grow_Mt'], 3)
else:
df_select_summ['avg_annu_emis_grow_Mt']=round(df_select_summ['avg_annu_emis_grow_Mt'], 2)
df_select_summ['avg_annu_emis_grow_Mt'] = df_select_summ['avg_annu_emis_grow_Mt'].apply(str)
df_select_summ['avg_annu_emis_grow_Mt'] = df_select_summ['avg_annu_emis_grow_Mt'] + ' Mt'
total_emisred_Mt_hist = df_select_summ.loc[df_select_summ['year'] == 2018, 'avg_annu_emis_grow_Mt']
# For 2019-2030 & 2031-2050
df_select_summ['emissions_MtCo2_output_lag1'] = df_select_summ['emissions_MtCo2_output_orig_decimals'].shift(1)
df_select_summ['annu_emis_grow_Mt'] = df_select_summ['emissions_MtCo2_output_orig_decimals'] - df_select_summ['emissions_MtCo2_output_lag1']
if tab in smallnumberstates:
df_select_summ['annu_emis_grow_Mt']=round(df_select_summ['annu_emis_grow_Mt'], 3)
else:
df_select_summ['annu_emis_grow_Mt']=round(df_select_summ['annu_emis_grow_Mt'], 2)
df_select_summ['annu_emis_grow_Mt'] = df_select_summ['annu_emis_grow_Mt'].apply(str)
df_select_summ['annu_emis_grow_Mt'] = df_select_summ['annu_emis_grow_Mt'] + ' Mt'
total_emisred_Mt_1930 = df_select_summ.loc[df_select_summ['year'] == 2019, 'annu_emis_grow_Mt']
total_emisred_Mt_3150 = df_select_summ.loc[df_select_summ['year'] == 2031, 'annu_emis_grow_Mt']
### ### Sensible decimal numbers for the % emission reductions, and add percentage symbol here
df_select_summ['emis_reduc']=round(df_select_summ['emis_reduc'],1)
df_select_summ['emis_reduc']=df_select_summ['emis_reduc'].apply(str)
df_select_summ['emis_reduc']=df_select_summ['emis_reduc'] + '%'
# Net emission version
df_select_netpc['emis_reduc']=round(df_select_netpc['emis_reduc'],1)
df_select_netpc['emis_reduc']=df_select_netpc['emis_reduc'].apply(str)
df_select_netpc['emis_reduc']=df_select_netpc['emis_reduc'] + '%'
### ### Assign variables for emission reductions % total
total_emisred_2018 = df_select_summ.loc[df_select_summ['year'] == 2018, 'emis_reduc']
total_emisred_2030 = df_select_summ.loc[df_select_summ['year'] == 2030, 'emis_reduc']
total_emisred_2040 = df_select_summ.loc[df_select_summ['year'] == 2040, 'emis_reduc']
total_emisred_2050 = df_select_summ.loc[df_select_summ['year'] == 2050, 'emis_reduc']
# Net emission version
net_emisred_2018 = df_select_netpc.loc[df_select_netpc['year'] == 2018, 'emis_reduc']
net_emisred_2030 = df_select_netpc.loc[df_select_netpc['year'] == 2030, 'emis_reduc']
net_emisred_2040 = df_select_netpc.loc[df_select_netpc['year'] == 2040, 'emis_reduc']
net_emisred_2050 = df_select_netpc.loc[df_select_netpc['year'] == 2050, 'emis_reduc']
### # Gross emissions with rounding
if tab in smallnumberstates:
df_select_summ['emissions_MtCo2_output']=round(df_select_summ['emissions_MtCo2_output'], 2)
else:
df_select_summ['emissions_MtCo2_output']=round(df_select_summ['emissions_MtCo2_output'], 1)
df_select_summ['emissions_MtCo2_output_Mt'] = df_select_summ['emissions_MtCo2_output'].apply(str)
df_select_summ['emissions_MtCo2_output_Mt'] = df_select_summ['emissions_MtCo2_output_Mt'] + ' Mt'
gross_emis_2018 = df_select_summ.loc[df_select_summ['year']==2018,'emissions_MtCo2_output_Mt']
gross_emis_2030 = df_select_summ.loc[df_select_summ['year']==2030,'emissions_MtCo2_output_Mt']
gross_emis_2040 = df_select_summ.loc[df_select_summ['year']==2040,'emissions_MtCo2_output_Mt']
gross_emis_2050 = df_select_summ.loc[df_select_summ['year']==2050,'emissions_MtCo2_output_Mt']
gross_emis_2018copy = gross_emis_2018
gross_emis_2050copy = gross_emis_2050
### # LULUCF emissions
LULUCF_2018 = df_select.loc[(df_select['sector']=='LULUCF') & (df_select['year']==2018),'emissions_MtCo2_output_Mt']
LULUCF_2030 = df_select.loc[(df_select['sector']=='LULUCF') & (df_select['year']==2030),'emissions_MtCo2_output_Mt']
LULUCF_2040 = df_select.loc[(df_select['sector']=='LULUCF') & (df_select['year']==2040),'emissions_MtCo2_output_Mt']
LULUCF_2050 = df_select.loc[(df_select['sector']=='LULUCF') & (df_select['year']==2050),'emissions_MtCo2_output_Mt']
### # Net emissions
if tab in smallnumberstates:
df_select_summ['net_emis']=round(df_select_summ['net_emis'],2)
else:
df_select_summ['net_emis']=round(df_select_summ['net_emis'],1)
df_select_summ['net_emis_Mt'] = df_select_summ['net_emis'].apply(str)
df_select_summ['net_emis_Mt'] = df_select_summ['net_emis_Mt'] + ' Mt'
net_emis_2018 = df_select_summ.loc[df_select_summ['year']==2018,'net_emis_Mt']
net_emis_2030 = df_select_summ.loc[df_select_summ['year']==2030,'net_emis_Mt']
net_emis_2040 = df_select_summ.loc[df_select_summ['year']==2040,'net_emis_Mt']
net_emis_2050 = df_select_summ.loc[df_select_summ['year']==2050,'net_emis_Mt']
### # Emission intensity of electricity generation
df_select_elec['elec_carb_int_outp'] = df_select_elec.elec_carb_int_outp.apply(str)
df_select_elec['elec_carb_int_outp_g'] = df_select_elec['elec_carb_int_outp'] + ' g/kWh'
elec_carb_int_2018 = df_select_elec.loc[df_select_elec['year']==2018,'elec_carb_int_outp_g']
elec_carb_int_2030 = df_select_elec.loc[df_select_elec['year']==2030,'elec_carb_int_outp_g']
elec_carb_int_2040 = df_select_elec.loc[df_select_elec['year']==2040,'elec_carb_int_outp_g']
elec_carb_int_2050 = df_select_elec.loc[df_select_elec['year']==2050,'elec_carb_int_outp_g']
### ### Total value added changes
# For 2009-2018
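    # np.power(end / start, 1 / n) - 1 below is the compound (geometric)
    # average annual growth rate over an n-year window; the shift() call
    # supplies the starting value (a note on the formula, assuming annual rows).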
df_select_summ['ind_val_add_output_lag10'] = df_select_summ['ind_val_add_output'].shift(9)
df_select_summ['ind_val_add_total_hist'] = np.power(df_select_summ['ind_val_add_output'] / df_select_summ['ind_val_add_output_lag10'], 0.1)
df_select_summ['ind_val_add_total_hist'] = 100 * (df_select_summ['ind_val_add_total_hist'] - 1)
df_select_summ['ind_val_add_total_hist'] = round(df_select_summ['ind_val_add_total_hist'], 1)
df_select_summ['ind_val_add_total_hist'] = df_select_summ['ind_val_add_total_hist'].apply(str)
df_select_summ['ind_val_add_total_hist'] = df_select_summ['ind_val_add_total_hist'] + '%'
total_val_add_hist = df_select_summ.loc[df_select_summ['year'] == 2018, 'ind_val_add_total_hist']
# For 2019-2030 & 2031-2050
df_select_summ['ind_val_add_output_lag1'] = df_select_summ['ind_val_add_output'].shift(1)
df_select_summ['ind_val_add_total_1950'] = df_select_summ['ind_val_add_output'] / df_select_summ['ind_val_add_output_lag1']
df_select_summ['ind_val_add_total_1950'] = 100 * (df_select_summ['ind_val_add_total_1950'] - 1)
df_select_summ['ind_val_add_total_1950'] = round(df_select_summ['ind_val_add_total_1950'], 1)
df_select_summ['ind_val_add_total_1950'] = df_select_summ['ind_val_add_total_1950'].apply(str)
df_select_summ['ind_val_add_total_1950'] = df_select_summ['ind_val_add_total_1950'] + '%'
total_val_add_1950 = df_select_summ.loc[df_select_summ['year'] == 2020, 'ind_val_add_total_1950']
### ### Emission intensity changes by sector
# For 2009-2018
df_select['emis_int_outp'] = df_select['emissions_MtCo2_output'] / df_select['ind_val_add_output']
df_select['emis_int_outp_lag10'] = df_select['emis_int_outp'].shift(9)
df_select['emis_int_outp_hist'] = np.power(df_select['emis_int_outp'] / df_select['emis_int_outp_lag10'], 0.1)
df_select['emis_int_outp_hist'] = 100 * (df_select['emis_int_outp_hist'] - 1)
df_select['emis_int_outp_hist'] = round(df_select['emis_int_outp_hist'], 1)
df_select['emis_int_outp_hist'] = df_select['emis_int_outp_hist'].apply(str)
df_select['emis_int_outp_hist'] = df_select['emis_int_outp_hist'] + '%'
# For 2019-2030
df_select['emis_int_outp_lag12'] = df_select['emis_int_outp'].shift(11)
df_select['emis_int_outp_1930'] = np.power(df_select['emis_int_outp'] / df_select['emis_int_outp_lag12'], 1 / 12)
df_select['emis_int_outp_1930'] = 100 * (df_select['emis_int_outp_1930'] - 1)
df_select['emis_int_outp_1930'] = round(df_select['emis_int_outp_1930'], 1)
df_select['emis_int_outp_1930'] = df_select['emis_int_outp_1930'].apply(str)
df_select['emis_int_outp_1930'] = df_select['emis_int_outp_1930'] + '%'
# For 2031-2050
df_select['emis_int_outp_lag20'] = df_select['emis_int_outp'].shift(19)
df_select['emis_int_outp_3150'] = np.power(df_select['emis_int_outp'] / df_select['emis_int_outp_lag20'], 1 / 20)
df_select['emis_int_outp_3150'] = 100 * (df_select['emis_int_outp_3150'] - 1)
df_select['emis_int_outp_3150'] = round(df_select['emis_int_outp_3150'], 1)
df_select['emis_int_outp_3150'] = df_select['emis_int_outp_3150'].apply(str)
df_select['emis_int_outp_3150'] = df_select['emis_int_outp_3150'] + '%'
# Annual reductions in emission intensity: services
services_emisint_red_2018 = df_select.loc[(df_select['sector']=='Services') & (df_select['year']==2018),'emis_int_outp_hist']
services_emisint_red_2030 = df_select.loc[(df_select['sector']=='Services') & (df_select['year']==2030),'emis_int_outp_1930']
services_emisint_red_2050 = df_select.loc[(df_select['sector']=='Services') & (df_select['year']==2050),'emis_int_outp_3150']
# Annual reductions in emission intensity: Mining
mining_emisint_red_2018 = df_select.loc[(df_select['sector']=='Mining') & (df_select['year']==2018),'emis_int_outp_hist']
mining_emisint_red_2030 = df_select.loc[(df_select['sector']=='Mining') & (df_select['year']==2030),'emis_int_outp_1930']
mining_emisint_red_2050 = df_select.loc[(df_select['sector']=='Mining') & (df_select['year']==2050),'emis_int_outp_3150']
# Annual reductions in emission intensity: Manufacturing
manufacturing_emisint_red_2018 = df_select.loc[(df_select['sector']=='Manufacturing') & (df_select['year']==2018),'emis_int_outp_hist']
manufacturing_emisint_red_2030 = df_select.loc[(df_select['sector']=='Manufacturing') & (df_select['year']==2030),'emis_int_outp_1930']
manufacturing_emisint_red_2050 = df_select.loc[(df_select['sector']=='Manufacturing') & (df_select['year']==2050),'emis_int_outp_3150']
# Annual reductions in emission intensity: Gas, Water & waste services
gas_water_waste_emisint_red_2018 = df_select.loc[(df_select['sector']=='Gas, Water & Waste Services') & (df_select['year']==2018),'emis_int_outp_hist']
gas_water_waste_emisint_red_2030 = df_select.loc[(df_select['sector']=='Gas, Water & Waste Services') & (df_select['year']==2030),'emis_int_outp_1930']
gas_water_waste_emisint_red_2050 = df_select.loc[(df_select['sector']=='Gas, Water & Waste Services') & (df_select['year']==2050),'emis_int_outp_3150']
# Annual reductions in emission intensity: Construction
construction_emisint_red_2018 = df_select.loc[(df_select['sector']=='Construction') & (df_select['year']==2018),'emis_int_outp_hist']
construction_emisint_red_2030 = df_select.loc[(df_select['sector']=='Construction') & (df_select['year']==2030),'emis_int_outp_1930']
construction_emisint_red_2050 = df_select.loc[(df_select['sector']=='Construction') & (df_select['year']==2050),'emis_int_outp_3150']
# Annual reductions in emission intensity: Commercial transport
com_transp_emisint_red_2018 = df_select.loc[(df_select['sector']=='Commercial Transport') & (df_select['year']==2018),'emis_int_outp_hist']
com_transp_emisint_red_2030 = df_select.loc[(df_select['sector']=='Commercial Transport') & (df_select['year']==2030),'emis_int_outp_1930']
com_transp_emisint_red_2050 = df_select.loc[(df_select['sector']=='Commercial Transport') & (df_select['year']==2050),'emis_int_outp_3150']
# Annual reductions in emission intensity: Agriculture & Forestry
agrifor_emisint_red_2018 = df_select.loc[(df_select['sector']=='Agriculture & Forestry') & (df_select['year']==2018),'emis_int_outp_hist']
agrifor_emisint_red_2030 = df_select.loc[(df_select['sector']=='Agriculture & Forestry') & (df_select['year']==2030),'emis_int_outp_1930']
agrifor_emisint_red_2050 = df_select.loc[(df_select['sector']=='Agriculture & Forestry') & (df_select['year']==2050),'emis_int_outp_3150']
# Annual reductions in emission intensity: Electricity generation
electricity_emisint_red_2018 = df_select.loc[(df_select['sector']=='Electricity generation') & (df_select['year']==2018),'emis_int_outp_hist']
electricity_emisint_red_2030 = df_select.loc[(df_select['sector']=='Electricity generation') & (df_select['year']==2030),'emis_int_outp_1930']
electricity_emisint_red_2050 = df_select.loc[(df_select['sector']=='Electricity generation') & (df_select['year']==2050),'emis_int_outp_3150']
### ### Emission intensity changes total
    # Use net emissions for this
df_select_summ['total_emis_int'] = df_select_summ['net_emis'] / df_select_summ['ind_val_add_output']
# For 2009-2018
df_select_summ['total_emis_int_lag10'] = df_select_summ['total_emis_int'].shift(9)
df_select_summ['total_emis_int_red_hist'] = np.power(df_select_summ['total_emis_int'] / df_select_summ['total_emis_int_lag10'], 0.1)
df_select_summ['total_emis_int_red_hist'] = 100 * (df_select_summ['total_emis_int_red_hist'] - 1)
df_select_summ['total_emis_int_red_hist'] = round(df_select_summ['total_emis_int_red_hist'], 1)
df_select_summ['total_emis_int_red_hist'] = df_select_summ['total_emis_int_red_hist'].apply(str)
df_select_summ['total_emis_int_red_hist'] = df_select_summ['total_emis_int_red_hist'] + '%'
# For 2019-2030
df_select_summ['total_emis_int_lag12'] = df_select_summ['total_emis_int'].shift(11)
df_select_summ['total_emis_int_red_1930'] = np.power(df_select_summ['total_emis_int'] / df_select_summ['total_emis_int_lag12'], 1 / 12)
df_select_summ['total_emis_int_red_1930'] = 100 * (df_select_summ['total_emis_int_red_1930'] - 1)
df_select_summ['total_emis_int_red_1930'] = round(df_select_summ['total_emis_int_red_1930'], 1)
df_select_summ['total_emis_int_red_1930'] = df_select_summ['total_emis_int_red_1930'].apply(str)
df_select_summ['total_emis_int_red_1930'] = df_select_summ['total_emis_int_red_1930'] + '%'
# For 2031-2050
df_select_summ['total_emis_int_lag20'] = df_select_summ['total_emis_int'].shift(19)
df_select_summ['total_emis_int_red_3150'] = np.power(df_select_summ['total_emis_int'] / df_select_summ['total_emis_int_lag20'], 1 / 20)
df_select_summ['total_emis_int_red_3150'] = 100 * (df_select_summ['total_emis_int_red_3150'] - 1)
df_select_summ['total_emis_int_red_3150'] = round(df_select_summ['total_emis_int_red_3150'], 1)
df_select_summ['total_emis_int_red_3150'] = df_select_summ['total_emis_int_red_3150'].apply(str)
df_select_summ['total_emis_int_red_3150'] = df_select_summ['total_emis_int_red_3150'] + '%'
# Outputs
total_emis_int_red_hist = df_select_summ.loc[df_select_summ['year'] == 2018, 'total_emis_int_red_hist']
total_emis_int_red_1930 = df_select_summ.loc[df_select_summ['year'] == 2030, 'total_emis_int_red_1930']
total_emis_int_red_3150 = df_select_summ.loc[df_select_summ['year'] == 2050, 'total_emis_int_red_3150']
### ### Define Per capita emission figure again, but with dynamic input
# Per capita emissions
df_select_summ['pcap_emissions'] = df_select_summ['net_emis'] / df_select_summ['population']
    # Round numbers
df_select_summ['population'] = round(df_select_summ['population'], 2)
df_select_summ['pcap_emissions'] = round(df_select_summ['pcap_emissions'], 2)
# Make dictionary for dual y-axis figure
year_new_dict = df_select_summ['year'].tolist()
pop_dict = df_select_summ['population'].tolist()
pcap_emis_dict = df_select_summ['pcap_emissions'].tolist()
# redefine figure with dynamic input
# Keep here because net emissions have not been calculated earlier
fig_pop_per_capita = make_subplots(specs=[[{"secondary_y": True}]])
# Add traces
fig_pop_per_capita.add_scatter(x=year_dict, y=pcap_emis_dict, name="Per capita emissions", mode="lines", line=dict(width=2, color="black"), secondary_y=False)
fig_pop_per_capita.add_scatter(x=year_new_dict, y=pop_dict, name="Population", mode="lines", line=dict(width=2, color="rgba(31, 119, 180, 1)"), secondary_y=True)
fig_pop_per_capita.update_layout(template="plotly_white",
legend_traceorder="reversed",
title_text="Population and net per capita emissions",
title_font_color="#1F77B4",
title_font_size=18,
title_font_family="Rockwell",
title_x=0.02,
margin=dict(t=40, r=0, b=0, l=65, pad=0),
width=672, height=340)
fig_pop_per_capita.update_xaxes(showline=True, linewidth=1, linecolor='black', gridcolor='rgba(149, 165, 166, 0.6)', mirror=True)
fig_pop_per_capita.update_yaxes(showline=True, linewidth=1, linecolor='black', gridcolor='rgba(149, 165, 166, 0.6)', mirror=True)
# Set y-axes titles
fig_pop_per_capita.update_yaxes(title_text="Net per capita emissions (t CO<sub>2</sub>-eq/person)", secondary_y=False)
fig_pop_per_capita.update_yaxes(title_text="Population (millions)", secondary_y=True)
# Set y-axis range
max_pcap_emis = max(pcap_emis_dict) * 1.1
fig_pop_per_capita.update_layout(yaxis=dict(range=[0,max_pcap_emis]))
### ### Redefine Emission intensity index figure with dynamic input
    # Round numbers
df_select_netpc['emis_int_index'] = round(df_select_netpc['emis_int_index'], 1)
# Create lists
year_new_dict = df_select_netpc['year'].tolist()
emis_int_dict = df_select_netpc['emis_int_index'].tolist()
# redefine figure with dynamic input
# Keep here because net emissions have not been calculated earlier
fig_emis_int_index = make_subplots(specs=[[{"secondary_y": False}]])
# Add traces
fig_emis_int_index.add_scatter(x=year_new_dict, y=emis_int_dict, name="Emission intensity index", mode="lines", line=dict(width=2, color="rgba(31, 119, 180, 1)"), secondary_y=False)
fig_emis_int_index.update_layout(template="plotly_white",
title_text="Emission intensity index",
title_font_color="#1F77B4",
title_font_size=18,
title_font_family="Rockwell",
title_x=0.02,
margin=dict(t=40, r=0, b=0, l=65, pad=0),
width=482, height=340)
fig_emis_int_index.update_xaxes(showline=True, linewidth=1, linecolor='black', gridcolor='rgba(149, 165, 166, 0.6)', mirror=True)
fig_emis_int_index.update_yaxes(showline=True, linewidth=1, linecolor='black', gridcolor='rgba(149, 165, 166, 0.6)', mirror=True)
# Set y-axes titles
fig_emis_int_index.update_yaxes(title_text="Emission intensity index (2010=100)", secondary_y=False)
# Set y-axis range
emis_int_dict = emis_int_dict[4:]
max_emis_int = max(emis_int_dict) * 1.1
fig_emis_int_index.update_layout(yaxis=dict(range=[0,max_emis_int]))
return (fig_emissions_total, fig_added_value_total, fig_emis_int, fig_elec_gen_int, fig_pop_per_capita, fig_emis_int_index,
services_emisred_2018, services_emisred_2030, services_emisred_2040, services_emisred_2050,
mining_emisred_2018, mining_emisred_2030, mining_emisred_2040, mining_emisred_2050,
manufacturing_emisred_2018, manufacturing_emisred_2030, manufacturing_emisred_2040, manufacturing_emisred_2050,
gas_water_waste_emisred_2018, gas_water_waste_emisred_2030, gas_water_waste_emisred_2040, gas_water_waste_emisred_2050,
construction_emisred_2018, construction_emisred_2030, construction_emisred_2040, construction_emisred_2050,
com_transp_emisred_2018, com_transp_emisred_2030, com_transp_emisred_2040, com_transp_emisred_2050,
agrifor_emisred_2018, agrifor_emisred_2030, agrifor_emisred_2040, agrifor_emisred_2050,
residential_emisred_2018, residential_emisred_2030, residential_emisred_2040, residential_emisred_2050,
electricity_emisred_2018, electricity_emisred_2030, electricity_emisred_2040, electricity_emisred_2050,
services_emis_2018, mining_emis_2018, manufacturing_emis_2018, gas_water_waste_emis_2018,
construction_emis_2018, com_transp_emis_2018, agrifor_emis_2018, residential_emis_2018, electricity_emis_2018,
services_emis_2050, mining_emis_2050, manufacturing_emis_2050, gas_water_waste_emis_2050,
construction_emis_2050, com_transp_emis_2050, agrifor_emis_2050, residential_emis_2050, electricity_emis_2050,
total_emisred_Mt_hist, total_emisred_Mt_1930, total_emisred_Mt_3150,
total_emisred_2018, total_emisred_2030, total_emisred_2040, total_emisred_2050,
net_emisred_2018, net_emisred_2030, net_emisred_2040, net_emisred_2050,
gross_emis_2018, gross_emis_2030, gross_emis_2040, gross_emis_2050, gross_emis_2018copy, gross_emis_2050copy,
LULUCF_2018, LULUCF_2030, LULUCF_2040, LULUCF_2050,
net_emis_2018, net_emis_2030, net_emis_2040, net_emis_2050,
total_val_add_hist, total_val_add_1950,
elec_carb_int_2018, elec_carb_int_2030, elec_carb_int_2040, elec_carb_int_2050,
services_emisint_red_2018, services_emisint_red_2030, services_emisint_red_2050,
mining_emisint_red_2018, mining_emisint_red_2030, mining_emisint_red_2050,
manufacturing_emisint_red_2018, manufacturing_emisint_red_2030, manufacturing_emisint_red_2050,
gas_water_waste_emisint_red_2018, gas_water_waste_emisint_red_2030, gas_water_waste_emisint_red_2050,
construction_emisint_red_2018, construction_emisint_red_2030, construction_emisint_red_2050,
com_transp_emisint_red_2018, com_transp_emisint_red_2030, com_transp_emisint_red_2050,
agrifor_emisint_red_2018, agrifor_emisint_red_2030, agrifor_emisint_red_2050,
electricity_emisint_red_2018, electricity_emisint_red_2030, electricity_emisint_red_2050,
total_emis_int_red_hist, total_emis_int_red_1930, total_emis_int_red_3150)
if __name__ == '__main__':
app.run_server(debug=False,dev_tools_ui=False,dev_tools_props_check=False)
``` |
{
"source": "jorritsmit/mycroft-core",
"score": 2
} |
#### File: integrationtests/skills/skill_tester.py
```python
from queue import Queue, Empty
import json
import time
import os
import re
import ast
from os.path import join, isdir, basename
from pyee import EventEmitter
from mycroft.messagebus.message import Message
from mycroft.skills.core import create_skill_descriptor, load_skill, \
MycroftSkill, FallbackSkill
MainModule = '__init__'
DEFAULT_EVALUATION_TIMEOUT = 30
def get_skills(skills_folder):
"""Find skills in the skill folder or sub folders.
    Recursive traversal into subfolders stops when an __init__.py file
    is discovered.
Args:
skills_folder: Folder to start a search for skills __init__.py
files
Returns:
list: the skills
"""
skills = []
def _get_skill_descriptor(skills_folder):
if not isdir(skills_folder):
return
if MainModule + ".py" in os.listdir(skills_folder):
skills.append(create_skill_descriptor(skills_folder))
return
possible_skills = os.listdir(skills_folder)
for i in possible_skills:
_get_skill_descriptor(join(skills_folder, i))
_get_skill_descriptor(skills_folder)
skills = sorted(skills, key=lambda p: basename(p['path']))
return skills
def load_skills(emitter, skills_root):
"""Load all skills and set up emitter
Args:
        emitter: The emitter to use
skills_root: Directory of the skills __init__.py
Returns:
list: a list of loaded skills
"""
skill_list = []
for skill in get_skills(skills_root):
path = skill["path"]
skill_id = 'test-' + basename(path)
skill_list.append(load_skill(skill, emitter, skill_id))
return skill_list
def unload_skills(skills):
for s in skills:
s._shutdown()
class InterceptEmitter(object):
"""
This class intercepts and allows emitting events between the
skill_tester and the skill being tested.
    When a test is running, emitted communication is intercepted for analysis.
"""
def __init__(self):
self.emitter = EventEmitter()
self.q = None
def on(self, event, f):
# run all events
print("Event: ", event)
self.emitter.on(event, f)
def emit(self, event, *args, **kwargs):
event_name = event.type
if self.q:
self.q.put(event)
self.emitter.emit(event_name, event, *args, **kwargs)
def once(self, event, f):
self.emitter.once(event, f)
def remove(self, event_name, func):
pass
class MockSkillsLoader(object):
"""Load a skill and set up emitter
"""
def __init__(self, skills_root):
self.skills_root = skills_root
self.emitter = InterceptEmitter()
from mycroft.skills.intent_service import IntentService
from mycroft.skills.padatious_service import PadatiousService
self.ih = IntentService(self.emitter)
self.ps = PadatiousService(self.emitter, self.ih)
self.skills = None
self.emitter.on(
'intent_failure',
FallbackSkill.make_intent_failure_handler(self.emitter))
def make_response(_):
data = dict(result=False)
self.emitter.emit(Message('skill.converse.response', data))
self.emitter.on('skill.converse.request', make_response)
def load_skills(self):
self.skills = load_skills(self.emitter, self.skills_root)
self.skills = [s for s in self.skills if s]
self.ps.train(Message('', data=dict(single_thread=True)))
return self.emitter.emitter # kick out the underlying emitter
def unload_skills(self):
unload_skills(self.skills)
class SkillTest(object):
"""
This class is instantiated for each skill being tested. It holds the
data needed for the test, and contains the methods doing the test
"""
def __init__(self, skill, test_case_file, emitter, test_status=None):
self.skill = skill
self.test_case_file = test_case_file
self.emitter = emitter
self.dict = dict
self.output_file = None
self.returned_intent = False
self.test_status = test_status
def run(self, loader):
"""
        Run a test for a skill. The skill, test_case_file and emitter are
        already set up in the __init__ method.
Args:
loader: A list of loaded skills
"""
s = [s for s in loader.skills if s and s.root_dir == self.skill]
if s:
s = s[0]
else:
raise Exception('Skill couldn\'t be loaded')
print('Test case file: ', self.test_case_file)
test_case = json.load(open(self.test_case_file, 'r'))
print("Test case: ", test_case)
if 'responses' in test_case:
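            # A hypothetical test_case field such as {"responses": ["yes"]}
            # feeds those canned answers, in order, to consecutive
            # get_response() calls made by the skill.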
def get_response(dialog='', data=None, announcement='',
validator=None, on_fail=None, num_retries=-1):
data = data or {}
utt = announcement or s.dialog_renderer.render(dialog, data)
s.speak(utt)
response = test_case['responses'].pop(0)
print(">" + utt)
print("Responding with ", response)
return response
s.get_response = get_response
# If we keep track of test status for the entire skill, then
# get all intents from the skill, and mark current intent
# tested
if self.test_status:
self.test_status.append_intent(s)
if 'intent_type' in test_case:
self.test_status.set_tested(test_case['intent_type'])
evaluation_rule = EvaluationRule(test_case, s)
        # Set up a queue for emitted events, because the evaluation method
        # expects events to be received and handled one by one. We can't make
        # assumptions about threading in the core or the skill.
q = Queue()
s.emitter.q = q
# Set up context before calling intent
        # This option makes it possible to better isolate (reduce dependence)
# between test_cases
cxt = test_case.get('remove_context', None)
if cxt:
if isinstance(cxt, list):
for x in cxt:
MycroftSkill.remove_context(s, x)
else:
MycroftSkill.remove_context(s, cxt)
cxt = test_case.get('set_context', None)
if cxt:
for key, value in cxt.items():
MycroftSkill.set_context(s, key, value)
# Emit an utterance, just like the STT engine does. This sends the
# provided text to the skill engine for intent matching and it then
# invokes the skill.
self.emitter.emit(
'recognizer_loop:utterance',
Message('recognizer_loop:utterance',
{'utterances': [test_case.get('utterance', None)]}))
# Wait up to X seconds for the test_case to complete
timeout = time.time() + int(test_case.get('evaluation_timeout')) \
if test_case.get('evaluation_timeout', None) and \
isinstance(test_case['evaluation_timeout'], int) \
            else time.time() + DEFAULT_EVALUATION_TIMEOUT
while not evaluation_rule.all_succeeded():
try:
event = q.get(timeout=1)
if ':' in event.type:
event.data['__type__'] = event.type.split(':')[1]
else:
event.data['__type__'] = event.type
evaluation_rule.evaluate(event.data)
if event.type == 'mycroft.skill.handler.complete':
break
except Empty:
pass
if time.time() > timeout:
break
        # Stop emitter from sending on queue
s.emitter.q = None
# remove the skill which is not responding
self.emitter.remove_all_listeners('speak')
self.emitter.remove_all_listeners('mycroft.skill.handler.complete')
# Report test result if failed
if not evaluation_rule.all_succeeded():
print("Evaluation failed")
print("Rule status: ", evaluation_rule.rule)
return False
return True
# Messages that should not print debug info
HIDDEN_MESSAGES = ['skill.converse.request', 'skill.converse.response']
class EvaluationRule(object):
"""
    This class initially converts the test_case json file to internal rule
format, which is stored throughout the testcase run. All Messages on
the event bus can be evaluated against the rules (test_case)
This approach makes it easier to add new tests, since Message and rule
traversal is already set up for the internal rule format.
The test writer can use the internal rule format directly in the
test_case using the assert keyword, which allows for more
    powerful/individual test cases than the standard dictionary.
"""
def __init__(self, test_case, skill=None):
"""
Convert test_case read from file to internal rule format
Args:
test_case: The loaded test case
skill: optional skill to test, used to fetch dialogs
"""
self.rule = []
_x = ['and']
if 'utterance' in test_case and 'intent_type' in test_case:
intent_type = str(test_case['intent_type'])
_x.append(['or'] +
[['endsWith', 'intent_type', intent_type]] +
[['endsWith', '__type__', intent_type]])
# Check for adapt intent info
if test_case.get('intent', None):
for item in test_case['intent'].items():
_x.append(['equal', str(item[0]), str(item[1])])
# Check for expected data structure
if test_case.get('expected_data'):
_d = ['and']
for item in test_case['expected_data'].items():
_d.append(['equal', item[0], item[1]])
self.rule.append(_d)
if _x != ['and']:
self.rule.append(_x)
if test_case.get('expected_response', None):
self.rule.append(['match', 'utterance',
str(test_case['expected_response'])])
if test_case.get('expected_dialog', None):
if not skill:
print('Skill is missing, can\'t run expected_dialog test')
else:
# Make sure expected dialog file is used
dialog = test_case['expected_dialog']
# Extract dialog texts from skill
dialogs = skill.dialog_renderer.templates[dialog]
# Allow custom fields to be anything
                d = [re.sub(r'{.*?\}', '.*', t) for t in dialogs]
# Create rule allowing any of the sentences for that dialog
rules = [['match', 'utterance', r] for r in d]
self.rule.append(['or'] + rules)
if test_case.get('changed_context', None):
ctx = test_case['changed_context']
if isinstance(ctx, list):
for c in ctx:
self.rule.append(['equal', 'context', str(c)])
else:
self.rule.append(['equal', 'context', ctx])
if test_case.get('assert', None):
for _x in ast.literal_eval(test_case['assert']):
self.rule.append(_x)
print("Rule created ", self.rule)
def evaluate(self, msg):
"""Main entry for evaluating a message against the rules.
The rules are prepared in the __init__
This method is usually called several times with different
        messages using the same rule set. Each call contributes
        to fulfilling all the rules.
Args:
msg: The message event to evaluate
"""
if msg.get('__type__', '') not in HIDDEN_MESSAGES:
print("\nEvaluating message: ", msg)
for r in self.rule:
self._partial_evaluate(r, msg)
def _get_field_value(self, rule, msg):
if isinstance(rule, list):
value = msg.get(rule[0], None)
if len(rule) > 1 and value:
for field in rule[1:]:
value = value.get(field, None)
if not value:
break
else:
value = msg.get(rule, None)
return value
def _partial_evaluate(self, rule, msg):
"""Evaluate the message against a part of the rules
Recursive over rules
Args:
rule: A rule or a part of the rules to be broken down further
msg: The message event being evaluated
Returns:
Bool: True if a partial evaluation succeeded
"""
if 'succeeded' in rule: # Rule has already succeeded, test not needed
return True
if rule[0] == 'equal':
if self._get_field_value(rule[1], msg) != rule[2]:
return False
if rule[0] == 'notEqual':
if self._get_field_value(rule[1], msg) == rule[2]:
return False
if rule[0] == 'endsWith':
if not (self._get_field_value(rule[1], msg) and
self._get_field_value(rule[1], msg).endswith(rule[2])):
return False
if rule[0] == 'exists':
if not self._get_field_value(rule[1], msg):
return False
if rule[0] == 'match':
if not (self._get_field_value(rule[1], msg) and
re.match(rule[2], self._get_field_value(rule[1], msg))):
return False
if rule[0] == 'and':
for i in rule[1:]:
if not self._partial_evaluate(i, msg):
return False
if rule[0] == 'or':
for i in rule[1:]:
if self._partial_evaluate(i, msg):
break
else:
return False
rule.append('succeeded')
return True
def all_succeeded(self):
"""Test if all rules succeeded
Returns:
bool: True if all rules succeeded
"""
return len([x for x in self.rule if x[-1] != 'succeeded']) == 0
``` |
{
"source": "jorritsmit/ros-get",
"score": 2
} |
#### File: ros-get/test/test_list_installed.py
```python
import imp
import pytest
import ros_get.workspace
import xdg
from ros_get import create, install, list_workspaces, locate, remove, save, switch, update
@pytest.fixture()
def empty_config_home(tmpdir, monkeypatch):
monkeypatch.setenv('XDG_CONFIG_HOME', str(tmpdir))
imp.reload(xdg)
imp.reload(ros_get.workspace)
return tmpdir
def test_install(empty_config_home):
assert install([], verbose=True) == 1
@pytest.mark.skip()
def test_update():
assert update(verbose=True) == 1
def test_list(empty_config_home):
list_workspaces(verbose=True)
def test_remove(empty_config_home):
remove(['unknown'], verbose=True)
@pytest.mark.skip()
def test_ws_create(empty_config_home):
# TODO: implement when the final implementation of create is finished
create(verbose=True)
def test_ws_switch(empty_config_home):
switch('unknown', verbose=True)
def test_ws_save(empty_config_home):
save('dir', 'name', verbose=True)
def test_ws_list(empty_config_home):
assert len(empty_config_home.listdir()) == 0
list_workspaces(verbose=True)
config = empty_config_home.join('ros-get')
assert config.check(dir=1)
assert config.join('workspace').check(exists=0)
assert config.join('workspaces').check(dir=1)
def test_ws_locate(empty_config_home):
locate(verbose=True)
assert len(empty_config_home.listdir()) == 0
``` |
{
"source": "jorrit-steporange/CumulusCI",
"score": 3
} |
#### File: ci/github/merge_master_to_feature.py
```python
import os
import sys
from github import Github
from github.GithubException import GithubException
def merge_master_to_feature():
ORG_NAME=os.environ.get('GITHUB_ORG_NAME')
REPO_NAME=os.environ.get('GITHUB_REPO_NAME')
MASTER_BRANCH=os.environ.get('MASTER_BRANCH','master')
USERNAME=os.environ.get('GITHUB_USERNAME')
    PASSWORD=os.environ.get('GITHUB_PASSWORD')
BUILD_COMMIT=os.environ.get('BUILD_COMMIT', None)
g = Github(USERNAME,PASSWORD)
try:
org = g.get_organization(ORG_NAME)
except:
org = g.get_user(ORG_NAME)
repo = org.get_repo(REPO_NAME)
master = repo.get_branch(MASTER_BRANCH)
exception = None
pulls = repo.get_pulls()
for branch in repo.get_branches():
# Skip any branches which don't start with feature/
if not branch.name.startswith('feature/'):
print 'Skipping branch %s: does not start with feature/' % branch.name
continue
# Skip the master branch
if branch.name == master.name:
print 'Skipping branch %s: is master branch' % branch.name
continue
# Skip branches which are not behind dev
# Changed to check if the files list is empty since merge creates a new commit on dev
# which makes the merged feature branch behind by a commit but with no files.
# Get a comparison of master vs branch. compare.ahead_by means master is head of the branch.
# This orientation is necessary so the compare.files list lists files changed in master but not
# in the branch.
if BUILD_COMMIT:
compare = repo.compare(branch.commit.sha, BUILD_COMMIT)
else:
compare = repo.compare(branch.commit.sha, master.commit.sha)
if not compare.files:
print 'Skipping branch %s: branch has no files different than %s' % (branch.name, master.name)
continue
# Try to do a merge directly in Github
try:
merge = repo.merge(branch.name, master.name)
print 'Merged %s commits into %s (%s)' % (compare.ahead_by, branch.name, merge.sha)
except GithubException, e:
# Auto-merge failed
if e.data.get('message') == u'Merge conflict':
existing_pull = None
for pull in pulls:
if pull.base.ref == branch.name:
existing_pull = pull
print 'Skipping branch %s: pull request already exists' % branch.name
if not existing_pull:
# If the failure was due to merge conflict, create a pull request
pull = repo.create_pull(
title="Merge conflict merging %s into %s" % (master.name, branch.name),
body="mrbelvedere tried to merge new commits to %s but hit a merge conflict. Please resolve manually" % master.name,
base=branch.name,
head=master.name,
)
print 'Create pull request %s to resolve merge conflict in %s' % (pull.number, branch.name)
# Assign pull request to branch committers
commits = repo.get_commits(sha = branch.commit.sha)
assignee = None
# Find the most recent committer who is not the user used by this script
# NOTE: This presumes the user being used by this script is a robot user, not a real dev
for commit in commits:
if commit.committer.login != USERNAME:
assignee = commit.committer
break
if assignee:
repo.get_issue(pull.number).edit(assignee = assignee)
else:
# For other types of failures, store the last exception and raise at the end
exception = e
if exception:
# If an exception other than a merge conflict was encountered, raise it
raise exception
if __name__ == '__main__':
try:
merge_master_to_feature()
except:
import traceback
exc_type, exc_value, exc_traceback = sys.exc_info()
print '-'*60
traceback.print_exception(exc_type, exc_value, exc_traceback, file=sys.stdout)
print '-'*60
sys.exit(1)
```
#### File: CumulusCI/ci/mrbelvedere_update_dependencies.py
```python
import json
import os
import sys
import requests
def update_dependencies():
MRBELVEDERE_BASE_URL=os.environ.get('MRBELVEDERE_BASE_URL')
PACKAGE_KEY=os.environ.get('MRBELVEDERE_PACKAGE_KEY')
NAMESPACE=os.environ.get('NAMESPACE')
BETA=os.environ.get('BETA', False)
VERSION=os.environ.get('PACKAGE_VERSION')
PROPERTIES_PATH=os.environ.get('PROPERTIES_PATH', None)
    dependencies_url = '%s/%s/dependencies' % (MRBELVEDERE_BASE_URL, NAMESPACE)
if BETA in ('True','true'):
dependencies_url = dependencies_url + '/beta'
current_dependencies = json.loads(requests.get(dependencies_url).content)
dependencies = []
if PROPERTIES_PATH:
f = open(PROPERTIES_PATH, 'r')
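        # Each line is expected to look like "version.some_namespace = 1.2.3"
        # (hypothetical values); the "version." prefix is stripped below and
        # "Not Installed" entries are skipped.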
for line in f.readlines():
namespace, version = [x.strip() for x in line.split('=')]
namespace = namespace.replace('version.','')
# Skip any namespace with "Not Installed" as a the version
if version == 'Not Installed':
continue
dependencies.append({'namespace': namespace, 'number': version})
dependencies.append({'namespace': NAMESPACE, 'number': VERSION})
changed = False
for package in current_dependencies:
matched = False
for new in dependencies:
if new['namespace'] == package['namespace']:
matched = True
if new['number'] == package['number']:
print "No change for %s" % new['namespace']
else:
print "Changing %s from %s to %s" % (package['namespace'], package['number'], new['number'])
changed = True
break
if not matched:
print "No change for %s" % package['namespace']
if changed:
resp = requests.post(dependencies_url, data=json.dumps(dependencies), headers={'Authorization': PACKAGE_KEY})
if __name__ == '__main__':
try:
update_dependencies()
except:
import traceback
exc_type, exc_value, exc_traceback = sys.exc_info()
print '-'*60
traceback.print_exception(exc_type, exc_value, exc_traceback, file=sys.stdout)
print '-'*60
sys.exit(1)
``` |
{
"source": "JorritvandenBerg/sunny-dapp",
"score": 2
} |
#### File: sunny-dapp/smartcontract/sunny_dapp.py
```python
from boa.blockchain.vm.Neo.Runtime import Log, Notify, GetTrigger, CheckWitness
from boa.blockchain.vm.Neo.Blockchain import GetHeight, GetHeader
from boa.blockchain.vm.Neo.Action import RegisterAction
from boa.blockchain.vm.Neo.TriggerType import Application, Verification
from boa.blockchain.vm.Neo.Storage import GetContext, Get, Put, Delete
from boa.code.builtins import list
# -------------------------------------------
# DAPP SETTINGS
# -------------------------------------------
OWNER = b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
# Script hash of the token owner
THRESHOLD = 50
# Threshold of relative sunshine duration percent on a given day
# -------------------------------------------
# Events
# -------------------------------------------
DispatchAgreementEvent = RegisterAction('agreement', 'agreement_key')
DispatchResultNoticeEvent = RegisterAction('result-notice', 'agreement_key', 'weather_param', 'oracle_cost')
DispatchClaimEvent = RegisterAction('pay-out', 'agreement_key')
DispatchTransferEvent = RegisterAction('transfer', 'from', 'to', 'amount')
DispatchRefundAllEvent = RegisterAction('refund-all', 'agreement_key')
DispatchDeleteAgreementEvent = RegisterAction('delete', 'agreement_key')
def Main(operation, args):
"""
This is the main entry point for the dApp
:param operation: the operation to be performed
:type operation: str
:param args: an optional list of arguments
:type args: list
:return: indicating the successful execution of the dApp
:rtype: bool
"""
trigger = GetTrigger()
if trigger == Verification():
# if the script that sent this is the owner
# we allow the spend
is_owner = CheckWitness(OWNER)
if is_owner:
return True
return False
elif trigger == Application():
if operation == 'deploy':
if len(args) == 6:
dapp_name = args[0]
oracle = args[1]
time_margin = args[2]
min_time = args[3]
max_time = args[4]
fee = args[5]
d = Deploy(dapp_name, oracle, time_margin, min_time, max_time)
Log("Dapp deployed")
return d
else:
return False
elif operation == 'name':
context = GetContext()
n = Get(context, 'dapp_name')
return n
elif operation == 'updateName':
if len(args) == 1:
new_name = args[0]
n = UpdateName(new_name)
Log("Name updated")
return n
else:
return False
elif operation == 'oracle':
context = GetContext()
o = Get(context, 'oracle')
return o
elif operation == 'updateOracle':
if len(args) == 1:
new_oracle = args[0]
o = UpdateOracle(new_oracle)
Log("Oracle updated")
return o
else:
return False
elif operation == 'time_margin':
context = GetContext()
time_margin = Get(context, 'time_margin')
return time_margin
elif operation == 'min_time':
context = GetContext()
min_time = Get(context, 'min_time')
return min_time
elif operation == 'max_time':
context = GetContext()
max_time = Get(context, 'max_time')
return max_time
elif operation == 'updateTimeLimits':
if len(args) == 2:
time_variable = args[0]
value = args[1]
t = UpdateTimeLimits(time_variable, value)
Log("Time limits updated")
return t
else:
return False
elif operation == 'agreement':
if len(args) == 10:
agreement_key = args[0]
customer = args[1]
insurer = args[2]
location = args[3]
timestamp = args[4]
utc_offset = args[5]
amount = args[6]
premium = args[7]
dapp_name = args[8]
fee = args[9]
a = Agreement(agreement_key, customer, insurer, location, timestamp, utc_offset, amount, premium, dapp_name, fee)
Log("Agreement added!")
return a
else:
return False
elif operation == 'resultNotice':
if len(args) == 3:
agreement_key = args[0]
weather_param = args[1]
oracle_cost = args[2]
return ResultNotice(agreement_key, weather_param, oracle_cost)
else:
return False
elif operation == 'claim':
if len(args) == 1:
agreement_key = args[0]
return Claim(agreement_key)
else:
return False
elif operation == 'transfer':
if len(args) == 3:
t_from = args[0]
t_to = args[1]
t_amount = args[2]
return DoTransfer(t_from, t_to, t_amount)
else:
return False
elif operation == 'refundAll':
if len(args) == 1:
agreement_key = args[0]
return RefundAll(agreement_key)
else:
return False
elif operation == 'deleteAgreement':
if len(args) == 1:
agreement_key = args[0]
return DeleteAgreement(agreement_key)
else:
return False
result = 'unknown operation'
return result
return False
def Deploy(dapp_name, oracle, time_margin, min_time, max_time):
"""
Method for the dApp owner initiate settings in storage
:param dapp_name: name of the dapp
:type dapp_name: str
:param oracle: oracle that is used
:type oracle: bytearray
:param time_margin: time margin in seconds
:type time_margin: int
:param min_time: minimum time until the datetime of the event in seconds
:type min_time: int
:param max_time: max_time until the datetime of the event in seconds
:type max_time: int
:return: whether the update succeeded
:rtype: bool
"""
if not CheckWitness(OWNER):
Log("Must be owner to deploy dApp")
return False
context = GetContext()
Put(context, 'dapp_name', dapp_name)
Put(context, 'oracle', oracle)
if time_margin < 0:
Log("time_margin must be positive")
return False
Put(context, 'time_margin', time_margin)
if min_time < 3600 + time_margin:
Log("min_time must be greater than 3600 + time_margin")
return False
Put(context, 'min_time', min_time)
if max_time <= (min_time + time_margin):
Log("max_time must be greather than min_time + time_margin")
return False
Put(context, 'max_time', max_time)
return True
def UpdateName(new_name):
"""
Method for the dApp owner to update the dapp name
:param new_name: new name of the dapp
:type new_name: str
:return: whether the update succeeded
:rtype: bool
"""
if not CheckWitness(OWNER):
Log("Must be owner to update name")
return False
context = GetContext()
Put(context, 'dapp_name', new_name)
return True
def UpdateOracle(new_oracle):
"""
Method for the dApp owner to update oracle that is used to signal events
:param new_name: new oracle for the dapp
:type new_name: bytearray
:return: whether the update succeeded
:rtype: bool
"""
if not CheckWitness(OWNER):
Log("Must be owner to update oracle")
return False
context = GetContext()
Put(context, 'oracle', new_oracle)
return True
def UpdateTimeLimits(time_variable, value):
"""
Method for the dApp owner to update the time limits
:param time_variable: the name of the time variable to change
:type time_variable: str
:param value: the value for the time variable to change in seconds
:type value: int
:return: whether the update succeeded
:rtype: bool
"""
if not CheckWitness(OWNER):
Log("Must be owner to update time limits")
return False
if value < 0:
Log("Time limit value must be positive")
return False
context = GetContext()
if time_variable == 'time_margin':
time_margin = value
Put(context, 'time_margin', time_margin)
elif time_variable == 'min_time':
min_time = value
Put(context, 'min_time', min_time)
elif time_variable == 'max_time':
max_time = value
Put(context, 'max_time', max_time)
else:
Log("Time variable name not existing")
return False
return True
def Agreement(agreement_key, customer, insurer, location, timestamp, utc_offset, amount, premium, dapp_name, fee):
"""
Method to create an agreement
:param agreement_key: unique identifier for the agreement
:type agreement_key: str
:param customer: customer party of the agreement
:type customer: bytearray
:param insurer: insurer party of the agreement
:type insurer: bytearray
:param location: location were the event occurs, typically a city
:type location: str
:param timestamp: timezone naive datetime of the day of the event
:type timestamp: int
:param utc_offset: positive or negative utc_offset for timestamp
:type utc_offset: int
:param amount: the insured amount of NEO
:type amount: int
:param premium: the amount of NEO to be paid as a premium to the insurer
:type premium: int
:param dapp_name: the name of the dApp
:type dapp_name: str
:param fee: the fee to be charged
:type fee: int
:return: whether the agreement was successful
:rtype: bool
"""
if not CheckWitness(OWNER):
Log("Must be owner to add an agreement")
return False
# Check if the contract is deployed
context = GetContext()
    if not Get(context, 'dapp_name'):
Log("Must first deploy contract with the deploy operation")
return False
# Get timestamp of current block
currentHeight = GetHeight()
currentBlock = GetHeader(currentHeight)
current_time = currentBlock.Timestamp
# Compute timezone adjusted time
timezone_timestamp = timestamp + (utc_offset * 3600)
timezone_current_time = current_time + (utc_offset * 3600)
# Get contract settings
dapp_name = Get(context, 'dapp_name')
oracle = Get(context, 'oracle')
time_margin = Get(context, 'time_margin')
min_time = Get(context, 'min_time')
max_time = Get(context, 'max_time')
# Check if timestamp is not out of boundaries
if timezone_timestamp < (timezone_current_time + min_time - time_margin):
Log("Datetime must be > 1 day ahead")
return False
elif timezone_timestamp > (timezone_current_time + max_time + time_margin):
Log("Datetime must be < 30 days ahead")
return False
# Check if amount and premium are not zero or below
if amount <= 0:
Log("Insured amount is zero or negative")
return False
if premium <= 0:
Log("Premium is zero or negative")
return False
status = 'initialized'
# Set place holder variables
weather_param = 0
oracle_cost = 0
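    # Positional layout of agreement_data, relied on by ResultNotice, Claim,
    # RefundAll and DeleteAgreement below:
    # 0=customer, 1=insurer, 2=location, 3=timestamp, 4=utc_offset, 5=amount,
    # 6=premium, 7=fee, 8=oracle, 9=time_margin, 10=min_time, 11=max_time,
    # 12=status, 13=weather_param, 14=oracle_cost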
agreement_data = [customer, insurer, location, timestamp, utc_offset, amount, premium, fee, oracle, time_margin, min_time, max_time, status, weather_param, oracle_cost]
Put(context, agreement_key, agreement_data)
DispatchAgreementEvent(agreement_key)
return True
def ResultNotice(agreement_key, weather_param, oracle_cost):
"""
    Method to signal results by the oracle
:param agreement_key: the key of the agreement
:type agreement_key: bytearray
:param weather_param: weather parameter that the contract is depending on
:type weather_param: int
:param oracle_cost: costs made by the oracle to do this assignment
:type oracle_cost: int
:return: whether a pay out to the customer is done
:rtype: bool
"""
# Check if the method is triggered by the oracle for this agreement
context = GetContext()
agreement_data = Get(context, agreement_key)
oracle = agreement_data[8]
if not CheckWitness(oracle):
Log("Must be oracle to notice results")
return False
timestamp = agreement_data[3]
utc_offset = agreement_data[4]
status = agreement_data[12]
if not status == 'initialized':
Log("Contract has incorrect status to do a result notice")
return False
agreement_data[12] = 'result-noticed'
agreement_data[13] = weather_param
agreement_data[14] = oracle_cost
# Get timestamp of current block
currentHeight = GetHeight()
currentBlock = GetHeader(currentHeight)
current_time = currentBlock.Timestamp
Put(context, agreement_key, agreement_data)
timezone_timestamp = timestamp + (3600 * utc_offset)
timezone_current_time = current_time + (3600 * utc_offset)
if timezone_current_time < timezone_timestamp:
Log("Datetime of result notice is lower than agreed datetime")
return False
else:
DispatchResultNoticeEvent(agreement_key, weather_param, oracle_cost)
return True
def Claim(agreement_key):
"""
Method to handle the pay out
:param agreement_key: the key of the agreement
:type agreement_key: bytearray
:return: whether a pay out to the customer is done
:rtype: bool
"""
context = GetContext()
agreement_data = Get(context, agreement_key)
customer = agreement_data[0]
insurer = agreement_data[1]
oracle = agreement_data[8]
status = agreement_data[12]
amount = agreement_data[5]
premium = agreement_data[6]
fee = agreement_data[7]
weather_param = agreement_data[13]
oracle_cost = agreement_data[14]
# Check if the pay out is triggered by the owner, customer, or insurer.
valid_witness = False
if CheckWitness(OWNER):
valid_witness = True
elif CheckWitness(customer):
valid_witness = True
elif CheckWitness(insurer):
valid_witness = True
if not valid_witness:
Log("Must be owner, customer or insurer to claim")
return False
# Check whether this contract has the right status to do a claim
if status == 'initialized':
Log("Status must be result-noticed to be able to do a claim")
return False
elif status == 'claimed':
Log("Contract pay out is already claimed")
return False
elif status == 'refunded':
Log("Contract is already refunded")
return False
net_premium = premium - fee
if weather_param >= THRESHOLD:
Notify("Day was sunny, no pay out to customer")
DoTransfer(OWNER, insurer, net_premium)
DispatchTransferEvent(OWNER, insurer, net_premium)
return False
elif weather_param < THRESHOLD:
Notify("Day was not sunny, pay out insured amount to customer")
DoTransfer(OWNER, insurer, net_premium)
DispatchTransferEvent(OWNER, insurer, net_premium)
DoTransfer(OWNER, customer, amount)
DispatchTransferEvent(OWNER, customer, amount)
DoTransfer(OWNER, oracle, oracle_cost)
DispatchTransferEvent(OWNER, oracle, oracle_cost)
agreement_data[12] = 'claimed'
Put(context, agreement_key, agreement_data)
DispatchClaimEvent(agreement_key)
return True
def DoTransfer(sender, receiver, amount):
"""
Method to transfer tokens from one account to another
:param sender: the address to transfer from
:type sender: bytearray
:param receiver: the address to transfer to
:type receiver: bytearray
:param amount: the amount of tokens to transfer
:type amount: int
:return: whether the transfer was successful
:rtype: bool
"""
if amount <= 0:
        Log("Cannot transfer a zero or negative amount")
return False
from_is_sender = CheckWitness(sender)
if not from_is_sender:
Log("Not owner of funds to be transferred")
return False
if sender == receiver:
Log("Sending funds to self")
return True
context = GetContext()
from_val = Get(context, sender)
if from_val < amount:
Log("Insufficient funds to transfer")
return False
if from_val == amount:
Delete(context, sender)
else:
difference = from_val - amount
Put(context, sender, difference)
to_value = Get(context, receiver)
to_total = to_value + amount
Put(context, receiver, to_total)
DispatchTransferEvent(sender, receiver, amount)
return True
def RefundAll(agreement_key):
"""
    Method to refund payments in case a total eclipse or an EMP caused an oracle failure
    :param agreement_key: the key of the agreement
:type agreement_key: bytearray
:return: whether the refund was successful
:rtype: bool
"""
if not CheckWitness(OWNER):
Log("Must be owner to do a refund to all")
return False
context = GetContext()
agreement_data = Get(context, agreement_key)
    customer = agreement_data[0]
    insurer = agreement_data[1]
    status = agreement_data[12]
amount = agreement_data[5]
premium = agreement_data[6]
fee = agreement_data[7]
if status == 'claimed':
Log("contract pay out has already been claimed")
return False
elif status == 'refunded':
Log("A RefundAll already took place")
return False
# Perform refund
net_premium = premium - fee
DoTransfer(OWNER, insurer, net_premium)
DispatchTransferEvent(OWNER, insurer, net_premium)
DoTransfer(OWNER, customer, amount)
DispatchTransferEvent(OWNER, customer, amount)
agreement_data[12] = 'refunded'
Put(context, agreement_key, agreement_data)
DispatchRefundAllEvent(agreement_key)
return True
def DeleteAgreement(agreement_key):
"""
Method for the dApp owner to delete claimed or refunded agreements
    :param agreement_key: the key of the agreement
    :type agreement_key: bytearray
:return: whether the deletion succeeded
:rtype: bool
"""
if not CheckWitness(OWNER):
Log("Must be owner to delete an agreement")
return False
    context = GetContext()
agreement_data = Get(context, agreement_key)
status = agreement_data[12]
    if status == 'claimed':
        Delete(context, agreement_key)
        DispatchDeleteAgreementEvent(agreement_key)
        return True
    elif status == 'refunded':
        Delete(context, agreement_key)
        DispatchDeleteAgreementEvent(agreement_key)
        return True
    Log("Only claimed or refunded agreements can be deleted")
    return False
``` |
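Throughout `ResultNotice`, `Claim`, `RefundAll`, and `DeleteAgreement`, the stored agreement is addressed by bare list indices. The sketch below is only a reading aid: it names those indices according to the order in which the fields are assembled before the agreement is stored, and the helper function is illustrative rather than part of the contract.

```python
# Reading aid for the agreement_data layout used by the contract above.
# The order mirrors the list assembled before Put(context, agreement_key, agreement_data).
CUSTOMER, INSURER, LOCATION, TIMESTAMP, UTC_OFFSET = 0, 1, 2, 3, 4
AMOUNT, PREMIUM, FEE, ORACLE = 5, 6, 7, 8
TIME_MARGIN, MIN_TIME, MAX_TIME = 9, 10, 11
STATUS, WEATHER_PARAM, ORACLE_COST = 12, 13, 14


def describe_agreement(agreement_data):
    # Hypothetical helper (not part of the contract) to make a stored record readable off-chain.
    return {
        "customer": agreement_data[CUSTOMER],
        "insurer": agreement_data[INSURER],
        "oracle": agreement_data[ORACLE],
        "status": agreement_data[STATUS],
        "amount": agreement_data[AMOUNT],
        "premium": agreement_data[PREMIUM],
        "fee": agreement_data[FEE],
        "weather_param": agreement_data[WEATHER_PARAM],
        "oracle_cost": agreement_data[ORACLE_COST],
    }
```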
{
"source": "JorritWillaert/OptimalConnectFour",
"score": 4
} |
#### File: JorritWillaert/OptimalConnectFour/game_functions.py
```python
from board import Board
from random import randint
import cpu
class Game():
def __init__(self, player1, player2, size_horizontal, size_vertical):
self.player1 = player1
self.player2 = player2
self.size_horizontal = size_horizontal
self.size_vertical = size_vertical
self.in_a_row = 4
self.board = Board(self.size_horizontal, self.size_vertical)
        self.array_strings = [str(i + 1) for i in range(self.size_horizontal)]
def make_human_move(self, own, opponent):
print(own.name + ", it is your turn. You are playing " + own.character)
column = self.choose_move()
row = self.board.check_row(column)
if row == 0:
self.board.remove_free_column(column)
self.board.change_character(own.character, row, column)
def make_cpu_move(self, own, opponent):
value, column = cpu.cpu_min_max_algorithm(self, own, opponent)
row = self.board.check_row(column)
if row == 0:
self.board.remove_free_column(column)
self.board.change_character(own.character, row, column)
return value
def draw_board(self):
self.board.draw_board()
def victory(self, own, opponent, printing = True):
"""Check for victory"""
row = self.size_vertical - 1
column = 0
while row >= 0:
while column < self.size_horizontal:
horizontal = diagonal_right = diagonal_left = vertical = False
if column < self.size_horizontal - (self.in_a_row - 1):
horizontal = self.victory_row_right(row, column)
if horizontal:
self.print_victory(own, printing)
return True
if column < self.size_horizontal - (self.in_a_row - 1) and row > self.in_a_row - 2:
diagonal_right = self.victory_diagonal_right_up(row, column)
if diagonal_right:
self.print_victory(own, printing)
return True
if column > self.in_a_row - 2 and row > self.in_a_row - 2:
diagonal_left = self.victory_diagonal_left_up(row, column)
if diagonal_left:
self.print_victory(own, printing)
return True
if row > self.in_a_row - 2:
vertical = self.victory_column_up(row, column)
if vertical:
self.print_victory(own, printing)
return True
column += 1
column = 0
            row -= 1
        return False
def print_victory(self, own, printing):
if printing:
self.board.draw_board()
print(own.name + ", congratulations! You've won!")
def victory_row_right(self, row, column):
running_char = self.board.get_character(row, column)
if running_char == '.':
return False
for i in range(1, self.in_a_row):
if self.board.get_character(row, column + i) != running_char:
return False
return True
def victory_diagonal_right_up(self, row, column):
running_char = self.board.get_character(row, column)
if running_char == '.':
return False
for i in range(1, self.in_a_row):
if self.board.get_character(row - i, column + i) != running_char:
return False
return True
def victory_diagonal_left_up(self, row, column):
running_char = self.board.get_character(row, column)
if running_char == '.':
return False
for i in range(1, self.in_a_row):
if self.board.get_character(row - i, column - i) != running_char:
return False
return True
def victory_column_up(self, row, column):
running_char = self.board.get_character(row, column)
if running_char == '.':
return False
for i in range(1, self.in_a_row):
if self.board.get_character(row - i, column) != running_char:
return False
return True
def draw(self, printing = True):
"""Check for a draw (no more legal moves possible)"""
if not self.board.get_free_columns():
if printing:
print("The game ended in a draw!")
            return True
        return False
    def choose_move(self):
        """Return the chosen column index (input - 1). Ask again until the move is legal."""
while True:
move = input("Which column do you want to play? ").strip()
if move not in self.array_strings:
print("Invalid choice")
else:
column = int(move) - 1
if column in self.board.get_free_columns():
return column
print("Illegal move!")
self.draw_board()
    def cpu_random_move(self, own):
print("The CPU is doing a random move.")
while True:
            column = randint(0, self.size_horizontal - 1)
if column in self.board.get_free_columns():
break
row = self.board.check_row(column)
if row == 0:
self.board.remove_free_column(column)
self.board.change_character(own.character, row, column)
def heuristic_ordering(size_horizontal):
heuristic = [None] * size_horizontal
for i in range(size_horizontal):
heuristic[i] = size_horizontal // 2 + (1 - 2 * (i % 2)) * (i + 1) // 2
return heuristic
def choose_gamemode():
"""Return True if the player wants to play against the computer"""
while True:
cpu = input("Do you want to play against an optimal computer? (y/n) ").lower().strip()
if cpu not in ['y', 'n', 'yes', 'no']:
print("Invalid choice")
else:
if cpu in ['y', 'yes']:
return True
return False
def choose_size():
while True:
standard = input("Do you want to play on a standard 7 x 6 board? (y/n) ").lower().strip()
if standard not in ['y', 'n', 'yes', 'no']:
print("Invalid choice")
else:
if standard in ['n', 'no']:
sizes = input("Please input the desired horizontal and vertical sizes, separated by a space. ").strip().split()
if len(sizes) != 2:
print("Invalid choice")
continue
else:
horizontal = sizes[0]
vertical = sizes[1]
if horizontal.isdigit() and vertical.isdigit() and int(horizontal) >= 4 and int(vertical) >= 4:
return int(horizontal), int(vertical)
else:
print("Invalid choice")
continue
return 7, 6
def print_rules():
print(
'''
Welcome to connect four!
    The aim for both players is to make a straight line (vertical, horizontal or diagonal) of four pieces of your own color.
    Moves are made alternately, one per turn. Pieces slide downwards from the upper holes, falling to the last row or piling up on the last piece introduced in the same column.
    The winner is the first player to get a straight line of four of their own pieces without gaps between them.
Have fun!
'''
)
```
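`heuristic_ordering` is easiest to understand from its output. The sketch below assumes it is a module-level function in `game_functions.py` (its signature takes no `self`), and the expected list is computed from the formula above rather than taken from the repository; centre-first ordering is the usual way to make the alpha-beta search in the `cpu` module prune faster.

```python
# Minimal usage sketch for heuristic_ordering; assumes it is importable from game_functions.
from game_functions import heuristic_ordering

columns = heuristic_ordering(7)
print(columns)  # [3, 2, 4, 1, 5, 0, 6] -> centre column first, then fanning outwards
```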
#### File: JorritWillaert/OptimalConnectFour/player.py
```python
class Player():
def __init__(self, cpu, character):
self.cpu = cpu
self.name = self.ask_name()
self.character = character
self.laid_stones = 0
def ask_name(self):
if self.cpu:
return "CPU"
else:
return input("What is your name? ")
def increase_laid_stones(self):
self.laid_stones += 1
def decrease_laid_stones(self):
self.laid_stones -= 1
def get_laid_stones(self):
return self.laid_stones
``` |
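The repository's entry point is not part of this snippet, so the following driver is a hypothetical sketch of how `Game` and `Player` could be wired together for a two-human game. All class and function names come from `game_functions.py` and `player.py` above; the loop itself, and the 'X'/'O' characters, are assumptions rather than the project's actual main module.

```python
# Hypothetical driver for the classes above; the real project's main module is not shown here.
from game_functions import Game, choose_size, print_rules
from player import Player


def play_two_player_game():
    print_rules()
    size_horizontal, size_vertical = choose_size()
    player1 = Player(cpu=False, character='X')  # 'X' and 'O' are illustrative symbols
    player2 = Player(cpu=False, character='O')
    game = Game(player1, player2, size_horizontal, size_vertical)
    own, opponent = player1, player2
    while True:
        game.draw_board()
        game.make_human_move(own, opponent)
        own.increase_laid_stones()
        if game.victory(own, opponent):
            break
        if game.draw():
            break
        own, opponent = opponent, own  # alternate turns


if __name__ == "__main__":
    play_two_player_game()
```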