repo_name (string) | path (string) | copies (string class) | size (string) | content (string) | license (string class) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool) |
---|---|---|---|---|---|---|---|---|---|---|
FreeOpcUa/python-opcua | opcua/common/instantiate.py | 1 | 5606 |
"""
Instantiate a new node and its child nodes from a node type.
"""
import logging
from opcua import Node
from opcua import ua
from opcua.common import ua_utils
from opcua.common.copy_node import _rdesc_from_node, _read_and_copy_attrs
logger = logging.getLogger(__name__)
def instantiate(parent, node_type, nodeid=None, bname=None, dname=None, idx=0, instantiate_optional=True):
"""
instantiate a node type under a parent node.
nodeid and browse name of new node can be specified, or just namespace index
    If the node type has children, such as components, variables and
    properties, they are also instantiated.
"""
rdesc = _rdesc_from_node(parent, node_type)
rdesc.TypeDefinition = node_type.nodeid
if nodeid is None:
nodeid = ua.NodeId(namespaceidx=idx) # will trigger automatic node generation in namespace idx
if bname is None:
bname = rdesc.BrowseName
elif isinstance(bname, str):
bname = ua.QualifiedName.from_string(bname)
nodeids = _instantiate_node(
parent.server,
Node(parent.server, rdesc.NodeId),
parent.nodeid,
rdesc,
nodeid,
bname,
dname=dname,
instantiate_optional=instantiate_optional)
return [Node(parent.server, nid) for nid in nodeids]
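# A minimal usage sketch (illustrative; "parent_node" and "type_node" are
# assumed to be opcua Node objects obtained elsewhere):
#
#   new_nodes = instantiate(parent_node, type_node, bname="2:MyDevice", idx=2)
#   device = new_nodes[0]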
def _instantiate_node(server,
node_type,
parentid,
rdesc,
nodeid,
bname,
dname=None,
recursive=True,
instantiate_optional=True):
"""
instantiate a node type under parent
"""
addnode = ua.AddNodesItem()
addnode.RequestedNewNodeId = nodeid
addnode.BrowseName = bname
addnode.ParentNodeId = parentid
addnode.ReferenceTypeId = rdesc.ReferenceTypeId
addnode.TypeDefinition = rdesc.TypeDefinition
if rdesc.NodeClass in (ua.NodeClass.Object, ua.NodeClass.ObjectType):
addnode.NodeClass = ua.NodeClass.Object
_read_and_copy_attrs(node_type, ua.ObjectAttributes(), addnode)
elif rdesc.NodeClass in (ua.NodeClass.Variable, ua.NodeClass.VariableType):
addnode.NodeClass = ua.NodeClass.Variable
_read_and_copy_attrs(node_type, ua.VariableAttributes(), addnode)
elif rdesc.NodeClass in (ua.NodeClass.Method, ):
addnode.NodeClass = ua.NodeClass.Method
_read_and_copy_attrs(node_type, ua.MethodAttributes(), addnode)
elif rdesc.NodeClass in (ua.NodeClass.DataType, ):
addnode.NodeClass = ua.NodeClass.DataType
_read_and_copy_attrs(node_type, ua.DataTypeAttributes(), addnode)
else:
logger.error("Instantiate: Node class not supported: %s", rdesc.NodeClass)
raise RuntimeError("Instantiate: Node class not supported")
return
if dname is not None:
addnode.NodeAttributes.DisplayName = dname
res = server.add_nodes([addnode])[0]
res.StatusCode.check()
added_nodes = [res.AddedNodeId]
if recursive:
parents = ua_utils.get_node_supertypes(node_type, includeitself=True)
node = Node(server, res.AddedNodeId)
for parent in parents:
descs = parent.get_children_descriptions(includesubtypes=False)
for c_rdesc in descs:
                # skip items that already exist, preferring the 'lowest' one in the object hierarchy
if not ua_utils.is_child_present(node, c_rdesc.BrowseName):
c_node_type = Node(server, c_rdesc.NodeId)
refs = c_node_type.get_referenced_nodes(refs=ua.ObjectIds.HasModellingRule)
if not refs:
# spec says to ignore nodes without modelling rules
logger.info("Instantiate: Skip node without modelling rule %s as part of %s", c_rdesc.BrowseName, addnode.BrowseName)
continue
# exclude nodes with optional ModellingRule if requested
if not instantiate_optional and refs[0].nodeid in (ua.NodeId(ua.ObjectIds.ModellingRule_Optional), ua.NodeId(ua.ObjectIds.ModellingRule_OptionalPlaceholder)):
logger.info("Instantiate: Skip optional node %s as part of %s", c_rdesc.BrowseName, addnode.BrowseName)
continue
# if root node being instantiated has a String NodeId, create the children with a String NodeId
if res.AddedNodeId.NodeIdType is ua.NodeIdType.String:
inst_nodeid = res.AddedNodeId.Identifier + "." + c_rdesc.BrowseName.Name
nodeids = _instantiate_node(
server,
c_node_type,
res.AddedNodeId,
c_rdesc,
nodeid=ua.NodeId(identifier=inst_nodeid, namespaceidx=res.AddedNodeId.NamespaceIndex),
bname=c_rdesc.BrowseName,
instantiate_optional=instantiate_optional)
else:
nodeids = _instantiate_node(
server,
c_node_type,
res.AddedNodeId,
c_rdesc,
nodeid=ua.NodeId(namespaceidx=res.AddedNodeId.NamespaceIndex),
bname=c_rdesc.BrowseName,
instantiate_optional=instantiate_optional)
added_nodes.extend(nodeids)
return added_nodes
| lgpl-3.0 | -3,443,361,974,097,699,300 | 42.457364 | 178 | 0.600428 | false |
anselmobd/fo2 | src/estoque/models.py | 1 | 3782 |
import datetime
from pprint import pprint
from django.contrib.auth.models import User
from django.db import models
from produto.models import ProdutoItem
class EstoquePermissions(models.Model):
class Meta:
verbose_name = 'Permissões de estoque'
managed = False
permissions = (
("can_transferencia", "Pode fazer transferência entre depósitos"),
)
class TipoMovStq(models.Model):
codigo = models.CharField(
'Código',
max_length=100, unique=True, default="-")
descricao = models.CharField(
'Descrição',
max_length=100)
trans_saida = models.IntegerField(
'Transação de saída',
default=0)
trans_entrada = models.IntegerField(
'Transação de entrada',
default=0)
menu = models.BooleanField(
'Aparece no menu',
default=False)
ordem = models.IntegerField(
default=0)
renomeia = models.BooleanField(
'Renomeia',
default=False)
CHOICES = (
('1', '1 para 1'),
('M', 'Monta Kit'),
('D', 'Desmonta Kit'),
)
unidade = models.CharField(
max_length=1, choices = CHOICES, default='1')
def __str__(self):
return self.descricao
class Meta:
db_table = "fo2_est_tipo_mov"
verbose_name = "Tipo de movimento de estoque"
verbose_name_plural = "Tipos de movimentos de estoque"
_doc_mov_stq_start_range = 802000000
class DocMovStqManager(models.Manager):
def get_queryset(self):
return super(
DocMovStqManager,
self).get_queryset().annotate(
num_doc=models.F('id') + _doc_mov_stq_start_range).all()
class DocMovStq(models.Model):
descricao = models.CharField(
'Descrição',
max_length=100)
data = models.DateField()
usuario = models.ForeignKey(
User, models.PROTECT,
verbose_name='usuário')
objects = DocMovStqManager()
@property
def get_num_doc(self):
return self.id + _doc_mov_stq_start_range
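    # Example: with the 802000000 offset above, the row with id=1 is shown as
    # document number 802000001.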
def __str__(self):
return f'{self.num_doc} - {self.descricao}'
def save(self, *args, **kwargs):
if not self.id:
self.data = datetime.date.today()
super(DocMovStq, self).save(*args, **kwargs)
class Meta:
db_table = "fo2_est_doc_mov"
verbose_name = "Documento de movimento de estoque"
verbose_name_plural = "Documentos de movimentos de estoque"
class MovStq(models.Model):
tipo_mov = models.ForeignKey(
TipoMovStq, models.PROTECT,
verbose_name='Tipo de movimento')
item = models.ForeignKey(
ProdutoItem, models.PROTECT)
quantidade = models.IntegerField(
default=0)
deposito_origem = models.IntegerField(
'Depósito de origem')
deposito_destino = models.IntegerField(
'Depósito de destino')
novo_item = models.ForeignKey(
ProdutoItem, models.PROTECT, related_name='movstqdest', null=True)
documento = models.ForeignKey(
DocMovStq, models.PROTECT,
verbose_name='Documento de movimento de estoque')
usuario = models.ForeignKey(
User, models.PROTECT,
verbose_name='usuário')
obs = models.CharField(
'Observação', default='',
max_length=100)
hora = models.DateTimeField(
null=True, auto_now_add=True)
itens_extras = models.CharField(
default='', max_length=200)
def __str__(self):
return (f'{self.documento.get_num_doc}, {self.item} '
f'{self.deposito_origem}->{self.deposito_destino}')
class Meta:
db_table = "fo2_est_mov"
verbose_name = "Movimento de estoque"
verbose_name_plural = "Movimentos de estoque"
| mit | 4,410,419,019,604,873,000 | 27.293233 | 78 | 0.612809 | false |
maas/maas | src/maasserver/rbac.py | 1 | 15583 |
from collections import defaultdict
from functools import partial
import http.client
import threading
from typing import Mapping, Sequence, Union
from urllib.parse import parse_qs, quote, urlparse
import attr
from maasserver.macaroon_auth import (
APIError,
AuthInfo,
get_auth_info,
MacaroonClient,
UserDetails,
)
from maasserver.models import Config, ResourcePool
class SyncConflictError(Exception):
"""Sync conflict error occurred."""
@attr.s
class Resource:
"""Represents a resource in RBAC."""
# Identifier of the resource.
identifier = attr.ib(converter=int)
# Name of the resource
name = attr.ib(converter=str)
class AllResourcesType:
"""Class that represents all resources."""
# Represents access to all resources of the requested resource type.
ALL_RESOURCES = AllResourcesType()
ResourcesResultType = Union[AllResourcesType, Sequence[int]]
class RBACClient(MacaroonClient):
"""A client for RBAC API."""
API_BASE_URL = "/api/service/v1"
def __init__(self, url: str = None, auth_info: AuthInfo = None):
if url is None:
url = Config.objects.get_config("rbac_url")
if auth_info is None:
auth_info = get_auth_info()
super().__init__(auth_info=auth_info, url=url)
def _get_resource_type_url(self, resource_type: str):
"""Return the URL for `resource_type`."""
return self._url + quote(
"{}/resources/{}".format(self.API_BASE_URL, resource_type)
)
def get_user_details(self, username: str) -> UserDetails:
"""Return details about a user."""
url = self._url + quote(
"{}/user/{}".format(self.API_BASE_URL, username)
)
details = self._request("GET", url)
return UserDetails(
username=details["username"],
fullname=details.get("name", ""),
email=details.get("email", ""),
)
def get_resources(self, resource_type: str) -> Sequence[Resource]:
"""Return list of resources with `resource_type`."""
result = self._request(
"GET", self._get_resource_type_url(resource_type)
)
return [
Resource(identifier=res["identifier"], name=res["name"])
for res in result
]
def update_resources(
self,
resource_type: str,
updates: Sequence[Resource] = None,
removals: Sequence[int] = None,
last_sync_id: str = None,
):
"""Put all the resources for `resource_type`.
This replaces all the resources for `resource_type`.
"""
resources_updates = []
resources_removals = []
if updates:
resources_updates = [
{"identifier": str(res.identifier), "name": res.name}
for res in updates
]
if removals and last_sync_id:
resources_removals = [str(id) for id in removals]
data = {
"last-sync-id": last_sync_id,
"updates": resources_updates,
"removals": resources_removals,
}
try:
result = self._request(
"POST", self._get_resource_type_url(resource_type), json=data
)
except APIError as exc:
if exc.status_code == int(http.client.CONFLICT) and last_sync_id:
# Notify the caller of the conflict explicitly.
raise SyncConflictError()
raise
return result["sync-id"]
def allowed_for_user(
self, resource_type: str, user: str, *permissions: Sequence[str]
) -> ResourcesResultType:
"""Return the resource identifiers that `user` can access with
`permissions`.
Returns a dictionary mapping the permissions to the resources of
`resource_type` that the user can access. An object of `ALL_RESOURCES`
means the user can access all resources of that type.
"""
url = self._get_resource_type_url(
resource_type
) + "/allowed-for-user?u={}&{}".format(
quote(user),
"&".join(
["p=%s" % quote(permission) for permission in permissions]
),
)
result = self._request("GET", url)
for permission, res in result.items():
if res == [""]:
result[permission] = ALL_RESOURCES
else:
result[permission] = [int(idnt) for idnt in res]
return result
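# Illustrative use of the client above (URL and user name are made up):
#
#   client = RBACClient(url="https://rbac.example.com", auth_info=get_auth_info())
#   allowed = client.allowed_for_user("resource-pool", "alice", "view", "edit")
#   if allowed["edit"] is ALL_RESOURCES:
#       ...  # "alice" may edit every resource pool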
class FakeResourceStore:
"""A fake store for RBAC resources.
The fake RBAC client uses this so that it doesn't have to talk to a
real RBAC server for tests.
"""
def __init__(self):
self.resources = defaultdict(list)
user_resources_dict = partial(defaultdict, list)
user_permissions_dict = partial(defaultdict, user_resources_dict)
self.allowed = defaultdict(user_permissions_dict)
def add_pool(self, pool):
"""Register a pool with RBAC."""
self.resources["resource-pool"].append(
Resource(identifier=str(pool.id), name=pool.name)
)
def allow(self, username, pool, permission):
"""Add a policy for a user having a permission on a pool."""
identifier = "" if pool is ALL_RESOURCES else str(pool.id)
user_permissions = self.allowed[username]
user_resources = user_permissions["resource-pool"]
user_resources[permission].append(identifier)
class FakeRBACClient(RBACClient):
"""A fake RBACClient that can be used in tests.
It overrides _request to talk to a fake store, so it works exactly
like the real client, except that it doesn't talk to a real RBAC
server.
"""
def __init__(self, url: str = None, auth_info: AuthInfo = None):
if url is None:
url = Config.objects.get_config("rbac_url")
self._url = url
self._auth_info = auth_info
self.store = FakeResourceStore()
def _request(self, method, url):
parsed = urlparse(url)
path_parts = parsed.path.split("/")
assert path_parts[:5] == ["", "api", "service", "v1", "resources"]
if method.upper() == "GET":
resource_type, action = path_parts[5:7]
query = parse_qs(parsed.query)
[user] = query["u"]
permissions = query["p"]
user_resources = self.store.allowed.get(user, None)
if user_resources is None:
return {}
user_permissions = user_resources.get(resource_type, {})
result = {}
for permission in permissions:
pool_identifiers = user_permissions.get(permission, [])
result[permission] = (
[""] if "" in pool_identifiers else pool_identifiers
)
return result
def get_user_details(self, username):
return UserDetails(
username=username,
fullname="User username",
email=username + "@example.com",
)
# Set when there is no client for the current request.
NO_CLIENT = object()
class RBACWrapper:
"""Object for querying RBAC information."""
def __init__(self, client_class=None):
# A client is created per thread.
self._store = threading.local()
self._client_class = client_class
if self._client_class is None:
self._client_class = RBACClient
def _get_rbac_url(self):
"""Return the configured RBAC url."""
return Config.objects.get_config("rbac_url")
@property
def client(self):
"""Get thread-local client."""
# Get the current cleared status and reset it to False for the
# next request on this thread.
cleared = getattr(self._store, "cleared", False)
self._store.cleared = False
client = getattr(self._store, "client", None)
if client is None:
url = self._get_rbac_url()
if url:
client = self._client_class(url)
self._store.client = client
else:
self._store.client = NO_CLIENT
return client
# Check if this is a new request, a new check of the client needs
# to be performed.
if cleared:
# Check that the `rbac_url` and the credentials match.
url = self._get_rbac_url()
if url:
auth_info = get_auth_info()
if client is NO_CLIENT:
# Previously no client was created, create a new client
# now that RBAC is enabled.
client = self._client_class(url, auth_info)
self._store.client = client
elif client._url != url or client._auth_info != auth_info:
# URL or creds differ, re-create the client.
client = self._client_class(url, auth_info)
self._store.client = client
else:
# RBAC is now disabled.
client = None
self._store.client = NO_CLIENT
if client is NO_CLIENT:
return None
return client
def clear(self):
"""Clear the current client.
This marks a client as cleared that way only a new client is created
if the `rbac_url` is changed.
"""
self.clear_cache()
self._store.cleared = True
def is_enabled(self):
"""Return whether MAAS has been configured to use RBAC."""
return self.client is not None
def get_cache(self, resource, user, default=dict):
"""Return the cache for the `resource` and `user`."""
cache = getattr(self._store, "cache", None)
if cache is None:
cache = {}
setattr(self._store, "cache", cache)
key = (resource, user)
if key in cache:
return cache[key]
scoped = default()
cache[key] = scoped
return scoped
def clear_cache(self):
"""Clears the entire cache."""
if hasattr(self._store, "cache"):
delattr(self._store, "cache")
def get_resource_pool_ids(
self, user: str, *permissions: Sequence[str]
) -> Mapping[str, ResourcesResultType]:
"""Get the resource pools ids that given user has the given
permission on.
@param user: The user name of the user.
@param permission: A permission that the user should
have on the resource pool.
"""
results = self._get_resource_pool_identifiers(user, *permissions)
for permission, result in results.items():
if result is ALL_RESOURCES:
results[permission] = list(
ResourcePool.objects.all().values_list("id", flat=True)
)
else:
results[permission] = [int(idnt) for idnt in result]
return results
def can_create_resource_pool(self, user: str) -> bool:
"""Return True if the `user` can create a resource pool.
A user can create a resource pool if they have edit on all resource
pools.
@param user: The user name of the user.
"""
pool_identifiers = self._get_resource_pool_identifiers(user, "edit")
return pool_identifiers["edit"] is ALL_RESOURCES
def can_delete_resource_pool(self, user: str) -> bool:
"""Return True if the `user` can delete a resource pool.
A user can delete a resource pool if they have edit on all resource
pools.
@param user: The user name of the user.
"""
pool_identifiers = self._get_resource_pool_identifiers(user, "edit")
return pool_identifiers["edit"] is ALL_RESOURCES
def _get_resource_pool_identifiers(
self, user: str, *permissions: Sequence[str]
) -> Mapping[str, ResourcesResultType]:
"""Get the resource pool identifiers from RBAC.
Uses the thread-local cache so only one request is made to RBAC per
request to MAAS.
@param user: The user name of the user.
@param permission: A permission that the user should
have on the resource pool.
"""
cache = self.get_cache("resource-pool", user)
results, missing = {}, []
for permission in permissions:
identifiers = cache.get(permission, None)
if identifiers is None:
missing.append(permission)
else:
results[permission] = identifiers
if missing:
fetched = self.client.allowed_for_user(
"resource-pool", user, *missing
)
for permission in missing:
identifiers = fetched.get(permission, {})
cache[permission] = results[permission] = identifiers
return results
rbac = RBACWrapper()
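# Typical call sites go through the module-level singleton above, for example
# (sketch only):
#
#   if rbac.is_enabled():
#       pool_ids = rbac.get_resource_pool_ids(user.username, "view")["view"]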
class RBACUserClient(MacaroonClient):
"""A client for the RBAC user API."""
API_BASE_URL = "/api/rbac/v1"
def __init__(self, url):
# no auth info is passed as this is meant for interactive use
super().__init__(url, None)
self._maas_product = None
def create_service(self, name):
"""Create a MAAS service with the specified name."""
maas = self._get_maas_product()
data = {"name": name, "product": {"$ref": maas["$uri"]}}
return self._api_request("POST", "/service", json=data)
def get_registerable_services(self):
"""Return MAAS services that can be registered by the user."""
maas = self._get_maas_product()
services = self._api_request("GET", "/service/registerable")
return [
service
for service in services
if service["product"]["$ref"] == maas["$uri"]
]
def register_service(self, service_uri, public_key):
"""Register the specified service with the public key."""
return self._request(
"POST",
self._url + service_uri + "/credentials",
json={"public-key": public_key},
)
def _get_maas_product(self):
"""Return details for the maas product."""
if self._maas_product is None:
products = self._api_request("GET", "/product")
[maas] = [
product for product in products if product["label"] == "maas"
]
self._maas_product = maas
return self._maas_product
def _api_request(self, method, path, json=None, status_code=200):
return self._request(
method,
self._url + self.API_BASE_URL + path,
json=json,
status_code=status_code,
)
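# Sketch of the interactive registration flow this client supports (URL and
# public key are placeholders):
#
#   client = RBACUserClient("https://rbac.example.com")
#   services = client.get_registerable_services()
#   creds = client.register_service(services[0]["$uri"], public_key)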
class FakeRBACUserClient(RBACUserClient):
def __init__(self):
self.services = []
self.products = []
self.registered_services = []
def create_service(self, name):
maas = {
"name": "maas",
"$uri": "/api/rbac/v1/service/4",
"pending": True,
"product": {"$ref" "/api/rbac/v1/product/2"},
}
self.services.append(maas)
return maas
def get_products(self):
return self.products
def get_registerable_services(self):
return self.services
def register_service(self, service_uri, public_key):
self.registered_services.append(service_uri)
return {
"url": "http://auth.example.com",
"username": "u-{}".format(len(self.registered_services)),
}
| agpl-3.0 | 665,017,665,849,337,200 | 32.297009 | 78 | 0.571713 | false |
maas/maas | src/maasserver/models/space.py | 1 | 6013 |
# Copyright 2015-2016 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Space objects."""
import datetime
import re
from django.core.exceptions import PermissionDenied, ValidationError
from django.db.models import CharField, Manager, TextField
from django.db.models.query import QuerySet
from maasserver import DefaultMeta
from maasserver.models.cleansave import CleanSave
from maasserver.models.timestampedmodel import TimestampedModel
from maasserver.utils.orm import MAASQueriesMixin
def validate_space_name(value):
"""Django validator: `value` must be either `None`, or valid."""
if value is None:
return
namespec = re.compile(r"^[\w-]+$")
if not namespec.search(value):
raise ValidationError("Invalid space name: %s." % value)
# Name of the special, default space. This space cannot be deleted.
DEFAULT_SPACE_NAME = "space-0"
class SpaceQueriesMixin(MAASQueriesMixin):
def get_specifiers_q(self, specifiers, separator=":", **kwargs):
# Circular imports.
from maasserver.models import Subnet
# This dict is used by the constraints code to identify objects
# with particular properties. Please note that changing the keys here
# can impact backward compatibility, so use caution.
specifier_types = {
None: self._add_default_query,
"name": "__name",
"subnet": (Subnet.objects, "vlan__space"),
}
return super().get_specifiers_q(
specifiers,
specifier_types=specifier_types,
separator=separator,
**kwargs
)
class SpaceQuerySet(QuerySet, SpaceQueriesMixin):
"""Custom QuerySet which mixes in some additional queries specific to
this object. This needs to be a mixin because an identical method is needed
on both the Manager and all QuerySets which result from calling the
manager.
"""
class SpaceManager(Manager, SpaceQueriesMixin):
"""Manager for :class:`Space` model."""
def get_queryset(self):
queryset = SpaceQuerySet(self.model, using=self._db)
return queryset
def get_default_space(self):
"""Return the default space."""
now = datetime.datetime.now()
space, _ = self.get_or_create(
id=0,
defaults={"id": 0, "name": None, "created": now, "updated": now},
)
return space
def get_space_or_404(self, specifiers, user, perm):
"""Fetch a `Space` by its id. Raise exceptions if no `Space` with
this id exists or if the provided user has not the required permission
to access this `Space`.
:param specifiers: The space specifiers.
:type specifiers: string
:param user: The user that should be used in the permission check.
:type user: django.contrib.auth.models.User
:param perm: The permission to assert that the user has on the node.
:type perm: unicode
:raises: django.http.Http404_,
:class:`maasserver.exceptions.PermissionDenied`.
.. _django.http.Http404: https://
docs.djangoproject.com/en/dev/topics/http/views/
#the-http404-exception
"""
space = self.get_object_by_specifiers_or_raise(specifiers)
if user.has_perm(perm, space):
return space
else:
raise PermissionDenied()
class Space(CleanSave, TimestampedModel):
"""A `Space`.
:ivar name: The short-human-identifiable name for this space.
:ivar objects: An instance of the class :class:`SpaceManager`.
"""
# Name of the undefined space.
UNDEFINED = "undefined"
class Meta(DefaultMeta):
"""Needed for South to recognize this model."""
verbose_name = "Space"
verbose_name_plural = "Spaces"
objects = SpaceManager()
# We don't actually allow blank or null name, but that is enforced in
# clean() and save().
name = CharField(
max_length=256,
editable=True,
null=True,
blank=True,
unique=True,
validators=[validate_space_name],
)
description = TextField(null=False, blank=True)
def __str__(self):
return "name=%s" % self.get_name()
def is_default(self):
"""Is this the default space?"""
return self.id == 0
def get_name(self):
"""Return the name of the space."""
if self.name:
return self.name
else:
return "space-%s" % self.id
def clean_name(self):
reserved = re.compile(r"^space-\d+$")
if self.name is not None and self.name != "":
if self.name == Space.UNDEFINED:
raise ValidationError({"name": ["Reserved space name."]})
if reserved.search(self.name):
if self.id is None or self.name != "space-%d" % self.id:
raise ValidationError({"name": ["Reserved space name."]})
elif self.id is not None:
# Since we are not creating the space, force the (null or empty)
# name to be the default name.
self.name = "space-%d" % self.id
def save(self, *args, **kwargs):
# Name will get set by clean_name() if None or empty, and there is an
# id. We just need to handle names here for creation.
super().save(*args, **kwargs)
if self.name is None or self.name == "":
# If we got here, then we have a newly created space that needs a
# default name.
self.name = "space-%d" % self.id
self.save()
def clean(self, *args, **kwargs):
super().clean(*args, **kwargs)
self.clean_name()
@property
def subnet_set(self):
"""Backward compatibility shim to get the subnets on this space."""
# Circular imports.
from maasserver.models import Subnet
return Subnet.objects.filter(vlan__space=self)
| agpl-3.0 | -8,030,477,149,762,342,000 | 32.220994 | 79 | 0.620323 | false |
vmware/pyvmomi-community-samples | samples/getvmsbycluster.py | 1 | 4534 |
#!/usr/bin/env python
"""
Written by Chris Hupman
Github: https://github.com/chupman/
Example: Get guest info with folder and host placement
"""
import json
from tools import cli, service_instance
data = {}
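# Example invocation (flags come from the shared sample "cli" helpers; the
# exact option names may differ in your checkout):
#   python getvmsbycluster.py -s vcenter.example.com -u user -p pass --json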
def get_nics(guest):
nics = {}
for nic in guest.net:
if nic.network: # Only return adapter backed interfaces
if nic.ipConfig is not None and nic.ipConfig.ipAddress is not None:
nics[nic.macAddress] = {} # Use mac as uniq ID for nic
nics[nic.macAddress]['netlabel'] = nic.network
ipconf = nic.ipConfig.ipAddress
i = 0
nics[nic.macAddress]['ipv4'] = {}
for ip in ipconf:
if ":" not in ip.ipAddress: # Only grab ipv4 addresses
nics[nic.macAddress]['ipv4'][i] = ip.ipAddress
nics[nic.macAddress]['prefix'] = ip.prefixLength
nics[nic.macAddress]['connected'] = nic.connected
i = i+1
return nics
def vmsummary(summary, guest):
vmsum = {}
config = summary.config
net = get_nics(guest)
vmsum['mem'] = str(config.memorySizeMB / 1024)
vmsum['diskGB'] = str("%.2f" % (summary.storage.committed / 1024**3))
vmsum['cpu'] = str(config.numCpu)
vmsum['path'] = config.vmPathName
vmsum['ostype'] = config.guestFullName
vmsum['state'] = summary.runtime.powerState
vmsum['annotation'] = config.annotation if config.annotation else ''
vmsum['net'] = net
return vmsum
def vm2dict(datacenter, cluster, host, vm, summary):
# If nested folder path is required, split into a separate function
vmname = vm.summary.config.name
data[datacenter][cluster][host][vmname]['folder'] = vm.parent.name
data[datacenter][cluster][host][vmname]['mem'] = summary['mem']
data[datacenter][cluster][host][vmname]['diskGB'] = summary['diskGB']
data[datacenter][cluster][host][vmname]['cpu'] = summary['cpu']
data[datacenter][cluster][host][vmname]['path'] = summary['path']
data[datacenter][cluster][host][vmname]['net'] = summary['net']
data[datacenter][cluster][host][vmname]['ostype'] = summary['ostype']
data[datacenter][cluster][host][vmname]['state'] = summary['state']
data[datacenter][cluster][host][vmname]['annotation'] = summary['annotation']
def data2json(raw_data, args):
with open(args.jsonfile, 'w') as json_file:
json.dump(raw_data, json_file)
def main():
"""
Iterate through all datacenters and list VM info.
"""
parser = cli.Parser()
parser.add_custom_argument('--json', required=False, action='store_true',
help='Write out to json file')
parser.add_custom_argument('--jsonfile', required=False, action='store',
default='getvmsbycluster.json',
help='Filename and path of json file')
parser.add_custom_argument('--silent', required=False, action='store_true',
help='supress output to screen')
args = parser.get_args()
si = service_instance.connect(args)
outputjson = True if args.json else False
content = si.RetrieveContent()
children = content.rootFolder.childEntity
for child in children: # Iterate though DataCenters
datacenter = child
data[datacenter.name] = {} # Add data Centers to data dict
clusters = datacenter.hostFolder.childEntity
for cluster in clusters: # Iterate through the clusters in the DC
# Add Clusters to data dict
data[datacenter.name][cluster.name] = {}
hosts = cluster.host # Variable to make pep8 compliance
for host in hosts: # Iterate through Hosts in the Cluster
hostname = host.summary.config.name
# Add VMs to data dict by config name
data[datacenter.name][cluster.name][hostname] = {}
vms = host.vm
for vm in vms: # Iterate through each VM on the host
vmname = vm.summary.config.name
data[datacenter.name][cluster.name][hostname][vmname] = {}
summary = vmsummary(vm.summary, vm.guest)
vm2dict(datacenter.name, cluster.name, hostname, vm, summary)
if not args.silent:
print(json.dumps(data, sort_keys=True, indent=4))
if outputjson:
data2json(data, args)
# Start program
if __name__ == "__main__":
main()
| apache-2.0 | 2,445,535,360,479,337,000 | 38.426087 | 81 | 0.60322 | false |
ASoftTech/Scons-Tools-Grbd | scons_tools_grbd/Tools/MSBuild/VC/Dll2Lib.py | 1 | 3979 |
"""
Dll2Lib
This tool will generate a .lib file under windows for a given .dll file
This uses dumpfile to export a list of symbols
dumpbin /exports C:\yourpath\yourlib.dll
The list of symbols is then written to a .def file
The lib command is then used to generate the .lib file from the .def file
lib /def:C:\mypath\mylib.def /OUT:C:\mypath\mylib.lib
A side effect of this is an .exp file which also requires cleanup
We can then use the .lib file for linking with the compiler under Windows
"""
import os, sys, os.path as path, subprocess
import SCons.Script
from SCons.Environment import Environment
from SCons.Script import Builder
from SCons.Tool.MSCommon import msvc_exists, msvc_setup_env_once
def exists(env):
return msvc_exists()
def generate(env):
"""Called when the tool is loaded into the environment at startup of script"""
assert(exists(env))
# Set-up ms tools paths
msvc_setup_env_once(env)
env.SetDefault(
# Location of the dumpbin executable
DUMPBIN = 'dumpbin',
)
# Register the builder
bld = Builder(action = __Dll2Lib_func, emitter = __Dll2Lib_emitter)
env.Append(BUILDERS = {'Dll2Lib' : bld})
def __Dll2Lib_emitter(target, source, env):
"""Add the generated .def and .exp files to the list of targerts for cleanup"""
addfiles = []
for item in target:
libfile = item.abspath
deffile = path.splitext(libfile)[0] + '.def'
expfile = path.splitext(libfile)[0] + '.exp'
        addfiles.append(env.File(deffile))
        addfiles.append(env.File(expfile))
target = target + addfiles
return target, source
def __Dll2Lib_func(target, source, env):
"""Actual builder that does the work after the Sconscript file is parsed"""
index = 0
for srcitem in source:
srcfile = str(srcitem)
filename = str(target[index])
libfile = path.splitext(filename)[0] + '.lib'
deffile = path.splitext(filename)[0] + '.def'
if path.splitext(srcfile)[1] != '.dll':
continue
dumpbin_exp = __dumpbin_run_exports(env, srcfile)
exportlist = __dumpbin_parse_exports(dumpbin_exp)
__write_deffile(deffile, exportlist)
__generate_lib(env, deffile, libfile)
index = index + 1
def __dumpbin_run_exports(env, dllfile):
"""Run dumpbin /exports against the input dll"""
cmdopts = [env['DUMPBIN'], '/exports', str(dllfile)]
print("Calling '%s'" % env['DUMPBIN'])
stdout, stderr = __runcmd_mbcs(env, cmdopts)
return stdout
def __dumpbin_parse_exports(input):
"""Parse thr output from dumpbin as a list of symbols"""
ret = []
lines = input.split('\n')
for line in lines:
arr1 = line.split()
if len(arr1) == 4 and arr1[1] != 'number' and arr1[1] != 'hint':
ret.append(arr1[3])
return ret
def __write_deffile(outfile, lines):
"""Write the list of symbols to a .def file"""
with open(outfile, 'w') as f:
f.write('EXPORTS\n')
for line in lines:
f.write(line + '\n')
def __generate_lib(env, deffile, libfile):
"""Generate the .lib file"""
cmdopts = [env['AR'], '/def:' + deffile, '/OUT:' + libfile]
stdout, stderr = __runcmd_mbcs(env, cmdopts)
return stdout
def __runcmd_mbcs(env, cmdopts):
"""Run command while capturing the output"""
popen = SCons.Action._subproc(env, cmdopts, stdin='devnull',
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = popen.stdout.read()
stderr = popen.stderr.read()
if not isinstance(stderr, str):
stderr = stderr.decode("mbcs")
if not isinstance(stdout, str):
stdout = stdout.decode("mbcs")
if stderr:
import sys
sys.stderr.write(stderr)
if popen.wait() != 0:
raise IOError(stderr)
return stdout, stderr
| mit | -6,572,437,492,332,772,000 | 30.614754 | 83 | 0.617994 | false |
liliasapurina/python_training | test/test_contacts.py | 1 | 2349 |
__author__ = '1'
import re
def test_contacts_on_home_page(app):
address_from_home_page = app.address.get_address_list()[0]
address_from_edit_page = app.address.get_address_info_from_edit_page(0)
assert address_from_home_page.all_phones_from_home_page == merge_phones_like_on_home_page(address_from_edit_page)
assert address_from_home_page.name == address_from_edit_page.name
assert address_from_home_page.lastname == address_from_edit_page.lastname
assert address_from_home_page.address == address_from_edit_page.address
assert address_from_home_page.all_emails_from_home_page == merge_emails_like_on_home_page(address_from_edit_page)
def test_contacts_on_address_view_page(app):
address_from_view_page = app.address.get_address_from_view_page(0)
address_from_edit_page = app.address.get_address_info_from_edit_page(0)
assert address_from_view_page.all_fields == merge_fields_like_on_view_page(address_from_edit_page)
def clear(s):
return re.sub("[() -]","",s)
def merge_phones_like_on_home_page(address):
return "\n".join(filter(lambda x: x != "",
map(lambda x: clear(x),
filter(lambda x: x is not None,
[address.phone, address.mobilephone, address.workphone, address.secondaryphone]))))
def merge_emails_like_on_home_page(address):
return "\n".join(filter(lambda x: x != "",
filter(lambda x: x is not None,
[address.email, address.email2, address.email3])))
def merge_fields_like_on_view_page(address):
return str(address.name)+" "+str(address.middlename)\
+" "+str(address.lastname)+"\n"+str(address.nickname)\
+"\n"+str(address.company)+"\n"+str(address.address)\
+"\nH: "+str(address.phone)\
+"\nM: "+str(address.mobilephone)+"\nW: "+str(address.workphone)\
+"\n"+"\n"\
+create_view_for_email(str(address.email))\
+create_view_for_email(str(address.email2))\
+create_view_for_email(str(address.email3))+'\n'\
+"\nP: "+str(address.secondaryphone)
def create_view_for_email(email):
if email != "":
dog_index = email.find("@")
return email+" (www."+email[dog_index+1:len(email)]+")"
else:
return "\n" | apache-2.0 | -2,452,055,065,751,226,400 | 46.959184 | 121 | 0.62069 | false |
robozman/pymumblegui | pymumble/pymumble_py3/mumble.py | 1 | 24841 |
# -*- coding: utf-8 -*-
import threading
import logging
import time
import select
import socket
import ssl
import struct
from .errors import *
from .constants import *
from . import users
from . import channels
from . import blobs
from . import commands
from . import callbacks
from . import tools
from . import soundoutput
from . import mumble_pb2
class Mumble(threading.Thread):
"""
Mumble client library main object.
basically a thread
"""
def __init__(self, host, user, port=64738, password='', certfile=None, keyfile=None, reconnect=False, tokens=[], debug=False):
"""
host=mumble server hostname or address
port=mumble server port
user=user to use for the connection
password=password for the connection
certfile=client certificate to authenticate the connection
        keyfile=private key coming with the client certificate
reconnect=if True, try to reconnect if disconnected
tokens=channel access tokens as a list of strings
debug=if True, send debugging messages (lot of...) to the stdout
"""
# TODO: use UDP audio
threading.Thread.__init__(self)
self.Log = logging.getLogger("PyMumble") # logging object for errors and debugging
if debug:
self.Log.setLevel(logging.DEBUG)
else:
self.Log.setLevel(logging.ERROR)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s-%(name)s-%(levelname)s-%(message)s')
ch.setFormatter(formatter)
self.Log.addHandler(ch)
self.parent_thread = threading.current_thread() # main thread of the calling application
self.mumble_thread = None # thread of the mumble client library
self.host = host
self.port = port
self.user = user
self.password = password
self.certfile = certfile
self.keyfile = keyfile
self.reconnect = reconnect
self.ping_stats = {"last_rcv": 0, "time_send": 0, "nb": 0, "avg": 40.0, "var": 0.0}
self.tokens = tokens
self.__opus_profile = PYMUMBLE_AUDIO_TYPE_OPUS_PROFILE
self.receive_sound = False # set to True to treat incoming audio, otherwise it is simply ignored
self.loop_rate = PYMUMBLE_LOOP_RATE
self.application = PYMUMBLE_VERSION_STRING
self.callbacks = callbacks.CallBacks() # callbacks management
self.ready_lock = threading.Lock() # released when the connection is fully established with the server
self.ready_lock.acquire()
def init_connection(self):
"""Initialize variables that are local to a connection, (needed if the client automatically reconnect)"""
self.ready_lock.acquire(False) # reacquire the ready-lock in case of reconnection
self.connected = PYMUMBLE_CONN_STATE_NOT_CONNECTED
self.control_socket = None
self.media_socket = None # Not implemented - for UDP media
        self.bandwidth = PYMUMBLE_BANDWIDTH  # reset the outgoing bandwidth to its default before connecting
self.server_max_bandwidth = None
self.udp_active = False
self.users = users.Users(self, self.callbacks) # contain the server's connected users informations
self.channels = channels.Channels(self, self.callbacks) # contain the server's channels informations
self.blobs = blobs.Blobs(self) # manage the blob objects
self.sound_output = soundoutput.SoundOutput(self, PYMUMBLE_AUDIO_PER_PACKET, self.bandwidth, opus_profile=self.__opus_profile) # manage the outgoing sounds
self.commands = commands.Commands() # manage commands sent between the main and the mumble threads
self.receive_buffer = bytes() # initialize the control connection input buffer
def run(self):
"""Connect to the server and start the loop in its thread. Retry if requested"""
self.mumble_thread = threading.current_thread()
# loop if auto-reconnect is requested
while True:
self.init_connection() # reset the connection-specific object members
if self.connect() >= PYMUMBLE_CONN_STATE_FAILED: # some error occured, exit here
self.ready_lock.release()
break
try:
self.loop()
except socket.error:
self.connected = PYMUMBLE_CONN_STATE_NOT_CONNECTED
if not self.reconnect or not self.parent_thread.is_alive():
break
time.sleep(PYMUMBLE_CONNECTION_RETRY_INTERVAL)
def connect(self):
"""Connect to the server"""
# Connect the SSL tunnel
self.Log.debug("connecting to %s on port %i.", self.host, self.port)
std_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.control_socket = ssl.wrap_socket(std_sock, certfile=self.certfile, keyfile=self.keyfile, ssl_version=ssl.PROTOCOL_TLS)
self.control_socket.connect((self.host, self.port))
self.control_socket.setblocking(0)
# Perform the Mumble authentication
version = mumble_pb2.Version()
version.version = (PYMUMBLE_PROTOCOL_VERSION[0] << 16) + (PYMUMBLE_PROTOCOL_VERSION[1] << 8) + PYMUMBLE_PROTOCOL_VERSION[2]
version.release = self.application
version.os = PYMUMBLE_OS_STRING
version.os_version = PYMUMBLE_OS_VERSION_STRING
self.Log.debug("sending: version: %s", version)
self.send_message(PYMUMBLE_MSG_TYPES_VERSION, version)
authenticate = mumble_pb2.Authenticate()
authenticate.username = self.user
authenticate.password = self.password
authenticate.tokens.extend(self.tokens)
authenticate.opus = True
self.Log.debug("sending: authenticate: %s", authenticate)
self.send_message(PYMUMBLE_MSG_TYPES_AUTHENTICATE, authenticate)
except socket.error:
self.connected = PYMUMBLE_CONN_STATE_FAILED
return self.connected
self.connected = PYMUMBLE_CONN_STATE_AUTHENTICATING
return self.connected
def loop(self):
"""
Main loop
waiting for a message from the server for maximum self.loop_rate time
take care of sending the ping
take care of sending the queued commands to the server
check on every iteration for outgoing sound
check for disconnection
"""
self.Log.debug("entering loop")
last_ping = time.time() # keep track of the last ping time
# loop as long as the connection and the parent thread are alive
while self.connected not in (PYMUMBLE_CONN_STATE_NOT_CONNECTED, PYMUMBLE_CONN_STATE_FAILED) and self.parent_thread.is_alive():
if last_ping + PYMUMBLE_PING_DELAY <= time.time(): # when it is time, send the ping
self.ping()
last_ping = time.time()
if self.connected == PYMUMBLE_CONN_STATE_CONNECTED:
while self.commands.is_cmd():
self.treat_command(self.commands.pop_cmd()) # send the commands coming from the application to the server
self.sound_output.send_audio() # send outgoing audio if available
(rlist, wlist, xlist) = select.select([self.control_socket], [], [self.control_socket], self.loop_rate) # wait for a socket activity
if self.control_socket in rlist: # something to be read on the control socket
self.read_control_messages()
elif self.control_socket in xlist: # socket was closed
self.control_socket.close()
self.connected = PYMUMBLE_CONN_STATE_NOT_CONNECTED
def ping(self):
"""Send the keepalive through available channels"""
ping = mumble_pb2.Ping()
ping.timestamp = int(time.time())
ping.tcp_ping_avg = self.ping_stats['avg']
ping.tcp_ping_var = self.ping_stats['var']
ping.tcp_packets = self.ping_stats['nb']
self.Log.debug("sending: ping: %s", ping)
self.send_message(PYMUMBLE_MSG_TYPES_PING, ping)
self.ping_stats['time_send'] = int(time.time() * 1000)
self.Log.debug(self.ping_stats['last_rcv'])
if self.ping_stats['last_rcv'] != 0 and int(time.time() * 1000) > self.ping_stats['last_rcv'] + (60 * 1000):
self.Log.debug("Ping too long ! Disconnected ?")
self.connected = PYMUMBLE_CONN_STATE_NOT_CONNECTED
def ping_response(self, mess):
self.ping_stats['last_rcv'] = int(time.time() * 1000)
ping = int(time.time() * 1000) - self.ping_stats['time_send']
old_avg = self.ping_stats['avg']
nb = self.ping_stats['nb']
new_avg = ((self.ping_stats['avg'] * nb) + ping) / (nb + 1)
try:
self.ping_stats['var'] = self.ping_stats['var'] + pow(old_avg - new_avg, 2) + (1 / nb) * pow(ping - new_avg, 2)
except ZeroDivisionError:
pass
self.ping_stats['avg'] = new_avg
self.ping_stats['nb'] += 1
def send_message(self, type, message):
"""Send a control message to the server"""
packet = struct.pack("!HL", type, message.ByteSize()) + message.SerializeToString()
while len(packet) > 0:
self.Log.debug("sending message")
sent = self.control_socket.send(packet)
if sent < 0:
raise socket.error("Server socket error")
packet = packet[sent:]
def read_control_messages(self):
"""Read control messages coming from the server"""
# from tools import tohex # for debugging
try:
buffer = self.control_socket.recv(PYMUMBLE_READ_BUFFER_SIZE)
self.receive_buffer += buffer
except socket.error:
pass
while len(self.receive_buffer) >= 6: # header is present (type + length)
self.Log.debug("read control connection")
header = self.receive_buffer[0:6]
if len(header) < 6:
break
(type, size) = struct.unpack("!HL", header) # decode header
if len(self.receive_buffer) < size+6: # if not length data, read further
break
# self.Log.debug("message received : " + tohex(self.receive_buffer[0:size+6])) # for debugging
message = self.receive_buffer[6:size+6] # get the control message
self.receive_buffer = self.receive_buffer[size+6:] # remove from the buffer the read part
self.dispatch_control_message(type, message)
def dispatch_control_message(self, type, message):
"""Dispatch control messages based on their type"""
self.Log.debug("dispatch control message")
if type == PYMUMBLE_MSG_TYPES_UDPTUNNEL: # audio encapsulated in control message
self.sound_received(message)
elif type == PYMUMBLE_MSG_TYPES_VERSION:
mess = mumble_pb2.Version()
mess.ParseFromString(message)
self.Log.debug("message: Version : %s", mess)
elif type == PYMUMBLE_MSG_TYPES_AUTHENTICATE:
mess = mumble_pb2.Authenticate()
mess.ParseFromString(message)
self.Log.debug("message: Authenticate : %s", mess)
elif type == PYMUMBLE_MSG_TYPES_PING:
mess = mumble_pb2.Ping()
mess.ParseFromString(message)
self.Log.debug("message: Ping : %s", mess)
self.ping_response(mess)
elif type == PYMUMBLE_MSG_TYPES_REJECT:
mess = mumble_pb2.Reject()
mess.ParseFromString(message)
self.Log.debug("message: reject : %s", mess)
self.connected = PYMUMBLE_CONN_STATE_FAILED
self.ready_lock.release()
raise ConnectionRejectedError(mess.reason)
elif type == PYMUMBLE_MSG_TYPES_SERVERSYNC: # this message finish the connection process
mess = mumble_pb2.ServerSync()
mess.ParseFromString(message)
self.Log.debug("message: serversync : %s", mess)
self.users.set_myself(mess.session)
self.server_max_bandwidth = mess.max_bandwidth
self.set_bandwidth(mess.max_bandwidth)
if self.connected == PYMUMBLE_CONN_STATE_AUTHENTICATING:
self.connected = PYMUMBLE_CONN_STATE_CONNECTED
self.callbacks(PYMUMBLE_CLBK_CONNECTED)
self.ready_lock.release() # release the ready-lock
elif type == PYMUMBLE_MSG_TYPES_CHANNELREMOVE:
mess = mumble_pb2.ChannelRemove()
mess.ParseFromString(message)
self.Log.debug("message: ChannelRemove : %s", mess)
self.channels.remove(mess.channel_id)
elif type == PYMUMBLE_MSG_TYPES_CHANNELSTATE:
mess = mumble_pb2.ChannelState()
mess.ParseFromString(message)
self.Log.debug("message: channelstate : %s", mess)
self.channels.update(mess)
elif type == PYMUMBLE_MSG_TYPES_USERREMOVE:
mess = mumble_pb2.UserRemove()
mess.ParseFromString(message)
self.Log.debug("message: UserRemove : %s", mess)
self.users.remove(mess)
elif type == PYMUMBLE_MSG_TYPES_USERSTATE:
mess = mumble_pb2.UserState()
mess.ParseFromString(message)
self.Log.debug("message: userstate : %s", mess)
self.users.update(mess)
elif type == PYMUMBLE_MSG_TYPES_BANLIST:
mess = mumble_pb2.BanList()
mess.ParseFromString(message)
self.Log.debug("message: BanList : %s", mess)
elif type == PYMUMBLE_MSG_TYPES_TEXTMESSAGE:
mess = mumble_pb2.TextMessage()
mess.ParseFromString(message)
self.Log.debug("message: TextMessage : %s", mess)
self.callbacks(PYMUMBLE_CLBK_TEXTMESSAGERECEIVED, mess)
elif type == PYMUMBLE_MSG_TYPES_PERMISSIONDENIED:
mess = mumble_pb2.PermissionDenied()
mess.ParseFromString(message)
self.Log.debug("message: PermissionDenied : %s", mess)
elif type == PYMUMBLE_MSG_TYPES_ACL:
mess = mumble_pb2.ACL()
mess.ParseFromString(message)
self.Log.debug("message: ACL : %s", mess)
elif type == PYMUMBLE_MSG_TYPES_QUERYUSERS:
mess = mumble_pb2.QueryUsers()
mess.ParseFromString(message)
self.Log.debug("message: QueryUsers : %s", mess)
elif type == PYMUMBLE_MSG_TYPES_CRYPTSETUP:
mess = mumble_pb2.CryptSetup()
mess.ParseFromString(message)
self.Log.debug("message: CryptSetup : %s", mess)
self.ping()
elif type == PYMUMBLE_MSG_TYPES_CONTEXTACTIONMODIFY:
mess = mumble_pb2.ContextActionModify()
mess.ParseFromString(message)
self.Log.debug("message: ContextActionModify : %s", mess)
self.callbacks(PYMUMBLE_CLBK_CONTEXTACTIONRECEIVED, mess)
elif type == PYMUMBLE_MSG_TYPES_CONTEXTACTION:
mess = mumble_pb2.ContextAction()
mess.ParseFromString(message)
self.Log.debug("message: ContextAction : %s", mess)
elif type == PYMUMBLE_MSG_TYPES_USERLIST:
mess = mumble_pb2.UserList()
mess.ParseFromString(message)
self.Log.debug("message: UserList : %s", mess)
elif type == PYMUMBLE_MSG_TYPES_VOICETARGET:
mess = mumble_pb2.VoiceTarget()
mess.ParseFromString(message)
self.Log.debug("message: VoiceTarget : %s", mess)
elif type == PYMUMBLE_MSG_TYPES_PERMISSIONQUERY:
mess = mumble_pb2.PermissionQuery()
mess.ParseFromString(message)
self.Log.debug("message: PermissionQuery : %s", mess)
elif type == PYMUMBLE_MSG_TYPES_CODECVERSION:
mess = mumble_pb2.CodecVersion()
mess.ParseFromString(message)
self.Log.debug("message: CodecVersion : %s", mess)
self.sound_output.set_default_codec(mess)
elif type == PYMUMBLE_MSG_TYPES_USERSTATS:
mess = mumble_pb2.UserStats()
mess.ParseFromString(message)
self.Log.debug("message: UserStats : %s", mess)
elif type == PYMUMBLE_MSG_TYPES_REQUESTBLOB:
mess = mumble_pb2.RequestBlob()
mess.ParseFromString(message)
self.Log.debug("message: RequestBlob : %s", mess)
elif type == PYMUMBLE_MSG_TYPES_SERVERCONFIG:
mess = mumble_pb2.ServerConfig()
mess.ParseFromString(message)
self.Log.debug("message: ServerConfig : %s", mess)
def set_bandwidth(self, bandwidth):
"""Set the total allowed outgoing bandwidth"""
if self.server_max_bandwidth is not None and bandwidth > self.server_max_bandwidth:
self.bandwidth = self.server_max_bandwidth
else:
self.bandwidth = bandwidth
self.sound_output.set_bandwidth(self.bandwidth) # communicate the update to the outgoing audio manager
def sound_received(self, message):
"""Manage a received sound message"""
# from tools import tohex # for debugging
pos = 0
# self.Log.debug("sound packet : " + tohex(message)) # for debugging
(header, ) = struct.unpack("!B", bytes([message[pos]])) # extract the header
type = (header & 0b11100000) >> 5
target = header & 0b00011111
pos += 1
if type == PYMUMBLE_AUDIO_TYPE_PING:
return
session = tools.VarInt() # decode session id
pos += session.decode(message[pos:pos+10])
sequence = tools.VarInt() # decode sequence number
pos += sequence.decode(message[pos:pos+10])
self.Log.debug("audio packet received from %i, sequence %i, type:%i, target:%i, lenght:%i", session.value, sequence.value, type, target, len(message))
terminator = False # set to true if it's the last 10 ms audio frame for the packet (used with CELT codec)
while (pos < len(message)) and not terminator: # get the audio frames one by one
if type == PYMUMBLE_AUDIO_TYPE_OPUS:
size = tools.VarInt() # OPUS use varint for the frame length
pos += size.decode(message[pos:pos+10])
size = size.value
if not (size & 0x2000): # terminator is 0x2000 in the resulting int.
terminator = True # should actually always be 0 as OPUS can use variable length audio frames
size &= 0x1fff # isolate the size from the terminator
else:
(header, ) = struct.unpack("!B", message[pos]) # CELT length and terminator is encoded in a 1 byte int
if not (header & 0b10000000):
terminator = True
size = header & 0b01111111
pos += 1
self.Log.debug("Audio frame : time:%f, last:%s, size:%i, type:%i, target:%i, pos:%i", time.time(), str(terminator), size, type, target, pos-1)
if size > 0 and self.receive_sound: # if audio must be treated
try:
newsound = self.users[session.value].sound.add(message[pos:pos+size],
sequence.value,
type,
target) # add the sound to the user's sound queue
self.callbacks(PYMUMBLE_CLBK_SOUNDRECEIVED, self.users[session.value], newsound)
sequence.value += int(round(newsound.duration / 1000 * 10)) # add 1 sequence per 10ms of audio
self.Log.debug("Audio frame : time:%f last:%s, size:%i, uncompressed:%i, type:%i, target:%i", time.time(), str(terminator), size, newsound.size, type, target)
except CodecNotSupportedError as msg:
print(msg)
except KeyError: # sound received after user removed
pass
# if len(message) - pos < size:
# raise InvalidFormatError("Invalid audio frame size")
pos += size # go further in the packet, after the audio frame
# TODO: get position info
def set_application_string(self, string):
"""Set the application name, that can be viewed by other clients on the server"""
self.application = string
def set_loop_rate(self, rate):
"""Set the current main loop rate (pause per iteration)"""
self.loop_rate = rate
def get_loop_rate(self):
"""Get the current main loop rate (pause per iteration)"""
return self.loop_rate
def set_codec_profile(self, profile):
"""set the audio profile"""
if profile in ["audio", "voip"]:
self.__opus_profile = profile
else:
raise ValueError("Unknown profile: " + str(profile))
def get_codec_profile(self):
"""return the audio profile string"""
return self.__opus_profile
def set_receive_sound(self, value):
"""Enable or disable the management of incoming sounds"""
if value:
self.receive_sound = True
else:
self.receive_sound = False
def is_ready(self):
"""Wait for the connection to be fully completed. To be used in the main thread"""
self.ready_lock.acquire()
self.ready_lock.release()
def execute_command(self, cmd, blocking=True):
"""Create a command to be sent to the server. To be used in the main thread"""
self.is_ready()
lock = self.commands.new_cmd(cmd)
if blocking and self.mumble_thread is not threading.current_thread():
lock.acquire()
lock.release()
return lock
# TODO: manage a timeout for blocking commands. Currently, no command actually waits for the server to execute
# The result of these commands should actually be checked against incoming server updates
def treat_command(self, cmd):
"""Send the awaiting commands to the server. Used in the pymumble thread."""
if cmd.cmd == PYMUMBLE_CMD_MOVE:
userstate = mumble_pb2.UserState()
userstate.session = cmd.parameters["session"]
userstate.channel_id = cmd.parameters["channel_id"]
self.Log.debug("Moving to channel")
self.send_message(PYMUMBLE_MSG_TYPES_USERSTATE, userstate)
cmd.response = True
self.commands.answer(cmd)
elif cmd.cmd == PYMUMBLE_CMD_TEXTMESSAGE:
textmessage = mumble_pb2.TextMessage()
textmessage.session.append(cmd.parameters["session"])
textmessage.channel_id.append(cmd.parameters["channel_id"])
textmessage.message = cmd.parameters["message"]
self.send_message(PYMUMBLE_MSG_TYPES_TEXTMESSAGE, textmessage)
cmd.response = True
self.commands.answer(cmd)
elif cmd.cmd == PYMUMBLE_CMD_TEXTPRIVATEMESSAGE:
textprivatemessage = mumble_pb2.TextMessage()
textprivatemessage.session.append(cmd.parameters["session"])
textprivatemessage.message = cmd.parameters["message"]
self.send_message(PYMUMBLE_MSG_TYPES_TEXTMESSAGE, textprivatemessage)
cmd.response = True
self.commands.answer(cmd)
elif cmd.cmd == PYMUMBLE_CMD_MODUSERSTATE:
userstate = mumble_pb2.UserState()
userstate.session = cmd.parameters["session"]
if "mute" in cmd.parameters:
userstate.mute = cmd.parameters["mute"]
if "self_mute" in cmd.parameters:
userstate.self_mute = cmd.parameters["self_mute"]
if "deaf" in cmd.parameters:
userstate.deaf = cmd.parameters["deaf"]
if "self_deaf" in cmd.parameters:
userstate.self_deaf = cmd.parameters["self_deaf"]
if "suppress" in cmd.parameters:
userstate.suppress = cmd.parameters["suppress"]
if "recording" in cmd.parameters:
userstate.recording = cmd.parameters["recording"]
if "comment" in cmd.parameters:
userstate.comment = cmd.parameters["comment"]
if "texture" in cmd.parameters:
userstate.texture = cmd.parameters["texture"]
self.send_message(PYMUMBLE_MSG_TYPES_USERSTATE, userstate)
cmd.response = True
self.commands.answer(cmd)
| gpl-3.0 | -3,118,328,671,455,202,300 | 41.318569 | 178 | 0.605088 | false |
evgenybf/pyXLWriter | pyXLWriter/utilites.py | 1 | 4897 |
# pyXLWriter: A library for generating Excel Spreadsheets
# Copyright (c) 2004 Evgeny Filatov <[email protected]>
# Copyright (c) 2002-2004 John McNamara (Perl Spreadsheet::WriteExcel)
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
# General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#----------------------------------------------------------------------------
# This module was written/ported from PERL Spreadsheet::WriteExcel module
# The author of the PERL Spreadsheet::WriteExcel module is John McNamara
# <[email protected]>
#----------------------------------------------------------------------------
# See the README.txt distributed with pyXLWriter for more details.
"""pyXLWriter.utilites
Utilities for working with cell references
"""
__revision__ = """$Id: utilites.py,v 1.23 2004/08/20 05:16:17 fufff Exp $"""
#
# TODO: Optimization - I must use re.compile everywhere.
#
import re
__all__ = ["cell_to_rowcol", "cell_to_rowcol2", "rowcol_to_cell",
"cellrange_to_rowcol_pair"]
_re_cell_ex = re.compile(r"(\$?)([A-I]?[A-Z])(\$?)(\d+)")
_re_row_range = re.compile(r"\$?(\d+):\$?(\d+)")
_re_col_range = re.compile(r"\$?([A-I]?[A-Z]):\$?([A-I]?[A-Z])")
_re_cell_range = re.compile(r"\$?([A-I]?[A-Z]\$?\d+):\$?([A-I]?[A-Z]\$?\d+)")
_re_cell_ref = re.compile(r"\$?([A-I]?[A-Z]\$?\d+)")
def _col_by_name(colname):
"""
"""
col = 0
pow = 1
for i in xrange(len(colname)-1, -1, -1):
ch = colname[i]
col += (ord(ch) - ord('A') + 1) * pow
pow *= 26
return col - 1
def cell_to_rowcol(cell):
"""Convert an Excel cell reference string in A1 notation
to numeric row/col notation.
Returns: row, col, row_abs, col_abs
"""
m = _re_cell_ex.match(cell)
if not m:
raise Exception("Error in cell format")
col_abs, col, row_abs, row = m.groups()
row_abs = bool(row_abs)
col_abs = bool(col_abs)
row = int(row) - 1
col = _col_by_name(col)
return row, col, row_abs, col_abs
def cell_to_rowcol2(cell):
"""Convert an Excel cell reference string in A1 notation
to numeric row/col notation.
Returns: row, col
"""
m = _re_cell_ex.match(cell)
if not m:
raise Exception("Error in cell format")
col_abs, col, row_abs, row = m.groups()
# Convert base26 column string to number
# All your Base are belong to us.
row = int(row) - 1
col = _col_by_name(col)
return row, col
def rowcol_to_cell(row, col, row_abs=False, col_abs=False):
"""Convert numeric row/col notation to an Excel cell reference string in
A1 notation.
"""
d = col // 26
m = col % 26
chr1 = "" # Most significant character in AA1
if row_abs:
row_abs = '$'
else:
row_abs = ''
if col_abs:
col_abs = '$'
else:
col_abs = ''
if d > 0:
chr1 = chr(ord('A') + d - 1)
chr2 = chr(ord('A') + m)
# Zero index to 1-index
return col_abs + chr1 + chr2 + row_abs + str(row + 1)
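# Illustrative examples (sketch, not part of the original module) of the A1-style
# conversions provided above and below:
#
#   cell_to_rowcol2("B3")              -> (2, 1)
#   rowcol_to_cell(2, 1)               -> "B3"
#   cellrange_to_rowcol_pair("A1:B7")  -> (0, 0, 6, 1)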
def cellrange_to_rowcol_pair(cellrange):
"""Convert cell range string in A1 notation to numeric row/col
pair.
Returns: row1, col1, row2, col2
"""
cellrange = cellrange.upper()
# Convert a row range: '1:3'
res = _re_row_range.match(cellrange)
if res:
row1 = int(res.group(1)) - 1
col1 = 0
row2 = int(res.group(2)) - 1
col2 = -1
return row1, col1, row2, col2
# Convert a column range: 'A:A' or 'B:G'.
# A range such as A:A is equivalent to A1:A16384, so add rows as required
res = _re_col_range.match(cellrange)
if res:
col1 = _col_by_name(res.group(1))
row1 = 0
col2 = _col_by_name(res.group(2))
row2 = -1
return row1, col1, row2, col2
# Convert a cell range: 'A1:B7'
res = _re_cell_range.match(cellrange)
if res:
row1, col1 = cell_to_rowcol2(res.group(1))
row2, col2 = cell_to_rowcol2(res.group(2))
return row1, col1, row2, col2
# Convert a cell reference: 'A1' or 'AD2000'
res = _re_cell_ref.match(cellrange)
if res:
row1, col1 = cell_to_rowcol2(res.group(1))
return row1, col1, row1, col1
    raise Exception("Unknown cell reference %s" % (cellrange))
| lgpl-2.1 | -7,616,251,451,061,417,000 | 30.391026 | 77 | 0.590157 | false |
ngokevin/zamboni | settings_test.py | 1 | 2958 | import atexit
import os
import tempfile
from mkt.settings import ROOT
_tmpdirs = set()
def _cleanup():
try:
import sys
import shutil
except ImportError:
return
tmp = None
try:
for tmp in _tmpdirs:
shutil.rmtree(tmp)
except Exception, exc:
sys.stderr.write("\n** shutil.rmtree(%r): %s\n" % (tmp, exc))
atexit.register(_cleanup)
def _polite_tmpdir():
tmp = tempfile.mkdtemp()
_tmpdirs.add(tmp)
return tmp
# See settings.py for documentation:
IN_TEST_SUITE = True
NETAPP_STORAGE = _polite_tmpdir()
ADDONS_PATH = _polite_tmpdir()
GUARDED_ADDONS_PATH = _polite_tmpdir()
SIGNED_APPS_PATH = _polite_tmpdir()
SIGNED_APPS_REVIEWER_PATH = _polite_tmpdir()
UPLOADS_PATH = _polite_tmpdir()
TMP_PATH = _polite_tmpdir()
COLLECTIONS_ICON_PATH = _polite_tmpdir()
REVIEWER_ATTACHMENTS_PATH = _polite_tmpdir()
DUMPED_APPS_PATH = _polite_tmpdir()
AUTHENTICATION_BACKENDS = (
'django_browserid.auth.BrowserIDBackend',
)
# We won't actually send an email.
SEND_REAL_EMAIL = True
# Turn off search engine indexing.
USE_ELASTIC = False
# Ensure all validation code runs in tests:
VALIDATE_ADDONS = True
PAYPAL_PERMISSIONS_URL = ''
ENABLE_API_ERROR_SERVICE = False
SITE_URL = 'http://testserver'
BROWSERID_AUDIENCES = [SITE_URL]
STATIC_URL = SITE_URL + '/'
MEDIA_URL = '/media/'
CACHES = {
'default': {
'BACKEND': 'caching.backends.locmem.LocMemCache',
}
}
# COUNT() caching can't be invalidated, it just expires after x seconds. This
# is just too annoying for tests, so disable it.
CACHE_COUNT_TIMEOUT = -1
# Overrides whatever storage you might have put in local settings.
DEFAULT_FILE_STORAGE = 'amo.utils.LocalFileStorage'
VIDEO_LIBRARIES = ['lib.video.dummy']
ALLOW_SELF_REVIEWS = True
# Make sure debug toolbar output is disabled so it doesn't interfere with any
# html tests.
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False,
'SHOW_TOOLBAR_CALLBACK': lambda r: False,
'HIDE_DJANGO_SQL': True,
'TAG': 'div',
'ENABLE_STACKTRACES': False,
}
MOZMARKET_VENDOR_EXCLUDE = []
TASK_USER_ID = '4043307'
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.MD5PasswordHasher',
)
SQL_RESET_SEQUENCES = False
GEOIP_URL = ''
GEOIP_DEFAULT_VAL = 'restofworld'
GEOIP_DEFAULT_TIMEOUT = .2
ES_DEFAULT_NUM_REPLICAS = 0
ES_DEFAULT_NUM_SHARDS = 3
IARC_MOCK = True
# Ensure that exceptions aren't re-raised.
DEBUG_PROPAGATE_EXCEPTIONS = False
PAYMENT_PROVIDERS = ['bango']
# When not testing this specific feature, make sure it's off.
PRE_GENERATE_APKS = False
# This is a precaution in case something isn't mocked right.
PRE_GENERATE_APK_URL = 'http://you-should-never-load-this.com/'
# A sample key for signing receipts.
WEBAPPS_RECEIPT_KEY = os.path.join(ROOT, 'mkt/webapps/tests/sample.key')
# A sample key for signing preverified-account assertions.
PREVERIFIED_ACCOUNT_KEY = os.path.join(ROOT, 'mkt/account/tests/sample.key')
| bsd-3-clause | 2,432,286,399,915,075,000 | 22.291339 | 77 | 0.701826 | false |
yfpeng/pengyifan-leetcode | src/main/python/pyleetcode/next_greater_element.py | 1 | 1874 | """
You are given two arrays (without duplicates) nums1 and nums2 where nums1's elements are subset of nums2. Find all the
next greater numbers for nums1's elements in the corresponding places of nums2.
The Next Greater Number of a number x in nums1 is the first greater number to its right in nums2. If it does not exist,
output -1 for this number.
Example 1:
Input: nums1 = [4,1,2], nums2 = [1,3,4,2].
Output: [-1,3,-1]
Explanation:
For number 4 in the first array, you cannot find the next greater number for it in the second array, so output -1.
For number 1 in the first array, the next greater number for it in the second array is 3.
For number 2 in the first array, there is no next greater number for it in the second array, so output -1.
Example 2:
Input: nums1 = [2,4], nums2 = [1,2,3,4].
Output: [3,-1]
Explanation:
For number 2 in the first array, the next greater number for it in the second array is 3.
For number 4 in the first array, there is no next greater number for it in the second array, so output -1.
Note:
- All elements in nums1 and nums2 are unique.
- The length of both nums1 and nums2 would not exceed 1000.
"""
def next_greater_element(findNums, nums):
"""
:type findNums: List[int]
:type nums: List[int]
:rtype: List[int]
"""
output = []
for num1 in findNums:
index = nums.index(num1) + 1
o = -1
while index < len(nums):
if num1 < nums[index]:
o = nums[index]
break
index += 1
output.append(o)
return output
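# Note (illustrative sketch, not part of the original module): the nested scan above
# is O(len(findNums) * len(nums)). A monotonic-stack pass over nums answers each
# query in O(1), for O(len(nums) + len(findNums)) overall, e.g.:
#
#   def next_greater_element_stack(findNums, nums):
#       greater, stack = {}, []
#       for n in nums:
#           while stack and stack[-1] < n:
#               greater[stack.pop()] = n
#           stack.append(n)
#       return [greater.get(n, -1) for n in findNums]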
def test_next_greater_element():
assert next_greater_element([4, 1, 2], [1, 3, 4, 2]) == [-1, 3, -1]
assert next_greater_element([2, 4], [1, 2, 3, 4]) == [3, -1]
if __name__ == '__main__':
test_next_greater_element() | bsd-3-clause | -2,137,404,932,578,816,800 | 30.25 | 122 | 0.623799 | false |
ProgrammingRobotsStudyGroup/robo_magellan | scripts/kill_switch.py | 1 | 3880 | #!/usr/bin/env python
#
# Copyright 2017 Robot Garden, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#------------------------------------------------------------------------------
# Monitors state of the kill switch by monitoring an UP GPIO input, which
# is connected to a contact closure from the kill switch module.
#
# Topics subscribed: None
# Topics published:
# kill_sw_enabled: std_msgs/Bool
#------------------------------------------------------------------------------
#
import RPi.GPIO as GPIO
import time
import rospy
from std_msgs.msg import Bool
from std_msgs.msg import String
def kill_sw_mon():
rospy.init_node('kill_sw_mon', anonymous=True)
pub = rospy.Publisher('kill_sw_enabled', Bool, queue_size=10)
# first push of kill switch is going to start the state machine
global pub_exec_simple
pub_exec_simple = rospy.Publisher("exec_cmd_simple", String, queue_size = 10)
global once
once = False
rate = rospy.Rate(20)
gpio_pin = 7
GPIO.setmode(GPIO.BOARD)
# assumes the pin is pulled up be external resistor
GPIO.setup(gpio_pin, GPIO.IN)
# find the starting state of the input pin
n_qual = 5 # number of successive readings that must match to qualify
count = 0
last_val = False
start = time.time()
while count < n_qual:
if (time.time() - start) > 10:
break
val = GPIO.input(gpio_pin) == 0
if val == last_val:
count += 1
last_val = val
else:
count = 0 # reset
last_val = val
time.sleep(0.1) # pause between reads
if count >= n_qual:
kill_sw_ok = val # the value of n_qual consecutive reads
# print "Initial value is: %s" % val
else:
kill_sw_ok = False;
print "Initial value not found; count = %s" % count
# TODO need better error handling?
if not rospy.is_shutdown():
time.sleep(0.2)
pub.publish(kill_sw_ok) # publish initial state
rospy.loginfo(kill_sw_ok)
while not rospy.is_shutdown():
try:
if not kill_sw_ok:
# Use falling edge detection to see if pin is pulled
# low to avoid repeated polling
GPIO.wait_for_edge(gpio_pin, GPIO.FALLING)
time.sleep(0.1) # wait for sw bounce
if not GPIO.input(gpio_pin): # re-read to confirm
kill_sw_ok = True
pub.publish(kill_sw_ok)
rospy.loginfo(kill_sw_ok)
pub_exec_simple.publish("START_EXEC")
rospy.loginfo("kill_sw_mon: Pubishing START_EXEC")
else:
GPIO.wait_for_edge(gpio_pin, GPIO.RISING)
time.sleep(0.1) # wait for sw bounce
if GPIO.input(gpio_pin):
kill_sw_ok = False
pub.publish(kill_sw_ok)
rospy.loginfo(kill_sw_ok)
rate.sleep()
except Exception, e:
#Revert all GPIO pins to their normal states (i.e. input = safe)
GPIO.cleanup()
if __name__ == '__main__':
try:
kill_sw_mon()
except rospy.ROSInterruptException:
#Revert all GPIO pins to their normal states (i.e. input = safe)
GPIO.cleanup()
pass
| apache-2.0 | 8,239,543,852,723,142,000 | 33.954955 | 81 | 0.569072 | false |
jul/dsat | setup.py | 1 | 1616 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import unittest
import sys
import os
sys.path += [ "./dsat" ]
import dsat
#TODO call make text with sphinx (please don't use system) before
#building/install
fcontent = lambda f: open(f).read()
def test():
print "TEST SKIPPED"
return True
loader= unittest.TestLoader()
suite=loader.discover("")
runner=unittest.TextTestRunner()
result=runner.run(suite)
if not result.wasSuccessful():
raise Exception( "Test Failed: Aborting install")
long_desc = fcontent('README.rst')
setup(
name = "dsat",
version = dsat.__version__,
description = long_desc,
packages = find_packages(),
author_email = "[email protected]",
url='http://dsat.readthedocs.org/',
package_dir = dict( dsat = "dsat",),
install_requires = map(
str.strip,
open("requirements.txt").readlines()
),
### doc writer don't be stupid
### if there is an order for the file then use alphabetical
license = fcontent('LICENSE.txt'),
long_description = long_desc,
classifiers = [
'Development Status :: 3 - Alpha',
'Programming Language :: Python :: 2.7',
'License :: OSI Approved :: BSD License',
'Environment :: Console',
'Operating System :: POSIX :: Linux',
'Topic :: System :: Distributed Computing'
],
test_suite="tests",
)
if 'install' in sys.argv or 'develop' in sys.argv:
test()
print long_desc
| bsd-2-clause | 1,446,415,715,708,504,300 | 27.350877 | 67 | 0.591584 | false |
phtagn/sickbeard_mp4_automator | tmdb_api/tmdb.py | 1 | 14699 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
#author:doganaydin
#project:themoviedb
#repository:http://github.com/doganaydin/themoviedb
#license: LGPLv3 http://www.gnu.org/licenses/lgpl.html
"""An interface to the themoviedb.org API"""
__author__ = "doganaydin"
__version__ = "1.0b"
try:
import simplejson
except:
import json as simplejson
import operator
import requests
config = {}
def configure(api_key, language='en'):
config['apikey'] = api_key
config['language'] = language
config['urls'] = {}
config['urls']['movie.search'] = "https://api.themoviedb.org/3/search/movie?query=%%s&api_key=%(apikey)s&page=%%s" % (config)
config['urls']['movie.info'] = "https://api.themoviedb.org/3/movie/%%s?api_key=%(apikey)s" % (config)
config['urls']['people.search'] = "https://api.themoviedb.org/3/search/person?query=%%s&api_key=%(apikey)s&page=%%s" % (config)
config['urls']['collection.info'] = "https://api.themoviedb.org/3/collection/%%s&api_key=%(apikey)s" % (config)
config['urls']['movie.alternativetitles'] = "https://api.themoviedb.org/3/movie/%%s/alternative_titles?api_key=%(apikey)s" % (config)
config['urls']['movie.casts'] = "https://api.themoviedb.org/3/movie/%%s/casts?api_key=%(apikey)s" % (config)
config['urls']['movie.images'] = "https://api.themoviedb.org/3/movie/%%s/images?api_key=%(apikey)s" % (config)
config['urls']['movie.keywords'] = "https://api.themoviedb.org/3/movie/%%s/keywords?api_key=%(apikey)s" % (config)
config['urls']['movie.releases'] = "https://api.themoviedb.org/3/movie/%%s/releases?api_key=%(apikey)s" % (config)
config['urls']['movie.trailers'] = "https://api.themoviedb.org/3/movie/%%s/trailers?api_key=%(apikey)s" % (config)
config['urls']['movie.translations'] = "https://api.themoviedb.org/3/movie/%%s/translations?api_key=%(apikey)s" % (config)
config['urls']['person.info'] = "https://api.themoviedb.org/3/person/%%s?api_key=%(apikey)s&append_to_response=images,credits" % (config)
config['urls']['latestmovie'] = "https://api.themoviedb.org/3/latest/movie?api_key=%(apikey)s" % (config)
config['urls']['config'] = "https://api.themoviedb.org/3/configuration?api_key=%(apikey)s" % (config)
config['urls']['request.token'] = "https://api.themoviedb.org/3/authentication/token/new?api_key=%(apikey)s" % (config)
config['urls']['session.id'] = "https://api.themoviedb.org/3/authentication/session/new?api_key=%(apikey)s&request_token=%%s" % (config)
config['urls']['movie.add.rating'] = "https://api.themoviedb.org/3/movie/%%s/rating?session_id=%%s&api_key=%(apikey)s" % (config)
config['api'] = {}
config['api']['backdrop.sizes'] = ""
config['api']['base.url'] = ""
config['api']['poster.sizes'] = ""
config['api']['profile.sizes'] = ""
config['api']['session.id'] = ""
class Core(object):
def getJSON(self, url, language=None):
language = language or config['language']
page = requests.get(url, params={'language': language}).content
try:
return simplejson.loads(page)
except:
return simplejson.loads(page.decode('utf-8'))
def escape(self,text):
if len(text) > 0:
return requests.utils.quote(text)
return False
def update_configuration(self):
c = self.getJSON(config['urls']['config'])
config['api']['backdrop.sizes'] = c['images']['backdrop_sizes']
config['api']['base.url'] = c['images']['base_url']
config['api']['poster.sizes'] = c['images']['poster_sizes']
config['api']['profile.sizes'] = c['images']['profile_sizes']
return "ok"
def backdrop_sizes(self,img_size):
size_list = {'s':'w300','m':'w780','l':'w1280','o':'original'}
return size_list[img_size]
def poster_sizes(self,img_size):
        size_list = {'s':'w92','m':'w185','l':'w500','o':'original'}
return size_list[img_size]
def profile_sizes(self,img_size):
        size_list = {'s':'w45','m':'w185','l':'w632','o':'original'}
return size_list[img_size]
def request_token(self):
req = self.getJSON(config['urls']['request.token'])
r = req["request_token"]
return {"url":"http://themoviedb.org/authenticate/%s" % r,"request_token":r}
def session_id(self,token):
sess = self.getJSON(config['urls']['session.id'] % token)
config['api']['session.id'] = sess["session_id"]
return sess["session_id"]
class Movies(Core):
def __init__(self, title="", limit=False, language=None):
self.limit = limit
self.update_configuration()
title = self.escape(title)
self.movies = self.getJSON(config['urls']['movie.search'] % (title,str(1)), language=language)
pages = self.movies["total_pages"]
if not self.limit:
if int(pages) > 1: #
for i in range(2,int(pages)+1): # Thanks @tBuLi
self.movies["results"].extend(self.getJSON(config['urls']['movie.search'] % (title,str(i)), language=language)["results"])
def __iter__(self):
for i in self.movies["results"]:
yield Movie(i["id"])
def get_total_results(self):
if self.limit:
return len(self.movies["results"])
return self.movies["total_results"]
def iter_results(self):
for i in self.movies["results"]:
yield i
class Movie(Core):
def __init__(self, movie_id, language=None):
self.movie_id = movie_id
self.update_configuration()
self.movies = self.getJSON(config['urls']['movie.info'] % self.movie_id, language=language)
self.casts = self.getJSON(config['urls']['movie.casts'] % self.movie_id, language=language)
self.releases = self.getJSON(config['urls']['movie.releases'] % self.movie_id, language=language)
def is_adult(self):
return self.movies['adult']
def get_mpaa_rating(self, country='US'):
for r in self.releases['countries']:
if country.lower() == r['iso_3166_1'].lower():
return r['certification']
def get_writers(self):
l = []
for r in self.casts['crew']:
if r['department'] == 'Writing':
l.append(r)
return l
def get_directors(self):
l = []
for r in self.casts['crew']:
if r['department'] == 'Directing':
l.append(r)
return l
def get_producers(self):
l = []
for r in self.casts['crew']:
if r['department'] == 'Production':
l.append(r)
return l
def get_cast(self):
return sorted(self.casts['cast'], key=lambda x: x['order'])
def get_collection_id(self):
return self.movies['belongs_to_collection']["id"]
def get_collection_name(self):
return self.movies['belongs_to_collection']["name"]
# Sizes = s->w300 m->w780 l->w1280 o->original(default)
def get_collection_backdrop(self,img_size="o"):
img_path = self.movies["belongs_to_collection"]["backdrop_path"]
        return config['api']['base.url']+self.backdrop_sizes(img_size)+img_path
# Sizes = s->w92 m->w185 l->w500 o->original(default)
def get_collection_poster(self,img_size="o"):
img_path = self.movies["belongs_to_collection"]["poster_path"]
return config['api']['base.url']+self.poster_sizes(img_size)+img_path
def get_budget(self):
return self.movies['budget']
def get_genres(self):
genres = []
for i in self.movies['genres']:
genres.append({"id":i["id"],"name":i["name"]})
return genres
def get_homepage(self):
return self.movies['homepage']
def get_imdb_id(self):
return self.movies['imdb_id']
def get_overview(self):
return self.movies['overview']
def get_production_companies(self):
        companies = []
        for i in self.movies['production_companies']:
            companies.append({"id": i["id"], "name": i["name"]})
        return companies
def get_productions_countries(self):
countries = []
for i in self.movies['production_countries']:
countries.append({"iso_3166_1":i["iso_3166_1"],"name":i["name"]})
return countries
def get_revenue(self):
return self.movies['revenue']
def get_runtime(self):
return self.movies['runtime']
def get_spoken_languages(self):
langs = []
for i in self.movies['spoken_languages']:
langs.append({"iso_639_1":i["iso_639_1"],"name":i["name"]})
return langs
def get_tagline(self):
return self.movies['tagline']
def get_vote_average(self):
return self.movies['vote_average']
def get_vote_count(self):
return self.movies['vote_count']
def get_id(self):
return self.movie_id
# Sizes = s->w300 m->w780 l->w1280 o->original(default)
def get_backdrop(self,img_size="o"):
img_path = self.movies["backdrop_path"]
return config['api']['base.url']+self.backdrop_sizes(img_size)+img_path
def get_original_title(self):
return self.movies["original_title"]
def get_popularity(self):
return self.movies["popularity"]
def get_release_date(self):
return self.movies["release_date"]
def get_title(self):
return self.movies["title"]
# Sizes = s->w92 m->w185 l->w500 o->original(default)
def get_poster(self,img_size="o"):
img_path = self.movies["poster_path"]
return config['api']['base.url']+self.poster_sizes(img_size)+img_path
def get_trailers(self, language=None):
return self.getJSON(config['urls']['movie.trailers'] % self.movie_id, language=language)
def add_rating(self,value):
if isinstance(value,float) or isinstance(value,int):
if config["api"]["session.id"] == "":
return "PROBLEM_AUTH"
sess_id = config["api"]["session.id"]
data = {"value":float(value)}
req = requests.post(config['urls']['movie.add.rating'] % (self.movie_id,sess_id),data=data)
res = simplejson.loads(bytes(req.content).decode())
if res['status_message'] == "Success":
return True
else:
return False
return "ERROR"
class People(Core):
def __init__(self, people_name, limit=False, language=None):
self.limit = limit
self.update_configuration()
people_name = self.escape(people_name)
self.people = self.getJSON(config['urls']['people.search'] % (people_name,str(1)), language=language)
pages = self.people["total_pages"]
if not self.limit:
if int(pages) > 1:
for i in range(2,int(pages)+1):
self.people["results"].extend(self.getJSON(config['urls']['people.search'] % (people_name,str(i)), language=language)["results"])
def __iter__(self):
for i in self.people["results"]:
yield Person(i["id"])
def total_results(self):
return self.people["total_results"]
def get_total_results(self):
if self.limit:
            return len(self.people["results"])
        return self.people["total_results"]
def iter_results(self):
for i in self.people["results"]:
yield i
class Person(Core):
def __init__(self, person_id, language=None):
self.person_id = person_id
self.update_configuration()
self.person = self.getJSON(config['urls']['person.info'] % self.person_id, language=language)
def get_id(self):
return self.person_id
def is_adult(self):
return self.person["adult"]
def get_name(self):
return self.person["name"]
# Sizes = s->w45 m->w185 l->w632 o->original(default)
def get_profile_image(self,img_size="o"):
img_path = self.person["profile_path"]
return config['api']['base.url']+self.profile_sizes(img_size)+img_path
def get_biography(self):
return self.person['biography']
def get_birthday(self):
return self.person['birthday']
def get_deathday(self):
return self.person['deathday']
def get_place_of_birth(self):
return self.person['place_of_birth']
def get_homepage(self):
return self.person['homepage']
def get_also_known_as(self):
return self.person['also_known_as']
def get_image_aspect_ratio(self,image_index=0):
return self.person["images"]['profiles'][image_index]['aspect_ratio']
def get_image_height(self,image_index=0):
return self.person["images"]['profiles'][image_index]['height']
def get_image_width(self,image_index=0):
return self.person["images"]['profiles'][image_index]['width']
def get_image_iso_639_1(self,image_index=0):
return self.person["images"]['profiles'][image_index]['iso_639_1']
#Sizes = s->w92 m->w185 l->w500 o->original(default)
def get_image(self,img_size="o",image_index=0):
img_path = self.person["images"]['profiles'][image_index]['file_path']
return config['api']['base.url']+self.poster_sizes(img_size)+img_path
def cast(self):
for c in self.person["credits"]["cast"]:
yield Cast(c)
def crew(self):
for c in self.person["credits"]["crew"]:
yield Crew(c)
class Cast:
def __init__(self,c):
self.cast = c
def get_id(self):
return self.cast["id"]
def get_character(self):
return self.cast["character"]
def get_original_title(self):
return self.cast["original_title"]
def get_title(self):
return self.cast["title"]
def get_release_date(self):
return self.cast["release_date"]
# Sizes = s->w92 m->w185 l->w500 o->original(default)
def get_poster(self,img_size="o",person_index=0):
img_path = self.cast["poster_path"]
return config['api']['base.url']+Core().poster_sizes(img_size)+img_path
class Crew:
def __init__(self,c):
self.crew = c
def get_id(self):
return self.crew["id"]
def get_department(self):
return self.crew["department"]
def get_job(self):
return self.crew["job"]
def get_original_title(self):
return self.crew["original_title"]
def get_title(self):
return self.crew["title"]
def get_release_date(self):
return self.crew["release_date"]
# Sizes = s->w92 m->w185 l->w500 o->original(default)
def get_poster(self,img_size="o"):
img_path = self.crew["poster_path"]
return config['api']['base.url']+Core().poster_sizes(img_size)+img_path
| mit | -7,890,947,823,603,571,000 | 35.026961 | 149 | 0.598476 | false |
fossfreedom/coverart-browser | coverart_controllers.py | 1 | 28816 | # -*- Mode: python; coding: utf-8; tab-width: 4; indent-tabs-mode: nil; -*-
#
# Copyright (C) 2012 - fossfreedom
# Copyright (C) 2012 - Agustin Carrasco
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
from datetime import date
from collections import OrderedDict
from collections import namedtuple
from gi.repository import GObject
from gi.repository import Gdk
from gi.repository import RB
from gi.repository import Gio
from gi.repository import GLib
from coverart_browser_prefs import CoverLocale
from coverart_browser_prefs import GSetting
from coverart_utils import create_pixbuf_from_file_at_size
from coverart_utils import GenreConfiguredSpriteSheet
from coverart_utils import ConfiguredSpriteSheet
from coverart_utils import get_stock_size
from coverart_utils import CaseInsensitiveDict
from coverart_utils import Theme
import rb
MenuNodeT = namedtuple('MenuNode', 'label menutype typevalue')
def MenuNode(label, menutype=None, typevalue=None):
return MenuNodeT(label, menutype, typevalue)
class OptionsController(GObject.Object):
# properties
options = GObject.property(type=object, default=None)
current_key = GObject.property(type=str, default=None)
update_image = GObject.property(type=bool, default=False)
enabled = GObject.property(type=bool, default=True)
def __init__(self):
super(OptionsController, self).__init__()
# connect the variations on the current key to the controllers action
self.connect('notify::current-key', self._do_action)
def get_current_key_index(self):
return self.options.index(self.current_key)
def option_selected(self, key):
if key != self.current_key:
# update the current value
self.current_key = key
def _do_action(self, *args):
self.do_action()
def do_action(self):
pass
def get_current_image(self):
return None
def get_current_description(self):
return self.current_key
def update_images(self, *args):
pass
def create_spritesheet(self, plugin, sheet, typestr):
'''
helper function to create a specific spritesheet
'''
if sheet:
del sheet
return ConfiguredSpriteSheet(plugin, typestr, get_stock_size())
def create_button_image(self, plugin, image, icon_name):
'''
helper function to create a button image
'''
if image:
del image
path = 'img/' + Theme(self.plugin).current + '/'
return create_pixbuf_from_file_at_size(
rb.find_plugin_file(self.plugin, path + icon_name),
*get_stock_size())
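# Illustrative sketch (not part of the plugin): a minimal controller only needs to
# publish its options and react when current_key changes, e.g.
#
#   class ExampleController(OptionsController):
#       def __init__(self):
#           super(ExampleController, self).__init__()
#           self.options = ['first', 'second']
#           self.current_key = self.options[0]
#
#       def do_action(self):
#           print(self.current_key)   # invoked on every current_key change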
class PlaylistPopupController(OptionsController):
def __init__(self, plugin, album_model):
super(PlaylistPopupController, self).__init__()
self._album_model = album_model
shell = plugin.shell
self.plugin = plugin
# get the library name and initialize the superclass with it
self._library_name = shell.props.library_source.props.name
# get the queue name
self._queue_name = shell.props.queue_source.props.name
if " (" in self._queue_name:
self._queue_name = self._queue_name[0:self._queue_name.find(" (")]
self._spritesheet = None
self._update_options(shell)
# get the playlist model so we can monitor changes
playlist_model = shell.props.display_page_model
# connect signals to update playlists
playlist_model.connect('row-inserted', self._update_options, shell)
playlist_model.connect('row-deleted', self._update_options, shell)
playlist_model.connect('row-changed', self._update_options, shell)
def update_images(self, *args):
self._spritesheet = self.create_spritesheet(self.plugin,
self._spritesheet, 'playlist')
if args[-1]:
self.update_image = True
def _update_options(self, *args):
shell = args[-1]
self.update_images(False)
playlist_manager = shell.props.playlist_manager
still_exists = self.current_key == self._library_name or \
self.current_key == self._queue_name
# retrieve the options
values = OrderedDict()
# library and play queue sources
values[self._library_name] = None
values[self._queue_name] = shell.props.queue_source
# playlists
playlists_entries = playlist_manager.get_playlists()
for playlist in playlists_entries:
if playlist.props.is_local:
name = playlist.props.name
values[name] = playlist
still_exists = still_exists or name == self.current_key
self.values = values
self.options = list(values.keys())
self.current_key = self.current_key if still_exists else \
self._library_name
def do_action(self):
playlist = self.values[self.current_key]
if not playlist:
self._album_model.remove_filter('model')
else:
self._album_model.replace_filter('model',
playlist.get_query_model())
def get_current_image(self):
playlist = self.values[self.current_key]
if self.current_key == self._library_name:
image = self._spritesheet['music']
elif self._queue_name in self.current_key:
image = self._spritesheet['queue']
elif isinstance(playlist, RB.StaticPlaylistSource):
image = self._spritesheet['playlist']
else:
image = self._spritesheet['smart']
return image
class GenrePopupController(OptionsController):
# properties
new_genre_icon = GObject.property(type=bool, default=False)
def __init__(self, plugin, album_model):
super(GenrePopupController, self).__init__()
cl = CoverLocale()
cl.switch_locale(cl.Locale.LOCALE_DOMAIN)
self._album_model = album_model
shell = plugin.shell
self.plugin = plugin
# create a new property model for the genres
genres_model = RB.RhythmDBPropertyModel.new(shell.props.db,
RB.RhythmDBPropType.GENRE)
query = shell.props.library_source.props.base_query_model
genres_model.props.query_model = query
# initial genre
self._initial_genre = _('All Genres')
self._spritesheet = None
self._default_image = None
self._unrecognised_image = None
self._connect_properties()
self._connect_signals(query, genres_model)
# generate initial popup
self._update_options(genres_model)
def update_images(self, *args):
if self._spritesheet:
del self._spritesheet
self._spritesheet = GenreConfiguredSpriteSheet(self.plugin,
'genre', get_stock_size())
self._default_image = self.create_button_image(self.plugin,
self._default_image, 'default_genre.png')
self._unrecognised_image = self.create_button_image(self.plugin,
self._unrecognised_image, 'unrecognised_genre.png')
if args[-1]:
self.update_image = True
def _connect_signals(self, query, genres_model):
# connect signals to update genres
self.connect('notify::new-genre-icon', self._update_options, genres_model)
query.connect('row-inserted', self._update_options, genres_model)
query.connect('row-deleted', self._update_options, genres_model)
query.connect('row-changed', self._update_options, genres_model)
def _connect_properties(self):
gs = GSetting()
setting = gs.get_setting(gs.Path.PLUGIN)
setting.bind(gs.PluginKey.NEW_GENRE_ICON, self, 'new_genre_icon',
Gio.SettingsBindFlags.GET)
def _update_options(self, *args):
genres_model = args[-1]
self.update_images(False)
still_exists = False
# retrieve the options
options = []
row_num = 0
for row in genres_model:
if row_num == 0:
cl = CoverLocale()
cl.switch_locale(cl.Locale.LOCALE_DOMAIN)
genre = _('All Genres')
row_num = row_num + 1
else:
genre = row[0]
options.append(genre)
still_exists = still_exists or genre == self.current_key
self.options = options
self.current_key = self.current_key if still_exists else \
self._initial_genre
def do_action(self):
'''
called when genre popup menu item chosen
return None if the first entry in popup returned
'''
if self.current_key == self._initial_genre:
self._album_model.remove_filter('genre')
else:
self._album_model.replace_filter('genre', self.current_key)
def get_current_image(self):
test_genre = self.current_key.lower()
if test_genre == self._initial_genre.lower():
image = self._default_image
else:
image = self._find_alternates(test_genre)
if image == self._unrecognised_image and \
test_genre in self._spritesheet:
image = self._spritesheet[test_genre]
return image
def _find_alternates(self, test_genre):
# the following genre checks are required
# 1. if we have user defined genres
# 2. then check locale specific system genres
# 3. then check local specific alternates
# 4. then check if we system genres
# where necessary check if any of the genres are a substring
# of test_genre - check in reverse order so that we
# test largest strings first (prevents spurious matches with
# short strings)
# N.B. we use RB.search_fold since the strings can be
# in a mixture of cases, both unicode (normalized or not) and str
# and as usual python cannot mix and match these types.
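        # For example (illustrative): a genre such as "Progressive Rock" with no
        # user, locale or system alternate entry still ends up matching the stock
        # "rock" sprite, because "rock" is a substring of the folded genre name.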
test_genre = RB.search_fold(test_genre)
ret, sprite = self._match_genres(test_genre, self._spritesheet.GENRE_USER)
if ret:
return sprite
for genre in sorted(self._spritesheet.locale_names,
key=lambda b: (-len(b), b)):
if RB.search_fold(genre) in test_genre:
return self._spritesheet[self._spritesheet.locale_names[genre]]
# next check locale alternates
ret, sprite = self._match_genres(test_genre, self._spritesheet.GENRE_LOCALE)
if ret:
return sprite
ret, sprite = self._match_genres(test_genre, self._spritesheet.GENRE_SYSTEM)
if ret:
return sprite
# check if any of the default genres are a substring
# of test_genre - check in reverse order so that we
# test largest strings first (prevents spurious matches with
# short strings)
for genre in sorted(self._spritesheet.names,
key=lambda b: (-len(b), b)):
if RB.search_fold(genre) in test_genre:
return self._spritesheet[genre]
# if no matches then default to unrecognised image
return self._unrecognised_image
def _match_genres(self, test_genre, genre_type):
case_search = CaseInsensitiveDict(
dict((k.name, v) for k, v in self._spritesheet.genre_alternate.items()
if k.genre_type == genre_type))
if test_genre in case_search:
return (True, self._spritesheet[case_search[test_genre]])
else:
return (False, None)
def get_current_description(self):
cl = CoverLocale()
cl.switch_locale(cl.Locale.LOCALE_DOMAIN)
if self.current_key == self._initial_genre:
return _('All Genres')
else:
return self.current_key
class SortPopupController(OptionsController):
def __init__(self, plugin, viewmgr):
super(SortPopupController, self).__init__()
self._viewmgr = viewmgr
self.plugin = plugin
# sorts dictionary
cl = CoverLocale()
cl.switch_locale(cl.Locale.LOCALE_DOMAIN)
self.values = OrderedDict([(_('Sort by album name'), 'name'),
(_('Sort by album artist'), 'artist'),
(_('Sort by year'), 'year'),
(_('Sort by rating'), 'rating')])
self.options = list(self.values.keys())
# get the current sort key and initialise the superclass
gs = GSetting()
source_settings = gs.get_setting(gs.Path.PLUGIN)
value = source_settings[gs.PluginKey.SORT_BY]
self._spritesheet = None
self.update_images(False)
self.current_key = list(self.values.keys())[
list(self.values.values()).index(value)]
def update_images(self, *args):
self._spritesheet = self.create_spritesheet(self.plugin,
self._spritesheet, 'sort')
if args[-1]:
self.update_image = True
def do_action(self):
sort = self.values[self.current_key]
gs = GSetting()
settings = gs.get_setting(gs.Path.PLUGIN)
settings[gs.PluginKey.SORT_BY] = sort
self._viewmgr.current_view.get_default_manager().emit('sort', "album")
def get_current_image(self):
sort = self.values[self.current_key]
return self._spritesheet[sort]
class ArtistSortPopupController(OptionsController):
def __init__(self, plugin, viewmgr):
super(ArtistSortPopupController, self).__init__()
self._viewmgr = viewmgr
self.plugin = plugin
# sorts dictionary
cl = CoverLocale()
cl.switch_locale(cl.Locale.LOCALE_DOMAIN)
self.values = OrderedDict([(_('Sort by album name'), 'name_artist'),
(_('Sort by year'), 'year_artist'),
(_('Sort by rating'), 'rating_artist')])
self.options = list(self.values.keys())
# get the current sort key and initialise the superclass
gs = GSetting()
source_settings = gs.get_setting(gs.Path.PLUGIN)
value = source_settings[gs.PluginKey.SORT_BY_ARTIST]
if value not in list(self.values.values()):
print("here")
value = 'name_artist'
source_settings[gs.PluginKey.SORT_BY_ARTIST] = value
self._spritesheet = None
self.update_images(False)
self.current_key = list(self.values.keys())[
list(self.values.values()).index(value)]
print(self.current_key)
def update_images(self, *args):
self._spritesheet = self.create_spritesheet(self.plugin,
self._spritesheet, 'sort_artist')
if args[-1]:
self.update_image = True
def do_action(self):
sort = self.values[self.current_key]
gs = GSetting()
settings = gs.get_setting(gs.Path.PLUGIN)
settings[gs.PluginKey.SORT_BY_ARTIST] = sort
self._viewmgr.current_view.get_default_manager().emit('sort', "artist")
def get_current_image(self):
sort = self.values[self.current_key]
return self._spritesheet[sort]
class PropertiesMenuController(OptionsController):
favourites = GObject.property(type=bool, default=False)
follow = GObject.property(type=bool, default=False)
def __init__(self, plugin, source):
super(PropertiesMenuController, self).__init__()
self._source = source
self.plugin = plugin
self._connect_properties()
# sorts dictionary
cl = CoverLocale()
cl.switch_locale(cl.Locale.LOCALE_DOMAIN)
# options
self.values = OrderedDict()
self.values[MenuNode(_('Download all covers'))] = 'download'
self.values[MenuNode(_('Play random album'))] = 'random'
self.values[MenuNode(_('Follow playing song'), 'check',
(True if self.follow else False))] = 'follow'
self.values[MenuNode('separator1', 'separator')] = ''
self.values[MenuNode(_('Use favourites only'), 'check',
(True if self.favourites else False))] = 'favourite'
self.values[MenuNode('separator2', 'separator')] = ''
self.values[MenuNode(_('Browser Preferences'))] = 'browser prefs'
self.values[MenuNode(_('Search Preferences'))] = 'search prefs'
self.options = list(self.values.keys())
self.update_images(False)
if self.favourites:
self._source.propertiesbutton_callback('favourite')
if self.follow:
self._source.propertiesbutton_callback('follow')
self.current_key = None
def _connect_properties(self):
gs = GSetting()
setting = gs.get_setting(gs.Path.PLUGIN)
setting.bind(
gs.PluginKey.USE_FAVOURITES,
self,
'favourites',
Gio.SettingsBindFlags.DEFAULT)
setting.bind(
gs.PluginKey.FOLLOWING,
self,
'follow',
Gio.SettingsBindFlags.DEFAULT)
def _change_key(self, dict, old, new):
for i in range(len(dict)):
k, v = dict.popitem(False)
dict[new if old == k else k] = v
def update_images(self, *args):
self._image = self.create_button_image(self.plugin,
None, 'properties.png')
if args[-1]:
self.update_image = True
def do_action(self):
if self.current_key:
key = [node for node in self.values if node.label == self.current_key]
if self.current_key == _('Use favourites only'):
self.favourites = not self.favourites
if self.current_key == _('Follow playing song'):
self.follow = not self.follow
self._source.propertiesbutton_callback(self.values[key[0]])
self.current_key = None
def get_current_image(self):
return self._image
def get_current_description(self):
return _('Properties')
class DecadePopupController(OptionsController):
def __init__(self, plugin, album_model):
super(DecadePopupController, self).__init__()
self._album_model = album_model
self.plugin = plugin
self._spritesheet = None
# decade options
cl = CoverLocale()
cl.switch_locale(cl.Locale.LOCALE_DOMAIN)
self.values = OrderedDict()
self.values[_('All Decades')] = [-1, 'All Decades']
# '20s' as in the decade 2010
self.values[_('20s')] = [2020, '20s']
#'10s' as in the decade 2010
self.values[_('10s')] = [2010, '10s']
#'00s' as in the decade 2000
self.values[_('00s')] = [2000, '00s']
#'90s' as in the decade 1990
self.values[_('90s')] = [1990, '90s']
#'80s' as in the decade 1980
self.values[_('80s')] = [1980, '80s']
#'70s' as in the decade 1970
self.values[_('70s')] = [1970, '70s']
#'60s' as in the decade 1960
self.values[_('60s')] = [1960, '60s']
#'50s' as in the decade 1950
self.values[_('50s')] = [1950, '50s']
#'40s' as in the decade 1940
self.values[_('40s')] = [1940, '40s']
#'30s' as in the decade 1930
self.values[_('30s')] = [1930, '30s']
#'Older' as in 'older than the year 1930'
self.values[_('Older')] = [-1, 'Older']
self.options = list(self.values.keys())
# if we aren't on the 20s yet, remove it
if date.today().year < 2020:
self.options.remove(_('20s'))
# define a initial decade an set the initial key
self._initial_decade = self.options[0]
self.update_images(False)
self.current_key = self._initial_decade
def update_images(self, *args):
self._spritesheet = self.create_spritesheet(self.plugin,
self._spritesheet, 'decade')
if args[-1]:
self.update_image = True
def do_action(self):
if self.current_key == self._initial_decade:
self._album_model.remove_filter('decade')
else:
self._album_model.replace_filter('decade',
self.values[self.current_key][0])
def get_current_image(self):
decade = self.values[self.current_key][1]
return self._spritesheet[decade]
def get_current_description(self):
return self.current_key
class SortOrderToggleController(OptionsController):
toolbar_type = "album"
def __init__(self, plugin, viewmgr):
super(SortOrderToggleController, self).__init__()
self._viewmgr = viewmgr
self.plugin = plugin
# options
self.values = OrderedDict([(_('Sort in descending order'), False),
(_('Sort in ascending order'), True)])
self.options = list(self.values.keys())
self._images = []
# set the current key
self.gs = GSetting()
self.settings = self.gs.get_setting(self.gs.Path.PLUGIN)
self.key = self.get_key()
sort_order = self.settings[self.key]
self.current_key = list(self.values.keys())[
list(self.values.values()).index(sort_order)]
self.update_images(False)
def get_key(self):
return self.gs.PluginKey.SORT_ORDER
def update_images(self, *args):
# initialize images
if len(self._images) > 0:
del self._images[:]
self._images.append(self.create_button_image(self.plugin,
None, 'arrow_down.png'))
self._images.append(self.create_button_image(self.plugin,
None, 'arrow_up.png'))
if args[-1]:
self.update_image = True
def do_action(self):
sort_order = self.values[self.current_key]
self.settings[self.key] = sort_order
self._viewmgr.current_view.get_default_manager().emit('sort', self.toolbar_type)
def get_current_image(self):
return self._images[self.get_current_key_index()]
class ArtistSortOrderToggleController(SortOrderToggleController):
toolbar_type = "artist"
def __init__(self, plugin, model):
super(ArtistSortOrderToggleController, self).__init__(plugin, model)
def get_key(self):
return self.gs.PluginKey.SORT_ORDER_ARTIST
class AlbumSearchEntryController(OptionsController):
# properties
search_text = GObject.property(type=str, default='')
def __init__(self, album_model):
super(AlbumSearchEntryController, self).__init__()
self._album_model = album_model
self._filter_type = 'all'
# options
self.values = OrderedDict()
self.values[_('Search all fields')] = 'all'
self.values[_('Search album artists')] = 'album_artist'
self.values[_('Search track artists')] = 'artist'
self.values[_('Search composers')] = 'composers'
self.values[_('Search albums')] = 'album_name'
self.values[_('Search titles')] = 'track'
self.options = list(self.values.keys())
self.current_key = list(self.values.keys())[0]
self._typing = False
self._typing_counter = 0
self._current_search_text = ""
def do_action(self):
# remove old filter
self._album_model.remove_filter(self._filter_type, False)
# assign the new filter
self._filter_type = self.values[self.current_key]
self.do_search(self.search_text, True)
def _search_typing(self, *args):
self._typing_counter = self._typing_counter + 1
if self._typing_counter >= 4 and self._typing:
self._typing = False
self._change_filter(self._current_search_text, False)
return self._typing
def _change_filter(self, search_text, force):
# self.search_text = search_text
self._current_search_text = search_text
if search_text:
self._album_model.replace_filter(self._filter_type,
search_text)
elif not force:
self._album_model.remove_filter(self._filter_type)
def do_search(self, search_text, force=False):
'''
if self.search_text != search_text or force:
self.search_text = search_text
if search_text:
self._album_model.replace_filter(self._filter_type,
search_text)
elif not force:
self._album_model.remove_filter(self._filter_type)
'''
# self.search_text = search_text
if force:
self._typing_counter = 99
self._typing = False
self._change_filter(search_text, force)
return
if self._current_search_text != search_text:
#self.search_text = search_text
self._current_search_text = search_text
self._typing_counter = 0
if not self._typing:
self._typing = True
Gdk.threads_add_timeout(GLib.PRIORITY_DEFAULT_IDLE, 100,
self._search_typing)
class AlbumQuickSearchController(object):
def __init__(self, album_manager):
self._album_manager = album_manager
def connect_quick_search(self, quick_search):
quick_search.connect('quick-search', self._on_quick_search)
quick_search.connect('arrow-pressed', self._on_arrow_pressed)
quick_search.connect('hide', self._on_hide)
def _on_quick_search(self, quick_search, search_text, *args):
album = self._album_manager.model.find_first_visible('album_name',
search_text)
if album:
path = self._album_manager.model.get_path(album)
self._album_manager.current_view.select_and_scroll_to_path(path)
def _on_arrow_pressed(self, quick_search, key, *args):
current = self._album_manager.current_view.get_selected_objects()[0]
search_text = quick_search.get_text()
album = None
if key == Gdk.KEY_Up:
album = self._album_manager.model.find_first_visible(
'album_name', search_text, current, True)
elif key == Gdk.KEY_Down:
album = self._album_manager.model.find_first_visible(
'album_name', search_text, current)
if album:
path = self._album_manager.model.get_path(album)
self._album_manager.current_view.select_and_scroll_to_path(path)
def _on_hide(self, quick_search, *args):
self._album_manager.current_view.grab_focus()
class ViewController(OptionsController):
def __init__(self, shell, viewmgr):
super(ViewController, self).__init__()
self._viewmgr = viewmgr
from coverart_browser_source import Views
views = Views(shell)
self.values = OrderedDict()
for view_name in views.get_view_names():
self.values[views.get_menu_name(view_name)] = view_name
print(view_name)
self.options = list(self.values.keys())
viewmgr.connect('new-view', self.on_notify_view_name)
def on_notify_view_name(self, *args):
for key in self.options:
if self.values[key] == self._viewmgr.view_name:
self.current_key = key
def do_action(self):
if self._viewmgr.view_name != self.values[self.current_key]:
self._viewmgr.view_name = self.values[self.current_key]
| gpl-3.0 | -712,676,144,686,856,600 | 32.941107 | 111 | 0.58877 | false |
neo900/skyline | src/horizon/listen.py | 1 | 4999 | import socket
from os import kill, getpid
from Queue import Full
from multiprocessing import Process
from struct import Struct, unpack
from msgpack import unpackb
from cPickle import loads
import logging
import settings
logger = logging.getLogger("HorizonLog")
class Listen(Process):
"""
The listener is responsible for listening on a port.
"""
def __init__(self, port, queue, parent_pid, type="pickle"):
super(Listen, self).__init__()
try:
self.ip = settings.HORIZON_IP
except AttributeError:
# Default for backwards compatibility
self.ip = socket.gethostname()
self.port = port
self.q = queue
self.daemon = True
self.parent_pid = parent_pid
self.current_pid = getpid()
self.type = type
def gen_unpickle(self, infile):
"""
Generate a pickle from a stream
"""
try:
bunch = loads(infile)
yield bunch
except EOFError:
return
def read_all(self, sock, n):
"""
Read n bytes from a stream
"""
data = ''
while n > 0:
buf = sock.recv(n)
n -= len(buf)
data += buf
return data
def check_if_parent_is_alive(self):
"""
Self explanatory
"""
try:
kill(self.current_pid, 0)
kill(self.parent_pid, 0)
except:
exit(0)
def listen_pickle(self):
"""
Listen for pickles over tcp
"""
while 1:
try:
# Set up the TCP listening socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((self.ip, self.port))
s.setblocking(1)
s.listen(5)
logger.info('listening over tcp for pickles on %s' % self.port)
(conn, address) = s.accept()
logger.info('connection from %s:%s' % (address[0], self.port))
chunk = []
while 1:
self.check_if_parent_is_alive()
try:
length = Struct('!I').unpack(self.read_all(conn, 4))
body = self.read_all(conn, length[0])
# Iterate and chunk each individual datapoint
for bunch in self.gen_unpickle(body):
for metric in bunch:
chunk.append(metric)
# Queue the chunk and empty the variable
if len(chunk) > settings.CHUNK_SIZE:
try:
self.q.put(list(chunk), block=False)
chunk[:] = []
# Drop chunk if queue is full
except Full:
logger.info('queue is full, dropping datapoints')
chunk[:] = []
except Exception as e:
logger.info(e)
logger.info('incoming connection dropped, attempting to reconnect')
break
except Exception as e:
logger.info('can\'t connect to socket: ' + str(e))
break
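    # Illustrative client sketch (host, port and metric name are placeholder
    # assumptions; the wire format mirrors the reads above: a 4-byte big-endian
    # length prefix followed by a pickled list of datapoints):
    #
    #   import cPickle, socket, struct, time
    #   datapoints = [('horizon.test.metric', (time.time(), 1.0))]
    #   payload = cPickle.dumps(datapoints)
    #   sock = socket.create_connection(('localhost', 2024))
    #   sock.sendall(struct.pack('!I', len(payload)) + payload)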
def listen_udp(self):
"""
Listen over udp for MessagePack strings
"""
while 1:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind((self.ip, self.port))
logger.info('listening over udp for messagepack on %s' % self.port)
chunk = []
while 1:
self.check_if_parent_is_alive()
data, addr = s.recvfrom(1024)
metric = unpackb(data)
chunk.append(metric)
# Queue the chunk and empty the variable
if len(chunk) > settings.CHUNK_SIZE:
try:
self.q.put(list(chunk), block=False)
chunk[:] = []
# Drop chunk if queue is full
except Full:
logger.info('queue is full, dropping datapoints')
chunk[:] = []
except Exception as e:
logger.info('can\'t connect to socket: ' + str(e))
break
def run(self):
"""
Called when process intializes.
"""
logger.info('started listener')
if self.type == 'pickle':
self.listen_pickle()
elif self.type == 'udp':
self.listen_udp()
else:
logging.error('unknown listener format')
| mit | -8,091,358,596,528,935,000 | 31.044872 | 91 | 0.455091 | false |
bavla/Graph | PsCoresTQ.py | 1 | 2315 | gdir = 'c:/users/batagelj/work/python/graph/graph'
# wdir = 'c:/users/batagelj/work/python/graph/JSON/test'
wdir = 'c:/users/batagelj/work/python/graph/JSON/SN5'
import sys, os, datetime, json
sys.path = [gdir]+sys.path; os.chdir(wdir)
import GraphNew as Graph
import TQ
# fJSON = 'ConnectivityWeighted.json'
# fJSON = "violenceE.json"
# fJSON = 'stem.json'
# fJSON = 'CcCtest.json'
# fJSON = 'Terror news 50.json'
fJSON = 'CcCSN5.json'
# S = Graph.Graph.loadNetJSON(fJSON); G = S.pairs2edges()
G = Graph.Graph.loadNetJSON(fJSON)
# G.saveNetJSON(file="Terror50E",indent=1)
# fJSON = 'ConnectivityTest.json'
# fJSON = 'ExampleB.json'
# fJSON = 'PathfinderTest.json'
# G = Graph.Graph.loadNetJSON(fJSON)
G.delLoops()
print("Temporal Ps cores in: ",fJSON)
t1 = datetime.datetime.now()
print("started: ",t1.ctime(),"\n")
Tmin,Tmax = G._graph['time']
D = { u: G.TQnetSum(u) for u in G._nodes }
# print("Sum =",D,"\n")
Core = { u: [d for d in D[u] if d[2]==0] for u in G.nodes() }
# core number = 0
D = { u: [d for d in D[u] if d[2]>0] for u in G.nodes() }
D = { u: d for u,d in D.items() if d!=[] }
Dmin = { u: min([e[2] for e in d]) for u,d in D.items() }
step = 0
while len(D)>0:
step += 1
dmin,u = min( (v,k) for k,v in Dmin.items() )
if step % 100 == 1:
print("{0:3d}. dmin={1:10.4f} node={2:4d}".format(step,dmin,u))
cCore = TQ.TQ.complement(Core[u],Tmin,Tmax+1)
core = TQ.TQ.extract(cCore,[d for d in D[u] if d[2] == dmin])
if core!=[]:
Core[u] = TQ.TQ.sum(Core[u],core)
D[u] = TQ.TQ.cutGE(TQ.TQ.sum(D[u],TQ.TQ.minus(core)),dmin)
for link in G.star(u):
v = G.twin(u,link)
if not(v in D): continue
chLink = TQ.TQ.minus(TQ.TQ.extract(core,G.getLink(link,'tq')))
if chLink==[]: continue
diff = TQ.TQ.cutGE(TQ.TQ.sum(D[v],chLink),0)
D[v] = [ (sd,fd,max(vd,dmin)) for sd,fd,vd in diff ]
if len(D[v])==0: del D[v]; del Dmin[v]
else: Dmin[v] = min([e[2] for e in D[v]])
if len(D[u])==0: del D[u]; del Dmin[u]
else: Dmin[u] = min([e[2] for e in D[u]])
print("{0:3d}. dmin={1:10.4f} node={2:4d}".format(step,dmin,u))
# print("\n-----\nCore =",Core)
t2 = datetime.datetime.now()
print("\nfinished: ",t2.ctime(),"\ntime used: ", t2-t1)
| gpl-3.0 | 3,479,323,956,564,014,600 | 36.583333 | 71 | 0.577538 | false |
HarmonyEnterpriseSolutions/harmony-platform | src/gnue/common/datasources/drivers/ldap/Connection.py | 1 | 5525 | # GNU Enterprise Common Library - Generic DBSIG2 database driver - Connection
#
# Copyright 2000-2007 Free Software Foundation
#
# This file is part of GNU Enterprise.
#
# GNU Enterprise is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 2, or (at your option) any later version.
#
# GNU Enterprise is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with program; see the file COPYING. If not,
# write to the Free Software Foundation, Inc., 59 Temple Place
# - Suite 330, Boston, MA 02111-1307, USA.
#
# $Id: Connection.py,v 1.2 2008/11/04 20:14:03 oleg Exp $
"""
Generic Connection class for the LDAP database driver plugin.
"""
from src.gnue.common.datasources.drivers.ldap import ResultSet
__all__ = ['Connection']
import ldap
from gnue.common.datasources import Exceptions
from gnue.common.datasources.drivers import Base
# =============================================================================
# Connection class
# =============================================================================
class Connection (Base.Connection):
"""
"""
_resultSetClass_ = ResultSet
# ---------------------------------------------------------------------------
# Constructor
# ---------------------------------------------------------------------------
def __init__ (self, connections, name, parameters):
Base.Connection.__init__ (self, connections, name, parameters)
# ---------------------------------------------------------------------------
# Implementations of virtual methods
# ---------------------------------------------------------------------------
def _getLoginFields_ (self):
return [(u_('User Name'), '_username', 'string', None, None, []),
(u_('Password'), '_password', 'password', None, None, [])]
# ---------------------------------------------------------------------------
def _connect_ (self, connectData):
print "_connect_", connectData
try:
self._ldapObject = ldap.open(connectData['host'], int(connectData.get('port', 389)))
self._ldapObject.simple_bind_s(connectData['_username'], connectData['_password'])
except ldap.LDAPError, e:
raise self.decorateError(
Exceptions.LoginError("%s: %s" % tuple(errors.getException()[1:3]))
)
# ---------------------------------------------------------------------------
def _insert_ (self, table, newfields):
raise NotImplementedError
# ---------------------------------------------------------------------------
def _update_ (self, table, oldfields, newfields):
raise NotImplementedError
# ---------------------------------------------------------------------------
def _delete_ (self, table, oldfields):
raise NotImplementedError
# ---------------------------------------------------------------------------
def _requery_ (self, table, oldfields, fields, parameters):
raise NotImplementedError
# ---------------------------------------------------------------------------
def _commit_ (self):
pass
# ---------------------------------------------------------------------------
def _rollback_ (self):
pass
# ---------------------------------------------------------------------------
def _close_ (self):
pass
# ---------------------------------------------------------------------------
# Virtual methods to be implemented by descendants
# ---------------------------------------------------------------------------
def _getConnectParams_ (self, connectData):
"""
Return a tuple with a list and a dictionary, being the parameters and
keyword arguments to be passed to the connection function of the DBSIG2
driver.
This method must be overwritten by all descendants.
"""
return ([], {})
# ---------------------------------------------------------------------------
def _createTimestamp_ (self, year, month, day, hour, minute, secs, msec = 0):
"""
Create a timestamp object for the given point in time.
This function doesn't have to be overwritten unless the handling of time
values is weird.
@param year: Year number
@param month: Month number (1 - 12)
@param day: Day of the month (1 - 31)
@param hour: Hour (0 - 23)
@param minute: Minute (0 - 59)
@param secs: Whole seconds (integer)
@param msec: Microseconds (integer)
returns: a timestamp object created by the driver's Timestamp constructor
"""
raise NotImplementedError
# ---------------------------------------------------------------------------
# Create an apropriate time object for the given values
# ---------------------------------------------------------------------------
def _createTime_ (self, hour, minute, second, msec = 0):
"""
Create a time object for the given point in time.
This function doesn't have to be overwritten unless the handling of time
values is weird.
@param hour: Hour (0 - 23)
@param minute: Minute (0 - 59)
@param second: Whole seconds (integer)
@param msec: Microseconds (integer)
returns: a time object created by the driver's Time constructor
"""
raise NotImplementedError
def decorateError(self, error):
"""
This function is used to make database-related errors user friendly.
"""
return error
| gpl-2.0 | 5,706,516,712,830,585,000 | 31.122093 | 87 | 0.520543 | false |
doraemonext/wechat-platform | wechat_platform/system/response/models.py | 1 | 7805 |
# -*- coding: utf-8 -*-
import logging
from time import time
from django.db import models
from wechat_sdk.messages import EventMessage
from system.official_account.models import OfficialAccount
logger_response = logging.getLogger(__name__)
class ResponseManager(models.Manager):
"""
Manager for the WeChat server response message record table
"""
def get(self, official_account, msgid):
return super(ResponseManager, self).get_queryset().filter(
official_account=official_account
).filter(
msgid=msgid
).exclude(
type=Response.TYPE_WAITING
)
def get_latest(self, official_account, wechat_instance):
"""
Get the latest reply for the given OpenID under the given official account
:param official_account: official account instance (OfficialAccount)
:param wechat_instance: WeChat request instance (WechatBasic)
:return: response instance (Response)
"""
message = wechat_instance.get_message()
return super(ResponseManager, self).get_queryset().filter(
official_account=official_account
).filter(
target=message.source
).exclude(
type=Response.TYPE_WAITING
).order_by(
'-time'
)[:1]
def add(self, official_account, wechat_instance, type, pattern, raw, plugin_dict):
"""
Add a new response message record
:param official_account: official account instance (OfficialAccount)
:param wechat_instance: WeChat request instance (WechatBasic)
:param type: message type
:param pattern: response pattern
:param raw: raw message content
:param plugin_dict: plugin dict in use, e.g. {'iden': 'text', 'reply_id': 54}
"""
message = wechat_instance.get_message()
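# Event pushes carry no MsgId, so synthesize one from the target OpenID plus the create time.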
if isinstance(message, EventMessage):
msgid = message.target + str(message.time)
else:
msgid = message.id
# First look for a pending (waiting) reply for this message in the database
response = super(ResponseManager, self).get_queryset().filter(
official_account=official_account
).filter(
msgid=msgid
).filter(
type=Response.TYPE_WAITING
)
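# If a 'waiting' placeholder exists for this message, update it in place; otherwise create a new record.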
if response:
response = response[0]
response.time = int(time())
response.type = type
response.pattern = pattern
response.raw = raw
response.plugin_iden = plugin_dict['iden']
response.reply_id = plugin_dict['reply_id']
response.save()
logger_response.debug('Response has been updated [Detail: %s]' % response.__dict__)
else:
response = super(ResponseManager, self).create(
official_account=official_account,
msgid=msgid,
target=message.source,
source=message.target,
time=int(time()),
type=type,
pattern=pattern,
raw=raw,
plugin_iden=plugin_dict['iden'],
reply_id=plugin_dict['reply_id'],
)
logger_response.debug('New response created [Detail: %s]' % response.__dict__)
return response
def is_waiting(self, official_account, wechat_instance):
"""
Check whether the reply to this message is still pending (waiting)
:param official_account: official account instance (OfficialAccount)
:param wechat_instance: WeChat request instance (WechatBasic)
:return: True if the reply is still waiting
"""
message = wechat_instance.get_message()
if isinstance(message, EventMessage):
msgid = message.target + str(message.time)
else:
msgid = message.id
# Look for a pending (waiting) reply for this message in the database
response = super(ResponseManager, self).get_queryset().filter(
official_account=official_account
).filter(
msgid=msgid
).filter(
type=Response.TYPE_WAITING
)
if response:
return True
else:
return False
def add_waiting(self, official_account, wechat_instance):
"""
Add a new response message record (marking the request as currently being processed)
:param official_account: official account instance (OfficialAccount)
:param wechat_instance: WeChat request instance (WechatBasic)
"""
message = wechat_instance.get_message()
if isinstance(message, EventMessage):
msgid = message.target + str(message.time)
else:
msgid = message.id
response = super(ResponseManager, self).create(
official_account=official_account,
msgid=msgid,
target=message.source,
source=message.target,
time=int(time()),
type=Response.TYPE_WAITING,
pattern=Response.PATTERN_WAITING,
raw=''
)
logger_response.debug('New response created [Detail: %s]' % response.__dict__)
return response
def end_waiting(self, official_account, wechat_instance):
"""
Finish (remove) a pending response message record
:param official_account: official account instance (OfficialAccount)
:param wechat_instance: WeChat request instance (WechatBasic)
"""
message = wechat_instance.get_message()
if isinstance(message, EventMessage):
msgid = message.target + str(message.time)
else:
msgid = message.id
# Look for a pending (waiting) reply for this message in the database
response = super(ResponseManager, self).get_queryset().filter(
official_account=official_account
).filter(
msgid=msgid
).filter(
type=Response.TYPE_WAITING
)
if response:
response[0].delete()
class Response(models.Model):
"""
WeChat server response message record table
"""
TYPE_TEXT = 'text'
TYPE_IMAGE = 'image'
TYPE_VIDEO = 'video'
TYPE_VOICE = 'voice'
TYPE_NEWS = 'news'
TYPE_MUSIC = 'music'
TYPE_WAITING = 'waiting'
TYPE = (
(TYPE_TEXT, u'文本消息'),
(TYPE_IMAGE, u'图片消息'),
(TYPE_VIDEO, u'视频消息'),
(TYPE_VOICE, u'语音消息'),
(TYPE_NEWS, u'图文消息'),
(TYPE_MUSIC, u'音乐消息'),
(TYPE_WAITING, u'执行中消息'),
)
PATTERN_NORMAL = 0
PATTERN_SERVICE = 1
PATTERN_SIMULATION = 2
PATTERN_WAITING = 3
PATTERN = (
(PATTERN_NORMAL, u'正常XML返回模式'),
(PATTERN_SERVICE, u'多客服返回模式'),
(PATTERN_SIMULATION, u'模拟登陆返回模式'),
(PATTERN_WAITING, u'执行中消息'),
)
official_account = models.ForeignKey(OfficialAccount, verbose_name=u'所属公众号')
msgid = models.CharField(u'MsgID或FromUserName+CreateTime', max_length=50)
target = models.CharField(u'目标用户OpenID', max_length=50)
source = models.CharField(u'来源用户OpenID', max_length=50)
time = models.IntegerField(u'信息发送时间')
type = models.CharField(u'信息类型', choices=TYPE, max_length=15)
pattern = models.IntegerField(u'响应方式', choices=PATTERN)
raw = models.TextField(u'响应信息原始内容')
plugin_iden = models.CharField(u'插件标识符', max_length=50, null=True, blank=True)
reply_id = models.IntegerField(u'插件回复ID', null=True, blank=True)
objects = models.Manager()
manager = ResponseManager()
class Meta:
verbose_name = u'微信服务器响应信息'
verbose_name_plural = u'微信服务器响应信息'
db_table = 'response'
def __unicode__(self):
return self.raw
| bsd-2-clause | 8,729,925,608,646,641,000 | 30.864865 | 95 | 0.579952 | false |
blab/antibody-response-pulse | bcell-array/code/Virus_Bcell_IgM_IgG_Landscape.py | 1 | 11385 |
# coding: utf-8
# # Antibody Response Pulse
# https://github.com/blab/antibody-response-pulse
#
# ### B-cells evolution --- cross-reactive antibody response after influenza virus infection or vaccination
# ### Adaptive immune response for repeated infection
# In[1]:
'''
author: Alvason Zhenhua Li
date: 04/09/2015
'''
get_ipython().magic(u'matplotlib inline')
import numpy as np
import matplotlib.pyplot as plt
import os
import alva_machinery_event_OAS_new as alva
AlvaFontSize = 23
AlvaFigSize = (15, 5)
numberingFig = 0
# equation plotting
dir_path = '/Users/al/Desktop/GitHub/antibody-response-pulse/bcell-array/figure'
file_name = 'Virus-Bcell-IgM-IgG'
figure_name = '-equation'
file_suffix = '.png'
save_figure = os.path.join(dir_path, file_name + figure_name + file_suffix)
numberingFig = numberingFig + 1
plt.figure(numberingFig, figsize=(12, 5))
plt.axis('off')
plt.title(r'$ Virus-Bcell-IgM-IgG \ equations \ (antibody-response \ for \ repeated-infection) $'
, fontsize = AlvaFontSize)
plt.text(0, 7.0/9, r'$ \frac{\partial V_n(t)}{\partial t} = +\mu_{v} V_{n}(t)(1 - \frac{V_n(t)}{V_{max}}) - \phi_{m} M_{n}(t) V_{n}(t) - \phi_{g} G_{n}(t) V_{n}(t) $'
, fontsize = 1.2*AlvaFontSize)
plt.text(0, 5.0/9, r'$ \frac{\partial B_n(t)}{\partial t} = +\mu_{b}V_{n}(t)(1 - \frac{V_n(t)}{V_{max}}) + (\beta_{m} + \beta_{g}) V_{n}(t) B_{n}(t) - \mu_{b} B_{n}(t) + m_b V_{n}(t)\frac{B_{i-1}(t) - 2B_i(t) + B_{i+1}(t)}{(\Delta i)^2} $'
, fontsize = 1.2*AlvaFontSize)
plt.text(0, 3.0/9,r'$ \frac{\partial M_n(t)}{\partial t} = +\xi_{m} B_{n}(t) - \phi_{m} M_{n}(t) V_{n}(t) - \mu_{m} M_{n}(t) $'
, fontsize = 1.2*AlvaFontSize)
plt.text(0, 1.0/9,r'$ \frac{\partial G_n(t)}{\partial t} = +\xi_{g} B_{n}(t) - \phi_{g} G_{n}(t) V_{n}(t) - \mu_{g} G_{n}(t) + m_a V_{n}(t)\frac{G_{i-1}(t) - 2G_i(t) + G_{i+1}(t)}{(\Delta i)^2} $'
, fontsize = 1.2*AlvaFontSize)
plt.savefig(save_figure, dpi = 100)
plt.show()
# define the V-M-G partial differential equations
def dVdt_array(VBMGxt = [], *args):
# naming
V = VBMGxt[0]
B = VBMGxt[1]
M = VBMGxt[2]
G = VBMGxt[3]
x_totalPoint = VBMGxt.shape[1]
# there are n dSdt
dV_dt_array = np.zeros(x_totalPoint)
# each dSdt with the same equation form
dV_dt_array[:] = +inRateV*V[:]*(1 - V[:]/maxV) - killRateVm*M[:]*V[:] - killRateVg*G[:]*V[:]
return(dV_dt_array)
def dBdt_array(VBMGxt = [], *args):
# naming
V = VBMGxt[0]
B = VBMGxt[1]
M = VBMGxt[2]
G = VBMGxt[3]
x_totalPoint = VBMGxt.shape[1]
# there are n dSdt
dB_dt_array = np.zeros(x_totalPoint)
# each dSdt with the same equation form
Bcopy = np.copy(B)
centerX = Bcopy[:]
leftX = np.roll(Bcopy[:], 1)
rightX = np.roll(Bcopy[:], -1)
leftX[0] = centerX[0]
rightX[-1] = centerX[-1]
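# second difference over antigenic space (a discrete Laplacian with copied end points acting
# as reflecting boundaries) models B-cell mutation between neighbouring virus strains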
dB_dt_array[:] = +inRateB*V[:]*(1 - V[:]/maxV) + (actRateBm + alva.event_active + alva.event_OAS_B)*V[:]*B[:] - outRateB*B[:] + mutatRateB*V[:]*(leftX[:] - 2*centerX[:] + rightX[:])/(dx**2)
return(dB_dt_array)
def dMdt_array(VBMGxt = [], *args):
# naming
V = VBMGxt[0]
B = VBMGxt[1]
M = VBMGxt[2]
G = VBMGxt[3]
x_totalPoint = VBMGxt.shape[1]
# there are n dSdt
dM_dt_array = np.zeros(x_totalPoint)
# each dSdt with the same equation form
dM_dt_array[:] = +inRateM*B[:] - consumeRateM*M[:]*V[:] - outRateM*M[:]
return(dM_dt_array)
def dGdt_array(VBMGxt = [], *args):
# naming
V = VBMGxt[0]
B = VBMGxt[1]
M = VBMGxt[2]
G = VBMGxt[3]
x_totalPoint = VBMGxt.shape[1]
# there are n dSdt
dG_dt_array = np.zeros(x_totalPoint)
# each dSdt with the same equation form
Gcopy = np.copy(G)
centerX = Gcopy[:]
leftX = np.roll(Gcopy[:], 1)
rightX = np.roll(Gcopy[:], -1)
leftX[0] = centerX[0]
rightX[-1] = centerX[-1]
dG_dt_array[:] = +(inRateG + alva.event_OAS)*B[:] - consumeRateG*G[:]*V[:] - outRateG*G[:] + mutatRateA*(leftX[:] - 2*centerX[:] + rightX[:])/(dx**2)
return(dG_dt_array)
# In[2]:
# setting parameter
timeUnit = 'year'
if timeUnit == 'hour':
hour = float(1)
day = float(24)
elif timeUnit == 'day':
day = float(1)
hour = float(1)/24
elif timeUnit == 'year':
year = float(1)
day = float(1)/365
hour = float(1)/24/365
maxV = float(50) # max virus/micro-liter
inRateV = 0.2/hour # in-rate of virus
killRateVm = 0.0003/hour # kill-rate of virus by antibody-IgM
killRateVg = killRateVm # kill-rate of virus by antibody-IgG
inRateB = 0.06/hour # in-rate of B-cell
outRateB = inRateB/8 # out-rate of B-cell
actRateBm = killRateVm # activation rate of naive B-cell
inRateM = 0.16/hour # in-rate of antibody-IgM from naive B-cell
outRateM = inRateM/1 # out-rate of antibody-IgM from naive B-cell
consumeRateM = killRateVm # consume-rate of antibody-IgM by cleaning virus
inRateG = inRateM/10 # in-rate of antibody-IgG from memory B-cell
outRateG = outRateM/250 # out-rate of antibody-IgG from memory B-cell
consumeRateG = killRateVg # consume-rate of antibody-IgG by cleaning virus
mutatRateB = 0.00002/hour # B-cell mutation rate
mutatRateA = 0.0002/hour # mutation rate
# time boundary and griding condition
minT = float(0)
maxT = float(10*12*30*day)
totalPoint_T = int(6*10**3 + 1)
gT = np.linspace(minT, maxT, totalPoint_T)
spacingT = np.linspace(minT, maxT, num = totalPoint_T, retstep = True)
gT = spacingT[0]
dt = spacingT[1]
# space boundary and griding condition
minX = float(0)
maxX = float(9)
totalPoint_X = int(maxX - minX + 1)
gX = np.linspace(minX, maxX, totalPoint_X)
gridingX = np.linspace(minX, maxX, num = totalPoint_X, retstep = True)
gX = gridingX[0]
dx = gridingX[1]
gV_array = np.zeros([totalPoint_X, totalPoint_T])
gB_array = np.zeros([totalPoint_X, totalPoint_T])
gM_array = np.zeros([totalPoint_X, totalPoint_T])
gG_array = np.zeros([totalPoint_X, totalPoint_T])
# initial output condition
#gV_array[1, 0] = float(2)
# [pre-parameter, post-parameter, recovered-day, OAS+, OAS-, origin_virus, current_virus]
actRateBg_1st = 0.0002/hour # activation rate of memory B-cell at 1st time (pre-)
actRateBg_2nd = actRateBg_1st*10 # activation rate of memory B-cell at 2nd time (post-)
origin_virus = int(2)
current_virus = int(6)
event_parameter = np.array([[actRateBg_1st,
actRateBg_2nd,
14*day,
+5/hour,
-actRateBm - actRateBg_1st + (actRateBm + actRateBg_1st)/1.3,
origin_virus,
current_virus]])
# [viral population, starting time] ---first
infection_period = 12*30*day
viral_population = np.zeros(int(maxX + 1))
viral_population[origin_virus:current_virus + 1] = 3
infection_starting_time = np.arange(int(maxX + 1))*infection_period
event_1st = np.zeros([int(maxX + 1), 2])
event_1st[:, 0] = viral_population
event_1st[:, 1] = infection_starting_time
print ('event_1st = {:}'.format(event_1st))
# [viral population, starting time] ---2nd
viral_population = np.zeros(int(maxX + 1))
viral_population[origin_virus:current_virus + 1] = 0
infection_starting_time = np.arange(int(maxX + 1))*0
event_2nd = np.zeros([int(maxX + 1), 2])
event_2nd[:, 0] = viral_population
event_2nd[:, 1] = infection_starting_time
print ('event_2nd = {:}'.format(event_2nd))
event_table = np.array([event_parameter, event_1st, event_2nd])
# Runge Kutta numerical solution
pde_array = np.array([dVdt_array, dBdt_array, dMdt_array, dGdt_array])
initial_Out = np.array([gV_array, gB_array, gM_array, gG_array])
gOut_array = alva.AlvaRungeKutta4XT(pde_array, initial_Out, minX, maxX, totalPoint_X, minT, maxT, totalPoint_T, event_table)
# plotting
gV = gOut_array[0]
gB = gOut_array[1]
gM = gOut_array[2]
gG = gOut_array[3]
numberingFig = numberingFig + 1
for i in range(totalPoint_X):
figure_name = '-response-%i'%(i)
figure_suffix = '.png'
save_figure = os.path.join(dir_path, file_name + figure_name + file_suffix)
plt.figure(numberingFig, figsize = AlvaFigSize)
plt.plot(gT, gV[i], color = 'red', label = r'$ V_{%i}(t) $'%(i), linewidth = 3.0, alpha = 0.5)
plt.plot(gT, gM[i], color = 'blue', label = r'$ IgM_{%i}(t) $'%(i), linewidth = 3.0, alpha = 0.5)
plt.plot(gT, gG[i], color = 'green', label = r'$ IgG_{%i}(t) $'%(i), linewidth = 3.0, alpha = 0.5)
plt.plot(gT, gM[i] + gG[i], color = 'gray', linewidth = 5.0, alpha = 0.5, linestyle = 'dashed'
, label = r'$ IgM_{%i}(t) + IgG_{%i}(t) $'%(i, i))
plt.grid(True, which = 'both')
plt.title(r'$ Antibody \ from \ Virus-{%i} $'%(i), fontsize = AlvaFontSize)
plt.xlabel(r'$time \ (%s)$'%(timeUnit), fontsize = AlvaFontSize)
plt.ylabel(r'$ Neutralization \ \ titer $', fontsize = AlvaFontSize)
plt.xlim([minT, maxT])
plt.xticks(fontsize = AlvaFontSize*0.6)
plt.yticks(fontsize = AlvaFontSize*0.6)
plt.ylim([2**0, 2**12])
plt.yscale('log', basey = 2)
plt.legend(loc = (1,0), fontsize = AlvaFontSize)
plt.savefig(save_figure, dpi = 100, bbox_inches='tight')
plt.show()
# In[3]:
# Normalization stacked graph
numberingFig = numberingFig + 1
plt.figure(numberingFig, figsize = AlvaFigSize)
plt.stackplot(gT, gM + gG, alpha = 0.3)
plt.title(r'$ Stacked-graph \ of \ Antibody $', fontsize = AlvaFontSize)
plt.xlabel(r'$time \ (%s)$'%(timeUnit), fontsize = AlvaFontSize)
plt.ylabel(r'$ Neutralization \ \ titer $', fontsize = AlvaFontSize)
plt.xticks(fontsize = AlvaFontSize*0.6)
plt.yticks(fontsize = AlvaFontSize*0.6)
plt.ylim([2**0, 2**12])
plt.yscale('log', basey = 2)
plt.grid(True)
plt.show()
# In[4]:
# expected peak of the antibody response
totalColor = current_virus - origin_virus + 1
AlvaColor = [plt.get_cmap('rainbow')(float(i)/(totalColor)) for i in range(1, totalColor + 1)]
sample_time = 90*day
# plotting
figure_name = '-landscape'
figure_suffix = '.png'
save_figure = os.path.join(dir_path, file_name + figure_name + file_suffix)
numberingFig = numberingFig + 1
plt.figure(numberingFig, figsize = (12, 9))
for i in range(origin_virus, current_virus + 1):
detect_xn = current_virus + 2 - i
if detect_xn == origin_virus:
virus_label = '$ origin-virus $'
elif detect_xn == current_virus:
virus_label = '$ current-virus $'
else: virus_label = '$ {:}th-virus $'.format(detect_xn - origin_virus + 1)
detect_time = int(totalPoint_T/(maxT - minT)*(detect_xn*infection_period + sample_time))
plt.plot(gX, gM[:, detect_time] + gG[:, detect_time], marker = 'o', markersize = 20
, color = AlvaColor[detect_xn - origin_virus], label = virus_label)
plt.fill_between(gX, gM[:, detect_time] + gG[:, detect_time], facecolor = AlvaColor[detect_xn - origin_virus]
, alpha = 0.5)
plt.grid(True, which = 'both')
plt.title(r'$ Antibody \ Landscape $', fontsize = AlvaFontSize)
plt.xlabel(r'$ Virus \ space \ (Antigenic-distance) $', fontsize = AlvaFontSize)
plt.ylabel(r'$ Neutralization \ \ titer $', fontsize = AlvaFontSize)
plt.xlim([minX, maxX])
plt.xticks(fontsize = AlvaFontSize)
plt.yticks(fontsize = AlvaFontSize)
plt.ylim([2**0, 2**9])
plt.yscale('log', basey = 2)
plt.legend(loc = (1,0), fontsize = AlvaFontSize)
plt.savefig(save_figure, dpi = 100, bbox_inches='tight')
plt.show()
# In[ ]:
| gpl-2.0 | 5,572,586,783,299,461,000 | 36.084691 | 257 | 0.622047 | false |
moyogo/ufo2ft | Lib/ufo2ft/outlineCompiler.py | 1 | 55408 |
import logging
import math
from collections import Counter, namedtuple
from io import BytesIO
from types import SimpleNamespace
from fontTools.cffLib import (
CharStrings,
GlobalSubrsIndex,
IndexedStrings,
PrivateDict,
SubrsIndex,
TopDict,
TopDictIndex,
)
from fontTools.misc.arrayTools import unionRect
from fontTools.misc.fixedTools import otRound
from fontTools.pens.boundsPen import ControlBoundsPen
from fontTools.pens.reverseContourPen import ReverseContourPen
from fontTools.pens.t2CharStringPen import T2CharStringPen
from fontTools.pens.ttGlyphPen import TTGlyphPen
from fontTools.ttLib import TTFont, newTable
from fontTools.ttLib.tables._g_l_y_f import USE_MY_METRICS, Glyph
from fontTools.ttLib.tables._h_e_a_d import mac_epoch_diff
from fontTools.ttLib.tables.O_S_2f_2 import Panose
from ufo2ft.constants import COLOR_LAYERS_KEY, COLOR_PALETTES_KEY
from ufo2ft.errors import InvalidFontData
from ufo2ft.fontInfoData import (
dateStringForNow,
dateStringToTimeValue,
getAttrWithFallback,
intListToNum,
normalizeStringForPostscript,
)
from ufo2ft.util import (
_copyGlyph,
calcCodePageRanges,
makeOfficialGlyphOrder,
makeUnicodeToGlyphNameMapping,
)
logger = logging.getLogger(__name__)
BoundingBox = namedtuple("BoundingBox", ["xMin", "yMin", "xMax", "yMax"])
EMPTY_BOUNDING_BOX = BoundingBox(0, 0, 0, 0)
def _isNonBMP(s):
for c in s:
if ord(c) > 65535:
return True
return False
def _getVerticalOrigin(font, glyph):
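# Fall back to the OS/2 sTypoAscender when the glyph does not define its own vertical origin.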
if hasattr(glyph, "verticalOrigin") and glyph.verticalOrigin is not None:
verticalOrigin = glyph.verticalOrigin
else:
os2 = font.get("OS/2")
typo_ascender = os2.sTypoAscender if os2 is not None else 0
verticalOrigin = typo_ascender
return otRound(verticalOrigin)
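# Illustrative usage sketch (not part of the original module); the variable names below
# are assumptions for the example only:
#
#   compiler = OutlineTTFCompiler(ufo)   # or OutlineOTFCompiler(ufo) for CFF outlines
#   ttfont = compiler.compile()          # returns a fontTools TTFont
#   ttfont.save("MyFont.ttf")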
class BaseOutlineCompiler:
"""Create a feature-less outline binary."""
sfntVersion = None
tables = frozenset(
[
"head",
"hmtx",
"hhea",
"name",
"maxp",
"cmap",
"OS/2",
"post",
"vmtx",
"vhea",
"COLR",
"CPAL",
]
)
def __init__(
self,
font,
glyphSet=None,
glyphOrder=None,
tables=None,
notdefGlyph=None,
):
self.ufo = font
# use the previously filtered glyphSet, if any
if glyphSet is None:
glyphSet = {g.name: g for g in font}
self.makeMissingRequiredGlyphs(font, glyphSet, self.sfntVersion, notdefGlyph)
self.allGlyphs = glyphSet
# store the glyph order
if glyphOrder is None:
glyphOrder = font.glyphOrder
self.glyphOrder = self.makeOfficialGlyphOrder(glyphOrder)
# make a reusable character mapping
self.unicodeToGlyphNameMapping = self.makeUnicodeToGlyphNameMapping()
if tables is not None:
self.tables = tables
# cached values defined later on
self._glyphBoundingBoxes = None
self._fontBoundingBox = None
self._compiledGlyphs = None
def compile(self):
"""
Compile the OpenType binary.
"""
self.otf = TTFont(sfntVersion=self.sfntVersion)
# only compile vertical metrics tables if vhea metrics are defined
vertical_metrics = [
"openTypeVheaVertTypoAscender",
"openTypeVheaVertTypoDescender",
"openTypeVheaVertTypoLineGap",
]
self.vertical = all(
getAttrWithFallback(self.ufo.info, metric) is not None
for metric in vertical_metrics
)
self.colorLayers = (
COLOR_LAYERS_KEY in self.ufo.lib and COLOR_PALETTES_KEY in self.ufo.lib
)
# write the glyph order
self.otf.setGlyphOrder(self.glyphOrder)
# populate basic tables
self.setupTable_head()
self.setupTable_hmtx()
self.setupTable_hhea()
self.setupTable_name()
self.setupTable_maxp()
self.setupTable_cmap()
self.setupTable_OS2()
self.setupTable_post()
if self.vertical:
self.setupTable_vmtx()
self.setupTable_vhea()
if self.colorLayers:
self.setupTable_COLR()
self.setupTable_CPAL()
self.setupOtherTables()
self.importTTX()
return self.otf
def compileGlyphs(self):
"""Compile glyphs and return dict keyed by glyph name.
**This should not be called externally.**
Subclasses must override this method to handle compilation of glyphs.
"""
raise NotImplementedError
def getCompiledGlyphs(self):
if self._compiledGlyphs is None:
self._compiledGlyphs = self.compileGlyphs()
return self._compiledGlyphs
def makeGlyphsBoundingBoxes(self):
"""
Make bounding boxes for all the glyphs, and return a dictionary of
BoundingBox(xMin, xMax, yMin, yMax) namedtuples keyed by glyph names.
The bounding box of empty glyphs (without contours or components) is
set to None.
The bbox values are integers.
**This should not be called externally.**
Subclasses must override this method to handle the bounds creation for
their specific glyph type.
"""
raise NotImplementedError
@property
def glyphBoundingBoxes(self):
if self._glyphBoundingBoxes is None:
self._glyphBoundingBoxes = self.makeGlyphsBoundingBoxes()
return self._glyphBoundingBoxes
def makeFontBoundingBox(self):
"""
Make a bounding box for the font.
**This should not be called externally.** Subclasses
may override this method to handle the bounds creation
in a different way if desired.
"""
fontBox = None
for glyphBox in self.glyphBoundingBoxes.values():
if glyphBox is None:
continue
if fontBox is None:
fontBox = glyphBox
else:
fontBox = unionRect(fontBox, glyphBox)
if fontBox is None: # unlikely
fontBox = EMPTY_BOUNDING_BOX
return fontBox
@property
def fontBoundingBox(self):
if self._fontBoundingBox is None:
self._fontBoundingBox = self.makeFontBoundingBox()
return self._fontBoundingBox
def makeUnicodeToGlyphNameMapping(self):
"""
Make a ``unicode : glyph name`` mapping for the font.
**This should not be called externally.** Subclasses
may override this method to handle the mapping creation
in a different way if desired.
"""
return makeUnicodeToGlyphNameMapping(self.allGlyphs, self.glyphOrder)
@staticmethod
def makeMissingRequiredGlyphs(font, glyphSet, sfntVersion, notdefGlyph=None):
"""
Add .notdef to the glyph set if it is not present.
**This should not be called externally.** Subclasses
may override this method to handle the glyph creation
in a different way if desired.
"""
if ".notdef" in glyphSet:
return
reverseContour = sfntVersion == "\000\001\000\000"
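# TrueType (sfntVersion 1.0) expects the opposite contour direction from PostScript/CFF,
# so the stub .notdef outline is reversed for glyf-based fonts.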
if notdefGlyph:
notdefGlyph = _copyGlyph(notdefGlyph, reverseContour=reverseContour)
else:
unitsPerEm = otRound(getAttrWithFallback(font.info, "unitsPerEm"))
ascender = otRound(getAttrWithFallback(font.info, "ascender"))
descender = otRound(getAttrWithFallback(font.info, "descender"))
defaultWidth = otRound(unitsPerEm * 0.5)
notdefGlyph = StubGlyph(
name=".notdef",
width=defaultWidth,
unitsPerEm=unitsPerEm,
ascender=ascender,
descender=descender,
reverseContour=reverseContour,
)
glyphSet[".notdef"] = notdefGlyph
def makeOfficialGlyphOrder(self, glyphOrder):
"""
Make the final glyph order.
**This should not be called externally.** Subclasses
may override this method to handle the order creation
in a different way if desired.
"""
return makeOfficialGlyphOrder(self.allGlyphs, glyphOrder)
# --------------
# Table Builders
# --------------
def setupTable_gasp(self):
if "gasp" not in self.tables:
return
self.otf["gasp"] = gasp = newTable("gasp")
gasp_ranges = dict()
for record in self.ufo.info.openTypeGaspRangeRecords:
rangeMaxPPEM = record["rangeMaxPPEM"]
behavior_bits = record["rangeGaspBehavior"]
rangeGaspBehavior = intListToNum(behavior_bits, 0, 4)
gasp_ranges[rangeMaxPPEM] = rangeGaspBehavior
gasp.gaspRange = gasp_ranges
def setupTable_head(self):
"""
Make the head table.
**This should not be called externally.** Subclasses
may override or supplement this method to handle the
table creation in a different way if desired.
"""
if "head" not in self.tables:
return
self.otf["head"] = head = newTable("head")
font = self.ufo
head.checkSumAdjustment = 0
head.tableVersion = 1.0
head.magicNumber = 0x5F0F3CF5
# version numbers
# limit minor version to 3 digits as recommended in OpenType spec:
# https://www.microsoft.com/typography/otspec/recom.htm
versionMajor = getAttrWithFallback(font.info, "versionMajor")
versionMinor = getAttrWithFallback(font.info, "versionMinor")
fullFontRevision = float("%d.%03d" % (versionMajor, versionMinor))
head.fontRevision = round(fullFontRevision, 3)
if head.fontRevision != fullFontRevision:
logger.warning(
"Minor version in %s has too many digits and won't fit into "
"the head table's fontRevision field; rounded to %s.",
fullFontRevision,
head.fontRevision,
)
# upm
head.unitsPerEm = otRound(getAttrWithFallback(font.info, "unitsPerEm"))
# times
head.created = (
dateStringToTimeValue(getAttrWithFallback(font.info, "openTypeHeadCreated"))
- mac_epoch_diff
)
head.modified = dateStringToTimeValue(dateStringForNow()) - mac_epoch_diff
# bounding box
xMin, yMin, xMax, yMax = self.fontBoundingBox
head.xMin = otRound(xMin)
head.yMin = otRound(yMin)
head.xMax = otRound(xMax)
head.yMax = otRound(yMax)
# style mapping
styleMapStyleName = getAttrWithFallback(font.info, "styleMapStyleName")
macStyle = []
if styleMapStyleName == "bold":
macStyle = [0]
elif styleMapStyleName == "bold italic":
macStyle = [0, 1]
elif styleMapStyleName == "italic":
macStyle = [1]
head.macStyle = intListToNum(macStyle, 0, 16)
# misc
head.flags = intListToNum(
getAttrWithFallback(font.info, "openTypeHeadFlags"), 0, 16
)
head.lowestRecPPEM = otRound(
getAttrWithFallback(font.info, "openTypeHeadLowestRecPPEM")
)
head.fontDirectionHint = 2
head.indexToLocFormat = 0
head.glyphDataFormat = 0
def setupTable_name(self):
"""
Make the name table.
**This should not be called externally.** Subclasses
may override or supplement this method to handle the
table creation in a different way if desired.
"""
if "name" not in self.tables:
return
font = self.ufo
self.otf["name"] = name = newTable("name")
name.names = []
# Set name records from font.info.openTypeNameRecords
for nameRecord in getAttrWithFallback(font.info, "openTypeNameRecords"):
nameId = nameRecord["nameID"]
platformId = nameRecord["platformID"]
platEncId = nameRecord["encodingID"]
langId = nameRecord["languageID"]
# on Python 2, plistLib (used by ufoLib) returns unicode strings
# only when plist data contain non-ascii characters, and returns
# ascii-encoded bytes when it can. On the other hand, fontTools's
# name table `setName` method wants unicode strings, so we must
# decode them first
nameVal = nameRecord["string"]
name.setName(nameVal, nameId, platformId, platEncId, langId)
# Build name records
familyName = getAttrWithFallback(font.info, "styleMapFamilyName")
styleName = getAttrWithFallback(font.info, "styleMapStyleName").title()
preferredFamilyName = getAttrWithFallback(
font.info, "openTypeNamePreferredFamilyName"
)
preferredSubfamilyName = getAttrWithFallback(
font.info, "openTypeNamePreferredSubfamilyName"
)
fullName = f"{preferredFamilyName} {preferredSubfamilyName}"
nameVals = {
0: getAttrWithFallback(font.info, "copyright"),
1: familyName,
2: styleName,
3: getAttrWithFallback(font.info, "openTypeNameUniqueID"),
4: fullName,
5: getAttrWithFallback(font.info, "openTypeNameVersion"),
6: getAttrWithFallback(font.info, "postscriptFontName"),
7: getAttrWithFallback(font.info, "trademark"),
8: getAttrWithFallback(font.info, "openTypeNameManufacturer"),
9: getAttrWithFallback(font.info, "openTypeNameDesigner"),
10: getAttrWithFallback(font.info, "openTypeNameDescription"),
11: getAttrWithFallback(font.info, "openTypeNameManufacturerURL"),
12: getAttrWithFallback(font.info, "openTypeNameDesignerURL"),
13: getAttrWithFallback(font.info, "openTypeNameLicense"),
14: getAttrWithFallback(font.info, "openTypeNameLicenseURL"),
16: preferredFamilyName,
17: preferredSubfamilyName,
18: getAttrWithFallback(font.info, "openTypeNameCompatibleFullName"),
19: getAttrWithFallback(font.info, "openTypeNameSampleText"),
21: getAttrWithFallback(font.info, "openTypeNameWWSFamilyName"),
22: getAttrWithFallback(font.info, "openTypeNameWWSSubfamilyName"),
}
# don't add typographic names if they are the same as the legacy ones
if nameVals[1] == nameVals[16]:
del nameVals[16]
if nameVals[2] == nameVals[17]:
del nameVals[17]
# postscript font name
if nameVals[6]:
nameVals[6] = normalizeStringForPostscript(nameVals[6])
for nameId in sorted(nameVals.keys()):
nameVal = nameVals[nameId]
if not nameVal:
continue
platformId = 3
platEncId = 10 if _isNonBMP(nameVal) else 1
langId = 0x409
# Set built name record if not set yet
if name.getName(nameId, platformId, platEncId, langId):
continue
name.setName(nameVal, nameId, platformId, platEncId, langId)
def setupTable_maxp(self):
"""
Make the maxp table.
**This should not be called externally.** Subclasses
must override or supplement this method to handle the
table creation for either CFF or TT data.
"""
raise NotImplementedError
def setupTable_cmap(self):
"""
Make the cmap table.
**This should not be called externally.** Subclasses
may override or supplement this method to handle the
table creation in a different way if desired.
"""
if "cmap" not in self.tables:
return
from fontTools.ttLib.tables._c_m_a_p import cmap_format_4
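# format 4 subtables can only encode BMP codepoints (<= 0xFFFF); any higher codepoints
# are carried by an additional format 12 subtable added further below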
nonBMP = {k: v for k, v in self.unicodeToGlyphNameMapping.items() if k > 65535}
if nonBMP:
mapping = {
k: v for k, v in self.unicodeToGlyphNameMapping.items() if k <= 65535
}
else:
mapping = dict(self.unicodeToGlyphNameMapping)
# mac
cmap4_0_3 = cmap_format_4(4)
cmap4_0_3.platformID = 0
cmap4_0_3.platEncID = 3
cmap4_0_3.language = 0
cmap4_0_3.cmap = mapping
# windows
cmap4_3_1 = cmap_format_4(4)
cmap4_3_1.platformID = 3
cmap4_3_1.platEncID = 1
cmap4_3_1.language = 0
cmap4_3_1.cmap = mapping
# store
self.otf["cmap"] = cmap = newTable("cmap")
cmap.tableVersion = 0
cmap.tables = [cmap4_0_3, cmap4_3_1]
# If we have glyphs outside Unicode BMP, we must set another
# subtable that can hold longer codepoints for them.
if nonBMP:
from fontTools.ttLib.tables._c_m_a_p import cmap_format_12
nonBMP.update(mapping)
# mac
cmap12_0_4 = cmap_format_12(12)
cmap12_0_4.platformID = 0
cmap12_0_4.platEncID = 4
cmap12_0_4.language = 0
cmap12_0_4.cmap = nonBMP
# windows
cmap12_3_10 = cmap_format_12(12)
cmap12_3_10.platformID = 3
cmap12_3_10.platEncID = 10
cmap12_3_10.language = 0
cmap12_3_10.cmap = nonBMP
# update tables registry
cmap.tables = [cmap4_0_3, cmap4_3_1, cmap12_0_4, cmap12_3_10]
def setupTable_OS2(self):
"""
Make the OS/2 table.
**This should not be called externally.** Subclasses
may override or supplement this method to handle the
table creation in a different way if desired.
"""
if "OS/2" not in self.tables:
return
self.otf["OS/2"] = os2 = newTable("OS/2")
font = self.ufo
os2.version = 0x0004
# average glyph width
os2.xAvgCharWidth = 0
hmtx = self.otf.get("hmtx")
if hmtx is not None:
widths = [width for width, _ in hmtx.metrics.values() if width > 0]
if widths:
os2.xAvgCharWidth = otRound(sum(widths) / len(widths))
# weight and width classes
os2.usWeightClass = getAttrWithFallback(font.info, "openTypeOS2WeightClass")
os2.usWidthClass = getAttrWithFallback(font.info, "openTypeOS2WidthClass")
# embedding
os2.fsType = intListToNum(
getAttrWithFallback(font.info, "openTypeOS2Type"), 0, 16
)
# subscript, superscript, strikeout values, taken from AFDKO:
# FDK/Tools/Programs/makeotf/makeotf_lib/source/hotconv/hot.c
unitsPerEm = getAttrWithFallback(font.info, "unitsPerEm")
italicAngle = getAttrWithFallback(font.info, "italicAngle")
xHeight = getAttrWithFallback(font.info, "xHeight")
def adjustOffset(offset, angle):
"""Adjust Y offset based on italic angle, to get X offset."""
return offset * math.tan(math.radians(-angle)) if angle else 0
v = getAttrWithFallback(font.info, "openTypeOS2SubscriptXSize")
if v is None:
v = unitsPerEm * 0.65
os2.ySubscriptXSize = otRound(v)
v = getAttrWithFallback(font.info, "openTypeOS2SubscriptYSize")
if v is None:
v = unitsPerEm * 0.6
os2.ySubscriptYSize = otRound(v)
v = getAttrWithFallback(font.info, "openTypeOS2SubscriptYOffset")
if v is None:
v = unitsPerEm * 0.075
os2.ySubscriptYOffset = otRound(v)
v = getAttrWithFallback(font.info, "openTypeOS2SubscriptXOffset")
if v is None:
v = adjustOffset(-os2.ySubscriptYOffset, italicAngle)
os2.ySubscriptXOffset = otRound(v)
v = getAttrWithFallback(font.info, "openTypeOS2SuperscriptXSize")
if v is None:
v = os2.ySubscriptXSize
os2.ySuperscriptXSize = otRound(v)
v = getAttrWithFallback(font.info, "openTypeOS2SuperscriptYSize")
if v is None:
v = os2.ySubscriptYSize
os2.ySuperscriptYSize = otRound(v)
v = getAttrWithFallback(font.info, "openTypeOS2SuperscriptYOffset")
if v is None:
v = unitsPerEm * 0.35
os2.ySuperscriptYOffset = otRound(v)
v = getAttrWithFallback(font.info, "openTypeOS2SuperscriptXOffset")
if v is None:
v = adjustOffset(os2.ySuperscriptYOffset, italicAngle)
os2.ySuperscriptXOffset = otRound(v)
v = getAttrWithFallback(font.info, "openTypeOS2StrikeoutSize")
if v is None:
v = getAttrWithFallback(font.info, "postscriptUnderlineThickness")
os2.yStrikeoutSize = otRound(v)
v = getAttrWithFallback(font.info, "openTypeOS2StrikeoutPosition")
if v is None:
v = xHeight * 0.6 if xHeight else unitsPerEm * 0.22
os2.yStrikeoutPosition = otRound(v)
# family class
ibmFontClass, ibmFontSubclass = getAttrWithFallback(
font.info, "openTypeOS2FamilyClass"
)
os2.sFamilyClass = (ibmFontClass << 8) + ibmFontSubclass
# panose
data = getAttrWithFallback(font.info, "openTypeOS2Panose")
panose = Panose()
panose.bFamilyType = data[0]
panose.bSerifStyle = data[1]
panose.bWeight = data[2]
panose.bProportion = data[3]
panose.bContrast = data[4]
panose.bStrokeVariation = data[5]
panose.bArmStyle = data[6]
panose.bLetterForm = data[7]
panose.bMidline = data[8]
panose.bXHeight = data[9]
os2.panose = panose
# Unicode ranges
uniRanges = getAttrWithFallback(font.info, "openTypeOS2UnicodeRanges")
if uniRanges is not None:
os2.ulUnicodeRange1 = intListToNum(uniRanges, 0, 32)
os2.ulUnicodeRange2 = intListToNum(uniRanges, 32, 32)
os2.ulUnicodeRange3 = intListToNum(uniRanges, 64, 32)
os2.ulUnicodeRange4 = intListToNum(uniRanges, 96, 32)
else:
os2.recalcUnicodeRanges(self.otf)
# codepage ranges
codepageRanges = getAttrWithFallback(font.info, "openTypeOS2CodePageRanges")
if codepageRanges is None:
unicodes = self.unicodeToGlyphNameMapping.keys()
codepageRanges = calcCodePageRanges(unicodes)
os2.ulCodePageRange1 = intListToNum(codepageRanges, 0, 32)
os2.ulCodePageRange2 = intListToNum(codepageRanges, 32, 32)
# vendor id
os2.achVendID = getAttrWithFallback(font.info, "openTypeOS2VendorID")
# vertical metrics
os2.sxHeight = otRound(getAttrWithFallback(font.info, "xHeight"))
os2.sCapHeight = otRound(getAttrWithFallback(font.info, "capHeight"))
os2.sTypoAscender = otRound(
getAttrWithFallback(font.info, "openTypeOS2TypoAscender")
)
os2.sTypoDescender = otRound(
getAttrWithFallback(font.info, "openTypeOS2TypoDescender")
)
os2.sTypoLineGap = otRound(
getAttrWithFallback(font.info, "openTypeOS2TypoLineGap")
)
os2.usWinAscent = otRound(
getAttrWithFallback(font.info, "openTypeOS2WinAscent")
)
os2.usWinDescent = otRound(
getAttrWithFallback(font.info, "openTypeOS2WinDescent")
)
# style mapping
selection = list(getAttrWithFallback(font.info, "openTypeOS2Selection"))
styleMapStyleName = getAttrWithFallback(font.info, "styleMapStyleName")
if styleMapStyleName == "regular":
selection.append(6)
elif styleMapStyleName == "bold":
selection.append(5)
elif styleMapStyleName == "italic":
selection.append(0)
elif styleMapStyleName == "bold italic":
selection += [0, 5]
os2.fsSelection = intListToNum(selection, 0, 16)
# character indexes
unicodes = [i for i in self.unicodeToGlyphNameMapping.keys() if i is not None]
if unicodes:
minIndex = min(unicodes)
maxIndex = max(unicodes)
else:
# the font may have *no* unicode values (it really happens!) so
# there needs to be a fallback. use 0xFFFF, as AFDKO does:
# FDK/Tools/Programs/makeotf/makeotf_lib/source/hotconv/map.c
minIndex = 0xFFFF
maxIndex = 0xFFFF
if maxIndex > 0xFFFF:
# the spec says that 0xFFFF should be used
# as the max if the max exceeds 0xFFFF
maxIndex = 0xFFFF
os2.fsFirstCharIndex = minIndex
os2.fsLastCharIndex = maxIndex
os2.usBreakChar = 32
os2.usDefaultChar = 0
# maximum contextual lookup length
os2.usMaxContex = 0
def setupTable_hmtx(self):
"""
Make the hmtx table.
**This should not be called externally.** Subclasses
may override or supplement this method to handle the
table creation in a different way if desired.
"""
if "hmtx" not in self.tables:
return
self.otf["hmtx"] = hmtx = newTable("hmtx")
hmtx.metrics = {}
for glyphName, glyph in self.allGlyphs.items():
width = otRound(glyph.width)
if width < 0:
raise ValueError("The width should not be negative: '%s'" % (glyphName))
bounds = self.glyphBoundingBoxes[glyphName]
left = bounds.xMin if bounds else 0
hmtx[glyphName] = (width, left)
def _setupTable_hhea_or_vhea(self, tag):
"""
Make the hhea table or the vhea table. This assume the hmtx or
the vmtx were respectively made first.
"""
if tag not in self.tables:
return
if tag == "hhea":
isHhea = True
else:
isHhea = False
self.otf[tag] = table = newTable(tag)
mtxTable = self.otf.get(tag[0] + "mtx")
font = self.ufo
if isHhea:
table.tableVersion = 0x00010000
else:
table.tableVersion = 0x00011000
# Vertical metrics in hhea, horizontal metrics in vhea
# and caret info.
# The hhea metrics names are formed as:
# "openType" + tag.title() + "Ascender", etc.
# While vhea metrics names are formed as:
# "openType" + tag.title() + "VertTypo" + "Ascender", etc.
# Caret info names only differ by tag.title().
commonPrefix = "openType%s" % tag.title()
if isHhea:
metricsPrefix = commonPrefix
else:
metricsPrefix = "openType%sVertTypo" % tag.title()
metricsDict = {
"ascent": "%sAscender" % metricsPrefix,
"descent": "%sDescender" % metricsPrefix,
"lineGap": "%sLineGap" % metricsPrefix,
"caretSlopeRise": "%sCaretSlopeRise" % commonPrefix,
"caretSlopeRun": "%sCaretSlopeRun" % commonPrefix,
"caretOffset": "%sCaretOffset" % commonPrefix,
}
for otfName, ufoName in metricsDict.items():
setattr(table, otfName, otRound(getAttrWithFallback(font.info, ufoName)))
# Horizontal metrics in hhea, vertical metrics in vhea
advances = [] # width in hhea, height in vhea
firstSideBearings = [] # left in hhea, top in vhea
secondSideBearings = [] # right in hhea, bottom in vhea
extents = []
if mtxTable is not None:
for glyphName in self.allGlyphs:
advance, firstSideBearing = mtxTable[glyphName]
advances.append(advance)
bounds = self.glyphBoundingBoxes[glyphName]
if bounds is None:
continue
if isHhea:
boundsAdvance = bounds.xMax - bounds.xMin
# equation from the hhea spec for calculating xMaxExtent:
# Max(lsb + (xMax - xMin))
extent = firstSideBearing + boundsAdvance
else:
boundsAdvance = bounds.yMax - bounds.yMin
# equation from the vhea spec for calculating yMaxExtent:
# Max(tsb + (yMax - yMin)).
extent = firstSideBearing + boundsAdvance
secondSideBearing = advance - firstSideBearing - boundsAdvance
firstSideBearings.append(firstSideBearing)
secondSideBearings.append(secondSideBearing)
extents.append(extent)
setattr(
table,
"advance%sMax" % ("Width" if isHhea else "Height"),
max(advances) if advances else 0,
)
setattr(
table,
"min%sSideBearing" % ("Left" if isHhea else "Top"),
min(firstSideBearings) if firstSideBearings else 0,
)
setattr(
table,
"min%sSideBearing" % ("Right" if isHhea else "Bottom"),
min(secondSideBearings) if secondSideBearings else 0,
)
setattr(
table,
"%sMaxExtent" % ("x" if isHhea else "y"),
max(extents) if extents else 0,
)
if isHhea:
reserved = range(4)
else:
# vhea.reserved0 is caretOffset for legacy reasons
reserved = range(1, 5)
for i in reserved:
setattr(table, "reserved%i" % i, 0)
table.metricDataFormat = 0
# glyph count
setattr(
table, "numberOf%sMetrics" % ("H" if isHhea else "V"), len(self.allGlyphs)
)
def setupTable_hhea(self):
"""
Make the hhea table. This assumes that the hmtx table was made first.
**This should not be called externally.** Subclasses
may override or supplement this method to handle the
table creation in a different way if desired.
"""
self._setupTable_hhea_or_vhea("hhea")
def setupTable_vmtx(self):
"""
Make the vmtx table.
**This should not be called externally.** Subclasses
may override or supplement this method to handle the
table creation in a different way if desired.
"""
if "vmtx" not in self.tables:
return
self.otf["vmtx"] = vmtx = newTable("vmtx")
vmtx.metrics = {}
for glyphName, glyph in self.allGlyphs.items():
height = otRound(glyph.height)
if height < 0:
raise ValueError(
"The height should not be negative: '%s'" % (glyphName)
)
verticalOrigin = _getVerticalOrigin(self.otf, glyph)
bounds = self.glyphBoundingBoxes[glyphName]
top = bounds.yMax if bounds else 0
vmtx[glyphName] = (height, verticalOrigin - top)
def setupTable_VORG(self):
"""
Make the VORG table.
**This should not be called externally.** Subclasses
may override or supplement this method to handle the
table creation in a different way if desired.
"""
if "VORG" not in self.tables:
return
self.otf["VORG"] = vorg = newTable("VORG")
vorg.majorVersion = 1
vorg.minorVersion = 0
vorg.VOriginRecords = {}
# Find the most frequent verticalOrigin
vorg_count = Counter(
_getVerticalOrigin(self.otf, glyph) for glyph in self.allGlyphs.values()
)
vorg.defaultVertOriginY = vorg_count.most_common(1)[0][0]
if len(vorg_count) > 1:
for glyphName, glyph in self.allGlyphs.items():
vertOriginY = _getVerticalOrigin(self.otf, glyph)
if vertOriginY == vorg.defaultVertOriginY:
continue
vorg.VOriginRecords[glyphName] = vertOriginY
vorg.numVertOriginYMetrics = len(vorg.VOriginRecords)
def setupTable_vhea(self):
"""
Make the vhea table. This assumes that the head and vmtx tables were
made first.
**This should not be called externally.** Subclasses
may override or supplement this method to handle the
table creation in a different way if desired.
"""
self._setupTable_hhea_or_vhea("vhea")
def setupTable_post(self):
"""
Make the post table.
**This should not be called externally.** Subclasses
may override or supplement this method to handle the
table creation in a different way if desired.
"""
if "post" not in self.tables:
return
self.otf["post"] = post = newTable("post")
font = self.ufo
post.formatType = 3.0
# italic angle
italicAngle = getAttrWithFallback(font.info, "italicAngle")
post.italicAngle = italicAngle
# underline
underlinePosition = getAttrWithFallback(
font.info, "postscriptUnderlinePosition"
)
post.underlinePosition = otRound(underlinePosition)
underlineThickness = getAttrWithFallback(
font.info, "postscriptUnderlineThickness"
)
post.underlineThickness = otRound(underlineThickness)
post.isFixedPitch = getAttrWithFallback(font.info, "postscriptIsFixedPitch")
# misc
post.minMemType42 = 0
post.maxMemType42 = 0
post.minMemType1 = 0
post.maxMemType1 = 0
def setupTable_COLR(self):
"""
Compile the COLR table.
**This should not be called externally.**
"""
if "COLR" not in self.tables:
return
from fontTools.colorLib.builder import buildCOLR
layerInfo = self.ufo.lib[COLOR_LAYERS_KEY]
glyphMap = self.otf.getReverseGlyphMap()
if layerInfo:
self.otf["COLR"] = buildCOLR(layerInfo, glyphMap=glyphMap)
def setupTable_CPAL(self):
"""
Compile the CPAL table.
**This should not be called externally.**
"""
if "CPAL" not in self.tables:
return
from fontTools.colorLib.builder import buildCPAL
from fontTools.colorLib.errors import ColorLibError
# colorLib wants colors as tuples, plistlib gives us lists
palettes = [
[tuple(color) for color in palette]
for palette in self.ufo.lib[COLOR_PALETTES_KEY]
]
try:
self.otf["CPAL"] = buildCPAL(palettes)
except ColorLibError as e:
raise InvalidFontData("Failed to build CPAL table") from e
def setupOtherTables(self):
"""
Make the other tables. The default implementation does nothing.
**This should not be called externally.** Subclasses
may override this method to add other tables to the
font if desired.
"""
pass
def importTTX(self):
"""
Merge TTX files from data directory "com.github.fonttools.ttx"
**This should not be called externally.** Subclasses
may override this method to handle the bounds creation
in a different way if desired.
"""
import os
prefix = "com.github.fonttools.ttx"
if not hasattr(self.ufo, "data"):
return
if not self.ufo.data.fileNames:
return
for path in self.ufo.data.fileNames:
foldername, filename = os.path.split(path)
if foldername == prefix and filename.endswith(".ttx"):
ttx = self.ufo.data[path].decode("utf-8")
fp = BytesIO(ttx.encode("utf-8"))
# Preserve the original SFNT version when loading a TTX dump.
sfntVersion = self.otf.sfntVersion
try:
self.otf.importXML(fp)
finally:
self.otf.sfntVersion = sfntVersion
class OutlineOTFCompiler(BaseOutlineCompiler):
"""Compile a .otf font with CFF outlines."""
sfntVersion = "OTTO"
tables = BaseOutlineCompiler.tables | {"CFF", "VORG"}
def __init__(
self,
font,
glyphSet=None,
glyphOrder=None,
tables=None,
notdefGlyph=None,
roundTolerance=None,
optimizeCFF=True,
):
if roundTolerance is not None:
self.roundTolerance = float(roundTolerance)
else:
# round all coordinates to integers by default
self.roundTolerance = 0.5
super().__init__(
font,
glyphSet=glyphSet,
glyphOrder=glyphOrder,
tables=tables,
notdefGlyph=notdefGlyph,
)
self.optimizeCFF = optimizeCFF
self._defaultAndNominalWidths = None
def getDefaultAndNominalWidths(self):
"""Return (defaultWidthX, nominalWidthX).
If fontinfo.plist doesn't define these explicitly, compute optimal values
from the glyphs' advance widths.
"""
if self._defaultAndNominalWidths is None:
info = self.ufo.info
# populate the width values
if all(
getattr(info, attr, None) is None
for attr in ("postscriptDefaultWidthX", "postscriptNominalWidthX")
):
# no custom values set in fontinfo.plist; compute optimal ones
from fontTools.cffLib.width import optimizeWidths
widths = [otRound(glyph.width) for glyph in self.allGlyphs.values()]
defaultWidthX, nominalWidthX = optimizeWidths(widths)
else:
defaultWidthX = otRound(
getAttrWithFallback(info, "postscriptDefaultWidthX")
)
nominalWidthX = otRound(
getAttrWithFallback(info, "postscriptNominalWidthX")
)
self._defaultAndNominalWidths = (defaultWidthX, nominalWidthX)
return self._defaultAndNominalWidths
def compileGlyphs(self):
"""Compile and return the CFF T2CharStrings for this font."""
defaultWidth, nominalWidth = self.getDefaultAndNominalWidths()
# The real PrivateDict will be created later on in setupTable_CFF.
# For convenience here we use a namespace object to pass the default/nominal
# widths that we need to draw the charstrings when computing their bounds.
private = SimpleNamespace(
defaultWidthX=defaultWidth, nominalWidthX=nominalWidth
)
compiledGlyphs = {}
for glyphName in self.glyphOrder:
glyph = self.allGlyphs[glyphName]
cs = self.getCharStringForGlyph(glyph, private)
compiledGlyphs[glyphName] = cs
return compiledGlyphs
def makeGlyphsBoundingBoxes(self):
"""
Make bounding boxes for all the glyphs, and return a dictionary of
BoundingBox(xMin, xMax, yMin, yMax) namedtuples keyed by glyph names.
The bounding box of empty glyphs (without contours or components) is
set to None.
Check that the float values are within the range of the specified
self.roundTolerance, and if so use the rounded value; else take the
floor or ceiling to ensure that the bounding box encloses the original
values.
"""
def toInt(value, else_callback):
rounded = otRound(value)
if tolerance >= 0.5 or abs(rounded - value) <= tolerance:
return rounded
else:
return int(else_callback(value))
tolerance = self.roundTolerance
glyphBoxes = {}
charStrings = self.getCompiledGlyphs()
for name, cs in charStrings.items():
bounds = cs.calcBounds(charStrings)
if bounds is not None:
rounded = []
for value in bounds[:2]:
rounded.append(toInt(value, math.floor))
for value in bounds[2:]:
rounded.append(toInt(value, math.ceil))
bounds = BoundingBox(*rounded)
if bounds == EMPTY_BOUNDING_BOX:
bounds = None
glyphBoxes[name] = bounds
return glyphBoxes
def getCharStringForGlyph(self, glyph, private, globalSubrs=None):
"""
Get a Type2CharString for the *glyph*
**This should not be called externally.** Subclasses
may override this method to handle the charstring creation
in a different way if desired.
"""
width = glyph.width
defaultWidth = private.defaultWidthX
nominalWidth = private.nominalWidthX
if width == defaultWidth:
# if width equals the default it can be omitted from charstring
width = None
else:
# subtract the nominal width
width -= nominalWidth
if width is not None:
width = otRound(width)
pen = T2CharStringPen(width, self.allGlyphs, roundTolerance=self.roundTolerance)
glyph.draw(pen)
charString = pen.getCharString(private, globalSubrs, optimize=self.optimizeCFF)
return charString
def setupTable_maxp(self):
"""Make the maxp table."""
if "maxp" not in self.tables:
return
self.otf["maxp"] = maxp = newTable("maxp")
maxp.tableVersion = 0x00005000
maxp.numGlyphs = len(self.glyphOrder)
def setupOtherTables(self):
self.setupTable_CFF()
if self.vertical:
self.setupTable_VORG()
def setupTable_CFF(self):
"""Make the CFF table."""
if not {"CFF", "CFF "}.intersection(self.tables):
return
self.otf["CFF "] = cff = newTable("CFF ")
cff = cff.cff
# set up the basics
cff.major = 1
cff.minor = 0
cff.hdrSize = 4
cff.offSize = 4
cff.fontNames = []
strings = IndexedStrings()
cff.strings = strings
private = PrivateDict(strings=strings)
private.rawDict.update(private.defaults)
globalSubrs = GlobalSubrsIndex(private=private)
topDict = TopDict(GlobalSubrs=globalSubrs, strings=strings)
topDict.Private = private
charStrings = topDict.CharStrings = CharStrings(
file=None,
charset=None,
globalSubrs=globalSubrs,
private=private,
fdSelect=None,
fdArray=None,
)
charStrings.charStringsAreIndexed = True
topDict.charset = []
charStringsIndex = charStrings.charStringsIndex = SubrsIndex(
private=private, globalSubrs=globalSubrs
)
cff.topDictIndex = topDictIndex = TopDictIndex()
topDictIndex.append(topDict)
topDictIndex.strings = strings
cff.GlobalSubrs = globalSubrs
# populate naming data
info = self.ufo.info
psName = getAttrWithFallback(info, "postscriptFontName")
cff.fontNames.append(psName)
topDict = cff.topDictIndex[0]
topDict.version = "%d.%d" % (
getAttrWithFallback(info, "versionMajor"),
getAttrWithFallback(info, "versionMinor"),
)
trademark = getAttrWithFallback(info, "trademark")
if trademark:
trademark = normalizeStringForPostscript(
trademark.replace("\u00A9", "Copyright")
)
if trademark != self.ufo.info.trademark:
logger.info(
"The trademark was normalized for storage in the "
"CFF table and consequently some characters were "
"dropped: '%s'",
trademark,
)
if trademark is None:
trademark = ""
topDict.Notice = trademark
copyright = getAttrWithFallback(info, "copyright")
if copyright:
copyright = normalizeStringForPostscript(
copyright.replace("\u00A9", "Copyright")
)
if copyright != self.ufo.info.copyright:
logger.info(
"The copyright was normalized for storage in the "
"CFF table and consequently some characters were "
"dropped: '%s'",
copyright,
)
if copyright is None:
copyright = ""
topDict.Copyright = copyright
topDict.FullName = getAttrWithFallback(info, "postscriptFullName")
topDict.FamilyName = getAttrWithFallback(
info, "openTypeNamePreferredFamilyName"
)
topDict.Weight = getAttrWithFallback(info, "postscriptWeightName")
# populate various numbers
topDict.isFixedPitch = getAttrWithFallback(info, "postscriptIsFixedPitch")
topDict.ItalicAngle = getAttrWithFallback(info, "italicAngle")
underlinePosition = getAttrWithFallback(info, "postscriptUnderlinePosition")
topDict.UnderlinePosition = otRound(underlinePosition)
underlineThickness = getAttrWithFallback(info, "postscriptUnderlineThickness")
topDict.UnderlineThickness = otRound(underlineThickness)
# populate font matrix
unitsPerEm = otRound(getAttrWithFallback(info, "unitsPerEm"))
topDict.FontMatrix = [1.0 / unitsPerEm, 0, 0, 1.0 / unitsPerEm, 0, 0]
# populate the width values
defaultWidthX, nominalWidthX = self.getDefaultAndNominalWidths()
if defaultWidthX:
private.rawDict["defaultWidthX"] = defaultWidthX
if nominalWidthX:
private.rawDict["nominalWidthX"] = nominalWidthX
# populate hint data
blueFuzz = otRound(getAttrWithFallback(info, "postscriptBlueFuzz"))
blueShift = otRound(getAttrWithFallback(info, "postscriptBlueShift"))
blueScale = getAttrWithFallback(info, "postscriptBlueScale")
forceBold = getAttrWithFallback(info, "postscriptForceBold")
blueValues = getAttrWithFallback(info, "postscriptBlueValues")
if isinstance(blueValues, list):
blueValues = [otRound(i) for i in blueValues]
otherBlues = getAttrWithFallback(info, "postscriptOtherBlues")
if isinstance(otherBlues, list):
otherBlues = [otRound(i) for i in otherBlues]
familyBlues = getAttrWithFallback(info, "postscriptFamilyBlues")
if isinstance(familyBlues, list):
familyBlues = [otRound(i) for i in familyBlues]
familyOtherBlues = getAttrWithFallback(info, "postscriptFamilyOtherBlues")
if isinstance(familyOtherBlues, list):
familyOtherBlues = [otRound(i) for i in familyOtherBlues]
stemSnapH = getAttrWithFallback(info, "postscriptStemSnapH")
if isinstance(stemSnapH, list):
stemSnapH = [otRound(i) for i in stemSnapH]
stemSnapV = getAttrWithFallback(info, "postscriptStemSnapV")
if isinstance(stemSnapV, list):
stemSnapV = [otRound(i) for i in stemSnapV]
# only write the blues data if some blues are defined.
if any((blueValues, otherBlues, familyBlues, familyOtherBlues)):
private.rawDict["BlueFuzz"] = blueFuzz
private.rawDict["BlueShift"] = blueShift
private.rawDict["BlueScale"] = blueScale
private.rawDict["ForceBold"] = forceBold
if blueValues:
private.rawDict["BlueValues"] = blueValues
if otherBlues:
private.rawDict["OtherBlues"] = otherBlues
if familyBlues:
private.rawDict["FamilyBlues"] = familyBlues
if familyOtherBlues:
private.rawDict["FamilyOtherBlues"] = familyOtherBlues
# only write the stems if both are defined.
if stemSnapH and stemSnapV:
private.rawDict["StemSnapH"] = stemSnapH
private.rawDict["StdHW"] = stemSnapH[0]
private.rawDict["StemSnapV"] = stemSnapV
private.rawDict["StdVW"] = stemSnapV[0]
# populate glyphs
cffGlyphs = self.getCompiledGlyphs()
for glyphName in self.glyphOrder:
charString = cffGlyphs[glyphName]
charString.private = private
charString.globalSubrs = globalSubrs
# add to the font
if glyphName in charStrings:
# XXX a glyph already has this name. should we choke?
glyphID = charStrings.charStrings[glyphName]
charStringsIndex.items[glyphID] = charString
else:
charStringsIndex.append(charString)
glyphID = len(topDict.charset)
charStrings.charStrings[glyphName] = glyphID
topDict.charset.append(glyphName)
topDict.FontBBox = self.fontBoundingBox
class OutlineTTFCompiler(BaseOutlineCompiler):
"""Compile a .ttf font with TrueType outlines."""
sfntVersion = "\000\001\000\000"
tables = BaseOutlineCompiler.tables | {"loca", "gasp", "glyf"}
def compileGlyphs(self):
"""Compile and return the TrueType glyphs for this font."""
allGlyphs = self.allGlyphs
ttGlyphs = {}
for name in self.glyphOrder:
glyph = allGlyphs[name]
pen = TTGlyphPen(allGlyphs)
try:
glyph.draw(pen)
except NotImplementedError:
logger.error("%r has invalid curve format; skipped", name)
ttGlyph = Glyph()
else:
ttGlyph = pen.glyph()
ttGlyphs[name] = ttGlyph
return ttGlyphs
def makeGlyphsBoundingBoxes(self):
"""Make bounding boxes for all the glyphs.
Return a dictionary of BoundingBox(xMin, xMax, yMin, yMax) namedtuples
keyed by glyph names.
The bounding box of empty glyphs (without contours or components) is
set to None.
"""
glyphBoxes = {}
ttGlyphs = self.getCompiledGlyphs()
for glyphName, glyph in ttGlyphs.items():
glyph.recalcBounds(ttGlyphs)
bounds = BoundingBox(glyph.xMin, glyph.yMin, glyph.xMax, glyph.yMax)
if bounds == EMPTY_BOUNDING_BOX:
bounds = None
glyphBoxes[glyphName] = bounds
return glyphBoxes
def setupTable_maxp(self):
"""Make the maxp table."""
if "maxp" not in self.tables:
return
self.otf["maxp"] = maxp = newTable("maxp")
maxp.tableVersion = 0x00010000
maxp.numGlyphs = len(self.glyphOrder)
maxp.maxZones = 1
maxp.maxTwilightPoints = 0
maxp.maxStorage = 0
maxp.maxFunctionDefs = 0
maxp.maxInstructionDefs = 0
maxp.maxStackElements = 0
maxp.maxSizeOfInstructions = 0
maxp.maxComponentElements = max(
len(g.components) for g in self.allGlyphs.values()
)
def setupTable_post(self):
"""Make a format 2 post table with the compiler's glyph order."""
super().setupTable_post()
if "post" not in self.otf:
return
post = self.otf["post"]
post.formatType = 2.0
post.extraNames = []
post.mapping = {}
post.glyphOrder = self.glyphOrder
def setupOtherTables(self):
self.setupTable_glyf()
if self.ufo.info.openTypeGaspRangeRecords:
self.setupTable_gasp()
def setupTable_glyf(self):
"""Make the glyf table."""
if not {"glyf", "loca"}.issubset(self.tables):
return
self.otf["loca"] = newTable("loca")
self.otf["glyf"] = glyf = newTable("glyf")
glyf.glyphs = {}
glyf.glyphOrder = self.glyphOrder
hmtx = self.otf.get("hmtx")
ttGlyphs = self.getCompiledGlyphs()
for name in self.glyphOrder:
ttGlyph = ttGlyphs[name]
if ttGlyph.isComposite() and hmtx is not None and self.autoUseMyMetrics:
self.autoUseMyMetrics(ttGlyph, name, hmtx)
glyf[name] = ttGlyph
@staticmethod
def autoUseMyMetrics(ttGlyph, glyphName, hmtx):
"""Set the "USE_MY_METRICS" flag on the first component having the
same advance width as the composite glyph, no transform and no
horizontal shift (but allow it to shift vertically).
This forces the composite glyph to use the possibly hinted horizontal
metrics of the sub-glyph, instead of those from the "hmtx" table.
"""
width = hmtx[glyphName][0]
for component in ttGlyph.components:
try:
baseName, transform = component.getComponentInfo()
except AttributeError:
# component uses '{first,second}Pt' instead of 'x' and 'y'
continue
try:
baseMetrics = hmtx[baseName]
except KeyError:
continue # ignore missing components
else:
if baseMetrics[0] == width and transform[:-1] == (1, 0, 0, 1, 0):
component.flags |= USE_MY_METRICS
break
class StubGlyph:
"""
This object will be used to create missing glyphs
(specifically .notdef) in the provided UFO.
"""
def __init__(
self,
name,
width,
unitsPerEm,
ascender,
descender,
unicodes=None,
reverseContour=False,
):
self.name = name
self.width = width
self.unitsPerEm = unitsPerEm
self.ascender = ascender
self.descender = descender
self.unicodes = unicodes if unicodes is not None else []
self.components = []
self.anchors = []
if self.unicodes:
self.unicode = self.unicodes[0]
else:
self.unicode = None
if name == ".notdef":
self.draw = self._drawDefaultNotdef
self.reverseContour = reverseContour
def __len__(self):
if self.name == ".notdef":
return 1
return 0
@property
def height(self):
return self.ascender - self.descender
def draw(self, pen):
pass
def _drawDefaultNotdef(self, pen):
# Draw contour in PostScript direction (counter-clockwise) by default. Reverse
# for TrueType.
if self.reverseContour:
pen = ReverseContourPen(pen)
width = otRound(self.unitsPerEm * 0.5)
stroke = otRound(self.unitsPerEm * 0.05)
ascender = self.ascender
descender = self.descender
xMin = stroke
xMax = width - stroke
yMax = ascender
yMin = descender
pen.moveTo((xMin, yMin))
pen.lineTo((xMax, yMin))
pen.lineTo((xMax, yMax))
pen.lineTo((xMin, yMax))
pen.lineTo((xMin, yMin))
pen.closePath()
xMin += stroke
xMax -= stroke
yMax -= stroke
yMin += stroke
pen.moveTo((xMin, yMin))
pen.lineTo((xMin, yMax))
pen.lineTo((xMax, yMax))
pen.lineTo((xMax, yMin))
pen.lineTo((xMin, yMin))
pen.closePath()
def _get_controlPointBounds(self):
pen = ControlBoundsPen(None)
self.draw(pen)
return pen.bounds
controlPointBounds = property(_get_controlPointBounds)
| mit | 6,340,247,939,920,733,000 | 36.062207 | 88 | 0.600022 | false |
only4hj/fast-rcnn | lib/roi_data_layer/minibatch.py | 1 | 22641 | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Compute minibatch blobs for training a Fast R-CNN network."""
import numpy as np
import numpy.random as npr
import cv2
from fast_rcnn.config import cfg
from utils.blob import prep_im_for_blob, im_list_to_blob
from utils.model import last_conv_size
from roi_data_layer.roidb import prepare_one_roidb_rpn, prepare_one_roidb_frcnn
from roidb import clear_one_roidb
def get_minibatch(roidb, num_classes, bbox_means, bbox_stds, proposal_file):
"""Given a roidb, construct a minibatch sampled from it."""
num_images = len(roidb)
# Sample random scales to use for each image in this batch
random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES),
size=num_images)
assert(cfg.TRAIN.BATCH_SIZE % num_images == 0), \
'num_images ({}) must divide BATCH_SIZE ({})'. \
format(num_images, cfg.TRAIN.BATCH_SIZE)
rois_per_image = cfg.TRAIN.BATCH_SIZE / num_images
fg_rois_per_image = np.round(cfg.TRAIN.FG_FRACTION * rois_per_image)
# Get the input image blob, formatted for caffe
im_blob, im_scales, processed_ims = _get_image_blob(roidb, random_scale_inds)
if 'model_to_use' in roidb[0] and roidb[0]['model_to_use'] == 'rpn':
conv_h, scale_h = last_conv_size(im_blob.shape[2], cfg.MODEL_NAME)
conv_w, scale_w = last_conv_size(im_blob.shape[3], cfg.MODEL_NAME)
# Now, build the region of interest and label blobs
rois_blob = np.zeros((0, 5), dtype=np.float32)
labels_blob = np.zeros((0, 9, conv_h, conv_w), dtype=np.float32)
bbox_targets_blob = np.zeros((0, 36, conv_h, conv_w), dtype=np.float32)
bbox_loss_blob = np.zeros(bbox_targets_blob.shape, dtype=np.float32)
all_overlaps = []
for im_i in xrange(num_images):
if cfg.TRAIN.LAZY_PREPARING_ROIDB:
prepare_one_roidb_rpn(roidb[im_i],
processed_ims[im_i].shape[0],
processed_ims[im_i].shape[1],
im_scales[im_i])
# Normalize bbox_targets
if cfg.TRAIN.NORMALIZE_BBOX:
bbox_targets = roidb[im_i]['bbox_targets']
cls_inds = np.where(bbox_targets[:, 0] > 0)[0]
if cls_inds.size > 0:
bbox_targets[cls_inds, 1:] -= bbox_means[0, :]
bbox_targets[cls_inds, 1:] /= bbox_stds[0, :]
labels, overlaps, im_rois, bbox_targets, bbox_loss \
= _sample_rois_rpn(roidb[im_i], fg_rois_per_image, rois_per_image,
num_classes, conv_h, conv_w)
# Add to RoIs blob
            if im_rois is not None:
batch_ind = im_i * np.ones((im_rois.shape[0], 1))
rois_blob_this_image = np.hstack((batch_ind, im_rois))
rois_blob = np.vstack((rois_blob, rois_blob_this_image))
# Add to labels, bbox targets, and bbox loss blobs
labels_blob = np.vstack((labels_blob, labels))
bbox_targets_blob = np.vstack((bbox_targets_blob, bbox_targets))
bbox_loss_blob = np.vstack((bbox_loss_blob, bbox_loss))
# For debug visualizations
#_vis_minibatch_rpn(im_blob, conv_h, conv_w, rois_blob, labels_blob, roidb, bbox_targets_blob, bbox_loss_blob)
blobs = {'data': im_blob,
'labels': labels_blob}
else:
# Now, build the region of interest and label blobs
rois_blob = np.zeros((0, 5), dtype=np.float32)
labels_blob = np.zeros((0), dtype=np.float32)
bbox_targets_blob = np.zeros((0, 4 * num_classes), dtype=np.float32)
bbox_loss_blob = np.zeros(bbox_targets_blob.shape, dtype=np.float32)
all_overlaps = []
for im_i in xrange(num_images):
if cfg.TRAIN.LAZY_PREPARING_ROIDB:
prepare_one_roidb_frcnn(roidb[im_i], proposal_file, num_classes)
# Normalize bbox_targets
if cfg.TRAIN.NORMALIZE_BBOX:
bbox_targets = roidb[im_i]['bbox_targets']
for cls in xrange(1, num_classes):
cls_inds = np.where(bbox_targets[:, 0] == cls)[0]
bbox_targets[cls_inds, 1:] -= bbox_means[cls, :]
bbox_targets[cls_inds, 1:] /= bbox_stds[cls, :]
labels, overlaps, im_rois, bbox_targets, bbox_loss \
= _sample_rois(roidb[im_i], fg_rois_per_image, rois_per_image,
num_classes)
# Add to RoIs blob
rois = _project_im_rois(im_rois, im_scales[im_i])
batch_ind = im_i * np.ones((rois.shape[0], 1))
rois_blob_this_image = np.hstack((batch_ind, rois))
rois_blob = np.vstack((rois_blob, rois_blob_this_image))
# Add to labels, bbox targets, and bbox loss blobs
labels_blob = np.hstack((labels_blob, labels))
bbox_targets_blob = np.vstack((bbox_targets_blob, bbox_targets))
bbox_loss_blob = np.vstack((bbox_loss_blob, bbox_loss))
#all_overlaps = np.hstack((all_overlaps, overlaps))
# For debug visualizations
#_vis_minibatch(im_blob, rois_blob, labels_blob, all_overlaps)
blobs = {'data': im_blob,
'rois': rois_blob,
'labels': labels_blob}
if cfg.TRAIN.BBOX_REG:
blobs['bbox_targets'] = bbox_targets_blob
blobs['bbox_loss_weights'] = bbox_loss_blob
return blobs
def clear_minibatch(roidb):
num_images = len(roidb)
for im_i in xrange(num_images):
clear_one_roidb(roidb[im_i])
def _sample_rois(roidb, fg_rois_per_image, rois_per_image, num_classes):
"""Generate a random sample of RoIs comprising foreground and background
examples.
"""
# label = class RoI has max overlap with
labels = roidb['max_classes']
overlaps = roidb['max_overlaps']
rois = roidb['boxes']
# Select foreground RoIs as those with >= FG_THRESH overlap
fg_inds = np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0]
# Guard against the case when an image has fewer than fg_rois_per_image
# foreground RoIs
fg_rois_per_this_image = np.minimum(fg_rois_per_image, fg_inds.size)
# Sample foreground regions without replacement
if fg_inds.size > 0:
fg_inds = npr.choice(fg_inds, size=fg_rois_per_this_image,
replace=False)
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_inds = np.where((overlaps < cfg.TRAIN.BG_THRESH_HI) &
(overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]
# Compute number of background RoIs to take from this image (guarding
# against there being fewer than desired)
bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
bg_rois_per_this_image = np.minimum(bg_rois_per_this_image,
bg_inds.size)
# Sample foreground regions without replacement
if bg_inds.size > 0:
bg_inds = npr.choice(bg_inds, size=bg_rois_per_this_image,
replace=False)
# The indices that we're selecting (both fg and bg)
keep_inds = np.append(fg_inds, bg_inds)
# Select sampled values from various arrays:
labels = labels[keep_inds]
# Clamp labels for the background RoIs to 0
labels[fg_rois_per_this_image:] = 0
overlaps = overlaps[keep_inds]
rois = rois[keep_inds]
bbox_targets, bbox_loss_weights = \
_get_bbox_regression_labels(roidb['bbox_targets'][keep_inds, :],
num_classes)
return labels, overlaps, rois, bbox_targets, bbox_loss_weights
def get_img_rect(img_height, img_width, conv_height, conv_width, axis1, axis2, axis3):
anchors = np.array([[128*2, 128*1], [128*1, 128*1], [128*1, 128*2],
[256*2, 256*1], [256*1, 256*1], [256*1, 256*2],
[512*2, 512*1], [512*1, 512*1], [512*1, 512*2]])
scale_width = img_width / conv_width
scale_height = img_height / conv_height
img_center_x = img_width * axis3 / conv_width + scale_width / 2
img_center_y = img_height * axis2 / conv_height + scale_height / 2
anchor_size = anchors[axis1]
img_x1 = img_center_x - anchor_size[0] / 2
img_x2 = img_center_x + anchor_size[0] / 2
img_y1 = img_center_y - anchor_size[1] / 2
img_y2 = img_center_y + anchor_size[1] / 2
return [img_x1, img_y1, img_x2, img_y2]
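# Minimal usage sketch (sizes below are illustrative, not taken from the config):
# recover the image-space box of the 5th anchor centred on feature-map cell
# (row=10, col=20) of a 37x50 conv map computed from a 600x800 input image.
def _example_img_rect():
    return get_img_rect(600, 800, 37, 50, 4, 10, 20)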
def _sample_rois_rpn(roidb, fg_rois_per_image, rois_per_image, num_classes,
union_conv_height, union_conv_width):
"""Generate a random sample of RoIs comprising foreground and background
examples.
"""
# label = class RoI has max overlap with
labels = roidb['max_classes']
new_labels = np.zeros(labels.shape, dtype=np.int16)
new_labels.fill(-1)
bbox_target = roidb['bbox_targets']
new_bbox_target = np.zeros(bbox_target.shape, dtype=np.float32)
conv_width = roidb['conv_width']
conv_height = roidb['conv_height']
# Select foreground RoIs as those with >= FG_THRESH overlap
fg_inds = np.where(labels > 0)[0]
# Guard against the case when an image has fewer than fg_rois_per_image
# foreground RoIs
fg_rois_per_this_image = np.minimum(fg_rois_per_image, fg_inds.size)
# Sample foreground regions without replacement
if fg_inds.size > 0:
fg_inds = npr.choice(fg_inds, size=fg_rois_per_this_image,
replace=False)
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_inds = np.where(labels == 0)[0]
# Compute number of background RoIs to take from this image (guarding
# against there being fewer than desired)
bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
bg_rois_per_this_image = np.minimum(bg_rois_per_this_image,
bg_inds.size)
# Sample foreground regions without replacement
if bg_inds.size > 0:
bg_inds = npr.choice(bg_inds, size=bg_rois_per_this_image,
replace=False)
new_labels[fg_inds] = 1
new_labels[bg_inds] = 0
if 'rois' in roidb:
rois = roidb['rois'][fg_inds]
else:
rois = None
"""
print 'labels.shape %s' % labels.shape
print 'bbox_target.shape %s' % (bbox_target.shape, )
for fg_ind in fg_inds:
print 'label : %s ' % labels[fg_ind]
print 'bbox_target : %s ' % bbox_target[fg_ind]
axis1 = fg_ind / conv_height / conv_width
axis2 = fg_ind / conv_width % conv_height
axis3 = fg_ind % conv_width
im = cv2.imread(roidb['image'])
target_size = cfg.TRAIN.SCALES[0]
im, im_scale = prep_im_for_blob(im, 0, target_size,
cfg.TRAIN.MAX_SIZE,
cfg.TRAIN.MIN_SIZE)
img_height = im.shape[2]
img_width = im.shape[3]
proposal_rects = get_img_rect(img_height, img_width, conv_height, conv_width, axis1, axis2, axis3)
for proposal_rect in proposal_rects:
plt.imshow(im)
for ground_rect in ground_rects:
plt.gca().add_patch(
plt.Rectangle((ground_rect[0], ground_rect[1]), ground_rect[2] - ground_rect[0],
ground_rect[3] - ground_rect[1], fill=False,
edgecolor='b', linewidth=3)
)
plt.gca().add_patch(
plt.Rectangle((proposal_rect[0], proposal_rect[1]), proposal_rect[2] - proposal_rect[0],
proposal_rect[3] - proposal_rect[1], fill=False,
edgecolor='g', linewidth=3)
)
plt.gca().add_patch(
plt.Rectangle((pred_rect[0], pred_rect[1]), pred_rect[2] - pred_rect[0],
pred_rect[3] - pred_rect[1], fill=False,
edgecolor='r', linewidth=3)
)
plt.show(block=False)
raw_input("")
plt.close()
"""
new_bbox_target[fg_inds] = bbox_target[fg_inds]
new_bbox_target, bbox_loss_weights = \
_get_bbox_regression_labels_rpn(new_bbox_target,
num_classes, labels)
"""
print 'label no 1 : %s' % len(np.where(new_labels == 1)[0])
print 'new_bbox_target no 1 : %s' % len(np.where(new_bbox_target != 0)[0])
print 'bbox_loss_weights no 1 : %s' % len(np.where(bbox_loss_weights > 0)[0])
"""
new_labels = new_labels.reshape((1, 9, conv_height, conv_width))
new_bbox_target = new_bbox_target.reshape((1, 9, conv_height, conv_width, 4))
new_bbox_target = new_bbox_target.transpose(0, 1, 4, 2, 3)
new_bbox_target = new_bbox_target.reshape((1, 36, conv_height, conv_width))
bbox_loss_weights = bbox_loss_weights.reshape((1, 9, conv_height, conv_width, 4))
bbox_loss_weights = bbox_loss_weights.transpose(0, 1, 4, 2, 3)
bbox_loss_weights = bbox_loss_weights.reshape((1, 36, conv_height, conv_width))
output_labels = np.zeros((1, 9, union_conv_height, union_conv_width))
output_bbox_targets = np.zeros((1, 36, union_conv_height, union_conv_width))
output_bbox_loss_weights = np.zeros((1, 36, union_conv_height, union_conv_width))
output_labels.fill(-1)
output_labels[:, :, 0:conv_height, 0:conv_width] = new_labels
output_bbox_targets[:, :, 0:conv_height, 0:conv_width] = new_bbox_target
output_bbox_loss_weights[:, :, 0:conv_height, 0:conv_width] = bbox_loss_weights
"""
for fg_ind in fg_inds:
if fg_ind == 6510:
axis1 = fg_ind / conv_height / conv_width
axis2 = fg_ind / conv_width % conv_height
axis3 = fg_ind % conv_width
print ''
print 'conv_size : %s, %s' % (conv_height, conv_width)
print 'axis : %s, %s, %s' % (axis1, axis2, axis3)
print 'output_labels[%s] : %s' % (fg_ind, output_labels[0, axis1, axis2, axis3])
print 'output_bbox_targets[%s] : %s' % (fg_ind, output_bbox_targets[0, axis1*4:axis1*4+4, axis2, axis3])
print 'output_bbox_loss_weights[%s] : %s' % (fg_ind, output_bbox_loss_weights[0, axis1*4:axis1*4+4, axis2, axis3])
"""
"""
# Generate positive rois based on index for debugging
anchors = [[128*2, 128*1], [128*1, 128*1], [128*1, 128*2],
[256*2, 256*1], [256*1, 256*1], [256*1, 256*2],
[512*2, 512*1], [512*1, 512*1], [512*1, 512*2]]
conv_scale_width = roidb['conv_scale_width']
conv_scale_height = roidb['conv_scale_height']
rois = np.zeros((len(fg_inds), 4), dtype=np.int16)
for i, fg_ind in enumerate(fg_inds):
center_x = fg_ind % conv_width
center_y = (fg_ind - center_x) / conv_width % conv_height
anchor = fg_ind / conv_height / conv_width
anchor_w = anchors[anchor][0]
anchor_h = anchors[anchor][1]
x1 = center_x * conv_scale_width - anchor_w / 2
y1 = center_y * conv_scale_height - anchor_h / 2
x2 = x1 + anchor_w
y2 = y1 + anchor_h
rois[i, :] = x1, y1, x2, y2
"""
"""
pos_labels = np.where(new_labels == 1)
i = 0
for d0, d1, d2, d3 in zip(pos_labels[0], pos_labels[1], pos_labels[2], pos_labels[3]):
print '[%s] label : %s, bbox_target : %s, bbox_loss_weights : %s' % (i, new_labels[d0, d1, d2, d3],
new_bbox_target[d0, d1*4 : d1*4+4, d2, d3],
bbox_loss_weights[d0, d1*4 : d1*4+4, d2, d3])
i += 1
"""
"""
print 'label no 2 : %s' % len(np.where(output_labels == 1)[0])
print 'new_bbox_target no 2 : %s' % len(np.where(output_bbox_targets != 0)[0])
print 'bbox_loss_weights no 2 : %s' % len(np.where(output_bbox_loss_weights > 0)[0])
"""
return output_labels, None, rois, output_bbox_targets, output_bbox_loss_weights
def _get_image_blob(roidb, scale_inds):
"""Builds an input blob from the images in the roidb at the specified
scales.
"""
num_images = len(roidb)
processed_ims = []
im_scales = []
for i in xrange(num_images):
im = cv2.imread(roidb[i]['image'])
if roidb[i]['flipped']:
im = im[:, ::-1, :]
target_size = cfg.TRAIN.SCALES[scale_inds[i]]
im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,
cfg.TRAIN.MAX_SIZE,
cfg.TRAIN.MIN_SIZE)
im_scales.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, im_scales, processed_ims
def _project_im_rois(im_rois, im_scale_factor):
"""Project image RoIs into the rescaled training image."""
rois = im_rois * im_scale_factor
return rois
def _get_bbox_regression_labels(bbox_target_data, num_classes):
"""Bounding-box regression targets are stored in a compact form in the
roidb.
This function expands those targets into the 4-of-4*K representation used
by the network (i.e. only one class has non-zero targets). The loss weights
are similarly expanded.
Returns:
bbox_target_data (ndarray): N x 4K blob of regression targets
bbox_loss_weights (ndarray): N x 4K blob of loss weights
"""
clss = bbox_target_data[:, 0]
bbox_targets = np.zeros((clss.size, 4 * num_classes), dtype=np.float32)
bbox_loss_weights = np.zeros(bbox_targets.shape, dtype=np.float32)
inds = np.where(clss > 0)[0]
for ind in inds:
cls = clss[ind]
start = 4 * cls
end = start + 4
bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
bbox_loss_weights[ind, start:end] = [1., 1., 1., 1.]
return bbox_targets, bbox_loss_weights
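# Tiny worked example (illustrative, 3 classes): one RoI labelled class 2 with
# targets (dx, dy, dw, dh) expands to a 1 x 12 row where only columns 8..11 are
# non-zero, and the loss weights are 1 on those same four columns, mirroring how
# the training code calls this helper.
def _example_bbox_expansion():
    data = np.array([[2, 0.1, -0.2, 0.05, 0.3]], dtype=np.float32)
    return _get_bbox_regression_labels(data, 3)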
def _get_bbox_regression_labels_rpn(bbox_target_data, num_classes, labels):
"""Bounding-box regression targets are stored in a compact form in the
roidb.
This function expands those targets into the 4-of-4*K representation used
by the network (i.e. only one class has non-zero targets). The loss weights
are similarly expanded.
Returns:
bbox_target_data (ndarray): N x 4K blob of regression targets
bbox_loss_weights (ndarray): N x 4K blob of loss weights
"""
clss = bbox_target_data[:, 0]
bbox_targets = np.zeros((clss.size, 4), dtype=np.float32)
bbox_loss_weights = np.zeros(bbox_targets.shape, dtype=np.float32)
inds = np.where(clss > 0)[0]
#print ''
#print 'len(inds) : %s' % len(inds)
for ind in inds:
bbox_targets[ind, :] = bbox_target_data[ind, 1:]
bbox_loss_weights[ind, :] = [1., 1., 1., 1.]
#print 'bbox_targets[ind, :] : %s - %s ' % (bbox_target_data[ind, 0], bbox_targets[ind, :])
return bbox_targets, bbox_loss_weights
def _vis_minibatch(im_blob, rois_blob, labels_blob, overlaps):
"""Visualize a mini-batch for debugging."""
import matplotlib.pyplot as plt
for i in xrange(rois_blob.shape[0]):
rois = rois_blob[i, :]
im_ind = rois[0]
roi = rois[1:]
im = im_blob[im_ind, :, :, :].transpose((1, 2, 0)).copy()
im += cfg.PIXEL_MEANS
im = im[:, :, (2, 1, 0)]
im = im.astype(np.uint8)
cls = labels_blob[i]
plt.imshow(im)
print 'class: ', cls, ' overlap: ', overlaps[i]
plt.gca().add_patch(
plt.Rectangle((roi[0], roi[1]), roi[2] - roi[0],
roi[3] - roi[1], fill=False,
edgecolor='r', linewidth=3)
)
plt.show()
def _vis_minibatch_rpn(im_blob, conv_h, conv_w, rois_blob, labels_blob, roidb, bbox_targets_blob, bbox_loss_blob):
"""Visualize a mini-batch for debugging."""
import matplotlib.pyplot as plt
for i in xrange(len(roidb)):
# DJDJ
#if roidb[i]['image'].endswith('000009.jpg') == False:
# continue
print 'image : %s' % roidb[i]['image']
resized_gt_boxes = roidb[int(i)]['resized_gt_boxes']
im = im_blob[i, :, :, :].transpose((1, 2, 0)).copy()
im += cfg.PIXEL_MEANS
im = im[:, :, (2, 1, 0)]
im = im.astype(np.uint8)
for j in range(9):
for k in range(labels_blob.shape[2]):
for l in range(labels_blob.shape[3]):
label = labels_blob[i][j][k][l]
if label == -1:
continue
elif label == 1:
color = 'g'
elif label == 0:
#color = 'y'
continue
plt.imshow(im)
for resized_gt_box in resized_gt_boxes:
resized_gt_box = resized_gt_box.astype(np.int)
plt.gca().add_patch(
plt.Rectangle((resized_gt_box[0], resized_gt_box[1]), resized_gt_box[2] - resized_gt_box[0],
resized_gt_box[3] - resized_gt_box[1], fill=False,
edgecolor='b', linewidth=3)
)
proposal_rects = get_img_rect(im.shape[0], im.shape[1], conv_h, conv_w, j, k, l)
plt.gca().add_patch(
plt.Rectangle((proposal_rects[0], proposal_rects[1]), proposal_rects[2] - proposal_rects[0],
proposal_rects[3] - proposal_rects[1], fill=False,
edgecolor=color, linewidth=3)
)
plt.show(block=False)
raw_input("")
plt.close()
| mit | 2,063,657,557,521,492,200 | 40.619485 | 126 | 0.548209 | false |
Princeton-CDH/derrida-django | derrida/outwork/migrations/0001_initial.py | 1 | 1186 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-17 18:44
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import mezzanine.core.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
('people', '0002_allow_neg_years_bc'),
('pages', '0004_auto_20170411_0504'),
]
operations = [
migrations.CreateModel(
name='Outwork',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='pages.Page')),
('content', mezzanine.core.fields.RichTextField(verbose_name='Content')),
('orig_pubdate', models.DateField(blank=True, null=True, verbose_name='Original Publication Date')),
('author', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='people.Person')),
],
options={
'ordering': ('_order',),
},
bases=('pages.page', models.Model),
),
]
| apache-2.0 | 759,727,622,976,371,800 | 34.939394 | 185 | 0.604553 | false |
phalcon/readthedocs.org | readthedocs/vcs_support/backends/hg.py | 1 | 3235 | import csv
from StringIO import StringIO
from projects.exceptions import ProjectImportError
from vcs_support.base import BaseVCS, VCSVersion
class Backend(BaseVCS):
supports_tags = True
supports_branches = True
fallback_branch = 'default'
def update(self):
super(Backend, self).update()
retcode = self.run('hg', 'status')[0]
if retcode == 0:
return self.pull()
else:
return self.clone()
def pull(self):
pull_output = self.run('hg', 'pull')
if pull_output[0] != 0:
raise ProjectImportError(
("Failed to get code from '%s' (hg pull): %s"
% (self.repo_url, pull_output[0]))
)
        update_output = self.run('hg', 'update', '-C')
if update_output[0] != 0:
raise ProjectImportError(
("Failed to get code from '%s' (hg update): %s"
                 % (self.repo_url, update_output[0]))
)
return update_output
def clone(self):
output = self.run('hg', 'clone', self.repo_url, '.')
if output[0] != 0:
raise ProjectImportError(
("Failed to get code from '%s' (hg clone): %s"
% (self.repo_url, output[0]))
)
return output
@property
def branches(self):
retcode, stdout = self.run('hg', 'branches', '-q')[:2]
# error (or no tags found)
if retcode != 0:
return []
return self.parse_branches(stdout)
def parse_branches(self, data):
"""
stable
default
"""
names = [name.lstrip() for name in data.splitlines()]
return [VCSVersion(self, name, name) for name in names if name]
@property
def tags(self):
retcode, stdout = self.run('hg', 'tags')[:2]
# error (or no tags found)
if retcode != 0:
return []
return self.parse_tags(stdout)
def parse_tags(self, data):
"""
        Parses the output of `hg tags`, eg:
tip 278:c4b2d21db51a
0.2.2 152:6b0364d98837
0.2.1 117:a14b7b6ffa03
0.1 50:30c2c6b3a055
"""
# parse the lines into a list of tuples (commit-hash, tag ref name)
raw_tags = csv.reader(StringIO(data), delimiter=' ')
vcs_tags = []
for row in raw_tags:
row = filter(lambda f: f != '', row)
if row == []:
continue
name, commit = row
if name == 'tip':
continue
revision, commit_hash = commit.split(':')
vcs_tags.append(VCSVersion(self, commit_hash, name))
return vcs_tags
def checkout(self, identifier=None):
super(Backend, self).checkout()
if not identifier:
identifier = 'tip'
retcode = self.run('hg', 'status')[0]
if retcode == 0:
self.run('hg', 'pull')
return self.run('hg', 'update', '-C', identifier)
else:
self.clone()
return self.run('hg', 'update', '-C', identifier)
| mit | -1,967,239,795,073,363,200 | 30.407767 | 75 | 0.4983 | false |
Crystalnix/house-of-life-chromium | chrome/test/functional/chromeos_security.py | 1 | 1213 | #!/usr/bin/python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import pyauto_functional
import pyauto
class ChromeosSecurity(pyauto.PyUITest):
"""Security tests for chrome on ChromeOS.
Requires ChromeOS to be logged in.
"""
def ExtraChromeFlagsOnChromeOS(self):
"""Override default list of extra flags typicall used with automation.
See the default flags used with automation in pyauto.py.
Chrome flags for this test should be as close to reality as possible.
"""
return [
'--homepage=about:blank',
]
def testCannotViewLocalFiles(self):
"""Verify that local files cannot be accessed from the browser."""
urls_and_titles = {
'file:///': 'Index of /',
'file:///etc/': 'Index of /etc/',
self.GetFileURLForDataPath('title2.html'): 'Title Of Awesomeness',
}
for url, title in urls_and_titles.iteritems():
self.NavigateToURL(url)
self.assertNotEqual(title, self.GetActiveTabTitle(),
msg='Could access local file %s.' % url)
if __name__ == '__main__':
pyauto_functional.Main()
| bsd-3-clause | -6,535,854,804,755,249,000 | 27.880952 | 74 | 0.677659 | false |
sotlampr/personal-site | app/blog/views.py | 1 | 3882 | from datetime import datetime
from flask import abort, flash, request, render_template, redirect, url_for
from flask.ext.login import login_required, current_user
from sqlalchemy import desc
from . import blog
from .forms import PostForm, DeleteForm
from ..models import db, Post
@blog.route('/')
@blog.route('/index')
@blog.route('/page/<int:page>')
def index(page=1):
posts = (Post.query
.filter_by(is_published=True)
.order_by(desc(Post.timestamp))
.paginate(page, 3))
return render_template('blog/index.html', posts=posts)
@blog.route('/new', methods=['GET', 'POST'])
@login_required
def new():
form = PostForm(request.form)
if form.validate_on_submit():
post = Post(title=form.title.data, body=form.body.data,
timestamp=datetime.utcnow(),
user_id=int(current_user.get_id()),
is_published=form.is_published.data)
db.session.add(post)
db.session.commit()
if form.is_published.data:
flash("Post is now published.")
else:
flash("Post updated")
        return redirect(url_for('blog.show_post', slug=post.slug))
return render_template('blog/edit.html', form=form)
@blog.route('/<slug>/')
def show_post(slug):
post = Post.query.filter_by(slug=slug).first()
if post is not None:
if post.is_published:
return render_template('blog/post.html', post=post)
else:
if current_user.is_authenticated:
flash("This post is unpublished.")
return render_template('blog/post.html', post=post)
else:
abort(401)
else:
abort(404)
@blog.route('/<slug>/edit', methods=['GET', 'POST'])
@login_required
def edit_post(slug):
post = Post.query.filter_by(slug=slug).first()
if post is not None:
if request.method == 'GET':
form = PostForm(obj=post)
return render_template('blog/edit.html', form=form)
else:
form = PostForm(request.form)
post.title = form.title.data
post.body = form.body.data
post.is_published = form.is_published.data
post.user_id = current_user.get_id()
db.session.commit()
flash("Post updated.")
return redirect(url_for('blog.show_post', slug=post.slug))
else:
abort(404)
@blog.route('/<slug>/delete', methods=['GET', 'POST'])
@login_required
def delete_post(slug):
form = DeleteForm(request.form)
post = Post.query.filter_by(slug=slug).first()
if post is not None:
if form.validate_on_submit():
db.session.delete(post)
db.session.commit()
return redirect(url_for('blog.index'))
else:
return render_template("blog/delete.html", form=form)
else:
abort(404)
"""
@blog.route('/search/<search_terms>/')
def search():
return render_template('blog/search.html')
"""
@blog.route('/archive')
@blog.route('/archive/page/<int:page>')
def archive(page=1):
posts = (Post.query
.filter_by(is_published=True)
.order_by(desc(Post.timestamp))
.paginate(page, 10))
return render_template('blog/archive.html',
head_title="Blog Archive",
header_title="Archives",
posts=posts)
@blog.route('/unpublished')
@login_required
def show_unpublished():
posts = (Post.query
.filter_by(is_published=False)
.order_by(desc(Post.timestamp))
.paginate(1, 10))
return render_template('blog/archive.html',
head_title="Administration",
header_title="Unpublished posts",
posts=posts)
| mit | 6,123,719,996,085,297,000 | 29.328125 | 75 | 0.569809 | false |
b-jazz/stravamous | src/converter.py | 1 | 1697 | import logging
import os
import subprocess
def out_for_in(in_path):
return '{0}.gpx'.format(os.path.splitext(os.path.basename(in_path))[0])
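# Example of the name mapping (path is illustrative):
# out_for_in('/home/user/rides/2014-05-01.fit') -> '2014-05-01.gpx'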
class Converter(object):
def __init__(self, config, input_path):
self.config = config
self.logger = logging.getLogger()
self.input_file = input_path
self.output_file = os.path.expanduser(os.path.join(self.config.storage_root,
'converted_files',
out_for_in(self.input_file)))
self.logger.debug('Created converter object with input_file of {0} and output_file of {1}'.format(self.input_file, self.output_file))
self.gpx_text = None
def convert(self):
command = [self.config.gpsbabel_cmd,
'-i', 'garmin_fit',
'-f', self.input_file,
'-o', 'gpx,garminextensions',
'-F', self.output_file]
self.logger.debug('starting subprocess with command: {0}'.format(command))
try:
subprocess.call(command)
self.logger.debug('Happily done with the conversion. No exceptions.')
except Exception as exception:
self.logger.error('CONVERTER EXCEPTION: {0}'.format(exception))
# raise
else:
self.logger.debug('Opening {0} for read'.format(self.output_file))
try:
self.gpx_text = open(self.output_file, 'r').read()
except Exception as exception:
self.logger.error('open().read() exception: {0}, of type: {1}'.format(exception, exception.args))
raise
| lgpl-3.0 | -3,339,667,609,686,575,000 | 40.390244 | 141 | 0.551562 | false |
jesonyang001/qarepo | askbot/__init__.py | 1 | 3117 | """
:synopsis: the Django Q&A forum application
Functions in the askbot module perform various
basic actions on behalf of the forum application
"""
import os
import platform
VERSION = (0, 7, 51)
#keys are module names used by python imports,
#values - the package qualifier to use for pip
REQUIREMENTS = {
'akismet': 'akismet',
'avatar': 'django-avatar>=2.0',
'bs4': 'beautifulsoup4',
'coffin': 'Coffin>=0.3,<=0.3.8',
'compressor': 'django-compressor==1.2',
'django': 'django>=1.3.1,<1.6',
'django_countries': 'django-countries==1.0.5',
'djcelery': 'django-celery>=3.0.11',
'djkombu': 'django-kombu==0.9.4',
'followit': 'django-followit==0.0.7',
'html5lib': 'html5lib==0.90',
'jinja2': 'Jinja2',
'keyedcache': 'django-keyedcache',
'longerusername': 'longerusername',
'markdown2': 'markdown2',
'oauth2': 'oauth2',
'openid': 'python-openid',
'picklefield': 'django-picklefield==0.3.0',
'jwt': 'pyjwt',
'pystache': 'pystache==0.3.1',
'pytz': 'pytz==2013b',
'recaptcha_works': 'django-recaptcha-works',
'robots': 'django-robots',
'sanction': 'sanction==0.3.1',
'south': 'South>=0.7.1',
'threaded_multihost': 'django-threaded-multihost',
'tinymce': 'django-tinymce==1.5.1b2',
'unidecode': 'unidecode',
#'stopforumspam': 'stopforumspam'
}
if platform.system() != 'Windows':
REQUIREMENTS['lamson'] = 'Lamson'
#necessary for interoperability of django and coffin
try:
from askbot import patches
from askbot.deployment.assertions import assert_package_compatibility
assert_package_compatibility()
patches.patch_django()
patches.patch_coffin() # must go after django
except ImportError:
pass
def get_install_directory():
"""returns path to directory
where code of the askbot django application
is installed
"""
return os.path.dirname(__file__)
def get_path_to(relative_path):
"""returns absolute path to a file
relative to ``askbot`` directory
``relative_path`` must use only forward slashes
and must not start with a slash
"""
root_dir = get_install_directory()
    assert(relative_path[0] != '/')
path_bits = relative_path.split('/')
return os.path.join(root_dir, *path_bits)
def get_version():
"""returns version of the askbot app
this version is meaningful for pypi only
"""
return '.'.join([str(subversion) for subversion in VERSION])
def get_database_engine_name():
"""returns name of the database engine,
independently of the version of django
- for django >=1.2 looks into ``settings.DATABASES['default']``,
(i.e. assumes that askbot uses database named 'default')
, and for django 1.1 and below returns settings.DATABASE_ENGINE
"""
import django
from django.conf import settings as django_settings
major_version = django.VERSION[0]
minor_version = django.VERSION[1]
if major_version == 1:
if minor_version > 1:
return django_settings.DATABASES['default']['ENGINE']
else:
return django_settings.DATABASE_ENGINE
| gpl-3.0 | -2,211,768,841,389,933,000 | 29.558824 | 73 | 0.655759 | false |
OmeGak/indico-plugins | vc_vidyo/indico_vc_vidyo/api/cache.py | 1 | 1207 | # This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from suds.cache import Cache
from MaKaC.common.cache import GenericCache
DEFAULT_CACHE_TTL = 24 * 3600
class SudsCache(Cache):
_instance = None
def __init__(self, duration=DEFAULT_CACHE_TTL):
self._cache = GenericCache("SudsCache")
self._duration = duration
def get(self, key):
        return self._cache.get(key)
def put(self, key, val):
self._cache.set(key, val, self._duration)
def purge(self, key):
self._cache.delete(key)
| gpl-3.0 | 5,846,497,963,158,314,000 | 31.621622 | 78 | 0.710853 | false |
AndrewGoldstein/grasshopper | tests/test_models.py | 1 | 1954 | # -*- coding: utf-8 -*-
"""Model unit tests."""
import datetime as dt
import pytest
from grasshopper.user.models import Role, User
from .factories import UserFactory
@pytest.mark.usefixtures('db')
class TestUser:
"""User tests."""
def test_get_by_id(self):
"""Get user by ID."""
user = User('foo', '[email protected]')
user.save()
retrieved = User.get_by_id(user.id)
assert retrieved == user
def test_created_at_defaults_to_datetime(self):
"""Test creation date."""
user = User(username='foo', email='[email protected]')
user.save()
assert bool(user.created_at)
assert isinstance(user.created_at, dt.datetime)
def test_password_is_nullable(self):
"""Test null password."""
user = User(username='foo', email='[email protected]')
user.save()
assert user.password is None
def test_factory(self, db):
"""Test user factory."""
user = UserFactory(password='myprecious')
db.session.commit()
assert bool(user.username)
assert bool(user.email)
assert bool(user.created_at)
assert user.is_admin is False
assert user.active is True
assert user.check_password('myprecious')
def test_check_password(self):
"""Check password."""
user = User.create(username='foo', email='[email protected]',
password='foobarbaz123')
assert user.check_password('foobarbaz123') is True
assert user.check_password('barfoobaz') is False
def test_full_name(self):
"""User full name."""
user = UserFactory(first_name='Foo', last_name='Bar')
assert user.full_name == 'Foo Bar'
def test_roles(self):
"""Add a role to a user."""
role = Role(name='admin')
role.save()
user = UserFactory()
user.roles.append(role)
user.save()
assert role in user.roles
| bsd-3-clause | 1,862,980,038,182,105,900 | 28.164179 | 63 | 0.589048 | false |
roboime/pyroboime | roboime/core/skills/gotolooking.py | 1 | 1250 | #
# Copyright (C) 2013-2015 RoboIME
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
from .goto import Goto
class GotoLooking(Goto):
def __init__(self, robot, lookpoint=None, **kwargs):
"""
lookpoint: Where you want it to look, what were you expecting?
"""
super(GotoLooking, self).__init__(robot, **kwargs)
self._lookpoint = lookpoint
@property
def lookpoint(self):
if callable(self._lookpoint):
return self._lookpoint()
else:
return self._lookpoint
@lookpoint.setter
def lookpoint(self, value):
self._lookpoint = value
def _step(self):
#print self.lookpoint
self.angle = self.robot.angle_to_point(self.lookpoint)
super(GotoLooking, self)._step()
| agpl-3.0 | -3,651,936,077,924,898,000 | 31.051282 | 70 | 0.6704 | false |
rodluger/everest | docs/mcmc.py | 1 | 2721 | """MCMC example for transit fitting."""
import matplotlib.pyplot as pl
from everest import Everest, TransitModel
import numpy as np
import emcee
from tqdm import tqdm
from corner import corner
def lnprior(x):
"""Return the log prior given parameter vector `x`."""
per, t0, b = x
if b < -1 or b > 1:
return -np.inf
elif per < 7 or per > 10:
return -np.inf
elif t0 < 1978 or t0 > 1979:
return -np.inf
else:
return 0.
def lnlike(x, star):
"""Return the log likelihood given parameter vector `x`."""
ll = lnprior(x)
if np.isinf(ll):
return ll, (np.nan, np.nan)
per, t0, b = x
model = TransitModel('b', per=per, t0=t0, b=b, rhos=10.)(star.time)
like, d, vard = star.lnlike(model, full_output=True)
ll += like
return ll, (d,)
# Initialize the everest model
star = Everest(201635569)
# Set up the MCMC sampler
params = ['Period (days)', r't$_0$ (BJD - 2456811)', 'Impact parameter']
blobs = ['Depth (%)']
nsteps = 1000
nburn = 300
nwalk = 10
ndim = len(params)
nblobs = len(blobs)
sampler = emcee.EnsembleSampler(nwalk, ndim, lnlike, args=[star])
x0 = [[8.368 + 0.01 * np.random.randn(),
1978.4513 + 0.01 * np.random.randn(),
0. + 0.1 * np.random.randn()] for k in range(nwalk)]
blobs0 = [[0.] for k in range(nwalk)]
# Run!
for i in tqdm(sampler.sample(x0, iterations=nsteps, blobs0=blobs0),
total=nsteps):
pass
# Add the blobs to the chain for plotting
chain = np.concatenate((sampler.chain,
np.array(sampler.blobs).swapaxes(0, 1)), axis=2)
# Re-scale the transit time for prettier axes labels
chain[:, :, 1] -= 1978.
# Take the absolute value of the impact parameter for plotting
chain[:, :, 2] = np.abs(chain[:, :, 2])
# Re-scale the transit depth as a percentage
chain[:, :, 3] *= 100.
# Plot the chains
fig1, ax = pl.subplots(ndim + nblobs, figsize=(6, 7))
fig1.suptitle("K2-14b", fontsize=16, fontweight='bold')
ax[-1].set_xlabel("Iteration", fontsize=14)
for n in range(ndim + nblobs):
for k in range(nwalk):
ax[n].plot(chain[k, :, n], alpha=0.3, lw=1)
ax[n].set_ylabel((params + blobs)[n], fontsize=9)
ax[n].margins(0, None)
ax[n].axvline(nburn, color='b', alpha=0.5, lw=1, ls='--')
fig1.savefig("k2-14b_chains.png", bbox_inches='tight')
# Plot the posterior distributions
samples = chain[:, nburn:, :].reshape(-1, ndim + nblobs)
fig2 = corner(samples, labels=params + blobs)
fig2.suptitle("K2-14b", fontsize=16, fontweight='bold')
fig2.set_size_inches(6, 6)
for ax in fig2.axes:
for tick in ax.get_xticklabels() + ax.get_yticklabels():
tick.set_fontsize(7)
fig2.savefig("k2-14b_corner.png", bbox_inches='tight')
| mit | 202,969,213,362,555,870 | 28.901099 | 72 | 0.632488 | false |
cxcsds/ciao-contrib | crates_contrib/images.py | 1 | 4630 | #
# Copyright (C) 2012, 2015, 2016, 2019
# Smithsonian Astrophysical Observatory
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Image-specific Crates routines.
At present there is only one routine - imextent.
"""
from pytransform import LINEAR2DTransform
__all__ = ('imextent', )
def imextent(img, xmin, xmax, ymin, ymax, limits='center'):
"""Create a linear transform for the image axes.
Returns a 2D linear transform object that represents the
mapping from "pixel" units (e.g. logical values) to
a linearly scaled system (offset and scale change, no
rotation). One use of this is to mimic the extent
argument from matplotlib's imshow command, as discussed
in the examples below.
Parameters
----------
img : 2D NumPy array
xmin, xmax, ymin, ymax : float
The coordinates of the lower-left and upper-right
corners of the image in the transformed (non-logical)
system.
limits : {'center', 'edge'}
Do the coordinates (xmin, ..., ymax) refer to the
center of the pixels, or their edges. In FITS convention,
the bottom-left pixel is centered on 1,1 and the top-right
pixel is nx,ny (for a nx by ny grid). With limits='center'
xmin,xmax refers to the center of the lower-left pixel
(i.e. 1,1 in FITS terminology) whereas with limits='edge'
it refers to the bottom-left corner (0.5,0.5 in FITS).
Returns
-------
tr : pytransform.LINEAR2DTransform
The transform object containing the coordinate mapping.
Notes
-----
The logical coordinate system follows the FITS standard, so the
first pixel is (1,1) and not (0,0), and the X axis values are
given first.
Examples
--------
The following example creates a 40 pixel wide by 20 pixel high
image, zi, where the X axis goes from 40 to 60 and the Y
axis 10 to 20. The imextent call creates a transform object.
>>> yi, xi = np.mgrid[10:20:20j, 40:60:40j]
>>> zi = 100.0 / np.sqrt((xi - 45.62) ** 2 + (yi - 14.7) ** 2)
>>> tr = imextent(zi, 40, 60, 10, 20)
The transform object can be used to convert between logical
coordinates (where 1,1 refers to the center of the lower-left
pixel) and the data coordinates:
>>> print(tr.apply([[1,1], [40,20]]))
[[40 10]
[60 20]]
and the invert method goes from data to logical coordinates:
>>> print(tr.invert([[45.0, 15.0]]))
[[ 10.75 10.5 ]]
The following examples use a 4 pixel by 3 pixel image:
>>> img = np.arange(0, 12).reshape(3, 4)
The default value for the limits argument is 'center', which
means that the given coordinates - in this case 10,-10 and
13,-6 - refer to the center of the bottom-left and top-right
pixels:
>>> tr_cen = imextent(img, 10, 13, -10, -6, limits='center')
The alternative is limits='edge', where 10,-10 refers to the
bottom-left corner of the image and 13,-6 refers to the
top-right corner:
>>> tr_edge = imextent(img, 10, 13, -10, -6, limits='edge')
>>> print(tr_cen.apply([[1.0, 1.0]]))
[[ 10. -10.]]
>>> print(tr_edge.apply([[1.0, 1.0]]))
[[ 10.375 -9.33333333]]
"""
try:
(ny, nx) = img.shape
except AttributeError:
raise ValueError("First argument has no shape attribute.")
dx = (xmax - xmin) * 1.0
dy = (ymax - ymin) * 1.0
if limits == 'center':
dx /= (nx - 1.0)
dy /= (ny - 1.0)
x0 = xmin - dx
y0 = ymin - dy
elif limits == 'edge':
dx /= nx
dy /= ny
x0 = xmin - dx / 2.0
y0 = ymin - dy / 2.0
else:
raise ValueError("limits must be 'center' or 'edge', not '{}'".format(limits))
tr = LINEAR2DTransform()
tr.get_parameter('ROTATION').set_value(0.0)
tr.get_parameter('SCALE').set_value([dx, dy])
tr.get_parameter('OFFSET').set_value([x0, y0])
return tr
| gpl-3.0 | 6,630,351,362,508,218,000 | 30.496599 | 86 | 0.630886 | false |
marinho/django-plus | djangoplus/test_utils.py | 1 | 3702 | """Utilities for test modules"""
import os, unittest, doctest
from django.core.serializers import deserialize
from django.db.models import get_apps
from django.test.simple import get_tests
from django.core import management
try:
set
except:
from sets import Set as set
def load_fixture(path, file_type='json'):
"""Load a fixture file"""
management.call_command('loaddata', path, verbosity=0)
#fp = file(path)
#cont = fp.read()
#fp.close()
#
#for obj in deserialize(file_type, cont):
# obj.save()
def model_has_fields(model_class, fields):
"""Checks if a model class has all fields in fields list and returns a
list of fields that aren't in one of them.
This method returns an empty list ( [] ) when everything is ok"""
fields = set(fields)
model_fields = set(
[f.name for f in model_class._meta.fields]+\
[f.name for f in model_class._meta.many_to_many]
)
return list(fields - model_fields)
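# Illustrative use (model and field names are hypothetical):
# missing = model_has_fields(Article, ['title', 'body', 'published_on'])
# an empty list means the model defines every requested field.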
def assert_model_has_fields(model_class, fields):
if model_has_fields(model_class, fields):
return fields
def is_model_class_fk(model_class_from, field, model_class_to):
"""Returns True if field is ForeignKey to model class informed"""
return issubclass(
model_class_from._meta.get_field_by_name(field)[0].rel.to,
model_class_to,
)
def is_field_type(model_class_from, field, field_type, **kwargs):
"""Checks if a field of a model class if of the type informed.
If field_type value is a class, it compares just the class of field,
if field_type is an instance of a field type class, it compares the
max_length, max_digits and decimal_places, blank and null"""
field = model_class_from._meta.get_field_by_name(field)[0]
if field.__class__ != field_type:
return False
for k,v in kwargs.items():
if k == 'to':
if v != field.rel.to:
raise Exception('%s: %s'%(k, unicode(field.rel.to)))
elif v != getattr(field, k, None):
raise Exception('%s: %s'%(k, unicode(getattr(field, k, None))))
return True
def is_model_pk(model_class, field):
"""Checks if a field is the primary key of the model class"""
return model_class._meta.pk.name == field
def url_status_code(url, status_code=200, content=None, client=None, return_response=False):
"""Checks if the informed URL returns the wanted status_code"""
if not client:
from django.test.client import Client
client = Client()
resp = client.get(url)
if return_response:
return resp
ret = True
    if status_code:
        ret = ret and status_code == resp.status_code
    if content:
        ret = ret and content == resp.content
    return ret
def assert_equal(arg1, arg2):
"""Returns the arguments if any of them is different to the others, otherelse, returns empty."""
if arg1 != arg2:
print arg1
print '<>'
print arg2
def assert_equal_numbers(arg1, arg2):
"""Does the same of assert_equal but converts both to float to ensure they are in the same
value type - as a number."""
assert_equal(float(arg1), float(arg2))
def assert_between(arg1, arg2, arg3):
"""Makes assertation, printing the values if the first is not greater or equal the second
one and lower or equal to the third onde."""
if arg1 < arg2 or arg2 > arg3:
print '%s is not between %s and %s'%(arg1, arg2, arg3)
def assert_true(arg1):
if not bool(arg1):
print '%s is not a True value'%arg1
def assert_false(arg1):
    if bool(arg1):
print '%s is not a False value'%arg1
| lgpl-3.0 | 1,829,607,644,906,898,000 | 30.641026 | 100 | 0.641545 | false |
nburn42/tensorflow | tensorflow/python/keras/optimizers.py | 1 | 29041 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Built-in optimizer classes.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import six
from six.moves import zip # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes as dtypes_module
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import distribute as distribute_lib
from tensorflow.python.training import optimizer as tf_optimizer_module
from tensorflow.python.training import training_util
from tensorflow.python.util.tf_export import tf_export
def clip_norm(g, c, n):
"""Clip a tensor by norm.
Arguments:
g: gradient tensor to clip.
c: clipping threshold.
n: norm of gradient tensor.
Returns:
Clipped gradient tensor.
"""
if c > 0:
condition = n >= c
then_expression = lambda: math_ops.scalar_mul(c / n, g)
else_expression = lambda: g
# saving the shape to avoid converting sparse tensor to dense
if isinstance(g, ops.Tensor):
g_shape = copy.copy(g.get_shape())
elif isinstance(g, ops.IndexedSlices):
g_shape = copy.copy(g.dense_shape)
if condition.dtype != dtypes_module.bool:
condition = math_ops.cast(condition, 'bool')
g = control_flow_ops.cond(condition, then_expression, else_expression)
if isinstance(g, ops.Tensor):
g.set_shape(g_shape)
elif isinstance(g, ops.IndexedSlices):
g._dense_shape = g_shape # pylint: disable=protected-access
return g
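# Sketch of how clip_norm is typically driven (this mirrors the logic in
# Optimizer.get_gradients below; the threshold value is illustrative): compute
# the global L2 norm of all gradients once, then rescale each gradient only when
# that norm exceeds the threshold.
def _example_clip_by_global_norm(grads, threshold=1.0):
  norm = K.sqrt(
      sum([math_ops.reduce_sum(math_ops.square(g)) for g in grads]))
  return [clip_norm(g, threshold, norm) for g in grads]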
@tf_export('keras.optimizers.Optimizer')
class Optimizer(object):
"""Abstract optimizer base class.
Note: this is the parent class of all optimizers, not an actual optimizer
that can be used for training models.
All Keras optimizers support the following keyword arguments:
clipnorm: float >= 0. Gradients will be clipped
when their L2 norm exceeds this value.
clipvalue: float >= 0. Gradients will be clipped
when their absolute value exceeds this value.
"""
def __init__(self, **kwargs):
allowed_kwargs = {'clipnorm', 'clipvalue'}
for k in kwargs:
if k not in allowed_kwargs:
raise TypeError('Unexpected keyword argument '
'passed to optimizer: ' + str(k))
self.__dict__.update(kwargs)
self.updates = []
self.weights = []
def get_updates(self, loss, params):
raise NotImplementedError
def get_gradients(self, loss, params):
"""Returns gradients of `loss` with respect to `params`.
Arguments:
loss: Loss tensor.
params: List of variables.
Returns:
List of gradient tensors.
Raises:
ValueError: In case any gradient cannot be computed (e.g. if gradient
function not implemented).
"""
grads = K.gradients(loss, params)
if None in grads:
raise ValueError('An operation has `None` for gradient. '
'Please make sure that all of your ops have a '
'gradient defined (i.e. are differentiable). '
'Common ops without gradient: '
'K.argmax, K.round, K.eval.')
if hasattr(self, 'clipnorm') and self.clipnorm > 0:
norm = K.sqrt(
sum([math_ops.reduce_sum(math_ops.square(g)) for g in grads]))
grads = [clip_norm(g, self.clipnorm, norm) for g in grads]
if hasattr(self, 'clipvalue') and self.clipvalue > 0:
grads = [K.clip(g, -self.clipvalue, self.clipvalue) for g in grads]
return grads
def set_weights(self, weights):
"""Sets the weights of the optimizer, from Numpy arrays.
Should only be called after computing the gradients
(otherwise the optimizer has no weights).
Arguments:
weights: a list of Numpy arrays. The number
of arrays and their shape must match
number of the dimensions of the weights
of the optimizer (i.e. it should match the
output of `get_weights`).
Raises:
ValueError: in case of incompatible weight shapes.
"""
params = self.weights
if len(params) != len(weights):
raise ValueError(
'Length of the specified weight list (' + str(len(weights)) +
') does not match the number of weights '
'of the optimizer (' + str(len(params)) + ')')
weight_value_tuples = []
param_values = K.batch_get_value(params)
for pv, p, w in zip(param_values, params, weights):
if pv.shape != w.shape:
raise ValueError(
'Optimizer weight shape ' + str(pv.shape) + ' not compatible with '
'provided weight shape ' + str(w.shape))
weight_value_tuples.append((p, w))
K.batch_set_value(weight_value_tuples)
def get_weights(self):
"""Returns the current value of the weights of the optimizer.
Returns:
A list of numpy arrays.
"""
return K.batch_get_value(self.weights)
def get_config(self):
config = {}
if hasattr(self, 'clipnorm'):
config['clipnorm'] = self.clipnorm
if hasattr(self, 'clipvalue'):
config['clipvalue'] = self.clipvalue
return config
@classmethod
def from_config(cls, config):
return cls(**config)
@tf_export('keras.optimizers.SGD')
class SGD(Optimizer):
"""Stochastic gradient descent optimizer.
Includes support for momentum,
learning rate decay, and Nesterov momentum.
Arguments:
lr: float >= 0. Learning rate.
momentum: float >= 0. Parameter that accelerates SGD
in the relevant direction and dampens oscillations.
decay: float >= 0. Learning rate decay over each update.
nesterov: boolean. Whether to apply Nesterov momentum.
"""
def __init__(self, lr=0.01, momentum=0., decay=0., nesterov=False, **kwargs):
super(SGD, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.iterations = K.variable(0, dtype='int64', name='iterations')
self.lr = K.variable(lr, name='lr')
self.momentum = K.variable(momentum, name='momentum')
self.decay = K.variable(decay, name='decay')
self.initial_decay = decay
self.nesterov = nesterov
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
self.updates = [state_ops.assign_add(self.iterations, 1)]
lr = self.lr
if self.initial_decay > 0:
lr = lr * ( # pylint: disable=g-no-augmented-assignment
1. / (1. + self.decay * math_ops.cast(self.iterations,
K.dtype(self.decay))))
# momentum
shapes = [K.int_shape(p) for p in params]
moments = [K.zeros(shape) for shape in shapes]
self.weights = [self.iterations] + moments
for p, g, m in zip(params, grads, moments):
v = self.momentum * m - lr * g # velocity
self.updates.append(state_ops.assign(m, v))
if self.nesterov:
new_p = p + self.momentum * v - lr * g
else:
new_p = p + v
# Apply constraints.
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(state_ops.assign(p, new_p))
return self.updates
def get_config(self):
config = {
'lr': float(K.get_value(self.lr)),
'momentum': float(K.get_value(self.momentum)),
'decay': float(K.get_value(self.decay)),
'nesterov': self.nesterov
}
base_config = super(SGD, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
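# Plain-Python sketch of the update rule implemented by SGD.get_updates above,
# written for a single scalar parameter (values and names are illustrative):
#   v <- momentum * v - lr * g
#   p <- p + v                              (classical momentum)
#   p <- p + momentum * v - lr * g          (Nesterov)
def _example_sgd_step(p, g, v, lr=0.01, momentum=0.9, nesterov=False):
  v = momentum * v - lr * g
  if nesterov:
    new_p = p + momentum * v - lr * g
  else:
    new_p = p + v
  return new_p, v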
@tf_export('keras.optimizers.RMSprop')
class RMSprop(Optimizer):
"""RMSProp optimizer.
It is recommended to leave the parameters of this optimizer
at their default values
(except the learning rate, which can be freely tuned).
This optimizer is usually a good choice for recurrent
neural networks.
Arguments:
lr: float >= 0. Learning rate.
rho: float >= 0.
epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
decay: float >= 0. Learning rate decay over each update.
"""
def __init__(self, lr=0.001, rho=0.9, epsilon=None, decay=0., **kwargs):
super(RMSprop, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.lr = K.variable(lr, name='lr')
self.rho = K.variable(rho, name='rho')
self.decay = K.variable(decay, name='decay')
self.iterations = K.variable(0, dtype='int64', name='iterations')
if epsilon is None:
epsilon = K.epsilon()
self.epsilon = epsilon
self.initial_decay = decay
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
accumulators = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
self.weights = accumulators
self.updates = [state_ops.assign_add(self.iterations, 1)]
lr = self.lr
if self.initial_decay > 0:
lr = lr * ( # pylint: disable=g-no-augmented-assignment
1. / (1. + self.decay * math_ops.cast(self.iterations,
K.dtype(self.decay))))
for p, g, a in zip(params, grads, accumulators):
# update accumulator
new_a = self.rho * a + (1. - self.rho) * math_ops.square(g)
self.updates.append(state_ops.assign(a, new_a))
new_p = p - lr * g / (K.sqrt(new_a) + self.epsilon)
# Apply constraints.
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(state_ops.assign(p, new_p))
return self.updates
def get_config(self):
config = {
'lr': float(K.get_value(self.lr)),
'rho': float(K.get_value(self.rho)),
'decay': float(K.get_value(self.decay)),
'epsilon': self.epsilon
}
base_config = super(RMSprop, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
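# A minimal scalar sketch of one RMSprop step as implemented in get_updates
# above; `epsilon` here merely stands in for K.epsilon().
def _rmsprop_step_sketch(p, g, a, lr=0.001, rho=0.9, epsilon=1e-7):
  new_a = rho * a + (1. - rho) * g ** 2  # running average of squared grads
  new_p = p - lr * g / (new_a ** 0.5 + epsilon)
  return new_p, new_a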
@tf_export('keras.optimizers.Adagrad')
class Adagrad(Optimizer):
"""Adagrad optimizer.
It is recommended to leave the parameters of this optimizer
at their default values.
Arguments:
lr: float >= 0. Learning rate.
epsilon: float >= 0. If `None`, defaults to `K.epsilon()`.
decay: float >= 0. Learning rate decay over each update.
"""
def __init__(self, lr=0.01, epsilon=None, decay=0., **kwargs):
super(Adagrad, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.lr = K.variable(lr, name='lr')
self.decay = K.variable(decay, name='decay')
self.iterations = K.variable(0, dtype='int64', name='iterations')
if epsilon is None:
epsilon = K.epsilon()
self.epsilon = epsilon
self.initial_decay = decay
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
shapes = [K.int_shape(p) for p in params]
accumulators = [K.zeros(shape) for shape in shapes]
self.weights = accumulators
self.updates = [state_ops.assign_add(self.iterations, 1)]
lr = self.lr
if self.initial_decay > 0:
lr = lr * ( # pylint: disable=g-no-augmented-assignment
1. / (1. + self.decay * math_ops.cast(self.iterations,
K.dtype(self.decay))))
for p, g, a in zip(params, grads, accumulators):
new_a = a + math_ops.square(g) # update accumulator
self.updates.append(state_ops.assign(a, new_a))
new_p = p - lr * g / (K.sqrt(new_a) + self.epsilon)
# Apply constraints.
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(state_ops.assign(p, new_p))
return self.updates
def get_config(self):
config = {
'lr': float(K.get_value(self.lr)),
'decay': float(K.get_value(self.decay)),
'epsilon': self.epsilon
}
base_config = super(Adagrad, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
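# A minimal scalar sketch of one Adagrad step as implemented above: the
# squared-gradient accumulator only grows, so the effective step size shrinks
# over time; `epsilon` stands in for K.epsilon().
def _adagrad_step_sketch(p, g, a, lr=0.01, epsilon=1e-7):
  new_a = a + g ** 2
  new_p = p - lr * g / (new_a ** 0.5 + epsilon)
  return new_p, new_a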
@tf_export('keras.optimizers.Adadelta')
class Adadelta(Optimizer):
"""Adadelta optimizer.
It is recommended to leave the parameters of this optimizer
at their default values.
Arguments:
lr: float >= 0. Learning rate.
It is recommended to leave it at the default value.
rho: float >= 0.
epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
decay: float >= 0. Learning rate decay over each update.
"""
def __init__(self, lr=1.0, rho=0.95, epsilon=None, decay=0., **kwargs):
super(Adadelta, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.lr = K.variable(lr, name='lr')
self.decay = K.variable(decay, name='decay')
self.iterations = K.variable(0, dtype='int64', name='iterations')
if epsilon is None:
epsilon = K.epsilon()
self.rho = rho
self.epsilon = epsilon
self.initial_decay = decay
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
shapes = [K.int_shape(p) for p in params]
accumulators = [K.zeros(shape) for shape in shapes]
delta_accumulators = [K.zeros(shape) for shape in shapes]
self.weights = accumulators + delta_accumulators
self.updates = [state_ops.assign_add(self.iterations, 1)]
lr = self.lr
if self.initial_decay > 0:
lr = lr * ( # pylint: disable=g-no-augmented-assignment
1. / (1. + self.decay * math_ops.cast(self.iterations,
K.dtype(self.decay))))
for p, g, a, d_a in zip(params, grads, accumulators, delta_accumulators):
# update accumulator
new_a = self.rho * a + (1. - self.rho) * math_ops.square(g)
self.updates.append(state_ops.assign(a, new_a))
# use the new accumulator and the *old* delta_accumulator
update = g * K.sqrt(d_a + self.epsilon) / K.sqrt(new_a + self.epsilon)
new_p = p - lr * update
# Apply constraints.
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(state_ops.assign(p, new_p))
# update delta_accumulator
new_d_a = self.rho * d_a + (1 - self.rho) * math_ops.square(update)
self.updates.append(state_ops.assign(d_a, new_d_a))
return self.updates
def get_config(self):
config = {
'lr': float(K.get_value(self.lr)),
'rho': self.rho,
'decay': float(K.get_value(self.decay)),
'epsilon': self.epsilon
}
base_config = super(Adadelta, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
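# A minimal scalar sketch of one Adadelta step as implemented above; note that
# the numerator uses the *old* accumulator of squared updates (d_a), exactly
# as in the loop, and `epsilon` stands in for K.epsilon().
def _adadelta_step_sketch(p, g, a, d_a, lr=1.0, rho=0.95, epsilon=1e-7):
  new_a = rho * a + (1. - rho) * g ** 2
  update = g * ((d_a + epsilon) ** 0.5) / ((new_a + epsilon) ** 0.5)
  new_p = p - lr * update
  new_d_a = rho * d_a + (1. - rho) * update ** 2
  return new_p, new_a, new_d_a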
@tf_export('keras.optimizers.Adam')
class Adam(Optimizer):
"""Adam optimizer.
Default parameters follow those provided in the original paper.
Arguments:
lr: float >= 0. Learning rate.
beta_1: float, 0 < beta < 1. Generally close to 1.
beta_2: float, 0 < beta < 1. Generally close to 1.
epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
decay: float >= 0. Learning rate decay over each update.
amsgrad: boolean. Whether to apply the AMSGrad variant of this
algorithm from the paper "On the Convergence of Adam and
Beyond".
"""
def __init__(self,
lr=0.001,
beta_1=0.9,
beta_2=0.999,
epsilon=None,
decay=0.,
amsgrad=False,
**kwargs):
super(Adam, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.iterations = K.variable(0, dtype='int64', name='iterations')
self.lr = K.variable(lr, name='lr')
self.beta_1 = K.variable(beta_1, name='beta_1')
self.beta_2 = K.variable(beta_2, name='beta_2')
self.decay = K.variable(decay, name='decay')
if epsilon is None:
epsilon = K.epsilon()
self.epsilon = epsilon
self.initial_decay = decay
self.amsgrad = amsgrad
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
self.updates = [state_ops.assign_add(self.iterations, 1)]
lr = self.lr
if self.initial_decay > 0:
lr = lr * ( # pylint: disable=g-no-augmented-assignment
1. / (1. + self.decay * math_ops.cast(self.iterations,
K.dtype(self.decay))))
t = math_ops.cast(self.iterations, K.floatx()) + 1
lr_t = lr * (
K.sqrt(1. - math_ops.pow(self.beta_2, t)) /
(1. - math_ops.pow(self.beta_1, t)))
ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
if self.amsgrad:
vhats = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
else:
vhats = [K.zeros(1) for _ in params]
self.weights = [self.iterations] + ms + vs + vhats
for p, g, m, v, vhat in zip(params, grads, ms, vs, vhats):
m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
v_t = (self.beta_2 * v) + (1. - self.beta_2) * math_ops.square(g)
if self.amsgrad:
vhat_t = math_ops.maximum(vhat, v_t)
p_t = p - lr_t * m_t / (K.sqrt(vhat_t) + self.epsilon)
self.updates.append(state_ops.assign(vhat, vhat_t))
else:
p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)
self.updates.append(state_ops.assign(m, m_t))
self.updates.append(state_ops.assign(v, v_t))
new_p = p_t
# Apply constraints.
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(state_ops.assign(p, new_p))
return self.updates
def get_config(self):
config = {
'lr': float(K.get_value(self.lr)),
'beta_1': float(K.get_value(self.beta_1)),
'beta_2': float(K.get_value(self.beta_2)),
'decay': float(K.get_value(self.decay)),
'epsilon': self.epsilon,
'amsgrad': self.amsgrad
}
base_config = super(Adam, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
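# A minimal scalar sketch of one Adam step (without amsgrad) as implemented
# above: `t` is the 1-based step count and lr_t folds the bias correction of
# the first and second moment estimates into the learning rate; `epsilon`
# stands in for K.epsilon().
def _adam_step_sketch(p, g, m, v, t, lr=0.001, beta_1=0.9, beta_2=0.999,
                      epsilon=1e-7):
  lr_t = lr * ((1. - beta_2 ** t) ** 0.5) / (1. - beta_1 ** t)
  m_t = beta_1 * m + (1. - beta_1) * g
  v_t = beta_2 * v + (1. - beta_2) * g ** 2
  new_p = p - lr_t * m_t / (v_t ** 0.5 + epsilon)
  return new_p, m_t, v_t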
@tf_export('keras.optimizers.Adamax')
class Adamax(Optimizer):
"""Adamax optimizer from Adam paper's Section 7.
It is a variant of Adam based on the infinity norm.
Default parameters follow those provided in the paper.
Arguments:
lr: float >= 0. Learning rate.
beta_1/beta_2: floats, 0 < beta < 1. Generally close to 1.
epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
decay: float >= 0. Learning rate decay over each update.
"""
def __init__(self,
lr=0.002,
beta_1=0.9,
beta_2=0.999,
epsilon=None,
decay=0.,
**kwargs):
super(Adamax, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.iterations = K.variable(0, dtype='int64', name='iterations')
self.lr = K.variable(lr, name='lr')
self.beta_1 = K.variable(beta_1, name='beta_1')
self.beta_2 = K.variable(beta_2, name='beta_2')
self.decay = K.variable(decay, name='decay')
if epsilon is None:
epsilon = K.epsilon()
self.epsilon = epsilon
self.initial_decay = decay
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
self.updates = [state_ops.assign_add(self.iterations, 1)]
lr = self.lr
if self.initial_decay > 0:
lr = lr * ( # pylint: disable=g-no-augmented-assignment
1. / (1. + self.decay * math_ops.cast(self.iterations,
K.dtype(self.decay))))
t = math_ops.cast(self.iterations, K.floatx()) + 1
lr_t = lr / (1. - math_ops.pow(self.beta_1, t))
shapes = [K.int_shape(p) for p in params]
# zero init of 1st moment
ms = [K.zeros(shape) for shape in shapes]
# zero init of exponentially weighted infinity norm
us = [K.zeros(shape) for shape in shapes]
self.weights = [self.iterations] + ms + us
for p, g, m, u in zip(params, grads, ms, us):
m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
u_t = math_ops.maximum(self.beta_2 * u, math_ops.abs(g))
p_t = p - lr_t * m_t / (u_t + self.epsilon)
self.updates.append(state_ops.assign(m, m_t))
self.updates.append(state_ops.assign(u, u_t))
new_p = p_t
# Apply constraints.
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(state_ops.assign(p, new_p))
return self.updates
def get_config(self):
config = {
'lr': float(K.get_value(self.lr)),
'beta_1': float(K.get_value(self.beta_1)),
'beta_2': float(K.get_value(self.beta_2)),
'decay': float(K.get_value(self.decay)),
'epsilon': self.epsilon
}
base_config = super(Adamax, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_export('keras.optimizers.Nadam')
class Nadam(Optimizer):
"""Nesterov Adam optimizer.
Much like Adam is essentially RMSprop with momentum,
  Nadam is Adam with Nesterov momentum.
Default parameters follow those provided in the paper.
It is recommended to leave the parameters of this optimizer
at their default values.
Arguments:
lr: float >= 0. Learning rate.
beta_1/beta_2: floats, 0 < beta < 1. Generally close to 1.
epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
"""
def __init__(self,
lr=0.002,
beta_1=0.9,
beta_2=0.999,
epsilon=None,
schedule_decay=0.004,
**kwargs):
super(Nadam, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.iterations = K.variable(0, dtype='int64', name='iterations')
self.m_schedule = K.variable(1., name='m_schedule')
self.lr = K.variable(lr, name='lr')
self.beta_1 = K.variable(beta_1, name='beta_1')
self.beta_2 = K.variable(beta_2, name='beta_2')
if epsilon is None:
epsilon = K.epsilon()
self.epsilon = epsilon
self.schedule_decay = schedule_decay
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
self.updates = [state_ops.assign_add(self.iterations, 1)]
t = math_ops.cast(self.iterations, K.floatx()) + 1
# Due to the recommendations in [2], i.e. warming momentum schedule
momentum_cache_t = self.beta_1 * (
1. - 0.5 *
(math_ops.pow(K.cast_to_floatx(0.96), t * self.schedule_decay)))
momentum_cache_t_1 = self.beta_1 * (
1. - 0.5 *
(math_ops.pow(K.cast_to_floatx(0.96), (t + 1) * self.schedule_decay)))
m_schedule_new = self.m_schedule * momentum_cache_t
m_schedule_next = self.m_schedule * momentum_cache_t * momentum_cache_t_1
self.updates.append((self.m_schedule, m_schedule_new))
shapes = [K.int_shape(p) for p in params]
ms = [K.zeros(shape) for shape in shapes]
vs = [K.zeros(shape) for shape in shapes]
self.weights = [self.iterations] + ms + vs
for p, g, m, v in zip(params, grads, ms, vs):
      # the following equations are given in [1]
g_prime = g / (1. - m_schedule_new)
m_t = self.beta_1 * m + (1. - self.beta_1) * g
m_t_prime = m_t / (1. - m_schedule_next)
v_t = self.beta_2 * v + (1. - self.beta_2) * math_ops.square(g)
v_t_prime = v_t / (1. - math_ops.pow(self.beta_2, t))
m_t_bar = (
1. - momentum_cache_t) * g_prime + momentum_cache_t_1 * m_t_prime
self.updates.append(state_ops.assign(m, m_t))
self.updates.append(state_ops.assign(v, v_t))
p_t = p - self.lr * m_t_bar / (K.sqrt(v_t_prime) + self.epsilon)
new_p = p_t
# Apply constraints.
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(state_ops.assign(p, new_p))
return self.updates
def get_config(self):
config = {
'lr': float(K.get_value(self.lr)),
'beta_1': float(K.get_value(self.beta_1)),
'beta_2': float(K.get_value(self.beta_2)),
'epsilon': self.epsilon,
'schedule_decay': self.schedule_decay
}
base_config = super(Nadam, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class TFOptimizer(Optimizer):
"""Wrapper class for native TensorFlow optimizers.
"""
def __init__(self, optimizer): # pylint: disable=super-init-not-called
self.optimizer = optimizer
with K.name_scope(self.__class__.__name__):
self.iterations = K.variable(0, dtype='int64', name='iterations')
def apply_gradients(self, grads):
self.optimizer.apply_gradients(grads)
def get_grads(self, loss, params):
return self.optimizer.compute_gradients(loss, params)
def get_updates(self, loss, params):
if distribute_lib.has_distribution_strategy():
self.updates = []
if not params:
        # After the model variables have been created, get_updates is called a
        # second time with params as an empty list. This ensures that we call
        # compute_gradients with params=None.
grads = self.optimizer.compute_gradients(loss)
else:
grads = self.optimizer.compute_gradients(loss, params)
global_step = training_util.get_global_step()
opt_update = self.optimizer.apply_gradients(grads, global_step)
else:
self.updates = [state_ops.assign_add(self.iterations, 1)]
if not params:
return self.updates
grads = self.optimizer.compute_gradients(loss, params)
opt_update = self.optimizer.apply_gradients(
grads, global_step=self.iterations)
self.updates.append(opt_update)
return self.updates
@property
def weights(self):
raise NotImplementedError
def get_config(self):
raise NotImplementedError
def from_config(self, config):
raise NotImplementedError
# Aliases.
sgd = SGD
rmsprop = RMSprop
adagrad = Adagrad
adadelta = Adadelta
adam = Adam
adamax = Adamax
nadam = Nadam
@tf_export('keras.optimizers.serialize')
def serialize(optimizer):
return serialize_keras_object(optimizer)
@tf_export('keras.optimizers.deserialize')
def deserialize(config, custom_objects=None):
"""Inverse of the `serialize` function.
Arguments:
config: Optimizer configuration dictionary.
custom_objects: Optional dictionary mapping
names (strings) to custom objects
(classes and functions)
to be considered during deserialization.
Returns:
A Keras Optimizer instance.
"""
all_classes = {
'sgd': SGD,
'rmsprop': RMSprop,
'adagrad': Adagrad,
'adadelta': Adadelta,
'adam': Adam,
'adamax': Adamax,
'nadam': Nadam,
'tfoptimizer': TFOptimizer,
}
# Make deserialization case-insensitive for built-in optimizers.
if config['class_name'].lower() in all_classes:
config['class_name'] = config['class_name'].lower()
return deserialize_keras_object(
config,
module_objects=all_classes,
custom_objects=custom_objects,
printable_module_name='optimizer')
@tf_export('keras.optimizers.get')
def get(identifier):
"""Retrieves a Keras Optimizer instance.
Arguments:
identifier: Optimizer identifier, one of
- String: name of an optimizer
- Dictionary: configuration dictionary.
- Keras Optimizer instance (it will be returned unchanged).
- TensorFlow Optimizer instance
(it will be wrapped as a Keras Optimizer).
Returns:
A Keras Optimizer instance.
Raises:
ValueError: If `identifier` cannot be interpreted.
"""
# Wrap TF optimizer instances
if isinstance(identifier, tf_optimizer_module.Optimizer):
return TFOptimizer(identifier)
if isinstance(identifier, dict):
return deserialize(identifier)
elif isinstance(identifier, six.string_types):
config = {'class_name': str(identifier), 'config': {}}
return deserialize(config)
if isinstance(identifier, Optimizer):
return identifier
else:
raise ValueError('Could not interpret optimizer identifier:', identifier)
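# Minimal lookup sketches using only names defined in this module: a
# case-insensitive alias string, and a serialize()/deserialize() round trip
# of a configured instance.
def _example_optimizer_lookup():
  sgd_default = get('sgd')  # name string -> SGD with default config
  adam_clone = deserialize(serialize(Adam(lr=3e-4)))
  return sgd_default, adam_clone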
| apache-2.0 | -7,980,310,125,156,015,000 | 33.125734 | 80 | 0.621811 | false |
lintrace/GravityFalls3 | gravity_falls.py | 1 | 1529 | #!/usr/bin/python3
"""
Main entry point.
Solves the cryptograms from the book "Gravity Falls: Journal 3" ("Дневник Гравити Фоллз 3")
"""
import atbash_chiper, caesar_cipher, vigenere_cipher
print('='*80)
print('Зашифровано шифром Цезаря, см. коментарии к строкам выше (место в книге)')
print('='*80)
for line in open('caesar.txt'):
if line[0] == '#':
print('-' * 80,'\n',line)
continue
if line[-1]=='\n':
line = line[:-1]
print('Из дневника:\t{0}\nРасшифровано:\t{1}\n'.format(line, caesar_cipher.caesar_dec(line, 23)), end ='')
print('='*80)
print('Текст из Дневника 3 Гравити Фоллз (самый длинный),\nхранился в шкатулке с нацарапанным на крышке словом "ПАЙНС"')
print('Текст зашифрован шифром Виженера, а слово "ПАЙНС" является паролем')
print('='*80)
print(vigenere_cipher.vigenere_file_dec('ПАЙНС.txt','ПАЙНС'))
print('='*80,'\n\n\n')
print('='*80)
print('### В книге встречается подсказка к расшифровке инопланетного шифра - закорючек\nСудя по всему, каждой закорючке соответствует своя цифра,\nкоторая соответствует номеру буквы в алфавите (шифр А1Я33)')
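# Иллюстративный набросок (illustrative sketch): a self-contained Caesar shift
# over the 33-letter Russian alphabet. The project's caesar_cipher module is
# not shown in this file, so its alphabet handling and shift direction are
# assumptions; this helper only makes the idea behind caesar_dec(line, 23)
# concrete and is not called above.
def _caesar_shift_sketch(text, shift):
    alphabet = 'абвгдеёжзийклмнопрстуфхцчшщъыьэюя'
    result = []
    for ch in text.lower():
        i = alphabet.find(ch)
        result.append(alphabet[(i + shift) % len(alphabet)] if i >= 0 else ch)
    return ''.join(result)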
| gpl-3.0 | 8,400,138,516,059,571,000 | 34.766667 | 207 | 0.689655 | false |
Azure/azure-sdk-for-python | sdk/automation/azure-mgmt-automation/azure/mgmt/automation/operations/_job_operations.py | 1 | 30361 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class JobOperations(object):
"""JobOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.automation.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get_output(
self,
resource_group_name, # type: str
automation_account_name, # type: str
job_name, # type: str
client_request_id=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> str
"""Retrieve the job output identified by job name.
:param resource_group_name: Name of an Azure Resource group.
:type resource_group_name: str
:param automation_account_name: The name of the automation account.
:type automation_account_name: str
:param job_name: The name of the job to be created.
:type job_name: str
:param client_request_id: Identifies this specific client request.
:type client_request_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: str, or the result of cls(response)
:rtype: str
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[str]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "text/plain"
# Construct URL
url = self.get_output.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._]+$'),
'automationAccountName': self._serialize.url("automation_account_name", automation_account_name, 'str'),
'jobName': self._serialize.url("job_name", job_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if client_request_id is not None:
header_parameters['clientRequestId'] = self._serialize.header("client_request_id", client_request_id, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_output.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automation/automationAccounts/{automationAccountName}/jobs/{jobName}/output'} # type: ignore
def get_runbook_content(
self,
resource_group_name, # type: str
automation_account_name, # type: str
job_name, # type: str
client_request_id=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> str
"""Retrieve the runbook content of the job identified by job name.
:param resource_group_name: Name of an Azure Resource group.
:type resource_group_name: str
:param automation_account_name: The name of the automation account.
:type automation_account_name: str
:param job_name: The job name.
:type job_name: str
:param client_request_id: Identifies this specific client request.
:type client_request_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: str, or the result of cls(response)
:rtype: str
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[str]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "text/powershell"
# Construct URL
url = self.get_runbook_content.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._]+$'),
'automationAccountName': self._serialize.url("automation_account_name", automation_account_name, 'str'),
'jobName': self._serialize.url("job_name", job_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if client_request_id is not None:
header_parameters['clientRequestId'] = self._serialize.header("client_request_id", client_request_id, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_runbook_content.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automation/automationAccounts/{automationAccountName}/jobs/{jobName}/runbookContent'} # type: ignore
def suspend(
self,
resource_group_name, # type: str
automation_account_name, # type: str
job_name, # type: str
client_request_id=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
"""Suspend the job identified by job name.
:param resource_group_name: Name of an Azure Resource group.
:type resource_group_name: str
:param automation_account_name: The name of the automation account.
:type automation_account_name: str
:param job_name: The job name.
:type job_name: str
:param client_request_id: Identifies this specific client request.
:type client_request_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
# Construct URL
url = self.suspend.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._]+$'),
'automationAccountName': self._serialize.url("automation_account_name", automation_account_name, 'str'),
'jobName': self._serialize.url("job_name", job_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if client_request_id is not None:
header_parameters['clientRequestId'] = self._serialize.header("client_request_id", client_request_id, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
suspend.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automation/automationAccounts/{automationAccountName}/jobs/{jobName}/suspend'} # type: ignore
def stop(
self,
resource_group_name, # type: str
automation_account_name, # type: str
job_name, # type: str
client_request_id=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
"""Stop the job identified by jobName.
:param resource_group_name: Name of an Azure Resource group.
:type resource_group_name: str
:param automation_account_name: The name of the automation account.
:type automation_account_name: str
:param job_name: The job name.
:type job_name: str
:param client_request_id: Identifies this specific client request.
:type client_request_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
# Construct URL
url = self.stop.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._]+$'),
'automationAccountName': self._serialize.url("automation_account_name", automation_account_name, 'str'),
'jobName': self._serialize.url("job_name", job_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if client_request_id is not None:
header_parameters['clientRequestId'] = self._serialize.header("client_request_id", client_request_id, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automation/automationAccounts/{automationAccountName}/jobs/{jobName}/stop'} # type: ignore
def get(
self,
resource_group_name, # type: str
automation_account_name, # type: str
job_name, # type: str
client_request_id=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.Job"
"""Retrieve the job identified by job name.
:param resource_group_name: Name of an Azure Resource group.
:type resource_group_name: str
:param automation_account_name: The name of the automation account.
:type automation_account_name: str
:param job_name: The job name.
:type job_name: str
:param client_request_id: Identifies this specific client request.
:type client_request_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Job, or the result of cls(response)
:rtype: ~azure.mgmt.automation.models.Job
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Job"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._]+$'),
'automationAccountName': self._serialize.url("automation_account_name", automation_account_name, 'str'),
'jobName': self._serialize.url("job_name", job_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if client_request_id is not None:
header_parameters['clientRequestId'] = self._serialize.header("client_request_id", client_request_id, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Job', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automation/automationAccounts/{automationAccountName}/jobs/{jobName}'} # type: ignore
def create(
self,
resource_group_name, # type: str
automation_account_name, # type: str
job_name, # type: str
parameters, # type: "_models.JobCreateParameters"
client_request_id=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.Job"
"""Create a job of the runbook.
:param resource_group_name: Name of an Azure Resource group.
:type resource_group_name: str
:param automation_account_name: The name of the automation account.
:type automation_account_name: str
:param job_name: The job name.
:type job_name: str
:param parameters: The parameters supplied to the create job operation.
:type parameters: ~azure.mgmt.automation.models.JobCreateParameters
:param client_request_id: Identifies this specific client request.
:type client_request_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Job, or the result of cls(response)
:rtype: ~azure.mgmt.automation.models.Job
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Job"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._]+$'),
'automationAccountName': self._serialize.url("automation_account_name", automation_account_name, 'str'),
'jobName': self._serialize.url("job_name", job_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if client_request_id is not None:
header_parameters['clientRequestId'] = self._serialize.header("client_request_id", client_request_id, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'JobCreateParameters')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Job', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automation/automationAccounts/{automationAccountName}/jobs/{jobName}'} # type: ignore
def list_by_automation_account(
self,
resource_group_name, # type: str
automation_account_name, # type: str
filter=None, # type: Optional[str]
client_request_id=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.JobListResultV2"]
"""Retrieve a list of jobs.
:param resource_group_name: Name of an Azure Resource group.
:type resource_group_name: str
:param automation_account_name: The name of the automation account.
:type automation_account_name: str
:param filter: The filter to apply on the operation.
:type filter: str
:param client_request_id: Identifies this specific client request.
:type client_request_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either JobListResultV2 or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.automation.models.JobListResultV2]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.JobListResultV2"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if client_request_id is not None:
header_parameters['clientRequestId'] = self._serialize.header("client_request_id", client_request_id, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_automation_account.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._]+$'),
'automationAccountName': self._serialize.url("automation_account_name", automation_account_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('JobListResultV2', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_automation_account.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automation/automationAccounts/{automationAccountName}/jobs'} # type: ignore
def resume(
self,
resource_group_name, # type: str
automation_account_name, # type: str
job_name, # type: str
client_request_id=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
"""Resume the job identified by jobName.
:param resource_group_name: Name of an Azure Resource group.
:type resource_group_name: str
:param automation_account_name: The name of the automation account.
:type automation_account_name: str
:param job_name: The job name.
:type job_name: str
:param client_request_id: Identifies this specific client request.
:type client_request_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
# Construct URL
url = self.resume.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._]+$'),
'automationAccountName': self._serialize.url("automation_account_name", automation_account_name, 'str'),
'jobName': self._serialize.url("job_name", job_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if client_request_id is not None:
header_parameters['clientRequestId'] = self._serialize.header("client_request_id", client_request_id, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
resume.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automation/automationAccounts/{automationAccountName}/jobs/{jobName}/resume'} # type: ignore
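# A minimal usage sketch, assuming the track-2 AutomationClient in this package
# accepts (credential, subscription_id), that azure-identity is available for
# DefaultAzureCredential, and that the client exposes this operations group as
# `.job`; the resource group, automation account and job names are placeholders.
def _example_list_and_fetch_jobs():
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.automation import AutomationClient
    client = AutomationClient(DefaultAzureCredential(), "<subscription-id>")
    jobs = client.job.list_by_automation_account("<resource-group>",
                                                 "<automation-account>")
    for job in jobs:
        print(job.name)
    return client.job.get("<resource-group>", "<automation-account>", "<job-name>")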
| mit | -6,312,953,112,437,856,000 | 48.609477 | 232 | 0.638319 | false |
thomas-schmid-ubnt/avocado | selftests/unit/test_restclient_response.py | 1 | 2038 | import unittest
from avocado.core.restclient import response
class ResultResponseTest(unittest.TestCase):
GOOD_DATA = ('{"count": 1, "next": null, "previous": null, '
'"results": [ { "name": "unknown" } ] }')
BAD_DATA_JSON = '{"count": 1'
BAD_DATA_COUNT = ('{"counter": 1, "next": null, "previous": null, '
'"results": [ { "name": "unknown" } ] }')
BAD_DATA_NEXT = ('{"count": 1, "NEXT": null, "previous": null, '
'"results": [ { "name": "unknown" } ] }')
BAD_DATA_PREVIOUS = ('{"count": 1, "next": null, "prev": null, '
'"results": [ { "name": "unknown" } ] }')
BAD_DATA_RESULTS = '{"count": 1, "next": null, "prev": null}'
def test_good_data(self):
r = response.ResultResponse(self.GOOD_DATA)
self.assertEquals(r.count, 1)
def test_bad_data_json(self):
self.assertRaises(response.InvalidJSONError,
response.ResultResponse,
self.BAD_DATA_JSON)
def test_bad_data_empty(self):
self.assertRaises(response.InvalidJSONError,
response.ResultResponse, '')
def test_bad_data_count(self):
self.assertRaises(response.InvalidResultResponseError,
response.ResultResponse,
self.BAD_DATA_COUNT)
def test_bad_data_next(self):
self.assertRaises(response.InvalidResultResponseError,
response.ResultResponse,
self.BAD_DATA_NEXT)
def test_bad_data_previous(self):
self.assertRaises(response.InvalidResultResponseError,
response.ResultResponse,
self.BAD_DATA_PREVIOUS)
def test_bad_data_results(self):
self.assertRaises(response.InvalidResultResponseError,
response.ResultResponse,
self.BAD_DATA_RESULTS)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | 6,398,491,293,295,591,000 | 33.542373 | 71 | 0.539745 | false |
kmike/opencorpora-tools | tests/test_corpora.py | 1 | 10519 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division, unicode_literals
import os
try:
import unittest2 as unittest
except ImportError:
import unittest
import tempfile
import shutil
from collections import OrderedDict
from opencorpora.reader import CorpusReader
TEST_DATA = os.path.join(os.path.dirname(__file__), 'annot.corpus.xml')
class BaseTest(unittest.TestCase):
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
cache_filename = os.path.join(self.temp_dir, 'corpora.cache')
self.corpus = CorpusReader(TEST_DATA, cache_filename=cache_filename)
def tearDown(self):
shutil.rmtree(self.temp_dir)
class CorporaTest(BaseTest):
def test_document_meta(self):
self.assertEqual(self.corpus.catalog(), [
('1', '"Частный корреспондент"'),
('2', '00021 Школа злословия'),
('3', '00022 Последнее восстание в Сеуле'),
('4', '00023 За кота - ответишь!'),
])
self.assertEqual(self.corpus.catalog('Тема:ЧасКор:Культура*'), [
('3', '00022 Последнее восстание в Сеуле'),
('4', '00023 За кота - ответишь!'),
])
self.assertEqual(self.corpus.fileids(), ['1', '2', '3', '4'])
def test_annotation_info(self):
self.assertEqual(
self.corpus.get_annotation_info(),
{"version": "0.12", "revision": "4579844"}
)
def test_raw_loading(self):
loaded_raw = self.corpus._get_doc_by_raw_offset(3)
loaded_line = self.corpus._get_doc_by_line_offset(3) # this is reliable
self.assertEqual(loaded_raw, loaded_line)
def test_single_doc_xml(self):
xml = self.corpus._document_xml(3)
tokens = xml.findall('paragraphs//token')
self.assertEqual(tokens[17].get('text'), 'арт-группы')
def test_doc_xml(self):
doc = self.corpus.documents()[2]
words = doc.words()
self.assertTrue(words)
self.assertEqual(words[17], 'арт-группы')
def test_docs_slicing(self):
docs = self.corpus.documents([1, 2])
self.assertEqual(len(docs), 2)
self.assertEqual(docs[0].raw(), self.corpus.get_document(1).raw())
self.assertEqual(docs[1].raw(), self.corpus.get_document(2).raw())
def test_titles(self):
titles = [doc.title() for doc in self.corpus.iter_documents()]
catalog_titles = list(OrderedDict(self.corpus.catalog()).values())
self.assertEqual(titles, catalog_titles)
def test_words(self):
words = self.corpus.words()
self.assertEqual(len(words), 2358)
# check some random tokens
self.assertEqual(words[344], 'социально-исторического')
self.assertEqual(words[430], ':')
self.assertEqual(words[967], 'Школа')
self.assertEqual(words[2225], '«')
self.assertEqual(words[2322], 'крэнк')
def test_words_slicing(self):
words2 = self.corpus.words('2')
self.assertEqual(len(words2), 1027)
words23 = self.corpus.words([2, 3])
self.assertEqual(len(words23), 1346)
words24 = self.corpus.words(['2', '4'])
self.assertEqual(len(words24), 2039)
words234 = self.corpus.words(['2', '3', '4'])
self.assertEqual(len(words234), 2039+(1346-1027))
def test_tagged_words(self):
words = self.corpus.tagged_words()
self.assertEqual(len(words), len(self.corpus.words()))
self.assertEqual(words[967], ('Школа', 'NOUN,inan,femn,sing,nomn'))
def test_parsed_words(self):
words = self.corpus.parsed_words()
self.assertEqual(len(words), len(self.corpus.words()))
self.assertEqual(words[967], ('Школа', [('школа', 'NOUN,inan,femn,sing,nomn')]))
def test_tagged_words_slicing(self):
words = self.corpus.tagged_words('3')
self.assertEqual(len(words), len(self.corpus.words('3')))
self.assertEqual(words[17], ('арт-группы', 'NOUN,inan,femn,plur,accs'))
def test_parsed_words_slicing(self):
words = self.corpus.parsed_words('3')
self.assertEqual(len(words), len(self.corpus.words('3')))
self.assertEqual(words[17], ('арт-группы', [
('арт-группа', 'NOUN,inan,femn,plur,accs'),
('арт-группа', 'NOUN,inan,femn,plur,nomn'),
('арт-группа', 'NOUN,inan,femn,sing,gent'),
]))
def test_paras(self):
paras = self.corpus.paras()
self.assertEqual(len(paras), 41)
for para in paras:
self.assertTrue(len(para))
def test_paras_slicing(self):
paras = self.corpus.paras(['3'])
self.assertEqual(len(paras), 6)
paras = self.corpus.paras(categories=['Автор:Яна Сарно'])
self.assertEqual(len(paras), 6)
def test_sents(self):
sents = self.corpus.sents()
self.assertEqual(len(sents), 102)
for sent in sents:
self.assertTrue(len(sent))
def test_sents_slicing(self):
sents = self.corpus.sents(['2', '3'])
self.assertEqual(len(sents), 58)
sents = self.corpus.sents(categories=['Автор:Яна Сарно'])
self.assertEqual(len(sents), 14)
sents = self.corpus.sents(categories='Автор:Яна Сарно')
self.assertEqual(len(sents), 14)
def test_raw(self):
raw = self.corpus.raw(categories='Автор:Яна Сарно')
self.assertEqual(len(raw), 2053)
self.assertTrue('биеннале' in raw)
self.assertEqual(raw, self.corpus.raw(3))
class CategoriesTest(BaseTest):
def test_categories(self):
cats = self.corpus.categories()
self.assertEqual(cats, [
'url:http://www.chaskor.ru',
'url:http://www.chaskor.ru/article/poslednee_vosstanie_v_seule_22',
'url:http://www.chaskor.ru/article/shkola_zlosloviya_uchit_prikusit_yazyk_21',
'url:http://www.chaskor.ru/article/za_kota_-_otvetish_23',
'Автор:Валентин Колесников',
'Автор:Роман Арбитман',
'Автор:Яна Сарно',
'Год:2008',
'Дата:12/08',
'Дата:24/09',
'Дата:25/08',
'Тема:ЧасКор:Культура',
'Тема:ЧасКор:Культура/Изобразительное искусство',
'Тема:ЧасКор:Культура/Масскульт',
'Тема:ЧасКор:Медиа',
'Тема:ЧасКор:Медиа/ТВ и радио',
'Тип:Газета'
])
def test_categories_fileids(self):
cats = self.corpus.categories(1)
self.assertEqual(cats, [
'url:http://www.chaskor.ru',
'Тип:Газета',
])
cats = self.corpus.categories([1, 3])
self.assertEqual(cats, [
'url:http://www.chaskor.ru',
'url:http://www.chaskor.ru/article/poslednee_vosstanie_v_seule_22',
'Автор:Яна Сарно',
'Год:2008',
'Дата:12/08',
'Тема:ЧасКор:Культура',
'Тема:ЧасКор:Культура/Изобразительное искусство',
'Тип:Газета',
])
def test_fileids_categories(self):
ids = self.corpus.fileids(['Тип:Газета'])
self.assertEqual(ids, ['1'])
ids = self.corpus.fileids(categories=['Автор:*']) # docs ids with authors
self.assertEqual(ids, ['2', '3', '4'])
def test_categories_patterns(self):
cats = self.corpus.categories([1, 3], ['Автор:*', 'Тема:*'])
self.assertEqual(cats, [
'Автор:Яна Сарно',
'Тема:ЧасКор:Культура',
'Тема:ЧасКор:Культура/Изобразительное искусство',
])
class DocumentTest(BaseTest):
def test_words(self):
self.assertEqual(self.corpus.get_document(1).words(), [])
words = self.corpus.get_document(2).words()
self.assertEqual(len(words), 1027)
self.assertEqual(words[9], 'градус')
def test_raw_sents(self):
sents = self.corpus.get_document(2).raw_sents()
self.assertEqual(len(sents), 44)
self.assertEqual(sents[1], 'Сохранится ли градус дискуссии в новом сезоне?')
class TaggedWordsTest(BaseTest):
def assertTaggedAreTheSame(self, obj):
words, tagged_words = obj.words(), obj.tagged_words()
for word, tagged_word in zip(words, tagged_words):
self.assertEqual(word, tagged_word[0])
self.assertEqual(len(words), len(tagged_words))
def test_corpus(self):
words = self.corpus.tagged_words()
self.assertEqual(words[:2], [
('«', 'PNCT'),
('Школа', 'NOUN,inan,femn,sing,nomn'),
])
self.assertTaggedAreTheSame(self.corpus)
def test_document(self):
doc = self.corpus.get_document(2)
words = doc.tagged_words()
self.assertEqual(words[:2], [
('«', 'PNCT'),
('Школа', 'NOUN,inan,femn,sing,nomn'),
])
self.assertTaggedAreTheSame(doc)
class ParsedWordsTest(BaseTest):
def assertParsedAreTheSame(self, obj):
words, parsed_words = obj.words(), obj.parsed_words()
for word, parsed_word in zip(words, parsed_words):
self.assertEqual(word, parsed_word[0])
self.assertEqual(len(words), len(parsed_words))
def test_corpus(self):
words = self.corpus.parsed_words()
self.assertEqual(words[:2], [
('«', [('«', 'PNCT')]),
('Школа', [('школа', 'NOUN,inan,femn,sing,nomn')]),
])
self.assertParsedAreTheSame(self.corpus)
def test_document(self):
doc = self.corpus.get_document(2)
words = doc.parsed_words()
self.assertEqual(words[:2], [
('«', [('«', 'PNCT')]),
('Школа', [('школа', 'NOUN,inan,femn,sing,nomn')]),
])
self.assertParsedAreTheSame(doc)
| mit | -6,981,211,611,246,779,000 | 32.363014 | 90 | 0.588586 | false |
jackdpage/libxpxpy | docs/conf.py | 1 | 9182 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# libxpxpy documentation build configuration file, created by
# sphinx-quickstart on Fri Apr 8 10:16:11 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'libxpxpy'
copyright = '2016, Jack Page'
author = 'Jack Page'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'libxpxpydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'libxpxpy.tex', 'libxpxpy Documentation',
'Jack Page', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'libxpxpy', 'libxpxpy Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'libxpxpy', 'libxpxpy Documentation',
author, 'libxpxpy', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| apache-2.0 | 3,913,105,727,664,243,700 | 31.330986 | 79 | 0.705838 | false |
ucb-sejits/ctree | ctree/c/codegen.py | 1 | 6213 | """
Code generator for C constructs.
"""
from ctree.codegen import CodeGenVisitor
from ctree.c.nodes import Op
from ctree.types import codegen_type, get_suffix
from ctree.precedence import UnaryOp, BinaryOp, TernaryOp, Cast
from ctree.precedence import get_precedence, is_left_associative
from numbers import Number
from ctree.nodes import CommonCodeGen
class CCodeGen(CommonCodeGen):
"""
Manages generation of C code.
"""
def _requires_parentheses(self, parent, node):
"""
        Returns True if parentheses are needed around node to enforce C
        operator precedence and associativity rules when it is emitted as a
        child of parent.
"""
if isinstance(node, (UnaryOp, BinaryOp, TernaryOp)) and\
isinstance(parent, (UnaryOp, BinaryOp, TernaryOp, Cast)):
prec = get_precedence(node)
parent_prec = get_precedence(parent)
is_not_last_child = isinstance(parent, UnaryOp) or\
isinstance(parent, Cast) or\
(isinstance(parent, BinaryOp) and node is parent.left) or\
(isinstance(parent, TernaryOp) and node is not parent.elze)
assoc_left = is_left_associative(parent)
if (prec < parent_prec) or \
(prec == parent_prec and (assoc_left is not is_not_last_child)):
return True
return False
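    # Illustrative note (added commentary, not part of the original ctree code):
    # this check is what makes a multiplication whose left operand is an
    # addition render as "(a + b) * c", while "a + b * c" needs no extra
    # parentheses because '*' already binds tighter than '+' in C.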
# -------------------------------------------------------------------------
# visitor methods
def visit_MultiNode(self, node):
return self._genblock(node.body, insert_curly_brackets=False, increase_indent=False)
def visit_FunctionDecl(self, node):
params = ", ".join(map(str, node.params))
s = []
for attrib in node.attributes:
s.append("__attribute__ (({}))".format(attrib))
if node.kernel:
s.append("__kernel")
if node.static:
s.append("static")
if node.inline:
s.append("inline")
s.append("%s %s(%s)" % (codegen_type(node.return_type), node.name, params))
if node.defn:
s.append("%s" % self._genblock(node.defn))
return " ".join(s)
def visit_UnaryOp(self, node):
op = self._parenthesize(node, node.op)
arg = self._parenthesize(node, node.arg)
if isinstance(node.op, (Op.PostInc, Op.PostDec)):
return "%s %s" % (arg, op)
else:
return "%s %s" % (op, arg)
def visit_BinaryOp(self, node):
left = self._parenthesize(node, node.left)
right = self._parenthesize(node, node.right)
if isinstance(node.op, Op.ArrayRef):
return "%s[%s]" % (left, right)
else:
return "%s %s %s" % (left, node.op, right)
def visit_AugAssign(self, node):
return "%s %s= %s" % (node.target, node.op, node.value)
def visit_TernaryOp(self, node):
cond = self._parenthesize(node, node.cond)
then = self._parenthesize(node, node.then)
elze = self._parenthesize(node, node.elze)
return "%s ? %s : %s" % (cond, then, elze)
def visit_Cast(self, node):
value = self._parenthesize(node, node.value)
return "(%s) %s" % (codegen_type(node.type), value)
def visit_Constant(self, node):
if isinstance(node.value, str):
return "'%s'" % node.value[0]
else:
return str(node.value)
def visit_SymbolRef(self, node):
s = ""
if node._global:
s += "__global "
if node._local:
s += "__local "
if node._static:
s += "static "
if node._const:
s += "const "
if node.type is not None:
s += "%s " % codegen_type(node.type)
if node._restrict:
s += "restrict "
return "%s%s" % (s, node.name)
def visit_Block(self, node):
return self._genblock(node.body)
def visit_Return(self, node):
if node.value:
return "return %s" % node.value
else:
return "return"
def visit_If(self, node):
then = self._genblock(node.then)
if node.elze:
elze = self._genblock(node.elze)
return "if (%s) %s else %s" % (node.cond, then, elze)
else:
return "if (%s) %s" % (node.cond, then)
def visit_While(self, node):
body = self._genblock(node.body)
return "while (%s) %s" % (node.cond, body)
def visit_DoWhile(self, node):
body = self._genblock(node.body)
return "do %s while (%s)" % (body, node.cond)
def visit_For(self, node):
body = self._genblock(node.body)
s = ""
if node.pragma:
s += "#pragma %s\n" % node.pragma + self._tab()
return s + "for (%s; %s; %s) %s" % (node.init, node.test, node.incr, body)
def visit_FunctionCall(self, node):
args = ", ".join(map(str, node.args))
return "%s(%s)" % (node.func, args)
def visit_String(self, node):
return '"%s"' % '" "'.join(node.values)
def visit_CFile(self, node):
stmts = self._genblock(node.body, insert_curly_brackets=False, increase_indent=False)
return '// <file: %s>%s' % (node.get_filename(), stmts)
def visit_ArrayDef(self, node):
return "%s[%s] = " % (node.target, node.size) + self.visit(node.body)
def visit_Break(self, node):
return 'break'
def visit_Continue(self, node):
return 'continue'
def visit_Array(self, node):
return "{%s}" % ', '.join([i.codegen() for i in node.body])
def visit_Hex(self, node):
return hex(node.value) + get_suffix(node.ctype)
def visit_Number(self, node):
return str(node.value) + get_suffix(node.ctype)
def visit_Attribute(self, node):
s = self.visit(node.target)
return "{target} __attribute__({items})".format(target=s, items=", ".join(node.attributes))
def visit_Pragma(self, node):
stuff = self._genblock(node.body, insert_curly_brackets=node.braces)
if node.braces:
stuff = '\n\t'.join(stuff.split("\n"))
return '#pragma ' + node.pragma + '\n' + stuff
| bsd-2-clause | 4,827,430,226,630,825,000 | 33.325967 | 99 | 0.55062 | false |
FFMG/myoddweb.piger | monitor/api/python/Python-3.7.2/Lib/test/test_list.py | 3 | 5684 | import sys
from test import list_tests
import pickle
import unittest
class ListTest(list_tests.CommonTest):
type2test = list
def test_basic(self):
self.assertEqual(list([]), [])
l0_3 = [0, 1, 2, 3]
l0_3_bis = list(l0_3)
self.assertEqual(l0_3, l0_3_bis)
self.assertTrue(l0_3 is not l0_3_bis)
self.assertEqual(list(()), [])
self.assertEqual(list((0, 1, 2, 3)), [0, 1, 2, 3])
self.assertEqual(list(''), [])
self.assertEqual(list('spam'), ['s', 'p', 'a', 'm'])
self.assertEqual(list(x for x in range(10) if x % 2),
[1, 3, 5, 7, 9])
if sys.maxsize == 0x7fffffff:
# This test can currently only work on 32-bit machines.
# XXX If/when PySequence_Length() returns a ssize_t, it should be
# XXX re-enabled.
# Verify clearing of bug #556025.
            # This assumes that the max data size (sys.maxint) == max
            # address size; it also assumes that the address size is at
            # least 4 bytes. With 8 byte addresses, the bug is not well
            # tested.
#
# Note: This test is expected to SEGV under Cygwin 1.3.12 or
# earlier due to a newlib bug. See the following mailing list
# thread for the details:
# http://sources.redhat.com/ml/newlib/2002/msg00369.html
self.assertRaises(MemoryError, list, range(sys.maxsize // 2))
# This code used to segfault in Py2.4a3
x = []
x.extend(-y for y in x)
self.assertEqual(x, [])
def test_keyword_args(self):
with self.assertRaisesRegex(TypeError, 'keyword argument'):
list(sequence=[])
def test_truth(self):
super().test_truth()
self.assertTrue(not [])
self.assertTrue([42])
def test_identity(self):
self.assertTrue([] is not [])
def test_len(self):
super().test_len()
self.assertEqual(len([]), 0)
self.assertEqual(len([0]), 1)
self.assertEqual(len([0, 1, 2]), 3)
def test_overflow(self):
lst = [4, 5, 6, 7]
n = int((sys.maxsize*2+2) // len(lst))
def mul(a, b): return a * b
def imul(a, b): a *= b
self.assertRaises((MemoryError, OverflowError), mul, lst, n)
self.assertRaises((MemoryError, OverflowError), imul, lst, n)
def test_repr_large(self):
# Check the repr of large list objects
def check(n):
l = [0] * n
s = repr(l)
self.assertEqual(s,
'[' + ', '.join(['0'] * n) + ']')
check(10) # check our checking code
check(1000000)
def test_iterator_pickle(self):
orig = self.type2test([4, 5, 6, 7])
data = [10, 11, 12, 13, 14, 15]
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
# initial iterator
itorig = iter(orig)
d = pickle.dumps((itorig, orig), proto)
it, a = pickle.loads(d)
a[:] = data
self.assertEqual(type(it), type(itorig))
self.assertEqual(list(it), data)
# running iterator
next(itorig)
d = pickle.dumps((itorig, orig), proto)
it, a = pickle.loads(d)
a[:] = data
self.assertEqual(type(it), type(itorig))
self.assertEqual(list(it), data[1:])
# empty iterator
for i in range(1, len(orig)):
next(itorig)
d = pickle.dumps((itorig, orig), proto)
it, a = pickle.loads(d)
a[:] = data
self.assertEqual(type(it), type(itorig))
self.assertEqual(list(it), data[len(orig):])
# exhausted iterator
self.assertRaises(StopIteration, next, itorig)
d = pickle.dumps((itorig, orig), proto)
it, a = pickle.loads(d)
a[:] = data
self.assertEqual(list(it), [])
def test_reversed_pickle(self):
orig = self.type2test([4, 5, 6, 7])
data = [10, 11, 12, 13, 14, 15]
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
# initial iterator
itorig = reversed(orig)
d = pickle.dumps((itorig, orig), proto)
it, a = pickle.loads(d)
a[:] = data
self.assertEqual(type(it), type(itorig))
self.assertEqual(list(it), data[len(orig)-1::-1])
# running iterator
next(itorig)
d = pickle.dumps((itorig, orig), proto)
it, a = pickle.loads(d)
a[:] = data
self.assertEqual(type(it), type(itorig))
self.assertEqual(list(it), data[len(orig)-2::-1])
# empty iterator
for i in range(1, len(orig)):
next(itorig)
d = pickle.dumps((itorig, orig), proto)
it, a = pickle.loads(d)
a[:] = data
self.assertEqual(type(it), type(itorig))
self.assertEqual(list(it), [])
# exhausted iterator
self.assertRaises(StopIteration, next, itorig)
d = pickle.dumps((itorig, orig), proto)
it, a = pickle.loads(d)
a[:] = data
self.assertEqual(list(it), [])
def test_no_comdat_folding(self):
# Issue 8847: In the PGO build, the MSVC linker's COMDAT folding
# optimization causes failures in code that relies on distinct
# function addresses.
class L(list): pass
with self.assertRaises(TypeError):
(3,) + L([1,2])
if __name__ == "__main__":
unittest.main()
| gpl-2.0 | 3,338,669,728,257,395,000 | 34.304348 | 77 | 0.521464 | false |
nuagenetworks/vspk-python | vspk/v6/nuegressauditacltemplate.py | 1 | 21660 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUPermissionsFetcher
from .fetchers import NUMetadatasFetcher
from .fetchers import NUEgressAuditACLEntryTemplatesFetcher
from .fetchers import NUGlobalMetadatasFetcher
from bambou import NURESTObject
class NUEgressAuditACLTemplate(NURESTObject):
""" Represents a EgressAuditACLTemplate in the VSD
Notes:
An egress audit policy is a set of rules defining how network traffic is monitored and mirrored from a domain for Audit purposes
"""
__rest_name__ = "egressauditacltemplate"
__resource_name__ = "egressauditacltemplates"
## Constants
CONST_POLICY_STATE_DRAFT = "DRAFT"
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_PRIORITY_TYPE_TOP_AUDIT = "TOP_AUDIT"
CONST_POLICY_STATE_LIVE = "LIVE"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
def __init__(self, **kwargs):
""" Initializes a EgressAuditACLTemplate instance
Notes:
You can specify all parameters while calling this methods.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> egressauditacltemplate = NUEgressAuditACLTemplate(id=u'xxxx-xxx-xxx-xxx', name=u'EgressAuditACLTemplate')
>>> egressauditacltemplate = NUEgressAuditACLTemplate(data=my_dict)
"""
super(NUEgressAuditACLTemplate, self).__init__()
# Read/Write Attributes
self._name = None
self._last_updated_by = None
self._last_updated_date = None
self._active = None
self._default_allow_ip = None
self._default_allow_non_ip = None
self._default_install_acl_implicit_rules = None
self._description = None
self._embedded_metadata = None
self._entity_scope = None
self._policy_state = None
self._creation_date = None
self._priority = None
self._priority_type = None
self._associated_live_entity_id = None
self._associated_virtual_firewall_policy_id = None
self._auto_generate_priority = None
self._owner = None
self._external_id = None
self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=True, is_unique=False)
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_updated_date", remote_name="lastUpdatedDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="active", remote_name="active", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="default_allow_ip", remote_name="defaultAllowIP", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="default_allow_non_ip", remote_name="defaultAllowNonIP", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="default_install_acl_implicit_rules", remote_name="defaultInstallACLImplicitRules", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="embedded_metadata", remote_name="embeddedMetadata", attribute_type=list, is_required=False, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="policy_state", remote_name="policyState", attribute_type=str, is_required=False, is_unique=False, choices=[u'DRAFT', u'LIVE'])
self.expose_attribute(local_name="creation_date", remote_name="creationDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="priority", remote_name="priority", attribute_type=int, is_required=False, is_unique=True)
self.expose_attribute(local_name="priority_type", remote_name="priorityType", attribute_type=str, is_required=False, is_unique=True, choices=[u'TOP_AUDIT'])
self.expose_attribute(local_name="associated_live_entity_id", remote_name="associatedLiveEntityID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_virtual_firewall_policy_id", remote_name="associatedVirtualFirewallPolicyID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="auto_generate_priority", remote_name="autoGeneratePriority", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="owner", remote_name="owner", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
# Fetchers
self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.egress_audit_acl_entry_templates = NUEgressAuditACLEntryTemplatesFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
# Properties
@property
def name(self):
""" Get name value.
Notes:
The name of the entity
"""
return self._name
@name.setter
def name(self, value):
""" Set name value.
Notes:
The name of the entity
"""
self._name = value
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def last_updated_date(self):
""" Get last_updated_date value.
Notes:
Time stamp when this object was last updated.
This attribute is named `lastUpdatedDate` in VSD API.
"""
return self._last_updated_date
@last_updated_date.setter
def last_updated_date(self, value):
""" Set last_updated_date value.
Notes:
Time stamp when this object was last updated.
This attribute is named `lastUpdatedDate` in VSD API.
"""
self._last_updated_date = value
@property
def active(self):
""" Get active value.
Notes:
If enabled, it means that this ACL or QOS entry is active
"""
return self._active
@active.setter
def active(self, value):
""" Set active value.
Notes:
If enabled, it means that this ACL or QOS entry is active
"""
self._active = value
@property
def default_allow_ip(self):
""" Get default_allow_ip value.
Notes:
If enabled a default ACL of Allow All is added as the last entry in the list of ACL entries
This attribute is named `defaultAllowIP` in VSD API.
"""
return self._default_allow_ip
@default_allow_ip.setter
def default_allow_ip(self, value):
""" Set default_allow_ip value.
Notes:
If enabled a default ACL of Allow All is added as the last entry in the list of ACL entries
This attribute is named `defaultAllowIP` in VSD API.
"""
self._default_allow_ip = value
@property
def default_allow_non_ip(self):
""" Get default_allow_non_ip value.
Notes:
If enabled, non ip traffic will be dropped
This attribute is named `defaultAllowNonIP` in VSD API.
"""
return self._default_allow_non_ip
@default_allow_non_ip.setter
def default_allow_non_ip(self, value):
""" Set default_allow_non_ip value.
Notes:
If enabled, non ip traffic will be dropped
This attribute is named `defaultAllowNonIP` in VSD API.
"""
self._default_allow_non_ip = value
@property
def default_install_acl_implicit_rules(self):
""" Get default_install_acl_implicit_rules value.
Notes:
If enabled, implicit rule will allow intra domain traffic by default
This attribute is named `defaultInstallACLImplicitRules` in VSD API.
"""
return self._default_install_acl_implicit_rules
@default_install_acl_implicit_rules.setter
def default_install_acl_implicit_rules(self, value):
""" Set default_install_acl_implicit_rules value.
Notes:
If enabled, implicit rule will allow intra domain traffic by default
This attribute is named `defaultInstallACLImplicitRules` in VSD API.
"""
self._default_install_acl_implicit_rules = value
@property
def description(self):
""" Get description value.
Notes:
A description of the entity
"""
return self._description
@description.setter
def description(self, value):
""" Set description value.
Notes:
A description of the entity
"""
self._description = value
@property
def embedded_metadata(self):
""" Get embedded_metadata value.
Notes:
Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a maximum of Metadata objects is returned based on the value set in the system configuration.
This attribute is named `embeddedMetadata` in VSD API.
"""
return self._embedded_metadata
@embedded_metadata.setter
def embedded_metadata(self, value):
""" Set embedded_metadata value.
Notes:
Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a maximum of Metadata objects is returned based on the value set in the system configuration.
This attribute is named `embeddedMetadata` in VSD API.
"""
self._embedded_metadata = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
@property
def policy_state(self):
""" Get policy_state value.
Notes:
None
This attribute is named `policyState` in VSD API.
"""
return self._policy_state
@policy_state.setter
def policy_state(self, value):
""" Set policy_state value.
Notes:
None
This attribute is named `policyState` in VSD API.
"""
self._policy_state = value
@property
def creation_date(self):
""" Get creation_date value.
Notes:
Time stamp when this object was created.
This attribute is named `creationDate` in VSD API.
"""
return self._creation_date
@creation_date.setter
def creation_date(self, value):
""" Set creation_date value.
Notes:
Time stamp when this object was created.
This attribute is named `creationDate` in VSD API.
"""
self._creation_date = value
@property
def priority(self):
""" Get priority value.
Notes:
The priority of the ACL entry that determines the order of entries
"""
return self._priority
@priority.setter
def priority(self, value):
""" Set priority value.
Notes:
The priority of the ACL entry that determines the order of entries
"""
self._priority = value
@property
def priority_type(self):
""" Get priority_type value.
Notes:
                Possible values: TOP_AUDIT. This will be the top most of the egress ACL stack
This attribute is named `priorityType` in VSD API.
"""
return self._priority_type
@priority_type.setter
def priority_type(self, value):
""" Set priority_type value.
Notes:
                Possible values: TOP_AUDIT. This will be the top most of the egress ACL stack
This attribute is named `priorityType` in VSD API.
"""
self._priority_type = value
@property
def associated_live_entity_id(self):
""" Get associated_live_entity_id value.
Notes:
In the draft mode, the ACL entry refers to this LiveEntity. In non-drafted mode, this is null.
This attribute is named `associatedLiveEntityID` in VSD API.
"""
return self._associated_live_entity_id
@associated_live_entity_id.setter
def associated_live_entity_id(self, value):
""" Set associated_live_entity_id value.
Notes:
In the draft mode, the ACL entry refers to this LiveEntity. In non-drafted mode, this is null.
This attribute is named `associatedLiveEntityID` in VSD API.
"""
self._associated_live_entity_id = value
@property
def associated_virtual_firewall_policy_id(self):
""" Get associated_virtual_firewall_policy_id value.
Notes:
The ID of the Virtual Firewall Policy, if this was created as part of the Virtual Firewall Policy creation
This attribute is named `associatedVirtualFirewallPolicyID` in VSD API.
"""
return self._associated_virtual_firewall_policy_id
@associated_virtual_firewall_policy_id.setter
def associated_virtual_firewall_policy_id(self, value):
""" Set associated_virtual_firewall_policy_id value.
Notes:
The ID of the Virtual Firewall Policy, if this was created as part of the Virtual Firewall Policy creation
This attribute is named `associatedVirtualFirewallPolicyID` in VSD API.
"""
self._associated_virtual_firewall_policy_id = value
@property
def auto_generate_priority(self):
""" Get auto_generate_priority value.
Notes:
This option only affects how the children ACL entry priorities of this template/policy are generated when the priority is not specified. If 'false', the priority is generated by incrementing the current highest ACL Entry priority by 100. If 'true', a random priority will be generated, which is advised when creating many entries concurrently without specifying the priority. This will cause the new child ACL entry to get a random, non-predictable, priority. Therefore it is advised to only enable this when allow rules are being created. If any type of ACL entry order is required, keep this value to 'false' and use your own defined priorities, this will make sure there is a clear set of priorities and how traffic is validated against the ACL entries.
This attribute is named `autoGeneratePriority` in VSD API.
"""
return self._auto_generate_priority
@auto_generate_priority.setter
def auto_generate_priority(self, value):
""" Set auto_generate_priority value.
Notes:
This option only affects how the children ACL entry priorities of this template/policy are generated when the priority is not specified. If 'false', the priority is generated by incrementing the current highest ACL Entry priority by 100. If 'true', a random priority will be generated, which is advised when creating many entries concurrently without specifying the priority. This will cause the new child ACL entry to get a random, non-predictable, priority. Therefore it is advised to only enable this when allow rules are being created. If any type of ACL entry order is required, keep this value to 'false' and use your own defined priorities, this will make sure there is a clear set of priorities and how traffic is validated against the ACL entries.
This attribute is named `autoGeneratePriority` in VSD API.
"""
self._auto_generate_priority = value
@property
def owner(self):
""" Get owner value.
Notes:
Identifies the user that has created this object.
"""
return self._owner
@owner.setter
def owner(self, value):
""" Set owner value.
Notes:
Identifies the user that has created this object.
"""
self._owner = value
@property
def external_id(self):
""" Get external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
self._external_id = value
| bsd-3-clause | 4,749,001,960,045,595,000 | 32.792512 | 772 | 0.605263 | false |
mbartling/TAMU_senior_design | Python/gps2local.py | 1 | 1458 | #! /usr/bin/env python
from numpy import *
import sys
#p1 = [30.625621, -96.336753]
#p2 = [30.624388, -96.335755]
#p3 = [30.626050, -96.333368]
#p4 = [30.627195, -96.334945]
xres = 300
yres = 500
lat = []
lon = []
for line in sys.stdin:
    line = line.strip()
(latS, lonS, dummy) = line.split(',')
lat.append(latS)
lon.append(lonS)
lat = array(lat, dtype='int32')
lon = array(lon, dtype='int32')
latmin = min(lat)
latmax = max(lat)
lonmin = min(lon)
lonmax = max(lon)
xlin = linspace(latmin,latmax,xres)
ylin = linspace(lonmin,lonmax,yres)
print xlin, ylin
#s1 = [30.625383, -96.336161]
#s2 = [30.624978, -96.335295]
#s3 = [30.625749, -96.334460]
#rssi1 = -16.2342
#rssi2 = -20.2342
#rssi3 = -22.2342
thresh = 0.15 #15% of minimum distance
#gamemap = zeros([len(xlin),len(ylin)])
#
#dx = xlin - s1[0]
#dy = ylin - s1[1]
#xloc = abs(dx) <= (1+thresh)*min(abs(dx)) #consider doing average
#yloc = abs(dy) <= (1+thresh)*min(abs(dy))
#gamemap[xloc,yloc] = rssi1
#
#dx = xlin - s2[0]
#dy = ylin - s2[1]
#xloc = abs(dx) <= (1+thresh)*min(abs(dx)) #consider doing average
#yloc = abs(dy) <= (1+thresh)*min(abs(dy))
#gamemap[xloc,yloc] = rssi2
#
#dx = xlin - s3[0]
#dy = ylin - s3[1]
#xloc = abs(dx) <= (1+thresh)*min(abs(dx)) #consider doing average
#yloc = abs(dy) <= (1+thresh)*min(abs(dy))
#gamemap[xloc,yloc] = rssi3
#
#temp = zeros([len(xlin),len(ylin)])
#mask = gamemap != 0
#temp[mask] =
#
| mit | -899,562,376,890,822,100 | 20.78125 | 66 | 0.593964 | false |
rasbt/python-machine-learning-book | code/optional-py-scripts/ch05.py | 1 | 19830 | # Sebastian Raschka, 2015 (http://sebastianraschka.com)
# Python Machine Learning - Code Examples
#
# Chapter 5 - Compressing Data via Dimensionality Reduction
#
# S. Raschka. Python Machine Learning. Packt Publishing Ltd., 2015.
# GitHub Repo: https://github.com/rasbt/python-machine-learning-book
#
# License: MIT
# https://github.com/rasbt/python-machine-learning-book/blob/master/LICENSE.txt
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.linear_model import LogisticRegression
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.datasets import make_moons
from sklearn.datasets import make_circles
from sklearn.decomposition import KernelPCA
from scipy.spatial.distance import pdist, squareform
from scipy import exp
from scipy.linalg import eigh
from matplotlib.ticker import FormatStrFormatter
# for sklearn 0.18's alternative syntax
from distutils.version import LooseVersion as Version
from sklearn import __version__ as sklearn_version
if Version(sklearn_version) < '0.18':
from sklearn.grid_search import train_test_split
from sklearn.lda import LDA
else:
from sklearn.model_selection import train_test_split
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
#############################################################################
print(50 * '=')
print('Section: Unsupervised dimensionality reduction'
' via principal component analysis')
print(50 * '-')
df_wine = pd.read_csv('https://archive.ics.uci.edu/ml/'
'machine-learning-databases/wine/wine.data',
header=None)
df_wine.columns = ['Class label', 'Alcohol', 'Malic acid', 'Ash',
'Alcalinity of ash', 'Magnesium', 'Total phenols',
'Flavanoids', 'Nonflavanoid phenols', 'Proanthocyanins',
'Color intensity', 'Hue',
'OD280/OD315 of diluted wines', 'Proline']
print('Wine data excerpt:\n\n:', df_wine.head())
X, y = df_wine.iloc[:, 1:].values, df_wine.iloc[:, 0].values
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=0.3, random_state=0)
sc = StandardScaler()
X_train_std = sc.fit_transform(X_train)
X_test_std = sc.transform(X_test)
cov_mat = np.cov(X_train_std.T)
eigen_vals, eigen_vecs = np.linalg.eig(cov_mat)
print('\nEigenvalues \n%s' % eigen_vals)
#############################################################################
print(50 * '=')
print('Section: Total and explained variance')
print(50 * '-')
tot = sum(eigen_vals)
var_exp = [(i / tot) for i in sorted(eigen_vals, reverse=True)]
cum_var_exp = np.cumsum(var_exp)
plt.bar(range(1, 14), var_exp, alpha=0.5, align='center',
label='individual explained variance')
plt.step(range(1, 14), cum_var_exp, where='mid',
label='cumulative explained variance')
plt.ylabel('Explained variance ratio')
plt.xlabel('Principal components')
plt.legend(loc='best')
# plt.tight_layout()
# plt.savefig('./figures/pca1.png', dpi=300)
plt.show()
#############################################################################
print(50 * '=')
print('Section: Feature Transformation')
print(50 * '-')
# Make a list of (eigenvalue, eigenvector) tuples
eigen_pairs = [(np.abs(eigen_vals[i]), eigen_vecs[:, i])
for i in range(len(eigen_vals))]
# Sort the (eigenvalue, eigenvector) tuples from high to low
eigen_pairs.sort(reverse=True)
w = np.hstack((eigen_pairs[0][1][:, np.newaxis],
eigen_pairs[1][1][:, np.newaxis]))
print('Matrix W:\n', w)
X_train_pca = X_train_std.dot(w)
colors = ['r', 'b', 'g']
markers = ['s', 'x', 'o']
for l, c, m in zip(np.unique(y_train), colors, markers):
plt.scatter(X_train_pca[y_train == l, 0],
X_train_pca[y_train == l, 1],
c=c, label=l, marker=m)
plt.xlabel('PC 1')
plt.ylabel('PC 2')
plt.legend(loc='lower left')
# plt.tight_layout()
# plt.savefig('./figures/pca2.png', dpi=300)
plt.show()
print('Dot product:\n', X_train_std[0].dot(w))
#############################################################################
print(50 * '=')
print('Section: Principal component analysis in scikit-learn')
print(50 * '-')
pca = PCA()
X_train_pca = pca.fit_transform(X_train_std)
print('Variance explained ratio:\n', pca.explained_variance_ratio_)
plt.bar(range(1, 14), pca.explained_variance_ratio_, alpha=0.5, align='center')
plt.step(range(1, 14), np.cumsum(pca.explained_variance_ratio_), where='mid')
plt.ylabel('Explained variance ratio')
plt.xlabel('Principal components')
plt.show()
pca = PCA(n_components=2)
X_train_pca = pca.fit_transform(X_train_std)
X_test_pca = pca.transform(X_test_std)
plt.scatter(X_train_pca[:, 0], X_train_pca[:, 1])
plt.xlabel('PC 1')
plt.ylabel('PC 2')
plt.show()
def plot_decision_regions(X, y, classifier, resolution=0.02):
# setup marker generator and color map
markers = ('s', 'x', 'o', '^', 'v')
colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
cmap = ListedColormap(colors[:len(np.unique(y))])
# plot the decision surface
x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
np.arange(x2_min, x2_max, resolution))
Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
Z = Z.reshape(xx1.shape)
plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
plt.xlim(xx1.min(), xx1.max())
plt.ylim(xx2.min(), xx2.max())
# plot class samples
for idx, cl in enumerate(np.unique(y)):
plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1],
alpha=0.8, c=cmap(idx),
marker=markers[idx], label=cl)
lr = LogisticRegression()
lr = lr.fit(X_train_pca, y_train)
plot_decision_regions(X_train_pca, y_train, classifier=lr)
plt.xlabel('PC 1')
plt.ylabel('PC 2')
plt.legend(loc='lower left')
# plt.tight_layout()
# plt.savefig('./figures/pca3.png', dpi=300)
plt.show()
plot_decision_regions(X_test_pca, y_test, classifier=lr)
plt.xlabel('PC 1')
plt.ylabel('PC 2')
plt.legend(loc='lower left')
# plt.tight_layout()
# plt.savefig('./figures/pca4.png', dpi=300)
plt.show()
pca = PCA(n_components=None)
X_train_pca = pca.fit_transform(X_train_std)
print('Explaind variance ratio:\n', pca.explained_variance_ratio_)
#############################################################################
print(50 * '=')
print('Section: Supervised data compression via linear discriminant analysis'
' - Computing the scatter matrices')
print(50 * '-')
np.set_printoptions(precision=4)
mean_vecs = []
for label in range(1, 4):
mean_vecs.append(np.mean(X_train_std[y_train == label], axis=0))
print('MV %s: %s\n' % (label, mean_vecs[label - 1]))
d = 13 # number of features
S_W = np.zeros((d, d))
for label, mv in zip(range(1, 4), mean_vecs):
class_scatter = np.zeros((d, d)) # scatter matrix for each class
for row in X_train_std[y_train == label]:
row, mv = row.reshape(d, 1), mv.reshape(d, 1) # make column vectors
class_scatter += (row - mv).dot((row - mv).T)
S_W += class_scatter # sum class scatter matrices
print('Within-class scatter matrix: %sx%s' % (S_W.shape[0], S_W.shape[1]))
print('Class label distribution: %s'
% np.bincount(y_train)[1:])
d = 13 # number of features
S_W = np.zeros((d, d))
for label, mv in zip(range(1, 4), mean_vecs):
class_scatter = np.cov(X_train_std[y_train == label].T)
S_W += class_scatter
print('Scaled within-class scatter matrix: %sx%s' % (S_W.shape[0],
S_W.shape[1]))
mean_overall = np.mean(X_train_std, axis=0)
d = 13 # number of features
S_B = np.zeros((d, d))
for i, mean_vec in enumerate(mean_vecs):
n = X_train[y_train == i + 1, :].shape[0]
mean_vec = mean_vec.reshape(d, 1) # make column vector
mean_overall = mean_overall.reshape(d, 1) # make column vector
S_B += n * (mean_vec - mean_overall).dot((mean_vec - mean_overall).T)
print('Between-class scatter matrix: %sx%s' % (S_B.shape[0], S_B.shape[1]))
#############################################################################
print(50 * '=')
print('Section: Selecting linear discriminants for the new feature subspace')
print(50 * '-')
eigen_vals, eigen_vecs = np.linalg.eig(np.linalg.inv(S_W).dot(S_B))
# Make a list of (eigenvalue, eigenvector) tuples
eigen_pairs = [(np.abs(eigen_vals[i]), eigen_vecs[:, i])
for i in range(len(eigen_vals))]
# Sort the (eigenvalue, eigenvector) tuples from high to low
eigen_pairs = sorted(eigen_pairs, key=lambda k: k[0], reverse=True)
# Visually confirm that the list is correctly sorted by decreasing eigenvalues
print('Eigenvalues in decreasing order:\n')
for eigen_val in eigen_pairs:
print(eigen_val[0])
tot = sum(eigen_vals.real)
discr = [(i / tot) for i in sorted(eigen_vals.real, reverse=True)]
cum_discr = np.cumsum(discr)
plt.bar(range(1, 14), discr, alpha=0.5, align='center',
label='individual "discriminability"')
plt.step(range(1, 14), cum_discr, where='mid',
label='cumulative "discriminability"')
plt.ylabel('"discriminability" ratio')
plt.xlabel('Linear Discriminants')
plt.ylim([-0.1, 1.1])
plt.legend(loc='best')
# plt.tight_layout()
# plt.savefig('./figures/lda1.png', dpi=300)
plt.show()
w = np.hstack((eigen_pairs[0][1][:, np.newaxis].real,
eigen_pairs[1][1][:, np.newaxis].real))
print('Matrix W:\n', w)
#############################################################################
print(50 * '=')
print('Section: Projecting samples onto the new feature space')
print(50 * '-')
X_train_lda = X_train_std.dot(w)
colors = ['r', 'b', 'g']
markers = ['s', 'x', 'o']
for l, c, m in zip(np.unique(y_train), colors, markers):
plt.scatter(X_train_lda[y_train == l, 0] * (-1),
X_train_lda[y_train == l, 1] * (-1),
c=c, label=l, marker=m)
plt.xlabel('LD 1')
plt.ylabel('LD 2')
plt.legend(loc='lower right')
# plt.tight_layout()
# plt.savefig('./figures/lda2.png', dpi=300)
plt.show()
#############################################################################
print(50 * '=')
print('Section: LDA via scikit-learn')
print(50 * '-')
lda = LDA(n_components=2)
X_train_lda = lda.fit_transform(X_train_std, y_train)
lr = LogisticRegression()
lr = lr.fit(X_train_lda, y_train)
plot_decision_regions(X_train_lda, y_train, classifier=lr)
plt.xlabel('LD 1')
plt.ylabel('LD 2')
plt.legend(loc='lower left')
# plt.tight_layout()
# plt.savefig('./images/lda3.png', dpi=300)
plt.show()
X_test_lda = lda.transform(X_test_std)
plot_decision_regions(X_test_lda, y_test, classifier=lr)
plt.xlabel('LD 1')
plt.ylabel('LD 2')
plt.legend(loc='lower left')
# plt.tight_layout()
# plt.savefig('./images/lda4.png', dpi=300)
plt.show()
#############################################################################
print(50 * '=')
print('Section: Implementing a kernel principal component analysis in Python')
print(50 * '-')
def rbf_kernel_pca(X, gamma, n_components):
"""
RBF kernel PCA implementation.
Parameters
------------
X: {NumPy ndarray}, shape = [n_samples, n_features]
gamma: float
Tuning parameter of the RBF kernel
n_components: int
Number of principal components to return
Returns
------------
X_pc: {NumPy ndarray}, shape = [n_samples, k_features]
Projected dataset
"""
# Calculate pairwise squared Euclidean distances
# in the MxN dimensional dataset.
sq_dists = pdist(X, 'sqeuclidean')
# Convert pairwise distances into a square matrix.
mat_sq_dists = squareform(sq_dists)
# Compute the symmetric kernel matrix.
K = exp(-gamma * mat_sq_dists)
# Center the kernel matrix.
N = K.shape[0]
one_n = np.ones((N, N)) / N
K = K - one_n.dot(K) - K.dot(one_n) + one_n.dot(K).dot(one_n)
# Obtaining eigenpairs from the centered kernel matrix
# numpy.eigh returns them in sorted order
eigvals, eigvecs = eigh(K)
# Collect the top k eigenvectors (projected samples)
X_pc = np.column_stack((eigvecs[:, -i]
for i in range(1, n_components + 1)))
return X_pc
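# For illustration (added note, not in the original script): the helper above is
# used later in this chapter exactly as
#   X_kpca = rbf_kernel_pca(X, gamma=15, n_components=2)
# to unfold the half-moon and concentric-circle datasets.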
#############################################################################
print(50 * '=')
print('Section: Example 1: Separating half-moon shapes')
print(50 * '-')
X, y = make_moons(n_samples=100, random_state=123)
plt.scatter(X[y == 0, 0], X[y == 0, 1], color='red', marker='^', alpha=0.5)
plt.scatter(X[y == 1, 0], X[y == 1, 1], color='blue', marker='o', alpha=0.5)
# plt.tight_layout()
# plt.savefig('./figures/half_moon_1.png', dpi=300)
plt.show()
scikit_pca = PCA(n_components=2)
X_spca = scikit_pca.fit_transform(X)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3))
ax[0].scatter(X_spca[y == 0, 0], X_spca[y == 0, 1],
color='red', marker='^', alpha=0.5)
ax[0].scatter(X_spca[y == 1, 0], X_spca[y == 1, 1],
color='blue', marker='o', alpha=0.5)
ax[1].scatter(X_spca[y == 0, 0], np.zeros((50, 1)) + 0.02,
color='red', marker='^', alpha=0.5)
ax[1].scatter(X_spca[y == 1, 0], np.zeros((50, 1)) - 0.02,
color='blue', marker='o', alpha=0.5)
ax[0].set_xlabel('PC1')
ax[0].set_ylabel('PC2')
ax[1].set_ylim([-1, 1])
ax[1].set_yticks([])
ax[1].set_xlabel('PC1')
# plt.tight_layout()
# plt.savefig('./figures/half_moon_2.png', dpi=300)
plt.show()
X_kpca = rbf_kernel_pca(X, gamma=15, n_components=2)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3))
ax[0].scatter(X_kpca[y == 0, 0], X_kpca[y == 0, 1],
color='red', marker='^', alpha=0.5)
ax[0].scatter(X_kpca[y == 1, 0], X_kpca[y == 1, 1],
color='blue', marker='o', alpha=0.5)
ax[1].scatter(X_kpca[y == 0, 0], np.zeros((50, 1)) + 0.02,
color='red', marker='^', alpha=0.5)
ax[1].scatter(X_kpca[y == 1, 0], np.zeros((50, 1)) - 0.02,
color='blue', marker='o', alpha=0.5)
ax[0].set_xlabel('PC1')
ax[0].set_ylabel('PC2')
ax[1].set_ylim([-1, 1])
ax[1].set_yticks([])
ax[1].set_xlabel('PC1')
ax[0].xaxis.set_major_formatter(FormatStrFormatter('%0.1f'))
ax[1].xaxis.set_major_formatter(FormatStrFormatter('%0.1f'))
# plt.tight_layout()
# plt.savefig('./figures/half_moon_3.png', dpi=300)
plt.show()
#############################################################################
print(50 * '=')
print('Section: Example 2: Separating concentric circles')
print(50 * '-')
X, y = make_circles(n_samples=1000, random_state=123, noise=0.1, factor=0.2)
plt.scatter(X[y == 0, 0], X[y == 0, 1], color='red', marker='^', alpha=0.5)
plt.scatter(X[y == 1, 0], X[y == 1, 1], color='blue', marker='o', alpha=0.5)
# plt.tight_layout()
# plt.savefig('./figures/circles_1.png', dpi=300)
plt.show()
scikit_pca = PCA(n_components=2)
X_spca = scikit_pca.fit_transform(X)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3))
ax[0].scatter(X_spca[y == 0, 0], X_spca[y == 0, 1],
color='red', marker='^', alpha=0.5)
ax[0].scatter(X_spca[y == 1, 0], X_spca[y == 1, 1],
color='blue', marker='o', alpha=0.5)
ax[1].scatter(X_spca[y == 0, 0], np.zeros((500, 1)) + 0.02,
color='red', marker='^', alpha=0.5)
ax[1].scatter(X_spca[y == 1, 0], np.zeros((500, 1)) - 0.02,
color='blue', marker='o', alpha=0.5)
ax[0].set_xlabel('PC1')
ax[0].set_ylabel('PC2')
ax[1].set_ylim([-1, 1])
ax[1].set_yticks([])
ax[1].set_xlabel('PC1')
# plt.tight_layout()
# plt.savefig('./figures/circles_2.png', dpi=300)
plt.show()
X_kpca = rbf_kernel_pca(X, gamma=15, n_components=2)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3))
ax[0].scatter(X_kpca[y == 0, 0], X_kpca[y == 0, 1],
color='red', marker='^', alpha=0.5)
ax[0].scatter(X_kpca[y == 1, 0], X_kpca[y == 1, 1],
color='blue', marker='o', alpha=0.5)
ax[1].scatter(X_kpca[y == 0, 0], np.zeros((500, 1)) + 0.02,
color='red', marker='^', alpha=0.5)
ax[1].scatter(X_kpca[y == 1, 0], np.zeros((500, 1)) - 0.02,
color='blue', marker='o', alpha=0.5)
ax[0].set_xlabel('PC1')
ax[0].set_ylabel('PC2')
ax[1].set_ylim([-1, 1])
ax[1].set_yticks([])
ax[1].set_xlabel('PC1')
# plt.tight_layout()
# plt.savefig('./figures/circles_3.png', dpi=300)
plt.show()
#############################################################################
print(50 * '=')
print('Section: Projecting new data points')
print(50 * '-')
def rbf_kernel_pca(X, gamma, n_components):
"""
RBF kernel PCA implementation.
Parameters
------------
X: {NumPy ndarray}, shape = [n_samples, n_features]
gamma: float
Tuning parameter of the RBF kernel
n_components: int
Number of principal components to return
Returns
------------
X_pc: {NumPy ndarray}, shape = [n_samples, k_features]
Projected dataset
lambdas: list
Eigenvalues
"""
# Calculate pairwise squared Euclidean distances
# in the MxN dimensional dataset.
sq_dists = pdist(X, 'sqeuclidean')
# Convert pairwise distances into a square matrix.
mat_sq_dists = squareform(sq_dists)
# Compute the symmetric kernel matrix.
K = exp(-gamma * mat_sq_dists)
# Center the kernel matrix.
N = K.shape[0]
one_n = np.ones((N, N)) / N
K = K - one_n.dot(K) - K.dot(one_n) + one_n.dot(K).dot(one_n)
# Obtaining eigenpairs from the centered kernel matrix
# numpy.eigh returns them in sorted order
eigvals, eigvecs = eigh(K)
# Collect the top k eigenvectors (projected samples)
alphas = np.column_stack((eigvecs[:, -i]
for i in range(1, n_components + 1)))
# Collect the corresponding eigenvalues
lambdas = [eigvals[-i] for i in range(1, n_components + 1)]
return alphas, lambdas
X, y = make_moons(n_samples=100, random_state=123)
alphas, lambdas = rbf_kernel_pca(X, gamma=15, n_components=1)
x_new = X[25]
print('New data point x_new:', x_new)
x_proj = alphas[25] # original projection
print('Original projection x_proj:', x_proj)
def project_x(x_new, X, gamma, alphas, lambdas):
pair_dist = np.array([np.sum((x_new - row)**2) for row in X])
k = np.exp(-gamma * pair_dist)
return k.dot(alphas / lambdas)
# projection of the "new" datapoint
x_reproj = project_x(x_new, X, gamma=15, alphas=alphas, lambdas=lambdas)
print('Reprojection x_reproj:', x_reproj)
plt.scatter(alphas[y == 0, 0], np.zeros((50)),
color='red', marker='^', alpha=0.5)
plt.scatter(alphas[y == 1, 0], np.zeros((50)),
color='blue', marker='o', alpha=0.5)
plt.scatter(x_proj, 0, color='black',
label='original projection of point X[25]', marker='^', s=100)
plt.scatter(x_reproj, 0, color='green',
label='remapped point X[25]', marker='x', s=500)
plt.legend(scatterpoints=1)
# plt.tight_layout()
# plt.savefig('./figures/reproject.png', dpi=300)
plt.show()
#############################################################################
print(50 * '=')
print('Section: Kernel principal component analysis in scikit-learn')
print(50 * '-')
X, y = make_moons(n_samples=100, random_state=123)
scikit_kpca = KernelPCA(n_components=2, kernel='rbf', gamma=15)
X_skernpca = scikit_kpca.fit_transform(X)
plt.scatter(X_skernpca[y == 0, 0], X_skernpca[y == 0, 1],
color='red', marker='^', alpha=0.5)
plt.scatter(X_skernpca[y == 1, 0], X_skernpca[y == 1, 1],
color='blue', marker='o', alpha=0.5)
plt.xlabel('PC1')
plt.ylabel('PC2')
# plt.tight_layout()
# plt.savefig('./figures/scikit_kpca.png', dpi=300)
plt.show()
| mit | -2,251,548,366,903,846,100 | 30.032864 | 79 | 0.595209 | false |
Dragon2fly/vpngate-with-proxy | old/config.py | 1 | 4265 | # -*- coding: utf-8 -*-
__author__ = 'duc_tin'
import ConfigParser
import re
import sys
def ctext(text, color):
""" Add color to printed text
:type text: str
:type color: str
"""
fcolor = {'p': '\033[95m', # purple
'b': '\033[94m', # blue
'g': '\033[92m', # green
'y': '\033[93m', # yellow
'r': '\033[91m', # red
'B': '\033[1m', # BOLD
'U': '\033[4m', # UNDERLINE
}
ENDC = '\033[0m'
tformat = ''.join([fcolor[fm] for fm in color])
return tformat + text + ENDC
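# Usage sketch (added for illustration, not part of the original script):
# ctext('VPN connected', 'gB') wraps the text in green + bold ANSI codes,
# just as the menu below does with e.g. ctext(' 1. Proxy address:', 'yB').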
def get_input(config_path, option):
if option[0] in ['c', 'config']:
proxy, port, sort_by, use_proxy, country, fix_dns = read_config(config_path)
while 1:
print ctext(' Current settings:', 'B')
print ctext(' 1. Proxy address:', 'yB'), proxy, ctext('\t2. port: ', 'yB'), port
print ctext(' 3. Sort servers by:', 'gB'), sort_by
print ctext(' 4. Use proxy:', 'rB'), use_proxy
print ctext(' 5. Country filter:', 'pB'), country
print ctext(' 6. Fix dns leaking:', 'bB'), fix_dns
user_input = raw_input('\nCommand or Enter to fetch server list: ')
if user_input == '':
print 'Process to vpn server list'
write_config(config_path, proxy, port, sort_by, use_proxy, country, fix_dns)
return
elif user_input == '1':
proxy = raw_input('Your http_proxy:')
elif user_input == '2':
user_input = 'abc'
while not user_input.strip().isdigit():
user_input = raw_input('Http proxy\'s port (eg: 8080): ')
port = user_input
elif user_input == '3':
while user_input not in ['speed', 'ping', 'score', 'up time', 'uptime']:
user_input = raw_input('Sort servers by (speed | ping | score | up time): ')
sort_by = 'up time' if user_input == 'uptime' else user_input
elif user_input == '4':
while user_input.lower() not in ['y', 'n', 'yes', 'no']:
user_input = raw_input('Use proxy to connect to vpn? (yes|no): ')
else:
use_proxy = 'no' if user_input in 'no' else 'yes'
elif user_input == '5':
while not re.match('^[a-z ]*$', user_input.lower().strip()):
user_input = raw_input('Country\'s name (eg: all(default), jp, japan):')
else:
country = 'all' if not user_input else user_input.lower()
elif user_input == '6':
while user_input.lower() not in ['y', 'n', 'yes', 'no']:
user_input = raw_input('Fix DNS:')
else:
fix_dns = 'no' if user_input in 'no' else 'yes'
elif user_input in ['q', 'quit', 'exit']:
print ctext('Goodbye'.center(40), 'gB')
sys.exit(0)
else:
print 'Invalid input'
else:
print 'Wrong argument. Do you mean "config"?'
def read_config(config_path):
parser = ConfigParser.SafeConfigParser()
parser.read(config_path)
use_proxy = parser.get('proxy', 'use proxy')
proxy = parser.get('proxy', 'address')
port = parser.get('proxy', 'port')
sort_by = parser.get('sort', 'key')
country = parser.get('country_filter', 'country')
fix_dns = parser.get('DNS_leak', 'fix_dns')
return proxy, port, sort_by, use_proxy, country, fix_dns
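# Illustrative note (added; the values shown are only examples): the file parsed
# by read_config() is a plain INI file whose sections and keys mirror
# write_config() below, e.g.
#
#   [proxy]
#   use proxy = no
#   address = 127.0.0.1
#   port = 8080
#
#   [sort]
#   key = speed
#
#   [country_filter]
#   country = all
#
#   [DNS_leak]
#   fix_dns = yes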
def write_config(config_path, proxy, port, parameter, use_proxy, country, fix_dns):
parser = ConfigParser.SafeConfigParser()
parser.add_section('proxy')
parser.set('proxy', 'use proxy', use_proxy)
parser.set('proxy', 'address', proxy)
parser.set('proxy', 'port', port)
parser.add_section('sort')
parser.set('sort', 'key', parameter)
parser.add_section('country_filter')
parser.set('country_filter', 'country', country)
parser.add_section('DNS_leak')
parser.set('DNS_leak', 'fix_dns', fix_dns)
with open(config_path, 'w+') as configfile:
parser.write(configfile) | gpl-2.0 | -464,604,793,038,710,300 | 35.775862 | 96 | 0.516061 | false |
gengwg/leetcode | 179_largest_number.py | 1 | 1445 | # -*- coding: utf-8 -*-
# 179. Largest Number
# Given a list of non negative integers,
# arrange them such that they form the largest number.
#
# For example, given [3, 30, 34, 5, 9], the largest formed number is 9534330.
#
# Note: The result may be very large,
# so you need to return a string instead of an integer.
#
# Credits:
# Special thanks to @ts for adding this problem and creating all test cases.
#
# http://bookshadow.com/weblog/2015/01/13/leetcode-largest-number/
# Sorting idea:
# For two candidate numbers a and b,
# if str(a) + str(b) > str(b) + str(a),
# then a comes before b; otherwise b comes before a.
#
# Sorting the original array in descending order by this rule gives the answer.
#
# Time complexity: O(n log n)
#
# Tricky test case:
# Input: [0,0]
# Output: "00"
# Expected: "0"
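#
# A quick worked example of the rule above (illustrative): for a = 9 and b = 34,
# str(a) + str(b) = "934" > "349" = str(b) + str(a), so 9 is placed before 34;
# applying this to [3, 30, 34, 5, 9] yields "9534330" as in the problem statement.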
class Solution:
# @param {integer[]} nums
# @return {string}
def largestNumber(self, nums):
nums = sorted([str(x) for x in nums], cmp=self.compare)
ans = ''.join(nums).lstrip('0')
return ans or '0'
def compare(self, a, b):
        # The 2nd [] is not a list literal; it indexes with a bool: True == 1, False == 0
        # [1, -1][1] == -1; [1, -1][0] == 1
        # so this sorts a, b in reverse (descending) order
return [1, -1][a + b > b + a]
        # equivalent to:
        #     if a + b > b + a:
        #         return -1
        #     else:
        #         return 1
if __name__ == '__main__':
print Solution().largestNumber([3, 30, 34, 5, 9])
| apache-2.0 | -4,658,683,190,685,438,000 | 25.098039 | 77 | 0.57701 | false |
alex-bauer/kelvin-power-challenge | src/features/features_saaf.py | 1 | 1680 | """Concatenate and resample SAAF data"""
import sys
import os
sys.path.append("../")
from utils.utils import *
folder = config.features_folder
if not os.path.exists(folder):
os.makedirs(folder)
def parse_saaf(filename, dropna=True):
df = pd.read_csv(config.data_folder + '/' + filename)
df = convert_time(df)
df = resample(df)
if dropna:
df = df.dropna()
return df
saaf_train1 = parse_saaf('/train_set/context--2008-08-22_2010-07-10--saaf.csv')
saaf_train2 = parse_saaf('/train_set/context--2010-07-10_2012-05-27--saaf.csv')
saaf_train3 = parse_saaf('/train_set/context--2012-05-27_2014-04-14--saaf.csv')
saaf_train = pd.concat([saaf_train1, saaf_train2, saaf_train3])
saaf_test = parse_saaf('/test_set/context--2014-04-14_2016-03-01--saaf.csv')
saaf_all = pd.concat([saaf_train, saaf_test])
#Binary indicator for solar conjunction
saaf_all['conjunction']=0
saaf_all.loc['2008-11-16':'2008-12-20','conjunction']=1
saaf_all.loc['2011-01-17':'2011-02-20','conjunction']=1
saaf_all.loc['2013-03-28':'2013-05-05','conjunction']=1
saaf_all.loc['2015-05-27':'2015-07-01','conjunction']=1
cols=['sa','sx','sy','sz']
#Averages over previous hours
for col in cols:
saaf_all[col+'_last_1']=saaf_all[col].shift(-1)
saaf_all[col+'_last_3']=saaf_all[col].rolling(3).mean()
saaf_all[col+'_last_24']=saaf_all[col].rolling(24).mean()
target = pd.read_pickle(config.data_folder + '/target.pkl')
target = target.join(saaf_all.reindex(target.index, method='nearest'))
saaf_all = target.drop(config.target_cols, axis=1)
saaf_all.fillna(method='ffill').fillna(method='bfill').to_pickle(config.features_folder + '/saaf.pkl')
print "Done." | mit | 8,595,800,424,774,629,000 | 27.982759 | 102 | 0.683333 | false |
heray1990/google-python-exercises | babynames/babynames.py | 1 | 2636 | #!/usr/bin/python
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
import sys
import re
"""Baby Names exercise
Define the extract_names() function below and change main()
to call it.
For writing regex, it's nice to include a copy of the target
text for inspiration.
Here's what the html looks like in the baby.html files:
...
<h3 align="center">Popularity in 1990</h3>
....
<tr align="right"><td>1</td><td>Michael</td><td>Jessica</td>
<tr align="right"><td>2</td><td>Christopher</td><td>Ashley</td>
<tr align="right"><td>3</td><td>Matthew</td><td>Brittany</td>
...
Suggested milestones for incremental development:
-Extract the year and print it
-Extract the names and rank numbers and just print them
-Get the names data into a dict and print it
-Build the [year, 'name rank', ... ] list and print it
-Fix main() to use the extract_names list
"""
def extract_names(filename):
"""
Given a file name for baby.html, returns a list starting with the year string
followed by the name-rank strings in alphabetical order.
['2006', 'Aaliyah 91', Aaron 57', 'Abagail 895', ' ...]
"""
# +++your code here+++
f = open(filename, 'rU')
file_strings = f.read()
nr_dict = {}
output_ls = []
tuples = re.findall(r'(Popularity in )(\d\d\d\d)', file_strings)
year = tuples[0][1]
output_ls.append(year)
tuples = re.findall(r'<td>(\d+)</td><td>(\w+)</td><td>(\w+)</td>', file_strings)
for i in range(1,3):
for nr_tuple in tuples:
nr_dict[nr_tuple[i]] = nr_tuple[0]
for key in sorted(nr_dict.keys()):
output_ls.append(key + ' ' + nr_dict[key])
f.close()
return output_ls
def main():
# This command-line parsing code is provided.
# Make a list of command line arguments, omitting the [0] element
# which is the script itself.
args = sys.argv[1:]
if not args:
print 'usage: [--summaryfile] file [file ...]'
sys.exit(1)
# Notice the summary flag and remove it from args if it is present.
summary = False
if args[0] == '--summaryfile':
summary = True
del args[0]
# +++your code here+++
# For each filename, get the names, then either print the text output
# or write it to a summary file
for filename in args:
name_list = extract_names(filename)
if summary == False:
for name in name_list:
print name
else:
f = open(filename + '.summary', 'w')
for name in name_list:
f.write(name + '\n')
f.close()
if __name__ == '__main__':
main()
| apache-2.0 | 2,648,205,463,664,366,600 | 25.626263 | 82 | 0.647951 | false |
ryanjoneil/docker-image-construction | dicp/solvers/most_common.py | 1 | 1247 | from collections import defaultdict
class MostCommonHeuristic(object):
    '''Greedy heuristic: at each step, share the command that appears in the most remaining images'''
_slug = 'most-common'
def slug(self):
return MostCommonHeuristic._slug
def solve(self, problem, saver):
# Keep track of what hasn't been assigned and how many of each thing there are.
remaining = {i: set(problem.images[i]) for i in problem.images}
order = defaultdict(list)
self._assign(remaining, order)
saver(order)
def _assign(self, remaining, order):
if not remaining:
return
# Figure the most common command.
by_cmd = defaultdict(set)
for i, cmds in remaining.items():
for c in cmds:
by_cmd[c].add(i)
most_common = max(by_cmd, key=lambda p: len(by_cmd[p]))
# Add this to the schedule for any it applies to.
new_remain = {}
for i in by_cmd[most_common]:
order[i].append(most_common)
remaining[i].remove(most_common)
if remaining[i]:
new_remain[i] = set(remaining[i])
del remaining[i]
self._assign(new_remain, order)
self._assign(remaining, order)
| mit | 5,420,271,509,696,158,000 | 30.974359 | 87 | 0.588613 | false |
Balannen/LSMASOMM | atom3/Kernel/ErrorHandlers/exceptionStreamHook.py | 1 | 1233 | """
exceptionStreamHook.py
A custom stderr hook by Denis Dube, http://msdl.cs.mcgill.ca/people/denis/
"""
import tkMessageBox, sys
class exceptionStreamHook:
"""
This class pretends to be an open stderr file stream
Perhaps it should subclass File to be safer... but what would be the default
  behaviour then??? Must I override each method one at a time? Bah, like anyone
uses that on stderr...
"""
def write( self, errorString ):
""" Simulates the write method of a file stream object """
# Send the error back where it belongs
sys.__stderr__.write( errorString )
#print ">"+errorString+"<"
# Dealing with a new exception
if( errorString[:9] == 'Exception' ):
tkMessageBox.showerror( 'Uncaught Exception',
'See console for details\n\n' + errorString )
def close( self, *args ): pass
def open( self, *args ): pass
def applyHook2stderr():
# Redirect error output stream to customized handler
sys.stderr = exceptionStreamHook()
if __name__ == '__main__':
print "Testing error redirect"
applyHook2stderr()
5/0
x=bs
print "Done" | gpl-3.0 | 469,603,371,962,662,600 | 23.729167 | 78 | 0.607461 | false |
aydoganserdar/python-rabbit-logging | setup.py | 1 | 1223 | from setuptools import setup, find_packages
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6'
]
INSTALL_REQUIRES = [
'pika==0.10.0'
]
setup(name='python-rabbitmq-logging',
version='0.0.3',
url='https://github.com/aydoganserdar/python-rabbit-logging',
description='Send logs to RabbitMQ from Python/Flask',
keywords='logging rabbitmq logs',
license='MIT',
author='Serdar AYDOGAN',
author_email="[email protected]",
classifiers=CLASSIFIERS,
install_requires=INSTALL_REQUIRES,
packages=find_packages(),
extras_require={
'dev': ['check-manifest']
},
test_suite='nose.collector',
tests_require=['nose'],
zip_safe=True)
| mit | -2,184,254,084,479,242,800 | 28.119048 | 67 | 0.629599 | false |
yuanming-hu/taichi | python/taichi/lang/shell.py | 1 | 1472 | import atexit
import functools
import os
import sys
from taichi.core.util import ti_core as _ti_core
import taichi as ti
try:
import sourceinspect as oinspect
except ImportError:
ti.warn('`sourceinspect` not installed!')
ti.warn(
'Without this package Taichi may not function well in Python IDLE interactive shell, '
'Blender scripting module and Python native shell.')
ti.warn('Please run `python3 -m pip install sourceinspect` to install.')
import inspect as oinspect
pybuf_enabled = False
_env_enable_pybuf = os.environ.get('TI_ENABLE_PYBUF', '1')
if not _env_enable_pybuf or int(_env_enable_pybuf):
# When using in Jupyter / IDLE, the sys.stdout will be their wrapped ones.
# While sys.__stdout__ should always be the raw console stdout.
pybuf_enabled = sys.stdout is not sys.__stdout__
_ti_core.toggle_python_print_buffer(pybuf_enabled)
def _shell_pop_print(old_call):
if not pybuf_enabled:
# zero-overhead!
return old_call
ti.info('Graphical python shell detected, using wrapped sys.stdout')
@functools.wraps(old_call)
def new_call(*args, **kwargs):
_taichi_skip_traceback = 1
ret = old_call(*args, **kwargs)
# print's in kernel won't take effect until ti.sync(), discussion:
# https://github.com/taichi-dev/taichi/pull/1303#discussion_r444897102
print(_ti_core.pop_python_print_buffer(), end='')
return ret
return new_call
| mit | 6,888,034,702,434,073,000 | 31 | 94 | 0.686821 | false |
folz/drop | docs/conf.py | 1 | 7698 | # -*- coding: utf-8 -*-
#
# drop documentation build configuration file, created by
# sphinx-quickstart on Sun Feb 17 11:46:20 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'drop'
copyright = u'2013, ChangeMyName'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'dropdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'drop.tex', u'drop Documentation',
u'ChangeToMyName', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'drop', u'drop Documentation',
[u'ChangeToMyName'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'drop', u'drop Documentation',
u'ChangeToMyName', 'drop', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| mit | -7,931,662,525,305,026,000 | 30.809917 | 80 | 0.70317 | false |
leeopop/2015-CS570-Project | lda_preprocess.py | 1 | 2532 | import csv
from loader import load_single_file
num_of_topics__criteria_for_cut_words = 20
num_of_appearance__criteria_for_cut_words = 10
output_num_of_words_per_topic = 50
def load_vocab():
vocab = {}
inverse_dict = load_single_file('keyword_table.csv')
num = max([int(x['unique']) for x in inverse_dict.values()]) + 1
for (key, val) in inverse_dict.items():
vocab[int(val['unique'])] = str(key)
return num, vocab
def check_line_is_useless(line, cut_topic, cut_word):
count = 0
for i in range(1,len(line)):
if int(line[i]) >= cut_word:
count+=1
if count >= cut_topic:
return True
return False
def load_lda(num_vocab, vocab, cut_topic, cut_word):
f = open("lda_output.txt","r")
line = f.readline()
line = line.split()
n_topics = len(line)-1
ret = []
removed_words = []
index = int(line[0])
for i in range(n_topics):
ret.append([0] * num_vocab)
if check_line_is_useless(line, cut_topic, cut_word):
print(vocab[index])
removed_words.append(vocab[index])
else:
for i in range(n_topics):
ret[i][index] = int(line[i+1])
for line in f:
line = line.split()
index = int(line[0])
if check_line_is_useless(line, cut_topic, cut_word):
print(vocab[index])
removed_words.append(vocab[index])
else:
for i in range(n_topics):
ret[i][index] = int(line[i+1])
for i in range(n_topics):
ret[i] = list(enumerate(ret[i]))
ret[i].sort(key=lambda item:item[1], reverse=True)
return removed_words, n_topics, ret
def write_csv(lda, vocab, removed_words, n_topic, n_word):
with open('lda.csv', 'w', encoding='utf-8') as writecsvfile:
writer = csv.writer(writecsvfile, delimiter=',', quotechar='|')
row = []
for i in range(n_topic):
row.append("topic" + str(i+1))
writer.writerow(row)
for i in range(n_word):
row = []
for j in range(n_topic):
row.append(vocab[lda[j][i][0]])
writer.writerow(row)
writer.writerow([])
removed_words.insert(0,'')
removed_words.insert(0,'removed_words')
writer.writerow(removed_words)
def main():
num_vocab,vocab = load_vocab()
print("reading vocabulary file finished!")
#remove_words = ['of', 'the', 'and', 'in', 'for', 'a', 'to', 'with', 'by', 'on','at', 'an']
removed_words, num_topic,lda = load_lda(num_vocab, vocab,
num_of_topics__criteria_for_cut_words,
num_of_appearance__criteria_for_cut_words)
print("processing lda file finished!")
write_csv(lda,vocab, removed_words, num_topic, output_num_of_words_per_topic)
print("writing lda file finished!")
if __name__ == '__main__':
main() | mit | 6,811,511,562,006,119,000 | 27.460674 | 92 | 0.651659 | false |
DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/exploits/ZIBE/plugin_manager.py | 1 | 2051 | # uncompyle6 version 2.9.10
# Python bytecode 2.6 (62161)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: c:\Temp\build\ZIBE\plugin_manager.py
# Compiled at: 2013-02-27 18:02:46
from plugins import *
class PluginManager(object):
__plugins__ = {}
__handlers__ = {}
def __init__(self):
plugins = __import__('ZIBE').plugins
for member in plugins.__all__:
plugin_mod = getattr(plugins, member)
for type_name in dir(plugin_mod):
try:
t = getattr(plugin_mod, type_name)
if t.zibe_plugin is True:
self._add_plugin(t)
except AttributeError:
pass
def plugin_commands(self, plugin_name):
if self.__plugins__.has_key(plugin_name):
return self.__plugins__[plugin_name].get_command_handlers().keys()
        return None
def plugins(self):
ret = []
for p in self.__plugins__.keys():
ret.append((p, self.__plugins__[p].friendly_name))
return ret
def _add_plugin(self, t):
inst = t()
self.__plugins__[t.plugin_name] = inst
handlers = inst.get_command_handlers()
for k in handlers:
self.__handlers__[k] = handlers[k]
def handler_exists(self, cmd_name):
if self.__handlers__.has_key(cmd_name):
return True
return False
def get_handler_info(self, name):
if self.__handlers__.has_key(name) is False:
return None
return self.__handlers__[name]
def get_handler_func(self, cmd):
return self.__handlers__[cmd]['handler']
def invoke_cmd_handler(self, cmd, ctx, stdin, stdout, stderr, args):
if self.__handlers__.has_key(cmd):
record = self.__handlers__[cmd]
return record['handler'](stdin, stdout, stderr, ctx, args)
raise Exception('No command handler registered under that name') | unlicense | 1,598,297,665,892,283,400 | 32.096774 | 78 | 0.561677 | false |
alekz112/xlwings | xlwings/tests/test_xlwings.py | 1 | 33895 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import sys
import shutil
import pytz
import nose
from nose.tools import assert_equal, raises, assert_true, assert_false, assert_not_equal
from datetime import datetime, date
from xlwings import Application, Workbook, Sheet, Range, Chart, ChartType, RgbColor, Calculation
# Mac imports
if sys.platform.startswith('darwin'):
from appscript import k as kw
# TODO: uncomment the desired Excel installation or set to None for default installation
APP_TARGET = None
# APP_TARGET = '/Applications/Microsoft Office 2011/Microsoft Excel'
else:
APP_TARGET = None
# Optional dependencies
try:
import numpy as np
from numpy.testing import assert_array_equal
except ImportError:
np = None
try:
import pandas as pd
from pandas import DataFrame, Series
from pandas.util.testing import assert_frame_equal, assert_series_equal
except ImportError:
pd = None
# Test data
data = [[1, 2.222, 3.333],
['Test1', None, 'éöà'],
[datetime(1962, 11, 3), datetime(2020, 12, 31, 12, 12, 20), 9.999]]
test_date_1 = datetime(1962, 11, 3)
test_date_2 = datetime(2020, 12, 31, 12, 12, 20)
list_row_1d = [1.1, None, 3.3]
list_row_2d = [[1.1, None, 3.3]]
list_col = [[1.1], [None], [3.3]]
chart_data = [['one', 'two'], [1.1, 2.2]]
if np is not None:
array_1d = np.array([1.1, 2.2, np.nan, -4.4])
array_2d = np.array([[1.1, 2.2, 3.3], [-4.4, 5.5, np.nan]])
if pd is not None:
series_1 = pd.Series([1.1, 3.3, 5., np.nan, 6., 8.])
rng = pd.date_range('1/1/2012', periods=10, freq='D')
timeseries_1 = pd.Series(np.arange(len(rng)) + 0.1, rng)
timeseries_1[1] = np.nan
df_1 = pd.DataFrame([[1, 'test1'],
[2, 'test2'],
[np.nan, None],
[3.3, 'test3']], columns=['a', 'b'])
df_2 = pd.DataFrame([1, 3, 5, np.nan, 6, 8], columns=['col1'])
df_dateindex = pd.DataFrame(np.arange(50).reshape(10,5) + 0.1, index=rng)
# MultiIndex (Index)
tuples = list(zip(*[['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two'],
['x', 'x', 'x', 'x', 'y', 'y', 'y', 'y']]))
index = pd.MultiIndex.from_tuples(tuples, names=['first', 'second', 'third'])
df_multiindex = pd.DataFrame([[1.1, 2.2], [3.3, 4.4], [5.5, 6.6], [7.7, 8.8], [9.9, 10.10],
[11.11, 12.12],[13.13, 14.14], [15.15, 16.16]], index=index)
# MultiIndex (Header)
header = [['Foo', 'Foo', 'Bar', 'Bar', 'Baz'], ['A', 'B', 'C', 'D', 'E']]
df_multiheader = pd.DataFrame([[0.0, 1.0, 2.0, 3.0, 4.0],
[0.0, 1.0, 2.0, 3.0, 4.0],
[0.0, 1.0, 2.0, 3.0, 4.0],
[0.0, 1.0, 2.0, 3.0, 4.0],
[0.0, 1.0, 2.0, 3.0, 4.0],
[0.0, 1.0, 2.0, 3.0, 4.0]], columns=pd.MultiIndex.from_arrays(header))
# Test skips and fixtures
def _skip_if_no_numpy():
if np is None:
raise nose.SkipTest('numpy missing')
def _skip_if_no_pandas():
if pd is None:
raise nose.SkipTest('pandas missing')
def _skip_if_not_default_xl():
if APP_TARGET is not None:
raise nose.SkipTest('not Excel default')
def class_teardown(wb):
wb.close()
if sys.platform.startswith('win'):
Application(wb).quit()
class TestApplication:
def setUp(self):
# Connect to test file and make Sheet1 the active sheet
xl_file1 = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'test_workbook_1.xlsx')
self.wb = Workbook(xl_file1, app_visible=False, app_target=APP_TARGET)
Sheet('Sheet1').activate()
def tearDown(self):
class_teardown(self.wb)
def test_screen_updating(self):
Application(wkb=self.wb).screen_updating = False
assert_equal(Application(wkb=self.wb).screen_updating, False)
Application(wkb=self.wb).screen_updating = True
assert_equal(Application(wkb=self.wb).screen_updating, True)
def test_calculation(self):
Range('A1').value = 2
Range('B1').formula = '=A1 * 2'
app = Application(wkb=self.wb)
app.calculation = Calculation.xlCalculationManual
Range('A1').value = 4
assert_equal(Range('B1').value, 4)
app.calculation = Calculation.xlCalculationAutomatic
app.calculate() # This is needed on Mac Excel 2016 but not on Mac Excel 2011 (changed behaviour)
assert_equal(Range('B1').value, 8)
Range('A1').value = 2
assert_equal(Range('B1').value, 4)
class TestWorkbook:
def setUp(self):
# Connect to test file and make Sheet1 the active sheet
xl_file1 = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'test_workbook_1.xlsx')
self.wb = Workbook(xl_file1, app_visible=False, app_target=APP_TARGET)
Sheet('Sheet1').activate()
def tearDown(self):
class_teardown(self.wb)
def test_name(self):
assert_equal(self.wb.name, 'test_workbook_1.xlsx')
def test_active_sheet(self):
assert_equal(self.wb.active_sheet.name, 'Sheet1')
def test_current(self):
assert_equal(self.wb.xl_workbook, Workbook.current().xl_workbook)
def test_set_current(self):
wb2 = Workbook(app_visible=False, app_target=APP_TARGET)
assert_equal(Workbook.current().xl_workbook, wb2.xl_workbook)
self.wb.set_current()
assert_equal(Workbook.current().xl_workbook, self.wb.xl_workbook)
wb2.close()
def test_get_selection(self):
Range('A1').value = 1000
assert_equal(self.wb.get_selection().value, 1000)
def test_reference_two_unsaved_wb(self):
"""Covers GH Issue #63"""
wb1 = Workbook(app_visible=False, app_target=APP_TARGET)
wb2 = Workbook(app_visible=False, app_target=APP_TARGET)
Range('A1').value = 2. # wb2
Range('A1', wkb=wb1).value = 1. # wb1
assert_equal(Range('A1').value, 2.)
assert_equal(Range('A1', wkb=wb1).value, 1.)
wb1.close()
wb2.close()
def test_save_naked(self):
cwd = os.getcwd()
wb1 = Workbook(app_visible=False, app_target=APP_TARGET)
target_file_path = os.path.join(cwd, wb1.name + '.xlsx')
if os.path.isfile(target_file_path):
os.remove(target_file_path)
wb1.save()
assert_equal(os.path.isfile(target_file_path), True)
wb2 = Workbook(target_file_path, app_visible=False, app_target=APP_TARGET)
wb2.close()
if os.path.isfile(target_file_path):
os.remove(target_file_path)
def test_save_path(self):
cwd = os.getcwd()
wb1 = Workbook(app_visible=False, app_target=APP_TARGET)
target_file_path = os.path.join(cwd, 'TestFile.xlsx')
if os.path.isfile(target_file_path):
os.remove(target_file_path)
wb1.save(target_file_path)
assert_equal(os.path.isfile(target_file_path), True)
wb2 = Workbook(target_file_path, app_visible=False, app_target=APP_TARGET)
wb2.close()
if os.path.isfile(target_file_path):
os.remove(target_file_path)
def test_mock_caller(self):
# Can't really run this one with app_visible=False
_skip_if_not_default_xl()
Workbook.set_mock_caller(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'test_workbook_1.xlsx'))
wb = Workbook.caller()
Range('A1', wkb=wb).value = 333
assert_equal(Range('A1', wkb=wb).value, 333)
def test_unicode_path(self):
# pip3 seems to struggle with unicode filenames
src = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'unicode_path.xlsx')
dst = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'ünicödé_päth.xlsx')
shutil.move(src, dst)
wb = Workbook(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'ünicödé_päth.xlsx'), app_visible=False, app_target=APP_TARGET)
Range('A1').value = 1
wb.close()
shutil.move(dst, src)
def test_unsaved_workbook_reference(self):
wb = Workbook(app_visible=False, app_target=APP_TARGET)
Range('B2').value = 123
wb2 = Workbook(wb.name, app_visible=False, app_target=APP_TARGET)
assert_equal(Range('B2', wkb=wb2).value, 123)
wb2.close()
def test_delete_named_item(self):
Range('B10:C11').name = 'to_be_deleted'
assert_equal(Range('to_be_deleted').name, 'to_be_deleted')
del self.wb.names['to_be_deleted']
assert_not_equal(Range('B10:C11').name, 'to_be_deleted')
def test_names_collection(self):
Range('A1').name = 'name1'
Range('A2').name = 'name2'
assert_true('name1' in self.wb.names and 'name2' in self.wb.names)
Range('A3').name = 'name3'
assert_true('name1' in self.wb.names and 'name2' in self.wb.names and
'name3' in self.wb.names)
def test_active_workbook(self):
# TODO: add test over multiple Excel instances on Windows
Range('A1').value = 'active_workbook'
wb_active = Workbook.active(app_target=APP_TARGET)
assert_equal(Range('A1', wkb=wb_active).value, 'active_workbook')
def test_workbook_name(self):
Range('A10').value = 'name-test'
wb2 = Workbook('test_workbook_1.xlsx', app_visible=False, app_target=APP_TARGET)
assert_equal(Range('A10', wkb=wb2).value, 'name-test')
class TestSheet:
def setUp(self):
# Connect to test file and make Sheet1 the active sheet
xl_file1 = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'test_workbook_1.xlsx')
self.wb = Workbook(xl_file1, app_visible=False, app_target=APP_TARGET)
Sheet('Sheet1').activate()
def tearDown(self):
class_teardown(self.wb)
def test_activate(self):
Sheet('Sheet2').activate()
assert_equal(Sheet.active().name, 'Sheet2')
Sheet(3).activate()
assert_equal(Sheet.active().index, 3)
def test_name(self):
Sheet(1).name = 'NewName'
assert_equal(Sheet(1).name, 'NewName')
def test_index(self):
assert_equal(Sheet('Sheet1').index, 1)
def test_clear_content_active_sheet(self):
Range('G10').value = 22
Sheet.active().clear_contents()
cell = Range('G10').value
assert_equal(cell, None)
def test_clear_active_sheet(self):
Range('G10').value = 22
Sheet.active().clear()
cell = Range('G10').value
assert_equal(cell, None)
def test_clear_content(self):
Range('Sheet2', 'G10').value = 22
Sheet('Sheet2').clear_contents()
cell = Range('Sheet2', 'G10').value
assert_equal(cell, None)
def test_clear(self):
Range('Sheet2', 'G10').value = 22
Sheet('Sheet2').clear()
cell = Range('Sheet2', 'G10').value
assert_equal(cell, None)
def test_autofit(self):
Range('Sheet1', 'A1:D4').value = 'test_string'
Sheet('Sheet1').autofit()
Sheet('Sheet1').autofit('r')
Sheet('Sheet1').autofit('c')
Sheet('Sheet1').autofit('rows')
Sheet('Sheet1').autofit('columns')
def test_add_before(self):
new_sheet = Sheet.add(before='Sheet1')
assert_equal(Sheet(1).name, new_sheet.name)
def test_add_after(self):
Sheet.add(after=Sheet.count())
assert_equal(Sheet(Sheet.count()).name, Sheet.active().name)
Sheet.add(after=1)
assert_equal(Sheet(2).name, Sheet.active().name)
def test_add_default(self):
# TODO: test call without args properly
Sheet.add()
def test_add_named(self):
Sheet.add('test', before=1)
assert_equal(Sheet(1).name, 'test')
@raises(Exception)
def test_add_name_already_taken(self):
Sheet.add('Sheet1')
def test_count(self):
count = Sheet.count()
assert_equal(count, 3)
def test_all(self):
all_names = [i.name for i in Sheet.all()]
assert_equal(all_names, ['Sheet1', 'Sheet2', 'Sheet3'])
class TestRange:
def setUp(self):
# Connect to test file and make Sheet1 the active sheet
xl_file1 = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'test_range_1.xlsx')
self.wb = Workbook(xl_file1, app_visible=False, app_target=APP_TARGET)
Sheet('Sheet1').activate()
def tearDown(self):
class_teardown(self.wb)
def test_cell(self):
params = [('A1', 22),
((1,1), 22),
('A1', 22.2222),
((1,1), 22.2222),
('A1', 'Test String'),
((1,1), 'Test String'),
('A1', 'éöà'),
((1,1), 'éöà'),
('A2', test_date_1),
((2,1), test_date_1),
('A3', test_date_2),
((3,1), test_date_2)]
for param in params:
yield self.check_cell, param[0], param[1]
def check_cell(self, address, value):
# Active Sheet
Range(address).value = value
cell = Range(address).value
assert_equal(cell, value)
# SheetName
Range('Sheet2', address).value = value
cell = Range('Sheet2', address).value
assert_equal(cell, value)
# SheetIndex
Range(3, address).value = value
cell = Range(3, address).value
assert_equal(cell, value)
def test_range_address(self):
""" Style: Range('A1:C3') """
address = 'C1:E3'
# Active Sheet
Range(address[:2]).value = data # assign to starting cell only
cells = Range(address).value
assert_equal(cells, data)
# Sheetname
Range('Sheet2', address).value = data
cells = Range('Sheet2', address).value
assert_equal(cells, data)
# Sheetindex
Range(3, address).value = data
cells = Range(3, address).value
assert_equal(cells, data)
def test_range_index(self):
""" Style: Range((1,1), (3,3)) """
index1 = (1,3)
index2 = (3,5)
# Active Sheet
Range(index1, index2).value = data
cells = Range(index1, index2).value
assert_equal(cells, data)
# Sheetname
Range('Sheet2', index1, index2).value = data
cells = Range('Sheet2', index1, index2).value
assert_equal(cells, data)
# Sheetindex
Range(3, index1, index2).value = data
cells = Range(3, index1, index2).value
assert_equal(cells, data)
def test_named_range_value(self):
value = 22.222
# Active Sheet
Range('cell_sheet1').value = value
cells = Range('cell_sheet1').value
assert_equal(cells, value)
Range('range_sheet1').value = data
cells = Range('range_sheet1').value
assert_equal(cells, data)
# Sheetname
Range('Sheet2', 'cell_sheet2').value = value
cells = Range('Sheet2', 'cell_sheet2').value
assert_equal(cells, value)
Range('Sheet2', 'range_sheet2').value = data
cells = Range('Sheet2', 'range_sheet2').value
assert_equal(cells, data)
# Sheetindex
Range(3, 'cell_sheet3').value = value
cells = Range(3, 'cell_sheet3').value
assert_equal(cells, value)
Range(3, 'range_sheet3').value = data
cells = Range(3, 'range_sheet3').value
assert_equal(cells, data)
def test_array(self):
_skip_if_no_numpy()
# 1d array
Range('Sheet6', 'A1').value = array_1d
cells = Range('Sheet6', 'A1:D1', asarray=True).value
assert_array_equal(cells, array_1d)
# 2d array
Range('Sheet6', 'A4').value = array_2d
cells = Range('Sheet6', 'A4', asarray=True).table.value
assert_array_equal(cells, array_2d)
# 1d array (atleast_2d)
Range('Sheet6', 'A10').value = array_1d
cells = Range('Sheet6', 'A10:D10', asarray=True, atleast_2d=True).value
assert_array_equal(cells, np.atleast_2d(array_1d))
# 2d array (atleast_2d)
Range('Sheet6', 'A12').value = array_2d
cells = Range('Sheet6', 'A12', asarray=True, atleast_2d=True).table.value
assert_array_equal(cells, array_2d)
def sheet_ref(self):
Range(Sheet(1), 'A20').value = 123
assert_equal(Range(1, 'A20').value, 123)
Range(Sheet(1), (2,2), (4,4)).value = 321
assert_equal(Range(1, (2,2)).value, 321)
def test_vertical(self):
Range('Sheet4', 'A10').value = data
if sys.platform.startswith('win') and self.wb.xl_app.Version == '14.0':
Range('Sheet4', 'A12:B12').xl_range.NumberFormat = 'dd/mm/yyyy' # Hack for Excel 2010 bug, see GH #43
cells = Range('Sheet4', 'A10').vertical.value
assert_equal(cells, [row[0] for row in data])
def test_horizontal(self):
Range('Sheet4', 'A20').value = data
cells = Range('Sheet4', 'A20').horizontal.value
assert_equal(cells, data[0])
def test_table(self):
Range('Sheet4', 'A1').value = data
if sys.platform.startswith('win') and self.wb.xl_app.Version == '14.0':
Range('Sheet4', 'A3:B3').xl_range.NumberFormat = 'dd/mm/yyyy' # Hack for Excel 2010 bug, see GH #43
cells = Range('Sheet4', 'A1').table.value
assert_equal(cells, data)
def test_list(self):
# 1d List Row
Range('Sheet4', 'A27').value = list_row_1d
cells = Range('Sheet4', 'A27:C27').value
assert_equal(list_row_1d, cells)
# 2d List Row
Range('Sheet4', 'A29').value = list_row_2d
cells = Range('Sheet4', 'A29:C29', atleast_2d=True).value
assert_equal(list_row_2d, cells)
# 1d List Col
Range('Sheet4', 'A31').value = list_col
cells = Range('Sheet4', 'A31:A33').value
assert_equal([i[0] for i in list_col], cells)
# 2d List Col
cells = Range('Sheet4', 'A31:A33', atleast_2d=True).value
assert_equal(list_col, cells)
def test_is_cell(self):
assert_equal(Range('A1').is_cell(), True)
assert_equal(Range('A1:B1').is_cell(), False)
assert_equal(Range('A1:A2').is_cell(), False)
assert_equal(Range('A1:B2').is_cell(), False)
def test_is_row(self):
assert_equal(Range('A1').is_row(), False)
assert_equal(Range('A1:B1').is_row(), True)
assert_equal(Range('A1:A2').is_row(), False)
assert_equal(Range('A1:B2').is_row(), False)
def test_is_column(self):
assert_equal(Range('A1').is_column(), False)
assert_equal(Range('A1:B1').is_column(), False)
assert_equal(Range('A1:A2').is_column(), True)
assert_equal(Range('A1:B2').is_column(), False)
def test_is_table(self):
assert_equal(Range('A1').is_table(), False)
assert_equal(Range('A1:B1').is_table(), False)
assert_equal(Range('A1:A2').is_table(), False)
assert_equal(Range('A1:B2').is_table(), True)
def test_formula(self):
Range('A1').formula = '=SUM(A2:A10)'
assert_equal(Range('A1').formula, '=SUM(A2:A10)')
def test_current_region(self):
values = [[1.,2.],[3.,4.]]
Range('A20').value = values
assert_equal(Range('B21').current_region.value, values)
def test_clear_content(self):
Range('Sheet4', 'G1').value = 22
Range('Sheet4', 'G1').clear_contents()
cell = Range('Sheet4', 'G1').value
assert_equal(cell, None)
def test_clear(self):
Range('Sheet4', 'G1').value = 22
Range('Sheet4', 'G1').clear()
cell = Range('Sheet4', 'G1').value
assert_equal(cell, None)
def test_dataframe_1(self):
_skip_if_no_pandas()
df_expected = df_1
Range('Sheet5', 'A1').value = df_expected
cells = Range('Sheet5', 'B1:C5').value
df_result = DataFrame(cells[1:], columns=cells[0])
assert_frame_equal(df_expected, df_result)
def test_dataframe_2(self):
""" Covers GH Issue #31"""
_skip_if_no_pandas()
df_expected = df_2
Range('Sheet5', 'A9').value = df_expected
cells = Range('Sheet5', 'B9:B15').value
df_result = DataFrame(cells[1:], columns=[cells[0]])
assert_frame_equal(df_expected, df_result)
def test_dataframe_multiindex(self):
_skip_if_no_pandas()
df_expected = df_multiindex
Range('Sheet5', 'A20').value = df_expected
cells = Range('Sheet5', 'D20').table.value
multiindex = Range('Sheet5', 'A20:C28').value
ix = pd.MultiIndex.from_tuples(multiindex[1:], names=multiindex[0])
df_result = DataFrame(cells[1:], columns=cells[0], index=ix)
assert_frame_equal(df_expected, df_result)
def test_dataframe_multiheader(self):
_skip_if_no_pandas()
df_expected = df_multiheader
Range('Sheet5', 'A52').value = df_expected
cells = Range('Sheet5', 'B52').table.value
df_result = DataFrame(cells[2:], columns=pd.MultiIndex.from_arrays(cells[:2]))
assert_frame_equal(df_expected, df_result)
def test_dataframe_dateindex(self):
_skip_if_no_pandas()
df_expected = df_dateindex
Range('Sheet5', 'A100').value = df_expected
if sys.platform.startswith('win') and self.wb.xl_app.Version == '14.0':
Range('Sheet5', 'A100').vertical.xl_range.NumberFormat = 'dd/mm/yyyy' # Hack for Excel 2010 bug, see GH #43
cells = Range('Sheet5', 'B100').table.value
index = Range('Sheet5', 'A101').vertical.value
df_result = DataFrame(cells[1:], index=index, columns=cells[0])
assert_frame_equal(df_expected, df_result)
def test_series_1(self):
_skip_if_no_pandas()
series_expected = series_1
Range('Sheet5', 'A32').value = series_expected
cells = Range('Sheet5', 'B32:B37').value
series_result = Series(cells)
assert_series_equal(series_expected, series_result)
def test_timeseries_1(self):
_skip_if_no_pandas()
series_expected = timeseries_1
Range('Sheet5', 'A40').value = series_expected
if sys.platform.startswith('win') and self.wb.xl_app.Version == '14.0':
Range('Sheet5', 'A40').vertical.xl_range.NumberFormat = 'dd/mm/yyyy' # Hack for Excel 2010 bug, see GH #43
cells = Range('Sheet5', 'B40:B49').value
date_index = Range('Sheet5', 'A40:A49').value
series_result = Series(cells, index=date_index)
assert_series_equal(series_expected, series_result)
def test_none(self):
""" Covers GH Issue #16"""
# None
Range('Sheet1', 'A7').value = None
assert_equal(None, Range('Sheet1', 'A7').value)
# List
Range('Sheet1', 'A7').value = [None, None]
assert_equal(None, Range('Sheet1', 'A7').horizontal.value)
def test_scalar_nan(self):
"""Covers GH Issue #15"""
_skip_if_no_numpy()
Range('Sheet1', 'A20').value = np.nan
assert_equal(None, Range('Sheet1', 'A20').value)
def test_atleast_2d_scalar(self):
"""Covers GH Issue #53a"""
Range('Sheet1', 'A50').value = 23
result = Range('Sheet1', 'A50', atleast_2d=True).value
assert_equal([[23]], result)
def test_atleast_2d_scalar_as_array(self):
"""Covers GH Issue #53b"""
_skip_if_no_numpy()
Range('Sheet1', 'A50').value = 23
result = Range('Sheet1', 'A50', atleast_2d=True, asarray=True).value
assert_equal(np.array([[23]]), result)
def test_column_width(self):
Range('Sheet1', 'A1:B2').column_width = 10.0
result = Range('Sheet1', 'A1').column_width
assert_equal(10.0, result)
Range('Sheet1', 'A1:B2').value = 'ensure cells are used'
Range('Sheet1', 'B2').column_width = 20.0
result = Range('Sheet1', 'A1:B2').column_width
if sys.platform.startswith('win'):
assert_equal(None, result)
else:
assert_equal(kw.missing_value, result)
def test_row_height(self):
Range('Sheet1', 'A1:B2').row_height = 15.0
result = Range('Sheet1', 'A1').row_height
assert_equal(15.0, result)
Range('Sheet1', 'A1:B2').value = 'ensure cells are used'
Range('Sheet1', 'B2').row_height = 20.0
result = Range('Sheet1', 'A1:B2').row_height
if sys.platform.startswith('win'):
assert_equal(None, result)
else:
assert_equal(kw.missing_value, result)
def test_width(self):
"""Width depends on default style text size, so do not test absolute widths"""
Range('Sheet1', 'A1:D4').column_width = 10.0
result_before = Range('Sheet1', 'A1').width
Range('Sheet1', 'A1:D4').column_width = 12.0
result_after = Range('Sheet1', 'A1').width
assert_true(result_after > result_before)
def test_height(self):
Range('Sheet1', 'A1:D4').row_height = 60.0
result = Range('Sheet1', 'A1:D4').height
assert_equal(240.0, result)
def test_autofit_range(self):
# TODO: compare col/row widths before/after - not implemented yet
Range('Sheet1', 'A1:D4').value = 'test_string'
Range('Sheet1', 'A1:D4').autofit()
Range('Sheet1', 'A1:D4').autofit('r')
Range('Sheet1', 'A1:D4').autofit('c')
Range('Sheet1', 'A1:D4').autofit('rows')
Range('Sheet1', 'A1:D4').autofit('columns')
def test_autofit_col(self):
# TODO: compare col/row widths before/after - not implemented yet
Range('Sheet1', 'A1:D4').value = 'test_string'
Range('Sheet1', 'A:D').autofit()
Range('Sheet1', 'A:D').autofit('r')
Range('Sheet1', 'A:D').autofit('c')
Range('Sheet1', 'A:D').autofit('rows')
Range('Sheet1', 'A:D').autofit('columns')
def test_autofit_row(self):
# TODO: compare col/row widths before/after - not implemented yet
Range('Sheet1', 'A1:D4').value = 'test_string'
Range('Sheet1', '1:1000000').autofit()
Range('Sheet1', '1:1000000').autofit('r')
Range('Sheet1', '1:1000000').autofit('c')
Range('Sheet1', '1:1000000').autofit('rows')
Range('Sheet1', '1:1000000').autofit('columns')
def test_number_format_cell(self):
format_string = "mm/dd/yy;@"
Range('Sheet1', 'A1').number_format = format_string
result = Range('Sheet1', 'A1').number_format
assert_equal(format_string, result)
def test_number_format_range(self):
format_string = "mm/dd/yy;@"
Range('Sheet1', 'A1:D4').number_format = format_string
result = Range('Sheet1', 'A1:D4').number_format
assert_equal(format_string, result)
def test_get_address(self):
res = Range((1,1),(3,3)).get_address()
assert_equal(res, '$A$1:$C$3')
res = Range((1,1),(3,3)).get_address(False)
assert_equal(res, '$A1:$C3')
res = Range((1,1),(3,3)).get_address(True, False)
assert_equal(res, 'A$1:C$3')
res = Range((1,1),(3,3)).get_address(False, False)
assert_equal(res, 'A1:C3')
res = Range((1,1),(3,3)).get_address(include_sheetname=True)
assert_equal(res, 'Sheet1!$A$1:$C$3')
res = Range('Sheet2', (1,1),(3,3)).get_address(include_sheetname=True)
assert_equal(res, 'Sheet2!$A$1:$C$3')
res = Range((1,1),(3,3)).get_address(external=True)
assert_equal(res, '[test_range_1.xlsx]Sheet1!$A$1:$C$3')
def test_hyperlink(self):
address = 'www.xlwings.org'
# Naked address
Range('A1').add_hyperlink(address)
assert_equal(Range('A1').value, address)
hyperlink = Range('A1').hyperlink
if not hyperlink.endswith('/'):
hyperlink += '/'
assert_equal(hyperlink, 'http://' + address + '/')
# Address + FriendlyName
Range('A2').add_hyperlink(address, 'test_link')
assert_equal(Range('A2').value, 'test_link')
hyperlink = Range('A2').hyperlink
if not hyperlink.endswith('/'):
hyperlink += '/'
assert_equal(hyperlink, 'http://' + address + '/')
def test_hyperlink_formula(self):
Range('B10').formula = '=HYPERLINK("http://xlwings.org", "xlwings")'
assert_equal(Range('B10').hyperlink, 'http://xlwings.org')
def test_color(self):
rgb = (30, 100, 200)
Range('A1').color = rgb
assert_equal(rgb, Range('A1').color)
Range('A2').color = RgbColor.rgbAqua
assert_equal((0, 255, 255), Range('A2').color)
Range('A2').color = None
assert_equal(Range('A2').color, None)
Range('A1:D4').color = rgb
assert_equal(rgb, Range('A1:D4').color)
def test_size(self):
assert_equal(Range('A1:C4').size, 12)
def test_shape(self):
assert_equal(Range('A1:C4').shape, (4, 3))
def test_len(self):
assert_equal(len(Range('A1:C4')), 4)
def test_iterator(self):
Range('A20').value = [[1., 2.], [3., 4.]]
l = []
for i in Range('A20:B21'):
l.append(i.value)
assert_equal(l, [1., 2., 3., 4.])
Range('Sheet2', 'A20').value = [[1., 2.], [3., 4.]]
l = []
for i in Range('Sheet2', 'A20:B21'):
l.append(i.value)
assert_equal(l, [1., 2., 3., 4.])
def test_resize(self):
r = Range('A1').resize(4, 5)
assert_equal(r.shape, (4, 5))
r = Range('A1').resize(row_size=4)
assert_equal(r.shape, (4, 1))
r = Range('A1:B4').resize(column_size=5)
assert_equal(r.shape, (1, 5))
def test_offset(self):
o = Range('A1:B3').offset(3, 4)
assert_equal(o.get_address(), '$E$4:$F$6')
o = Range('A1:B3').offset(row_offset=3)
assert_equal(o.get_address(), '$A$4:$B$6')
o = Range('A1:B3').offset(column_offset=4)
assert_equal(o.get_address(), '$E$1:$F$3')
def test_date(self):
date_1 = date(2000, 12, 3)
Range('X1').value = date_1
date_2 = Range('X1').value
assert_equal(date_1, date(date_2.year, date_2.month, date_2.day))
def test_row(self):
assert_equal(Range('B3:F5').row, 3)
def test_column(self):
assert_equal(Range('B3:F5').column, 2)
def test_last_cell(self):
assert_equal(Range('B3:F5').last_cell.row, 5)
assert_equal(Range('B3:F5').last_cell.column, 6)
def test_get_set_named_range(self):
Range('A100').name = 'test1'
assert_equal(Range('A100').name, 'test1')
Range('A200:B204').name = 'test2'
assert_equal(Range('A200:B204').name, 'test2')
def test_integers(self):
"""Covers GH 227"""
Range('A99').value = 2147483647 # max SInt32
assert_equal(Range('A99').value, 2147483647)
Range('A100').value = 2147483648 # SInt32 < x < SInt64
assert_equal(Range('A100').value, 2147483648)
Range('A101').value = 10000000000000000000 # long
assert_equal(Range('A101').value, 10000000000000000000)
def test_numpy_datetime(self):
_skip_if_no_numpy()
Range('A55').value = np.datetime64('2005-02-25T03:30Z')
assert_equal(Range('A55').value, datetime(2005, 2, 25, 3, 30))
def test_dataframe_timezone(self):
_skip_if_no_pandas()
dt = np.datetime64(1434149887000, 'ms')
ix = pd.DatetimeIndex(data=[dt], tz='GMT')
df = pd.DataFrame(data=[1], index=ix, columns=['A'])
Range('A1').value = df
assert_equal(Range('A2').value, datetime(2015, 6, 12, 22, 58, 7))
def test_datetime_timezone(self):
eastern = pytz.timezone('US/Eastern')
dt_naive = datetime(2002, 10, 27, 6, 0, 0)
dt_tz = eastern.localize(dt_naive)
Range('F34').value = dt_tz
assert_equal(Range('F34').value, dt_naive)
class TestChart:
def setUp(self):
# Connect to test file and make Sheet1 the active sheet
xl_file1 = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'test_chart_1.xlsx')
self.wb = Workbook(xl_file1, app_visible=False, app_target=APP_TARGET)
Sheet('Sheet1').activate()
def tearDown(self):
class_teardown(self.wb)
def test_add_keywords(self):
name = 'My Chart'
chart_type = ChartType.xlLine
Range('A1').value = chart_data
chart = Chart.add(chart_type=chart_type, name=name, source_data=Range('A1').table)
chart_actual = Chart(name)
name_actual = chart_actual.name
chart_type_actual = chart_actual.chart_type
assert_equal(name, name_actual)
if sys.platform.startswith('win'):
assert_equal(chart_type, chart_type_actual)
else:
assert_equal(kw.line_chart, chart_type_actual)
def test_add_properties(self):
name = 'My Chart'
chart_type = ChartType.xlLine
Range('Sheet2', 'A1').value = chart_data
chart = Chart.add('Sheet2')
chart.chart_type = chart_type
chart.name = name
chart.set_source_data(Range('Sheet2', 'A1').table)
chart_actual = Chart('Sheet2', name)
name_actual = chart_actual.name
chart_type_actual = chart_actual.chart_type
assert_equal(name, name_actual)
if sys.platform.startswith('win'):
assert_equal(chart_type, chart_type_actual)
else:
assert_equal(kw.line_chart, chart_type_actual)
if __name__ == '__main__':
nose.main()
| apache-2.0 | 8,551,476,815,334,439,000 | 33.782341 | 142 | 0.574001 | false |
Zomboided/VPN-Manager | resetVPN.py | 1 | 4136 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Zomboided
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This module resets all VPN connections
import xbmcgui
import xbmcaddon
from libs.common import resetVPNConnections, stopService, startService, DIALOG_SPEED, getVPNRequestedProfile, setAPICommand
from libs.utility import debugTrace, errorTrace, infoTrace, newPrint, getID, getName
debugTrace("-- Entered resetVPN.py --")
if not getID() == "":
# Get info about the addon that this script is pretending to be attached to
addon = xbmcaddon.Addon(getID())
addon_name = getName()
success = True
# Reset the VPN connection values stored in the settings.xml
if xbmcgui.Dialog().yesno(addon_name, "Updating the VPN settings will reset all VPN connections. Connections must be re-validated before use.\nContinue?"):
# Display dialog to show what's going on
progress = xbmcgui.DialogProgress()
progress_title = "Resetting VPN connections"
progress.create(addon_name,progress_title)
if not getVPNRequestedProfile() == "":
progress.close()
            xbmcgui.Dialog().ok(addon_name, "A VPN connection attempt is in progress and will be aborted. Try again in a few seconds.")
setAPICommand("Disconnect")
success = False
if success:
# Stop the VPN monitor
xbmc.sleep(100)
progress.update(0, progress_title, "Pausing VPN monitor...")
xbmc.sleep(100)
if not stopService():
progress.close()
# Display error result in an ok dialog
errorTrace("resetVPN.py", "VPN monitor service is not running, can't reset VPNs")
xbmcgui.Dialog().ok(progress_title, "Error, Service not running. Check log and re-enable.")
success = False
# Disconnect and reset all connections
if success:
progress.update(20, progress_title, "VPN monitor paused")
xbmc.sleep(DIALOG_SPEED)
progress.update(40, progress_title, "Stopping any active VPN connection...")
xbmc.sleep(100)
resetVPNConnections(addon)
# Reset any validated values
addon.setSetting("vpn_provider_validated", "")
addon.setSetting("vpn_username_validated", "")
addon.setSetting("vpn_password_validated", "")
# Restart the VPN monitor
if success:
progress.update(60, progress_title, "VPN connections have been reset")
xbmc.sleep(DIALOG_SPEED)
progress.update(80, progress_title, "Restarting VPN monitor...")
xbmc.sleep(100)
if not startService():
progress.close()
errorTrace("resetVPN.py", "VPN monitor service is not running, connections have been reset")
xbmcgui.Dialog().ok(progress_title, "Error, cannot restart service. Check log and re-enable.")
success = False
else:
# Close out the final progress dialog
progress.update(100, progress_title, "VPN monitor restarted")
xbmc.sleep(DIALOG_SPEED)
progress.close()
command = "Addon.OpenSettings(" + getID() + ")"
xbmc.executebuiltin(command)
else:
errorTrace("resetVPN.py", "VPN service is not ready")
debugTrace("-- Exit resetVPN.py --") | gpl-2.0 | 245,625,491,115,489,120 | 43.010638 | 160 | 0.629594 | false |
jskDr/keraspp | old/gan_cnn_mse.py | 1 | 6490 | ################################
# Import common packages
################################
from keras.datasets import mnist
import numpy as np
from PIL import Image
import math
import os
import keras.backend as K
K.set_image_data_format('channels_first')
print(K.image_data_format())
################################
# GAN modeling
################################
from keras import models, layers, optimizers
def mean_squared_error(y_true, y_pred):
return K.mean(K.square(y_pred - y_true), axis=(1,2,3))
class GAN(models.Sequential):
def __init__(self, input_dim=64):
"""
        self, self.generator, and self.discriminator are all Keras models
"""
super().__init__()
self.input_dim = input_dim
self.generator = self.GENERATOR()
self.discriminator = self.DISCRIMINATOR()
self.add(self.generator)
self.discriminator.trainable = False
self.add(self.discriminator)
self.compile_all()
def compile_all(self):
# Compiling stage
d_optim = optimizers.SGD(lr=0.0005, momentum=0.9, nesterov=True)
g_optim = optimizers.SGD(lr=0.0005, momentum=0.9, nesterov=True)
self.generator.compile(loss=mean_squared_error, optimizer="SGD")
self.compile(loss='binary_crossentropy', optimizer=g_optim)
self.discriminator.trainable = True
self.discriminator.compile(loss='binary_crossentropy', optimizer=d_optim)
def GENERATOR(self):
input_dim = self.input_dim
model = models.Sequential()
model.add(layers.Dense(1024, activation='tanh', input_dim=input_dim))
model.add(layers.Dense(128 * 7 * 7, activation='tanh'))
model.add(layers.BatchNormalization())
model.add(layers.Reshape((128, 7, 7), input_shape=(128 * 7 * 7,)))
model.add(layers.UpSampling2D(size=(2, 2)))
model.add(layers.Conv2D(64, (5, 5), padding='same', activation='tanh'))
model.add(layers.UpSampling2D(size=(2, 2)))
model.add(layers.Conv2D(1, (5, 5), padding='same', activation='tanh'))
return model
def DISCRIMINATOR(self):
model = models.Sequential()
model.add(layers.Conv2D(64, (5, 5), padding='same', activation='tanh',
input_shape=(1, 28, 28)))
model.add(layers.MaxPooling2D(pool_size=(2, 2)))
model.add(layers.Conv2D(128, (5, 5), activation='tanh'))
model.add(layers.MaxPooling2D(pool_size=(2, 2)))
model.add(layers.Flatten())
model.add(layers.Dense(1024, activation='tanh'))
model.add(layers.Dense(1, activation='sigmoid'))
return model
def get_z(self, ln):
input_dim = self.input_dim
return np.random.uniform(-1, 1, (ln, input_dim))
def train_both(self, x):
ln = x.shape[0]
# First trial for training discriminator
z = self.get_z(ln)
w = self.generator.predict(z, verbose=0)
xw = np.concatenate((x, w))
y2 = [1] * ln + [0] * ln
d_loss = self.discriminator.train_on_batch(xw, y2)
# Second trial for training generator
z = self.get_z(ln)
self.discriminator.trainable = False
g_loss = self.train_on_batch(z, [1] * ln)
self.discriminator.trainable = True
return d_loss, g_loss
################################
# Training the GAN
################################
def combine_images(generated_images):
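    # Tile the batch of generated images into one roughly square grid image for saving.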
num = generated_images.shape[0]
width = int(math.sqrt(num))
height = int(math.ceil(float(num) / width))
shape = generated_images.shape[2:]
image = np.zeros((height * shape[0], width * shape[1]),
dtype=generated_images.dtype)
for index, img in enumerate(generated_images):
i = int(index / width)
j = index % width
image[i * shape[0]:(i + 1) * shape[0],
j * shape[1]:(j + 1) * shape[1]] = img[0, :, :]
return image
def get_x(X_train, index, BATCH_SIZE):
return X_train[index * BATCH_SIZE:(index + 1) * BATCH_SIZE]
def save_images(generated_images, output_fold, epoch, index):
image = combine_images(generated_images)
image = image * 127.5 + 127.5
Image.fromarray(image.astype(np.uint8)).save(
output_fold + '/' +
str(epoch) + "_" + str(index) + ".png")
def load_data():
(X_train, y_train), (_, _) = mnist.load_data()
return X_train[:10]
def train(args):
BATCH_SIZE = args.batch_size
epochs = args.epochs
output_fold = args.output_fold
input_dim = args.input_dim
os.makedirs(output_fold, exist_ok=True)
print('Output_fold is', output_fold)
X_train = load_data()
X_train = (X_train.astype(np.float32) - 127.5) / 127.5
X_train = X_train.reshape((X_train.shape[0], 1) + X_train.shape[1:])
gan = GAN(input_dim)
d_loss_ll = []
g_loss_ll = []
for epoch in range(epochs):
print("Epoch is", epoch)
print("Number of batches", int(X_train.shape[0] / BATCH_SIZE))
d_loss_l = []
g_loss_l = []
for index in range(int(X_train.shape[0] / BATCH_SIZE)):
x = get_x(X_train, index, BATCH_SIZE)
d_loss, g_loss = gan.train_both(x)
d_loss_l.append(d_loss)
g_loss_l.append(g_loss)
if epoch % 10 == 0 or epoch == epochs - 1:
z = gan.get_z(x.shape[0])
w = gan.generator.predict(z, verbose=0)
save_images(w, output_fold, epoch, 0)
d_loss_ll.append(d_loss_l)
g_loss_ll.append(g_loss_l)
gan.generator.save_weights(output_fold + '/' + 'generator', True)
gan.discriminator.save_weights(output_fold + '/' + 'discriminator', True)
np.savetxt(output_fold + '/' + 'd_loss', d_loss_ll)
np.savetxt(output_fold + '/' + 'g_loss', g_loss_ll)
################################
# Run the GAN example
################################
import argparse
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', type=int, default=2,
help='Batch size for the networks')
parser.add_argument('--epochs', type=int, default=1000,
help='Epochs for the networks')
parser.add_argument('--output_fold', type=str, default='GAN_OUT',
help='Output fold to save the results')
parser.add_argument('--input_dim', type=int, default=2,
help='Input dimension for the generator.')
args = parser.parse_args()
train(args)
if __name__ == '__main__':
main() | mit | 1,528,142,904,994,182,000 | 30.758621 | 81 | 0.57431 | false |
StrongBoy998/ihome-project | Tornado_Project/utils/image_storage.py | 1 | 1089 | # -*- coding: utf-8 -*-
import qiniu.config
import logging
from qiniu import Auth, put_data, etag, urlsafe_base64_encode
# Fill in your Qiniu Access Key and Secret Key
access_key = 'btZDjv_qjI4O1P5-KKeaZXBGLJcM-AZfigN8HjQf'
secret_key = 'j2Sgq1Pz-1O90OoFSKr24Xa80mVWqzpqbo-byiN5'
# Bucket (storage space) to upload to
bucket_name = 'ihome'
def storage(data):
"""
    Upload a file to Qiniu cloud storage.
"""
if not data:
return None
try:
        # Build the authentication object
q = Auth(access_key, secret_key)
        # Generate an upload token; an expiration time etc. can be specified
token = q.upload_token(bucket_name)
ret, info = put_data(token, None, data)
except Exception as e:
logging.error(e)
raise Exception("上传文件到七牛错误")
if info and info.status_code != 200:
raise Exception("上传文件到七牛错误")
return ret["key"]
if __name__ == '__main__':
    file_name = raw_input("Enter the file to upload: ")
file = open(file_name, 'rb')
data = file.read()
key = storage(data)
print key
file.close()
| apache-2.0 | -76,605,809,266,359,420 | 18.408163 | 61 | 0.615142 | false |
OpenSPA/dvbapp | lib/python/Plugins/SystemPlugins/FastScan/plugin.py | 1 | 15297 | # -*- coding: utf-8 -*-
from Plugins.Plugin import PluginDescriptor
from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Components.config import config, ConfigSelection, ConfigYesNo, getConfigListEntry, ConfigSubsection, ConfigText
from Components.ConfigList import ConfigListScreen
from Components.NimManager import nimmanager
from Components.Label import Label
from Components.Pixmap import Pixmap
from Components.ProgressBar import ProgressBar
from Components.ServiceList import refreshServiceList
from Components.ActionMap import ActionMap
from enigma import eFastScan, eDVBFrontendParametersSatellite, eTimer
import os
config.misc.fastscan = ConfigSubsection()
config.misc.fastscan.last_configuration = ConfigText(default="()")
config.misc.fastscan.auto = ConfigSelection(default="true", choices=[("true", _("yes")), ("false", _("no")), ("multi", _("multi"))])
config.misc.fastscan.autoproviders = ConfigText(default="()")
class FastScanStatus(Screen):
skin = """
<screen position="150,115" size="420,180" title="Fast Scan">
<widget name="frontend" pixmap="skin_default/icons/scan-s.png" position="5,5" size="64,64" transparent="1" alphatest="on" />
<widget name="scan_state" position="10,120" zPosition="2" size="400,30" font="Regular;18" />
<widget name="scan_progress" position="10,155" size="400,15" pixmap="skin_default/progress_big.png" borderWidth="2" borderColor="#cccccc" />
</screen>"""
def __init__(self, session, scanTuner=0, transponderParameters=None, scanPid=900, keepNumbers=False, keepSettings=False, providerName='Favorites'):
Screen.__init__(self, session)
self.setTitle(_("Fast Scan"))
self.scanPid = scanPid
self.scanTuner = scanTuner
self.transponderParameters = transponderParameters
self.keepNumbers = keepNumbers
self.keepSettings = keepSettings
self.providerName = providerName
self.isDone = False
self.onClose.append(self.__onClose)
self["frontend"] = Pixmap()
self["scan_progress"] = ProgressBar()
self["scan_state"] = Label(_("scan state"))
if self.session.pipshown:
from Screens.InfoBar import InfoBar
InfoBar.instance and hasattr(InfoBar.instance, "showPiP") and InfoBar.instance.showPiP()
self.prevservice = self.session.nav.getCurrentlyPlayingServiceOrGroup()
self.session.nav.stopService()
self["actions"] = ActionMap(["OkCancelActions"],
{
"ok": self.ok,
"cancel": self.cancel
})
self.onFirstExecBegin.append(self.doServiceScan)
def __onClose(self):
self.scan.scanCompleted.get().remove(self.scanCompleted)
self.scan.scanProgress.get().remove(self.scanProgress)
del self.scan
def doServiceScan(self):
self["scan_state"].setText(_('Scanning %s...') % (self.providerName))
self["scan_progress"].setValue(0)
self.scan = eFastScan(self.scanPid, self.providerName, self.transponderParameters, self.keepNumbers, self.keepSettings)
self.scan.scanCompleted.get().append(self.scanCompleted)
self.scan.scanProgress.get().append(self.scanProgress)
fstfile = None
fntfile = None
for root, dirs, files in os.walk('/tmp/'):
for f in files:
if f.endswith('.bin'):
if '_FST' in f:
fstfile = os.path.join(root, f)
elif '_FNT' in f:
fntfile = os.path.join(root, f)
if fstfile and fntfile:
self.scan.startFile(fntfile, fstfile)
os.unlink(fstfile)
os.unlink(fntfile)
else:
self.scan.start(self.scanTuner)
def scanProgress(self, progress):
self["scan_progress"].setValue(progress)
def scanCompleted(self, result):
self.isDone = True
if result < 0:
self["scan_state"].setText(_('Scanning failed!'))
else:
self["scan_state"].setText(ngettext('List version %d, found %d channel', 'List version %d, found %d channels', result) % (self.scan.getVersion(), result))
def restoreService(self):
if self.prevservice:
self.session.nav.playService(self.prevservice)
def ok(self):
if self.isDone:
self.cancel()
def cancel(self):
if self.isDone:
refreshServiceList()
self.restoreService()
self.close()
class FastScanScreen(ConfigListScreen, Screen):
skin = """
<screen position="100,115" size="520,290" title="Fast Scan">
<widget name="config" position="10,10" size="500,250" scrollbarMode="showOnDemand" />
<widget name="introduction" position="10,265" size="500,25" font="Regular;20" halign="center" />
</screen>"""
providers = [
('Canal Digitaal', (1, 900, True)),
('TV Vlaanderen', (1, 910, True)),
('TéléSAT', (0, 920, True)),
('HD Austria', (0, 950, False)),
('Fast Scan Deutschland', (0, 960, False)),
('Skylink Czech Republic', (1, 30, False)),
('Skylink Slovak Republic', (1, 31, False)),
('AustriaSat Magyarország Eutelsat 9E', (2, 951, False)),
('AustriaSat Magyarország Astra 3', (1, 951, False)),
('TéléSAT Astra3', (1, 920, True)),
('HD Austria Astra3', (1, 950, False)),
('Fast Scan Deutschland Astra3', (1, 960, False)),
('Canal Digitaal Astra 1', (0, 900, True)),
('TV Vlaanderen Astra 1', (0, 910, True))]
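    # Home transponders used for the fast scan, in enigma2 units: frequency (kHz),
    # symbol rate (sym/s), FEC, orbital position (tenths of a degree east:
    # 192 = 19.2E Astra 1, 235 = 23.5E Astra 3, 90 = 9.0E Eutelsat),
    # polarisation, inversion, system, modulation, roll-off and pilot.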
transponders = ((12515000, 22000000, eDVBFrontendParametersSatellite.FEC_5_6, 192,
eDVBFrontendParametersSatellite.Polarisation_Horizontal, eDVBFrontendParametersSatellite.Inversion_Unknown,
eDVBFrontendParametersSatellite.System_DVB_S, eDVBFrontendParametersSatellite.Modulation_QPSK,
eDVBFrontendParametersSatellite.RollOff_alpha_0_35, eDVBFrontendParametersSatellite.Pilot_Off),
(12070000, 27500000, eDVBFrontendParametersSatellite.FEC_3_4, 235,
eDVBFrontendParametersSatellite.Polarisation_Horizontal, eDVBFrontendParametersSatellite.Inversion_Unknown,
eDVBFrontendParametersSatellite.System_DVB_S, eDVBFrontendParametersSatellite.Modulation_QPSK,
eDVBFrontendParametersSatellite.RollOff_alpha_0_35, eDVBFrontendParametersSatellite.Pilot_Off),
(12074000, 27500000, eDVBFrontendParametersSatellite.FEC_3_4, 90,
eDVBFrontendParametersSatellite.Polarisation_Vertical, eDVBFrontendParametersSatellite.Inversion_Unknown,
eDVBFrontendParametersSatellite.System_DVB_S2, eDVBFrontendParametersSatellite.Modulation_8PSK,
eDVBFrontendParametersSatellite.RollOff_alpha_0_35, eDVBFrontendParametersSatellite.Pilot_On))
def __init__(self, session, nimList):
Screen.__init__(self, session)
self.setTitle(_("Fast Scan"))
self["actions"] = ActionMap(["SetupActions", "MenuActions"],
{
"ok": self.keyGo,
"save": self.keySave,
"cancel": self.keyCancel,
"menu": self.closeRecursive,
}, -2)
providerList = list(x[0] for x in self.providers)
lastConfiguration = eval(config.misc.fastscan.last_configuration.value)
if not lastConfiguration or not tuple(x for x in self.providers if x[0] == lastConfiguration[1]):
lastConfiguration = (nimList[0][0], providerList[0], True, True, False)
self.scan_nims = ConfigSelection(default = lastConfiguration[0], choices = nimList)
self.scan_provider = ConfigSelection(default = lastConfiguration[1], choices = providerList)
self.scan_hd = ConfigYesNo(default = lastConfiguration[2])
self.scan_keepnumbering = ConfigYesNo(default = lastConfiguration[3])
self.scan_keepsettings = ConfigYesNo(default = lastConfiguration[4])
self.tunerEntry = getConfigListEntry(_("Tuner"), self.scan_nims)
self.scanProvider = getConfigListEntry(_("Provider"), self.scan_provider)
self.scanHD = getConfigListEntry(_("HD list"), self.scan_hd)
self.config_autoproviders = {}
auto_providers = config.misc.fastscan.autoproviders.value.split(",")
for provider in self.providers:
self.config_autoproviders[provider[0]] = ConfigYesNo(default=provider[0] in auto_providers )
self.list = []
ConfigListScreen.__init__(self, self.list)
self.createSetup()
self.finished_cb = None
self["introduction"] = Label(_("Select your provider, and press OK to start the scan"))
def createSetup(self):
self.list = []
self.list.append(self.tunerEntry)
self.list.append(self.scanProvider)
self.list.append(self.scanHD)
self.list.append(getConfigListEntry(_("Use fastscan channel numbering"), self.scan_keepnumbering))
self.list.append(getConfigListEntry(_("Use fastscan channel names"), self.scan_keepsettings))
self.list.append(getConfigListEntry(_("Enable auto fast scan"), config.misc.fastscan.auto))
if config.misc.fastscan.auto.value == "multi":
for provider in self.providers:
self.list.append(getConfigListEntry(_("Enable auto fast scan for %s") % provider[0], self.config_autoproviders[provider[0]]))
self["config"].list = self.list
self["config"].l.setList(self.list)
def keyLeft(self):
ConfigListScreen.keyLeft(self)
self.createSetup()
def keyRight(self):
ConfigListScreen.keyRight(self)
self.createSetup()
def saveConfiguration(self):
config.misc.fastscan.last_configuration.value = `(self.scan_nims.value, self.scan_provider.value, self.scan_hd.value, self.scan_keepnumbering.value, self.scan_keepsettings.value)`
auto_providers = []
for provider in self.providers:
if self.config_autoproviders[provider[0]].value:
auto_providers.append(provider[0])
config.misc.fastscan.autoproviders.value = ",".join(auto_providers)
config.misc.fastscan.save()
def keySave(self):
self.saveConfiguration()
self.close()
def keyGo(self):
self.saveConfiguration()
self.startScan()
def getTransponderParameters(self, number):
transponderParameters = eDVBFrontendParametersSatellite()
transponderParameters.frequency = self.transponders[number][0]
transponderParameters.symbol_rate = self.transponders[number][1]
transponderParameters.fec = self.transponders[number][2]
transponderParameters.orbital_position = self.transponders[number][3]
transponderParameters.polarisation = self.transponders[number][4]
transponderParameters.inversion = self.transponders[number][5]
transponderParameters.system = self.transponders[number][6]
transponderParameters.modulation = self.transponders[number][7]
transponderParameters.rolloff = self.transponders[number][8]
transponderParameters.pilot = self.transponders[number][9]
transponderParameters.is_id = 0 #-1
transponderParameters.pls_mode = eDVBFrontendParametersSatellite.PLS_Root
transponderParameters.pls_code = 1
return transponderParameters
def startScan(self):
parameters = tuple(x[1] for x in self.providers if x[0] == self.scan_provider.value)[0]
pid = parameters[1]
if self.scan_hd.value and parameters[2]:
pid += 1
if self.scan_nims.value:
self.session.open(FastScanStatus, scanTuner = int(self.scan_nims.value),
transponderParameters = self.getTransponderParameters(parameters[0]),
scanPid = pid, keepNumbers = self.scan_keepnumbering.value, keepSettings = self.scan_keepsettings.value,
providerName = self.scan_provider.getText())
def keyCancel(self):
self.close()
class FastScanAutoScreen(FastScanScreen):
def __init__(self, session, lastConfiguration):
print "[AutoFastScan] start %s" % lastConfiguration[1]
Screen.__init__(self, session)
self.skinName="Standby"
self["actions"] = ActionMap( [ "StandbyActions" ],
{
"power": self.Power,
"discrete_on": self.Power
}, -1)
self.onClose.append(self.__onClose)
parameters = tuple(x[1] for x in self.providers if x[0] == lastConfiguration[1])
if parameters:
parameters = parameters[0]
pid = parameters[1]
if lastConfiguration[2] and parameters[2]:
pid += 1
self.scan = eFastScan(pid, lastConfiguration[1], self.getTransponderParameters(parameters[0]), lastConfiguration[3], lastConfiguration[4])
self.scan.scanCompleted.get().append(self.scanCompleted)
self.scan.start(int(lastConfiguration[0]))
else:
self.scan = None
self.close(True)
def __onClose(self):
if self.scan:
self.scan.scanCompleted.get().remove(self.scanCompleted)
del self.scan
def scanCompleted(self, result):
print "[AutoFastScan] completed result = ", result
refreshServiceList()
self.close(result>0)
def Power(self):
from Screens.Standby import inStandby
inStandby.Power()
print "[AutoFastScan] aborted due to power button pressed"
self.close(True)
def createSummary(self):
from Screens.Standby import StandbySummary
return StandbySummary
def FastScanMain(session, **kwargs):
if session.nav.RecordTimer.isRecording():
session.open(MessageBox, _("A recording is currently running. Please stop the recording before trying to scan."), MessageBox.TYPE_ERROR)
else:
nimList = []
# collect all nims which are *not* set to "nothing"
for n in nimmanager.nim_slots:
if not n.isCompatible("DVB-S"):
continue
if n.config_mode == "nothing":
continue
if n.config_mode in ("loopthrough", "satposdepends"):
root_id = nimmanager.sec.getRoot(n.slot_id, int(n.config.connectedTo.value))
if n.type == nimmanager.nim_slots[root_id].type: # check if connected from a DVB-S to DVB-S2 Nim or vice versa
continue
nimList.append((str(n.slot), n.friendly_full_description))
if nimList:
session.open(FastScanScreen, nimList)
else:
session.open(MessageBox, _("No suitable sat tuner found!"), MessageBox.TYPE_ERROR)
Session = None
FastScanAutoStartTimer = eTimer()
autoproviders = []
def restartScanAutoStartTimer(reply=False):
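    # Scan callback: on failure retry in an hour, otherwise run the next queued
    # provider (multi mode) or schedule the next daily run.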
if not reply:
print "[AutoFastScan] Scan was not succesfully retry in one hour"
FastScanAutoStartTimer.startLongTimer(3600)
else:
global autoproviders
if autoproviders:
provider = autoproviders.pop(0)
if provider:
lastConfiguration = eval(config.misc.fastscan.last_configuration.value)
lastConfiguration = (lastConfiguration[0], provider, lastConfiguration[2], lastConfiguration[3], lastConfiguration[4])
Session.openWithCallback(restartScanAutoStartTimer, FastScanAutoScreen, lastConfiguration)
return
FastScanAutoStartTimer.startLongTimer(86400)
def FastScanAuto():
lastConfiguration = eval(config.misc.fastscan.last_configuration.value)
if not lastConfiguration or Session.nav.RecordTimer.isRecording():
restartScanAutoStartTimer()
else:
if config.misc.fastscan.auto.value == "multi":
global autoproviders
autoproviders = config.misc.fastscan.autoproviders.value.split(",")
if autoproviders:
provider = autoproviders.pop(0)
if provider:
lastConfiguration = (lastConfiguration[0], provider, lastConfiguration[2], lastConfiguration[3], lastConfiguration[4])
Session.openWithCallback(restartScanAutoStartTimer, FastScanAutoScreen, lastConfiguration)
FastScanAutoStartTimer.callback.append(FastScanAuto)
def leaveStandby():
FastScanAutoStartTimer.stop()
def standbyCountChanged(value):
if config.misc.fastscan.auto.value != "false" and config.misc.fastscan.last_configuration.value:
from Screens.Standby import inStandby
inStandby.onClose.append(leaveStandby)
FastScanAutoStartTimer.startLongTimer(90)
def startSession(session, **kwargs):
global Session
Session = session
config.misc.standbyCounter.addNotifier(standbyCountChanged, initial_call=False)
def FastScanStart(menuid, **kwargs):
if menuid == "scan":
return [(_("Fast Scan"), FastScanMain, "fastscan", None)]
else:
return []
def Plugins(**kwargs):
if (nimmanager.hasNimType("DVB-S")):
return [PluginDescriptor(name=_("Fast Scan"), description="Scan Dutch/Belgian sat provider", where = PluginDescriptor.WHERE_MENU, fnc=FastScanStart),
PluginDescriptor(where=[PluginDescriptor.WHERE_SESSIONSTART], fnc=startSession)]
else:
return []
| gpl-2.0 | -1,174,297,479,387,992,600 | 38.511628 | 181 | 0.74534 | false |
fapable/pygameproject2 | Menu/Opties.py | 1 | 1096 | import pygame
import Game as dm
def opties():
pygame.mixer.music.load("music.mp3")
pygame.mixer.music.play(loops=0, start=0.0)
size = width, height = 340,240
screen = pygame.display.set_mode(size)
redSquare = pygame.image.load('achtergrondSpelers.png').convert()
white = (255,255,255)
w = 700
h = 420
screen = pygame.display.set_mode((w, h))
screen.fill((white))
x = 0
y = 0
screen.blit(redSquare, (x, y))
pygame.display.flip()
black = (0,0,0)
pygame.mixer.music.load("music.mp3")
pygame.mixer.music.play(loops=0, start=0.0)
choose = dm.dumbmenu(screen,[
                           'Sound off',
                           'Change language -> English',
                           'Back to the menu']
, 180,150,None,35,1.4,black ,black)
if choose == 0:
print ("Je hebt gekozen voor Geluid uit'.")
elif choose == 1:
print ("Je hebt gekozen voor Taal wijzigen naar het engels''.")
elif choose == 2:
print( "Je hebt gekozen voor Terug naar het men'.")
| apache-2.0 | -6,016,865,642,873,361,000 | 27.102564 | 71 | 0.556569 | false |
elsuizo/Kivy_work | Game_1/game1.py | 1 | 1067 | #= -------------------------------------------------------------------------
# @file game1.py
#
# @date 11/17/15 23:12:50
# @author Martin Noblia
# @email [email protected]
#
# @brief
#
# @detail
#
# Licence:
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
#---------------------------------------------------------------------------=#
from kivy.app import App
from kivy.uix.widget import Widget
class Game(Widget):
pass
class GameApp(App):
def build(self):
return Game()
if __name__ == "__main__":
GameApp().run()
| mit | -6,981,795,805,820,989,000 | 27.078947 | 78 | 0.614808 | false |
OnroerendErfgoed/pyramid_urireferencer | tests/test_renderers.py | 1 | 2787 | # -*- coding: utf-8 -*-
from pyramid_urireferencer.models import (
RegistryResponse,
ApplicationResponse,
Item
)
class TestRenderers:
def test_empty_registry_renderer(self):
rr = RegistryResponse('http://id.example.org/foo/1', True, False, 0, [])
from pyramid_urireferencer.renderers import registry_adapter
r = registry_adapter(rr, {})
assert r['query_uri'] == 'http://id.example.org/foo/1'
assert r['success']
assert not r['has_references']
assert r['count'] == 0
assert len(r['applications']) == 0
def test_registry_renderer_one_app_no_results(self):
ar = ApplicationResponse(
'My app',
'http://something.example.org',
'http://somethingelse.example.org',
True,
False,
0,
[]
)
rr = RegistryResponse('http://id.example.org/foo/1', True, False, 0, [ar])
from pyramid_urireferencer.renderers import registry_adapter
r = registry_adapter(rr, {})
assert r['query_uri'] == 'http://id.example.org/foo/1'
assert r['success']
assert not r['has_references']
assert r['count'] == 0
assert len(r['applications']) == 1
assert 'title' in r['applications'][0]
def test_empty_application_renderer(self):
ar = ApplicationResponse(
'My app',
'http://something.example.org',
'http://somethingelse.example.org/references',
True,
False,
0,
[]
)
from pyramid_urireferencer.renderers import application_adapter
r = application_adapter(ar, {})
assert r['uri'] == 'http://something.example.org'
assert r['service_url'] == 'http://somethingelse.example.org/references'
assert r['success']
assert not r['has_references']
assert r['count'] == 0
assert len(r['items']) == 0
def test_application_renderer_one_item(self):
ar = ApplicationResponse(
'My app',
'http://something.example.org',
'http://somethingelse.example.org/references',
True,
False,
0,
[Item('http://something.example.org/thingy/thing', 'My item')]
)
from pyramid_urireferencer.renderers import application_adapter
r = application_adapter(ar, {})
assert r['uri'] == 'http://something.example.org'
assert r['service_url'] == 'http://somethingelse.example.org/references'
assert r['success']
assert not r['has_references']
assert r['count'] == 0
assert len(r['items']) == 1
assert 'title' in r['items'][0]
assert 'uri' in r['items'][0]
| mit | 6,088,043,973,341,636,000 | 33.8375 | 82 | 0.558306 | false |
TomiBelan/multiparser | example_lr.py | 1 | 1164 | from grammar import Grammar
from makelr import make_lr_parser
preface = "from toycalc.lex import tokenizer"
rules = {
# stat -> HELLO | IF cond THEN stat | IF cond THEN stat ELSE stat
"stat": [(("HELLO",), "lambda ctx, a: 'hello'"),
(("IF", "cond", "THEN", "stat"),
"lambda ctx, a, b, c, d: ('if', b, d, None)"),
(("IF", "cond", "THEN", "stat", "ELSE", "stat"),
"lambda ctx, a, b, c, d, e, f: ('if', b, d, f)")],
# cond -> cond AND cond | cond OR cond | TRUE | FALSE
"cond": [(("cond", "AND", "cond"), "lambda ctx, a, b, c: (a, 'and', c)"),
(("cond", "OR", "cond"), "lambda ctx, a, b, c: (a, 'or', c)"),
(("TRUE",), "lambda ctx, a: True"),
(("FALSE",), "lambda ctx, a: False")],
}
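# Operator precedence resolves the grammar's ambiguities: AND binds tighter than OR,
# and the dangling-else conflict is resolved by attaching ELSE to the nearest IF.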
properties = {
"default_start": "stat",
"precedence": [
[
("left", ["OR"]),
("left", ["AND"]),
],
[
("precedence", [("stat", ("IF", "cond", "THEN", "stat"))]),
("precedence", ["ELSE"]),
],
],
}
my_grammar = Grammar(preface, properties, rules)
print(make_lr_parser(my_grammar))
| apache-2.0 | -8,957,619,027,390,037,000 | 31.333333 | 77 | 0.450172 | false |
leonardoo/django-pipeline | tests/tests/test_compiler.py | 1 | 5061 | from __future__ import unicode_literals
import sys
from unittest import skipIf
from django.test import TestCase
from pipeline.collector import default_collector
from pipeline.compilers import Compiler, CompilerBase, SubProcessCompiler
from pipeline.exceptions import CompilerError
from tests.utils import _, pipeline_settings
class FailingCompiler(SubProcessCompiler):
output_extension = 'junk'
def match_file(self, path):
return path.endswith('.coffee')
def compile_file(self, infile, outfile, outdated=False, force=False):
command = (("/usr/bin/env", "false",),)
return self.execute_command(command)
class InvalidCompiler(SubProcessCompiler):
output_extension = 'junk'
def match_file(self, path):
return path.endswith('.coffee')
def compile_file(self, infile, outfile, outdated=False, force=False):
command = (
("this-exists-nowhere-as-a-command-and-should-fail",),
infile,
outfile
)
return self.execute_command(command)
class CopyingCompiler(SubProcessCompiler):
output_extension = 'junk'
def match_file(self, path):
return path.endswith('.coffee')
def compile_file(self, infile, outfile, outdated=False, force=False):
command = (
"cp",
infile,
outfile
)
return self.execute_command(command)
class LineNumberingCompiler(SubProcessCompiler):
output_extension = 'junk'
def match_file(self, path):
return path.endswith('.coffee')
def compile_file(self, infile, outfile, outdated=False, force=False):
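        # "Compile" by piping the source through cat -n; the numbered output
        # captured from stdout becomes the compiled file.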
command = (("/usr/bin/env", "cat"), ("-n",), infile,)
return self.execute_command(command, stdout_captured=outfile)
class DummyCompiler(CompilerBase):
output_extension = 'js'
def match_file(self, path):
return path.endswith('.coffee')
def compile_file(self, infile, outfile, outdated=False, force=False):
return
@pipeline_settings(COMPILERS=['tests.tests.test_compiler.DummyCompiler'])
class DummyCompilerTest(TestCase):
def setUp(self):
default_collector.collect()
self.compiler = Compiler()
def test_output_path(self):
output_path = self.compiler.output_path("js/helpers.coffee", "js")
self.assertEqual(output_path, "js/helpers.js")
def test_compilers_class(self):
compilers_class = self.compiler.compilers
self.assertEqual(compilers_class[0], DummyCompiler)
def test_compile(self):
paths = self.compiler.compile([
_('pipeline/js/dummy.coffee'),
_('pipeline/js/application.js'),
])
self.assertEqual([_('pipeline/js/dummy.js'), _('pipeline/js/application.js')], list(paths))
def tearDown(self):
default_collector.clear()
@skipIf(sys.platform.startswith("win"), "requires posix platform")
@pipeline_settings(COMPILERS=['tests.tests.test_compiler.LineNumberingCompiler'])
class CompilerStdoutTest(TestCase):
def setUp(self):
default_collector.collect()
self.compiler = Compiler()
def test_output_path(self):
output_path = self.compiler.output_path("js/helpers.coffee", "js")
self.assertEqual(output_path, "js/helpers.js")
def test_compile(self):
paths = self.compiler.compile([_('pipeline/js/dummy.coffee')])
self.assertEqual([_('pipeline/js/dummy.junk')], list(paths))
def tearDown(self):
default_collector.clear()
@skipIf(sys.platform.startswith("win"), "requires posix platform")
@pipeline_settings(COMPILERS=['tests.tests.test_compiler.CopyingCompiler'])
class CompilerSelfWriterTest(TestCase):
def setUp(self):
default_collector.collect()
self.compiler = Compiler()
def test_output_path(self):
output_path = self.compiler.output_path("js/helpers.coffee", "js")
self.assertEqual(output_path, "js/helpers.js")
def test_compile(self):
paths = self.compiler.compile([_('pipeline/js/dummy.coffee')])
default_collector.collect()
self.assertEqual([_('pipeline/js/dummy.junk')], list(paths))
def tearDown(self):
default_collector.clear()
@pipeline_settings(COMPILERS=['tests.tests.test_compiler.InvalidCompiler'])
class InvalidCompilerTest(TestCase):
def setUp(self):
default_collector.collect()
self.compiler = Compiler()
def test_compile(self):
self.assertRaises(CompilerError, self.compiler.compile, [_('pipeline/js/dummy.coffee')])
def tearDown(self):
default_collector.clear()
@skipIf(sys.platform.startswith("win"), "requires posix platform")
@pipeline_settings(COMPILERS=['tests.tests.test_compiler.FailingCompiler'])
class FailingCompilerTest(TestCase):
def setUp(self):
default_collector.collect()
self.compiler = Compiler()
def test_compile(self):
self.assertRaises(CompilerError, self.compiler.compile, [_('pipeline/js/dummy.coffee')])
def tearDown(self):
default_collector.clear()
| mit | 3,162,126,970,480,120,300 | 29.672727 | 99 | 0.669828 | false |
torehc/sinucha | web/sinucha/control/models.py | 1 | 5334 | from django.db import models
from django.contrib.auth.models import User
import datetime
from django.db.models.signals import post_save
from django.dispatch import receiver
class User_Data(models.Model):
USER = 'user'
ADMIN = 'admin'
ROL_CHOICES = (
(USER, 'USER'),
(ADMIN, 'ADMIN'),
)
tagRfid = models.CharField(max_length=64, blank=False)
chatid = models.DecimalField(max_digits=12, decimal_places=0)
balance_actual = models.DecimalField(max_digits=5, decimal_places=2, default=0)
username = models.CharField(max_length=20, blank=True)
user = models.ForeignKey(User, null=True, blank=True)
rol = models.CharField(max_length=5,
choices=ROL_CHOICES,
default=USER,
blank=False,
)
def __str__(self):
return '{}'.format(self.username)
@staticmethod
def check_user_chatid(chatid):
        return User_Data.objects.filter(chatid=chatid).exists()
@staticmethod
def register_user(chatid, rfid):
if(User_Data.check_user_chatid(chatid)):
return False
else:
create_user = User_Data.objects.create(
tagRfid = rfid,
chatid = chatid,
)
return True
@staticmethod
def check_user_balance(rfid,barcode):
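        # Charge the item's sale price to the user's balance; on success the stock
        # is decremented and a sale record is written, otherwise return False.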
user = User_Data.objects.get(tagRfid=rfid)
item = Item.objects.get(barcode=barcode)
if(user.balance_actual >= item.price_sale):
user.balance_actual -= item.price_sale
user.save()
            item.stock -= 1
item.save()
Sale_History.create_sale(item,user)
return True
else:
return False
class Balance(models.Model):
CASH = 'cash'
PAYPAL = 'PayPal'
TYPE_PAYMENT = (
(CASH, 'CASH'),
(PAYPAL, 'PAYPAL'),
)
user = models.ForeignKey(User_Data, blank=False)
amount_entered = models.DecimalField(max_digits=5, decimal_places=2, default=0, blank=False)
type_amount = models.CharField(max_length=6,
choices=TYPE_PAYMENT,
default=CASH,
blank=False,
)
    date = models.DateTimeField(default=datetime.datetime.now)
def __str__(self):
return '{}: +{}'.format(self.user, self.amount_entered)
class Item(models.Model):
name = models.CharField(max_length=30, blank=False)
barcode = models.CharField(max_length=30, blank=False)
price_sale = models.DecimalField(max_digits=5, decimal_places=2, default=0)
stock = models.IntegerField(blank=False, default=0)
def __str__(self):
return '{}'.format(self.name)
class Shopping_History(models.Model):
MERCADONA = 'mercadona'
LIDL = 'lidl'
OTRO = 'otro'
TYPE_SUPERMARKET = (
(MERCADONA, 'MERCADONA'),
(LIDL, 'LIDL'),
(OTRO, 'OTRO'),
)
item = models.ForeignKey(Item, blank=False)
    date = models.DateTimeField(default=datetime.datetime.now)
units = models.IntegerField(default=0)
unit_purchase_price = models.DecimalField(max_digits=5, decimal_places=2, default=0)
supermarket = models.CharField(max_length=9,
choices=TYPE_SUPERMARKET,
default=OTRO,
blank=False,
)
def __str__(self):
return '{} - {}'.format(self.item, self.date)
class Sale_History(models.Model):
item = models.ForeignKey(Item, blank=False)
user = models.ForeignKey(User_Data, blank=False)
    date = models.DateTimeField(default=datetime.datetime.now)
price_sale = models.DecimalField(max_digits=5, decimal_places=2, default=0)
price_cost = models.DecimalField(max_digits=5, decimal_places=2, default=0)
def __str__(self):
return '{}: {} - {}'.format(self.item, self.user, self.price_sale)
@staticmethod
def create_sale(item,user):
bought_item = Shopping_History.objects.filter(item=item).last()
create_sale = Sale_History.objects.create(
item=item,
user=user,
price_sale=item.price_sale,
price_cost=bought_item.unit_purchase_price,
)
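# post_save signal handlers keep derived fields in sync: a new purchase row adds
# its units to the item stock, and a new balance row adds to the user's balance.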
@receiver(post_save, sender=Shopping_History, dispatch_uid="create_stock_item")
def create_stock(sender, instance, **kwargs):
object_product = Item.objects.get(id=instance.item.id)
object_product.stock += instance.units
object_product.save()
@receiver(post_save, sender=Balance, dispatch_uid="add_payment_user")
def update_user_balance(sender, instance, **kwargs):
user = User_Data.objects.get(id=instance.user.id)
user.balance_actual += instance.amount_entered
user.save()
#import pdb; pdb.set_trace() | gpl-3.0 | 2,278,878,880,838,755,800 | 30.382353 | 96 | 0.553056 | false |
joequant/pyswagger | pyswagger/tests/v2_0/test_circular.py | 1 | 3163 | from pyswagger import SwaggerApp, utils, primitives, errs
from ..utils import get_test_data_folder
from ...scanner import CycleDetector
from ...scan import Scanner
import unittest
import os
import six
class CircularRefTestCase(unittest.TestCase):
""" test for circular reference guard """
def test_path_item_prepare_with_cycle(self):
app = SwaggerApp.load(get_test_data_folder(
version='2.0',
which=os.path.join('circular', 'path_item')
))
# should raise nothing
app.prepare()
def test_path_item(self):
folder = get_test_data_folder(
version='2.0',
which=os.path.join('circular', 'path_item')
)
def _pf(s):
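            # Build the file:// URL with a JSON-pointer fragment inside the test
            # folder, matching how the cycle detector reports node paths.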
return six.moves.urllib.parse.urlunparse((
'file',
'',
folder,
'',
'',
s))
app = SwaggerApp.create(folder)
s = Scanner(app)
c = CycleDetector()
s.scan(root=app.raw, route=[c])
self.assertEqual(sorted(c.cycles['path_item']), sorted([[
_pf('/paths/~1p1'),
_pf('/paths/~1p2'),
_pf('/paths/~1p3'),
_pf('/paths/~1p4'),
_pf('/paths/~1p1')
]]))
def test_schema(self):
folder = get_test_data_folder(
version='2.0',
which=os.path.join('circular', 'schema')
)
def _pf(s):
return six.moves.urllib.parse.urlunparse((
'file',
'',
folder,
'',
'',
s))
app = SwaggerApp.load(folder)
app.prepare(strict=False)
s = Scanner(app)
c = CycleDetector()
s.scan(root=app.raw, route=[c])
self.maxDiff = None
self.assertEqual(sorted(c.cycles['schema']), sorted([
[_pf('/definitions/s10'), _pf('/definitions/s11'), _pf('/definitions/s9'), _pf('/definitions/s10')],
[_pf('/definitions/s5'), _pf('/definitions/s5')],
[_pf('/definitions/s1'), _pf('/definitions/s2'), _pf('/definitions/s3'), _pf('/definitions/s4'), _pf('/definitions/s1')],
[_pf('/definitions/s12'), _pf('/definitions/s13'), _pf('/definitions/s12')],
[_pf('/definitions/s6'), _pf('/definitions/s7'), _pf('/definitions/s6')],
[_pf('/definitions/s14'), _pf('/definitions/s15'), _pf('/definitions/s14')]
]))
def test_deref(self):
app = SwaggerApp.create(get_test_data_folder(
version='2.0',
which=os.path.join('circular', 'schema'),
),
strict=False
)
s = app.resolve('#/definitions/s1')
self.assertRaises(errs.CycleDetectionError, utils.deref, s)
def test_primfactory(self):
app = SwaggerApp.create(get_test_data_folder(
version='2.0',
which=os.path.join('circular', 'schema'),
),
strict=False
)
s = app.resolve('#/definitions/s1')
self.assertRaises(errs.CycleDetectionError, app.prim_factory.produce, s, {})
| mit | 7,400,816,715,443,850,000 | 30.009804 | 133 | 0.510907 | false |
rocktavious/DevToolsLib | DTL/api/bases.py | 1 | 3894 | import inspect
#------------------------------------------------------------
#------------------------------------------------------------
class BaseStruct(object):
#------------------------------------------------------------
def __init__(self, *args, **kwds):
self.deserialize(*args, **kwds)
#------------------------------------------------------------
def _get_repr_format(self):
return r'{0}({1})'.format(type(self).__name__, self._get_init_params_format())
#------------------------------------------------------------
def _get_init_params_format(self):
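        # Build a "name={0}, name={1}, ..." format string from the signature of
        # deserialize, so the repr mirrors the constructor arguments.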
param_format = ''
params = inspect.getargspec(self.deserialize)[0]
params_count = len(params[1:])
for i in range(params_count):
param_format += '{0}='.format(params[i+1]) +'{'+str(i)+'}'
if i != params_count-1:
param_format += ', '
return param_format
#------------------------------------------------------------
def _get_repr(self):
try:
return self._get_repr_format().format(*self.serialize())
except:
return r'{0}({1})'.format(type(self).__name__, self.serialize())
#------------------------------------------------------------
def __str__(self):
return self._get_repr()
#------------------------------------------------------------
def __repr__(self):
return self._get_repr()
#------------------------------------------------------------
def __eq__(self, other):
if isinstance(other, type(self)) :
return self.serialize() == other.serialize()
else:
try:
coerced = self.__class__()
coerced.deserialize(other)
except:
return False
return self == coerced
#------------------------------------------------------------
def __ne__(self, other):
return not self.__eq__(other)
#------------------------------------------------------------
def add_quotes(self, data):
'''Convenience method to help in serialization of strings'''
return r"r'{0}'".format(data)
#------------------------------------------------------------
def serialize(self):
        '''Returns the arg list from which deserialize can recreate this object'''
return (None,)
#------------------------------------------------------------
def deserialize(self, *args, **kwds):
        '''Given the info from serialize, this should be able to reconstruct the object.
        deserialize must declare all of its args explicitly because the repr format is pulled from this function's signature'''
pass
#------------------------------------------------------------
#------------------------------------------------------------
class BaseDict(BaseStruct, dict):
#------------------------------------------------------------
def __init__(self, *args, **kwds):
super(BaseDict, self).__init__(*args, **kwds)
#------------------------------------------------------------
def _set_data(self, datadict):
for key, value in datadict.items():
self.__setitem__(key, value)
#------------------------------------------------------------
def set_default(self, default={}):
'''Allows the user to specify default values that should appear in the data'''
for key, value in default.items():
if not self.has_key(key):
self.__setitem__(key, eval(value))
#------------------------------------------------------------
def serialize(self):
return (dict(self),)
#------------------------------------------------------------
def deserialize(self, datadict={}):
self._set_data(datadict=datadict)
| mit | -2,309,170,053,516,535,000 | 38.734694 | 112 | 0.362866 | false |
tensorflow/lingvo | lingvo/tasks/asr/decoder_utils_test.py | 1 | 7263 | # Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for decoder utility functions."""
import lingvo.compat as tf
from lingvo.core import rnn_cell
from lingvo.core import symbolic
from lingvo.core import test_utils
from lingvo.tasks.asr import decoder
from lingvo.tasks.asr import decoder_utils
FLAGS = tf.flags.FLAGS
class DecoderUtilsSetRnnCellNodesTest(test_utils.TestCase):
def testSetRnnCellNodes(self):
decoder_p = decoder.AsrDecoder.Params()
base_rnn_p = rnn_cell.LSTMCellSimple.Params().Set(num_output_nodes=4)
# rnn_cell_dim > 0.
decoder_p.rnn_cell_dim = 8
rnn_p = base_rnn_p.Copy()
decoder_utils.SetRnnCellNodes(decoder_p, rnn_p)
self.assertEqual(rnn_p.num_output_nodes, decoder_p.rnn_cell_dim)
# rnn_cell_dim <= 0.
decoder_p.rnn_cell_dim = 0
rnn_p = base_rnn_p.Copy()
decoder_utils.SetRnnCellNodes(decoder_p, rnn_p)
self.assertEqual(rnn_p.num_output_nodes, base_rnn_p.num_output_nodes)
# rnn_cell_dim is a symbol.
decoder_p.rnn_cell_dim = symbolic.Symbol("rnn_cell_dim")
rnn_p = base_rnn_p.Copy()
decoder_utils.SetRnnCellNodes(decoder_p, rnn_p)
self.assertIs(rnn_p.num_output_nodes, decoder_p.rnn_cell_dim)
# rnn_cell_hidden_dim > 0.
decoder_p.rnn_cell_hidden_dim = 16
rnn_p = base_rnn_p.Copy()
decoder_utils.SetRnnCellNodes(decoder_p, rnn_p)
self.assertEqual(rnn_p.num_hidden_nodes, decoder_p.rnn_cell_hidden_dim)
# rnn_cell_hidden_dim <= 0.
decoder_p.rnn_cell_hidden_dim = 0
rnn_p = base_rnn_p.Copy()
decoder_utils.SetRnnCellNodes(decoder_p, rnn_p)
self.assertEqual(rnn_p.num_hidden_nodes, base_rnn_p.num_hidden_nodes)
# rnn_cell_hidden_dim is a symbol.
decoder_p.rnn_cell_hidden_dim = symbolic.Symbol("rnn_cell_hidden_dim")
rnn_p = base_rnn_p.Copy()
decoder_utils.SetRnnCellNodes(decoder_p, rnn_p)
self.assertIs(rnn_p.num_hidden_nodes, decoder_p.rnn_cell_hidden_dim)
class DecoderUtilsTokenizeTest(test_utils.TestCase):
def testTokenize(self):
s = "onetoken"
self.assertEqual(["onetoken"], decoder_utils.Tokenize(s))
s = "two tokens"
self.assertEqual(["two", "tokens"], decoder_utils.Tokenize(s))
s = " extra spaces are filtered "
self.assertEqual(["extra", "spaces", "are", "filtered"],
decoder_utils.Tokenize(s))
class DecoderUtilsComputeWerTest(test_utils.TestCase):
def testInvalidInputsExtraHyps(self):
with self.session():
with self.assertRaises(Exception):
decoder_utils.ComputeWer(hyps=["one", "two"], refs=["one"]).eval()
def testInvalidInputsExtraRefs(self):
with self.session():
with self.assertRaises(Exception):
decoder_utils.ComputeWer(hyps=["one"], refs=["one", "two"]).eval()
def testInvalidInputsWrongRank(self):
with self.session():
with self.assertRaises(Exception):
decoder_utils.ComputeWer(
hyps=[["one"], ["two"]], refs=[["one"], ["two"]]).eval()
def testBasic(self):
with self.session():
self.assertAllEqual(
decoder_utils.ComputeWer(hyps=["one"], refs=["one"]).eval(), [[0, 1]])
self.assertAllEqual(
decoder_utils.ComputeWer(hyps=["one two"], refs=["one two"]).eval(),
[[0, 2]])
def testMultiples(self):
with self.session():
wer = decoder_utils.ComputeWer(
hyps=["one", "two pigs"], refs=["one", "three pink pigs"])
self.assertAllEqual(wer.shape, [2, 2])
self.assertAllEqual(wer.eval(), [[0, 1], [2, 3]])
def testConsecutiveWhiteSpace(self):
with self.session():
wer = decoder_utils.ComputeWer(
hyps=["one two", "one two", "two pigs"],
refs=["one two", "one two ", "three pink pigs"])
self.assertAllEqual(wer.shape, [3, 2])
self.assertAllEqual(wer.eval(), [[0, 2], [0, 2], [2, 3]])
def testEmptyRefsAndHyps(self):
with self.session():
wer = decoder_utils.ComputeWer(
hyps=["", "one two", ""], refs=["", "", "three four five"])
self.assertAllEqual(wer.shape, [3, 2])
self.assertAllEqual(wer.eval(), [[0, 0], [2, 0], [3, 3]])
def testDifferencesInCaseAreCountedAsErrors(self):
with self.session():
wer = decoder_utils.ComputeWer(
hyps=["ONE two", "one two"], refs=["one two", "ONE two"])
self.assertAllEqual(wer.shape, [2, 2])
self.assertAllEqual(wer.eval(), [[1, 2], [1, 2]])
class DecoderUtilsFilterTest(test_utils.TestCase):
def testFilterEpsilon(self):
s = "no epsilon"
self.assertEqual(s, decoder_utils.FilterEpsilon(s))
s = "<epsilon>epsilon tokens are<epsilon>removed<epsilon>"
self.assertEqual("epsilon tokens are removed",
decoder_utils.FilterEpsilon(s))
def testFilterNoise(self):
s = "no noise"
self.assertEqual(s, decoder_utils.FilterNoise(s))
s = "<noise> noise tokens are <noise> removed <noise>"
self.assertEqual("noise tokens are removed", decoder_utils.FilterNoise(s))
class DecoderUtilsEditDistanceTest(test_utils.TestCase):
def testEditDistance1(self):
ref = "a b c d e f g h"
hyp = "a b c d e f g h"
self.assertEqual((0, 0, 0, 0), decoder_utils.EditDistance(ref, hyp))
ref = "a b c d e f g h"
hyp = "a b d e f g h"
self.assertEqual((0, 0, 1, 1), decoder_utils.EditDistance(ref, hyp))
ref = "a b c d e f g h"
hyp = "a b c i d e f g h"
self.assertEqual((1, 0, 0, 1), decoder_utils.EditDistance(ref, hyp))
ref = "a b c d e f g h"
hyp = "a b c i e f g h"
self.assertEqual((0, 1, 0, 1), decoder_utils.EditDistance(ref, hyp))
ref = "a b c d e f g j h"
hyp = "a b c i d e f g h"
self.assertEqual((1, 0, 1, 2), decoder_utils.EditDistance(ref, hyp))
ref = "a b c d e f g j h"
hyp = "a b c i e f g h k"
self.assertEqual((1, 1, 1, 3), decoder_utils.EditDistance(ref, hyp))
ref = ""
hyp = ""
self.assertEqual((0, 0, 0, 0), decoder_utils.EditDistance(ref, hyp))
ref = ""
hyp = "a b c"
self.assertEqual((3, 0, 0, 3), decoder_utils.EditDistance(ref, hyp))
ref = "a b c d"
hyp = ""
self.assertEqual((0, 0, 4, 4), decoder_utils.EditDistance(ref, hyp))
def testEditDistanceInIds(self):
ref = [0, 1, 2, 3, 9]
hyp = [0, 2, 3, 5, 6]
self.assertEqual((1, 1, 1, 3), decoder_utils.EditDistanceInIds(ref, hyp))
def testEditDistanceSkipsEmptyTokens(self):
ref = "a b c d e f g h"
hyp = "a b c d e f g h"
self.assertEqual((0, 0, 0, 0), decoder_utils.EditDistance(ref, hyp))
ref = "a b c d e f g h"
hyp = "a b c d e f g h"
self.assertEqual((0, 0, 0, 0), decoder_utils.EditDistance(ref, hyp))
if __name__ == "__main__":
tf.test.main()
| apache-2.0 | 9,188,687,105,552,122,000 | 33.259434 | 80 | 0.640644 | false |
TheCacophonyProject/Full_Noise | test/test_tracks.py | 1 | 4984 | class TestTracks:
def test_can_add_and_delete_tracks(self, helper):
recording = helper.given_new_device(self, "tracks").has_recording()
user = helper.admin_user()
track0 = user.can_add_track_to_recording(recording)
user.can_see_track(track0)
track1 = user.can_add_track_to_recording(recording)
user.can_see_track(track1)
user.delete_track(track0)
user.cannot_see_track(track0)
user.can_see_track(track1)
def test_cant_add_track_to_other_users_recording(self, helper):
recording = helper.given_new_device(self, "tracks").has_recording()
random_user = helper.given_new_user(self, "random")
random_user.cannot_add_track_to_recording(recording)
def test_cant_delete_other_users_track(self, helper):
recording = helper.given_new_device(self, "tracks").has_recording()
owner = helper.admin_user()
track = owner.can_add_track_to_recording(recording)
random_user = helper.given_new_user(self, "random")
random_user.cannot_delete_track(track)
def test_track_tags(self, helper):
recording = helper.given_new_device(self, "tracks").has_recording()
user = helper.admin_user()
track = user.can_add_track_to_recording(recording)
# Add a track tag and ensure the user can see it.
user.can_tag_track(track)
user.can_see_track(track)
# Add another track tag and ensure the user can see that too.
tag = user.can_tag_track(track)
user.can_see_track(track)
user.can_delete_track_tag(tag)
user.can_see_track(track)
def test_cant_add_track_tag_to_other_users_recording(self, helper):
recording = helper.given_new_device(self, "tracks").has_recording()
owner = helper.admin_user()
track = owner.can_add_track_to_recording(recording)
random_user = helper.given_new_user(self, "random")
random_user.cannot_tag_track(track)
def test_cant_delete_other_users_tag_track(self, helper):
recording = helper.given_new_device(self, "tracks").has_recording()
owner = helper.admin_user()
track = owner.can_add_track_to_recording(recording)
tag = owner.can_tag_track(track)
random_user = helper.given_new_user(self, "random")
random_user.cannot_delete_track_tag(tag)
def test_replace_tags(self, helper):
admin_user = helper.admin_user()
sharer = helper.given_new_device(self, "Sharer")
sylvia = helper.given_new_user(self, "Sylvia")
admin_user.add_to_device(sylvia, sharer)
recording = sharer.has_recording()
track = admin_user.can_add_track_to_recording(recording)
admin_user.can_tag_track(track, automatic=True, what="Possum", replace=True)
self.track_tag_is(sylvia, recording.id_, ["Possum"], [])
sylvia.can_tag_track(track, automatic=False, what="Cat", replace=True)
self.track_tag_is(sylvia, recording.id_, ["Possum"], ["Cat"])
sylvia.can_tag_track(track, automatic=False, what="Rodent", replace=True)
self.track_tag_is(sylvia, recording.id_, ["Possum"], ["Rodent"])
admin_user.can_tag_track(track, automatic=False, what="Cat", replace=True)
self.track_tag_is(sylvia, recording.id_, ["Possum"], ["Cat", "Rodent"])
admin_user.can_tag_track(track, automatic=False, what="Part", replace=True)
self.track_tag_is(sylvia, recording.id_, ["Possum"], ["Cat", "Rodent", "Part"])
admin_user.can_tag_track(track, automatic=False, what="Part", replace=True)
self.track_tag_is(sylvia, recording.id_, ["Possum"], ["Cat", "Rodent", "Part"])
sylvia.can_tag_track(track, automatic=False, what="Part", replace=True)
self.track_tag_is(sylvia, recording.id_, ["Possum"], ["Cat", "Rodent", "Part", "Part"])
sylvia.can_tag_track(track, automatic=False, what="Poor Tracking", replace=True)
self.track_tag_is(
sylvia, recording.id_, ["Possum"], ["Cat", "Rodent", "Part", "Part", "Poor Tracking"]
)
sylvia.can_tag_track(track, automatic=False, what="Mustelid", replace=True)
self.track_tag_is(
sylvia, recording.id_, ["Possum"], ["Cat", "Mustelid", "Part", "Part", "Poor Tracking"]
)
admin_user.can_tag_track(track, automatic=True, what="Rat", replace=True)
self.track_tag_is(
sylvia, recording.id_, ["Rat"], ["Cat", "Mustelid", "Part", "Part", "Poor Tracking"]
)
def track_tag_is(self, user, recording_id, ai_tag, manual_tags):
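        # Assert the recording's first track carries exactly the expected automatic
        # (AI) tags and that every manual tag present is one of the expected ones.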
track = user.get_tracks(recording_id)[0]
track_tags = track.get("TrackTags", [])
rec_ai_tag = [tag["what"] for tag in track_tags if tag["automatic"]]
assert rec_ai_tag == ai_tag
rec_manual_tags = [tag["what"] for tag in track_tags if not tag["automatic"]]
for tag in rec_manual_tags:
assert tag in manual_tags
| agpl-3.0 | 231,439,124,357,377,300 | 41.598291 | 99 | 0.629013 | false |
aristanetworks/arista-ovs-nova | nova/utils.py | 1 | 38397 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities and helper functions."""
import contextlib
import datetime
import errno
import functools
import hashlib
import inspect
import os
import pyclbr
import random
import re
import shlex
import shutil
import signal
import socket
import struct
import sys
import tempfile
import time
import weakref
from xml.sax import saxutils
from eventlet import event
from eventlet.green import subprocess
from eventlet import greenthread
from eventlet import semaphore
import netaddr
from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_opt(
cfg.BoolOpt('disable_process_locking', default=False,
help='Whether to disable inter-process locks'))
CONF.import_opt('glance_host', 'nova.config')
CONF.import_opt('glance_port', 'nova.config')
CONF.import_opt('glance_protocol', 'nova.config')
CONF.import_opt('instance_usage_audit_period', 'nova.config')
CONF.import_opt('monkey_patch', 'nova.config')
CONF.import_opt('rootwrap_config', 'nova.config')
CONF.import_opt('service_down_time', 'nova.config')
# Used for looking up extensions of text
# to their 'multiplied' byte amount
BYTE_MULTIPLIERS = {
'': 1,
't': 1024 ** 4,
'g': 1024 ** 3,
'm': 1024 ** 2,
'k': 1024,
}
def vpn_ping(address, port, timeout=0.05, session_id=None):
"""Sends a vpn negotiation packet and returns the server session.
Returns False on a failure. Basic packet structure is below.
Client packet (14 bytes)::
0 1 8 9 13
+-+--------+-----+
|x| cli_id |?????|
+-+--------+-----+
x = packet identifier 0x38
cli_id = 64 bit identifier
? = unknown, probably flags/padding
Server packet (26 bytes)::
         0 1      8 9  13 14     21 22  25
+-+--------+-----+--------+----+
|x| srv_id |?????| cli_id |????|
+-+--------+-----+--------+----+
x = packet identifier 0x40
cli_id = 64 bit identifier
? = unknown, probably flags/padding
bit 9 was 1 and the rest were 0 in testing
"""
if session_id is None:
session_id = random.randint(0, 0xffffffffffffffff)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
data = struct.pack('!BQxxxxx', 0x38, session_id)
sock.sendto(data, (address, port))
sock.settimeout(timeout)
try:
received = sock.recv(2048)
except socket.timeout:
return False
finally:
sock.close()
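    # Expected reply layout: network byte order, 1-byte identifier, 8-byte server
    # session id, 5 pad bytes, 8-byte client session id, 4 pad bytes (26 bytes total).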
fmt = '!BQxxxxxQxxxx'
if len(received) != struct.calcsize(fmt):
return False
(identifier, server_sess, client_sess) = struct.unpack(fmt, received)
if identifier == 0x40 and client_sess == session_id:
return server_sess
def _subprocess_setup():
# Python installs a SIGPIPE handler by default. This is usually not what
# non-Python subprocesses expect.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def execute(*cmd, **kwargs):
"""Helper method to execute command with optional retry.
If you add a run_as_root=True command, don't forget to add the
corresponding filter to etc/nova/rootwrap.d !
:param cmd: Passed to subprocess.Popen.
:param process_input: Send to opened process.
:param check_exit_code: Single bool, int, or list of allowed exit
codes. Defaults to [0]. Raise
exception.ProcessExecutionError unless
program exits with one of these code.
:param delay_on_retry: True | False. Defaults to True. If set to
True, wait a short amount of time
before retrying.
:param attempts: How many times to retry cmd.
:param run_as_root: True | False. Defaults to False. If set to True,
the command is run with rootwrap.
:raises exception.NovaException: on receiving unknown arguments
:raises exception.ProcessExecutionError:
:returns: a tuple, (stdout, stderr) from the spawned process, or None if
the command fails.
"""
process_input = kwargs.pop('process_input', None)
check_exit_code = kwargs.pop('check_exit_code', [0])
ignore_exit_code = False
if isinstance(check_exit_code, bool):
ignore_exit_code = not check_exit_code
check_exit_code = [0]
elif isinstance(check_exit_code, int):
check_exit_code = [check_exit_code]
delay_on_retry = kwargs.pop('delay_on_retry', True)
attempts = kwargs.pop('attempts', 1)
run_as_root = kwargs.pop('run_as_root', False)
shell = kwargs.pop('shell', False)
if len(kwargs):
raise exception.NovaException(_('Got unknown keyword args '
'to utils.execute: %r') % kwargs)
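    # For non-root callers the command is wrapped with nova-rootwrap, which applies
    # the command filters referenced by the configured rootwrap_config file.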
if run_as_root and os.geteuid() != 0:
cmd = ['sudo', 'nova-rootwrap', CONF.rootwrap_config] + list(cmd)
cmd = map(str, cmd)
while attempts > 0:
attempts -= 1
try:
LOG.debug(_('Running cmd (subprocess): %s'), ' '.join(cmd))
_PIPE = subprocess.PIPE # pylint: disable=E1101
if os.name == 'nt':
preexec_fn = None
close_fds = False
else:
preexec_fn = _subprocess_setup
close_fds = True
obj = subprocess.Popen(cmd,
stdin=_PIPE,
stdout=_PIPE,
stderr=_PIPE,
close_fds=close_fds,
preexec_fn=preexec_fn,
shell=shell)
result = None
if process_input is not None:
result = obj.communicate(process_input)
else:
result = obj.communicate()
obj.stdin.close() # pylint: disable=E1101
_returncode = obj.returncode # pylint: disable=E1101
LOG.debug(_('Result was %s') % _returncode)
if not ignore_exit_code and _returncode not in check_exit_code:
(stdout, stderr) = result
raise exception.ProcessExecutionError(
exit_code=_returncode,
stdout=stdout,
stderr=stderr,
cmd=' '.join(cmd))
return result
except exception.ProcessExecutionError:
if not attempts:
raise
else:
LOG.debug(_('%r failed. Retrying.'), cmd)
if delay_on_retry:
greenthread.sleep(random.randint(20, 200) / 100.0)
finally:
# NOTE(termie): this appears to be necessary to let the subprocess
# call clean something up in between calls, without
# it two execute calls in a row hangs the second one
greenthread.sleep(0)
def trycmd(*args, **kwargs):
"""
A wrapper around execute() to more easily handle warnings and errors.
Returns an (out, err) tuple of strings containing the output of
the command's stdout and stderr. If 'err' is not empty then the
command can be considered to have failed.
    :discard_warnings: True | False. Defaults to False. If set to True,
                       then for succeeding commands, stderr is cleared.
"""
discard_warnings = kwargs.pop('discard_warnings', False)
try:
out, err = execute(*args, **kwargs)
failed = False
except exception.ProcessExecutionError, exn:
out, err = '', str(exn)
failed = True
if not failed and discard_warnings and err:
# Handle commands that output to stderr but otherwise succeed
err = ''
return out, err
def ssh_execute(ssh, cmd, process_input=None,
addl_env=None, check_exit_code=True):
LOG.debug(_('Running cmd (SSH): %s'), cmd)
if addl_env:
raise exception.NovaException(_('Environment not supported over SSH'))
if process_input:
# This is (probably) fixable if we need it...
msg = _('process_input not supported over SSH')
raise exception.NovaException(msg)
stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
channel = stdout_stream.channel
#stdin.write('process_input would go here')
#stdin.flush()
# NOTE(justinsb): This seems suspicious...
# ...other SSH clients have buffering issues with this approach
stdout = stdout_stream.read()
stderr = stderr_stream.read()
stdin_stream.close()
exit_status = channel.recv_exit_status()
# exit_status == -1 if no exit code was returned
if exit_status != -1:
LOG.debug(_('Result was %s') % exit_status)
if check_exit_code and exit_status != 0:
raise exception.ProcessExecutionError(exit_code=exit_status,
stdout=stdout,
stderr=stderr,
cmd=cmd)
return (stdout, stderr)
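# NOTE(editor): illustrative sketch, not part of the original module. The
# caller must supply an already-connected SSH client exposing exec_command()
# (e.g. a paramiko SSHClient); the command shown is an arbitrary example.
def _example_ssh_execute_usage(ssh_client):  # pragma: no cover
    stdout, stderr = ssh_execute(ssh_client, 'uptime', check_exit_code=True)
    return stdout, stderr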
def novadir():
import nova
return os.path.abspath(nova.__file__).split('nova/__init__.py')[0]
def debug(arg):
LOG.debug(_('debug in callback: %s'), arg)
return arg
def generate_uid(topic, size=8):
characters = '01234567890abcdefghijklmnopqrstuvwxyz'
choices = [random.choice(characters) for _x in xrange(size)]
return '%s-%s' % (topic, ''.join(choices))
# Default symbols to use for passwords. Avoids visually confusing characters.
# ~6 bits per symbol
DEFAULT_PASSWORD_SYMBOLS = ('23456789', # Removed: 0,1
'ABCDEFGHJKLMNPQRSTUVWXYZ', # Removed: I, O
'abcdefghijkmnopqrstuvwxyz') # Removed: l
# ~5 bits per symbol
EASIER_PASSWORD_SYMBOLS = ('23456789', # Removed: 0, 1
'ABCDEFGHJKLMNPQRSTUVWXYZ') # Removed: I, O
def last_completed_audit_period(unit=None, before=None):
"""This method gives you the most recently *completed* audit period.
arguments:
            unit: string, one of 'hour', 'day', 'month', 'year'
                Periods normally begin at the beginning (UTC) of the
                period unit (So a 'day' period begins at midnight UTC,
                a 'month' unit on the 1st, a 'year' on Jan 1st)
unit string may be appended with an optional offset
like so: 'day@18' This will begin the period at 18:00
UTC. 'month@15' starts a monthly period on the 15th,
and year@3 begins a yearly one on March 1st.
before: Give the audit period most recently completed before
<timestamp>. Defaults to now.
returns: 2 tuple of datetimes (begin, end)
The begin timestamp of this audit period is the same as the
end of the previous."""
if not unit:
unit = CONF.instance_usage_audit_period
offset = 0
if '@' in unit:
unit, offset = unit.split("@", 1)
offset = int(offset)
if before is not None:
rightnow = before
else:
rightnow = timeutils.utcnow()
if unit not in ('month', 'day', 'year', 'hour'):
raise ValueError('Time period must be hour, day, month or year')
if unit == 'month':
if offset == 0:
offset = 1
end = datetime.datetime(day=offset,
month=rightnow.month,
year=rightnow.year)
if end >= rightnow:
year = rightnow.year
if 1 >= rightnow.month:
year -= 1
month = 12 + (rightnow.month - 1)
else:
month = rightnow.month - 1
end = datetime.datetime(day=offset,
month=month,
year=year)
year = end.year
if 1 >= end.month:
year -= 1
month = 12 + (end.month - 1)
else:
month = end.month - 1
begin = datetime.datetime(day=offset, month=month, year=year)
elif unit == 'year':
if offset == 0:
offset = 1
end = datetime.datetime(day=1, month=offset, year=rightnow.year)
if end >= rightnow:
end = datetime.datetime(day=1,
month=offset,
year=rightnow.year - 1)
begin = datetime.datetime(day=1,
month=offset,
year=rightnow.year - 2)
else:
begin = datetime.datetime(day=1,
month=offset,
year=rightnow.year - 1)
elif unit == 'day':
end = datetime.datetime(hour=offset,
day=rightnow.day,
month=rightnow.month,
year=rightnow.year)
if end >= rightnow:
end = end - datetime.timedelta(days=1)
begin = end - datetime.timedelta(days=1)
elif unit == 'hour':
end = rightnow.replace(minute=offset, second=0, microsecond=0)
if end >= rightnow:
end = end - datetime.timedelta(hours=1)
begin = end - datetime.timedelta(hours=1)
return (begin, end)
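# NOTE(editor): illustrative worked example, not part of the original module.
# With unit='day@18' the daily boundary is 18:00 UTC, so for a "now" of
# 2013-03-10 14:00 UTC today's boundary is still in the future and the most
# recently *completed* period runs 2013-03-08 18:00 to 2013-03-09 18:00.
def _example_audit_period():  # pragma: no cover
    before = datetime.datetime(2013, 3, 10, 14, 0, 0)
    begin, end = last_completed_audit_period(unit='day@18', before=before)
    assert begin == datetime.datetime(2013, 3, 8, 18, 0, 0)
    assert end == datetime.datetime(2013, 3, 9, 18, 0, 0)
    return begin, end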
def generate_password(length=20, symbolgroups=DEFAULT_PASSWORD_SYMBOLS):
"""Generate a random password from the supplied symbol groups.
At least one symbol from each group will be included. Unpredictable
results if length is less than the number of symbol groups.
Believed to be reasonably secure (with a reasonable password length!)
"""
r = random.SystemRandom()
# NOTE(jerdfelt): Some password policies require at least one character
# from each group of symbols, so start off with one random character
# from each symbol group
password = [r.choice(s) for s in symbolgroups]
# If length < len(symbolgroups), the leading characters will only
# be from the first length groups. Try our best to not be predictable
# by shuffling and then truncating.
r.shuffle(password)
password = password[:length]
length -= len(password)
# then fill with random characters from all symbol groups
symbols = ''.join(symbolgroups)
password.extend([r.choice(symbols) for _i in xrange(length)])
# finally shuffle to ensure first x characters aren't from a
# predictable group
r.shuffle(password)
return ''.join(password)
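# NOTE(editor): illustrative sketch, not part of the original module: a
# 12-character password drawn from the easier (no lowercase) symbol set.
def _example_generate_password():  # pragma: no cover
    password = generate_password(length=12,
                                 symbolgroups=EASIER_PASSWORD_SYMBOLS)
    assert len(password) == 12
    return password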
def last_octet(address):
return int(address.split('.')[-1])
def get_my_linklocal(interface):
try:
if_str = execute('ip', '-f', 'inet6', '-o', 'addr', 'show', interface)
condition = '\s+inet6\s+([0-9a-f:]+)/\d+\s+scope\s+link'
links = [re.search(condition, x) for x in if_str[0].split('\n')]
address = [w.group(1) for w in links if w is not None]
if address[0] is not None:
return address[0]
else:
msg = _('Link Local address is not found.:%s') % if_str
raise exception.NovaException(msg)
except Exception as ex:
msg = _("Couldn't get Link Local IP of %(interface)s"
" :%(ex)s") % locals()
raise exception.NovaException(msg)
def parse_mailmap(mailmap='.mailmap'):
mapping = {}
if os.path.exists(mailmap):
fp = open(mailmap, 'r')
for l in fp:
l = l.strip()
if not l.startswith('#') and ' ' in l:
canonical_email, alias = l.split(' ')
mapping[alias.lower()] = canonical_email.lower()
return mapping
def str_dict_replace(s, mapping):
for s1, s2 in mapping.iteritems():
s = s.replace(s1, s2)
return s
class LazyPluggable(object):
"""A pluggable backend loaded lazily based on some value."""
def __init__(self, pivot, **backends):
self.__backends = backends
self.__pivot = pivot
self.__backend = None
def __get_backend(self):
if not self.__backend:
backend_name = CONF[self.__pivot]
if backend_name not in self.__backends:
msg = _('Invalid backend: %s') % backend_name
raise exception.NovaException(msg)
backend = self.__backends[backend_name]
if isinstance(backend, tuple):
name = backend[0]
fromlist = backend[1]
else:
name = backend
fromlist = backend
self.__backend = __import__(name, None, None, fromlist)
return self.__backend
def __getattr__(self, key):
backend = self.__get_backend()
return getattr(backend, key)
class LoopingCallDone(Exception):
"""Exception to break out and stop a LoopingCall.
The poll-function passed to LoopingCall can raise this exception to
break out of the loop normally. This is somewhat analogous to
StopIteration.
An optional return-value can be included as the argument to the exception;
this return-value will be returned by LoopingCall.wait()
"""
def __init__(self, retvalue=True):
""":param retvalue: Value that LoopingCall.wait() should return."""
self.retvalue = retvalue
class LoopingCall(object):
def __init__(self, f=None, *args, **kw):
self.args = args
self.kw = kw
self.f = f
self._running = False
def start(self, interval, initial_delay=None):
self._running = True
done = event.Event()
def _inner():
if initial_delay:
greenthread.sleep(initial_delay)
try:
while self._running:
self.f(*self.args, **self.kw)
if not self._running:
break
greenthread.sleep(interval)
except LoopingCallDone, e:
self.stop()
done.send(e.retvalue)
except Exception:
LOG.exception(_('in looping call'))
done.send_exception(*sys.exc_info())
return
else:
done.send(True)
self.done = done
greenthread.spawn(_inner)
return self.done
def stop(self):
self._running = False
def wait(self):
return self.done.wait()
def xhtml_escape(value):
"""Escapes a string so it is valid within XML or XHTML.
"""
return saxutils.escape(value, {'"': '"', "'": '''})
def utf8(value):
"""Try to turn a string into utf-8 if possible.
Code is directly from the utf8 function in
http://github.com/facebook/tornado/blob/master/tornado/escape.py
"""
if isinstance(value, unicode):
return value.encode('utf-8')
assert isinstance(value, str)
return value
def to_bytes(text, default=0):
"""Try to turn a string into a number of bytes. Looks at the last
characters of the text to determine what conversion is needed to
turn the input text into a byte number.
Supports: B/b, K/k, M/m, G/g, T/t (or the same with b/B on the end)
"""
# Take off everything not number 'like' (which should leave
# only the byte 'identifier' left)
mult_key_org = text.lstrip('-1234567890')
mult_key = mult_key_org.lower()
mult_key_len = len(mult_key)
if mult_key.endswith("b"):
mult_key = mult_key[0:-1]
try:
multiplier = BYTE_MULTIPLIERS[mult_key]
if mult_key_len:
# Empty cases shouldn't cause text[0:-0]
text = text[0:-mult_key_len]
return int(text) * multiplier
except KeyError:
msg = _('Unknown byte multiplier: %s') % mult_key_org
raise TypeError(msg)
except ValueError:
return default
def delete_if_exists(pathname):
"""delete a file, but ignore file not found error"""
try:
os.unlink(pathname)
except OSError as e:
if e.errno == errno.ENOENT:
return
else:
raise
def get_from_path(items, path):
"""Returns a list of items matching the specified path.
Takes an XPath-like expression e.g. prop1/prop2/prop3, and for each item
in items, looks up items[prop1][prop2][prop3]. Like XPath, if any of the
intermediate results are lists it will treat each list item individually.
A 'None' in items or any child expressions will be ignored, this function
will not throw because of None (anywhere) in items. The returned list
will contain no None values.
"""
if path is None:
raise exception.NovaException('Invalid mini_xpath')
(first_token, sep, remainder) = path.partition('/')
if first_token == '':
raise exception.NovaException('Invalid mini_xpath')
results = []
if items is None:
return results
if not isinstance(items, list):
# Wrap single objects in a list
items = [items]
for item in items:
if item is None:
continue
get_method = getattr(item, 'get', None)
if get_method is None:
continue
child = get_method(first_token)
if child is None:
continue
if isinstance(child, list):
# Flatten intermediate lists
for x in child:
results.append(x)
else:
results.append(child)
if not sep:
# No more tokens
return results
else:
return get_from_path(results, remainder)
def flatten_dict(dict_, flattened=None):
"""Recursively flatten a nested dictionary."""
flattened = flattened or {}
for key, value in dict_.iteritems():
if hasattr(value, 'iteritems'):
flatten_dict(value, flattened)
else:
flattened[key] = value
return flattened
def partition_dict(dict_, keys):
"""Return two dicts, one with `keys` the other with everything else."""
intersection = {}
difference = {}
for key, value in dict_.iteritems():
if key in keys:
intersection[key] = value
else:
difference[key] = value
return intersection, difference
def map_dict_keys(dict_, key_map):
"""Return a dict in which the dictionaries keys are mapped to new keys."""
mapped = {}
for key, value in dict_.iteritems():
mapped_key = key_map[key] if key in key_map else key
mapped[mapped_key] = value
return mapped
def subset_dict(dict_, keys):
"""Return a dict that only contains a subset of keys."""
subset = partition_dict(dict_, keys)[0]
return subset
def diff_dict(orig, new):
"""
Return a dict describing how to change orig to new. The keys
correspond to values that have changed; the value will be a list
of one or two elements. The first element of the list will be
either '+' or '-', indicating whether the key was updated or
deleted; if the key was updated, the list will contain a second
element, giving the updated value.
"""
# Figure out what keys went away
result = dict((k, ['-']) for k in set(orig.keys()) - set(new.keys()))
# Compute the updates
for key, value in new.items():
if key not in orig or value != orig[key]:
result[key] = ['+', value]
return result
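# NOTE(editor): illustrative worked example, not part of the original module.
def _example_diff_dict():  # pragma: no cover
    orig = {'a': 1, 'b': 2, 'd': 5}
    new = {'a': 1, 'b': 3, 'c': 4}
    # 'd' was deleted, 'b' was updated, 'c' was added; unchanged 'a' is omitted
    assert diff_dict(orig, new) == {'d': ['-'], 'b': ['+', 3], 'c': ['+', 4]}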
def check_isinstance(obj, cls):
"""Checks that obj is of type cls, and lets PyLint infer types."""
if isinstance(obj, cls):
return obj
raise Exception(_('Expected object of type: %s') % (str(cls)))
def parse_server_string(server_str):
"""
    Parses the given server_string and returns a (host, port) tuple.
    If it's not a combination of host part and port, the port element
    is an empty string. If the input is an invalid expression, returns a
    tuple of empty strings.
"""
try:
# First of all, exclude pure IPv6 address (w/o port).
if netaddr.valid_ipv6(server_str):
return (server_str, '')
# Next, check if this is IPv6 address with a port number combination.
if server_str.find("]:") != -1:
(address, port) = server_str.replace('[', '', 1).split(']:')
return (address, port)
# Third, check if this is a combination of an address and a port
if server_str.find(':') == -1:
return (server_str, '')
# This must be a combination of an address and a port
(address, port) = server_str.split(':')
return (address, port)
except Exception:
LOG.error(_('Invalid server_string: %s'), server_str)
return ('', '')
def bool_from_str(val):
"""Convert a string representation of a bool into a bool value"""
if not val:
return False
try:
return True if int(val) else False
except ValueError:
return val.lower() == 'true' or \
val.lower() == 'yes' or \
val.lower() == 'y'
def is_valid_boolstr(val):
"""Check if the provided string is a valid bool string or not. """
val = str(val).lower()
return val == 'true' or val == 'false' or \
val == 'yes' or val == 'no' or \
val == 'y' or val == 'n' or \
val == '1' or val == '0'
def is_valid_ipv4(address):
"""valid the address strictly as per format xxx.xxx.xxx.xxx.
where xxx is a value between 0 and 255.
"""
parts = address.split(".")
if len(parts) != 4:
return False
for item in parts:
try:
if not 0 <= int(item) <= 255:
return False
except ValueError:
return False
return True
def is_valid_cidr(address):
"""Check if the provided ipv4 or ipv6 address is a valid
CIDR address or not"""
try:
# Validate the correct CIDR Address
netaddr.IPNetwork(address)
except netaddr.core.AddrFormatError:
return False
except UnboundLocalError:
# NOTE(MotoKen): work around bug in netaddr 0.7.5 (see detail in
# https://github.com/drkjam/netaddr/issues/2)
return False
    # Prior validation partially verifies the /xx part;
    # verify it here
ip_segment = address.split('/')
if (len(ip_segment) <= 1 or
ip_segment[1] == ''):
return False
return True
def monkey_patch():
""" If the Flags.monkey_patch set as True,
this function patches a decorator
for all functions in specified modules.
You can set decorators for each modules
using CONF.monkey_patch_modules.
The format is "Module path:Decorator function".
Example: 'nova.api.ec2.cloud:nova.notifier.api.notify_decorator'
Parameters of the decorator is as follows.
(See nova.notifier.api.notify_decorator)
name - name of the function
function - object of the function
"""
# If CONF.monkey_patch is not True, this function do nothing.
if not CONF.monkey_patch:
return
# Get list of modules and decorators
for module_and_decorator in CONF.monkey_patch_modules:
module, decorator_name = module_and_decorator.split(':')
# import decorator function
decorator = importutils.import_class(decorator_name)
__import__(module)
# Retrieve module information using pyclbr
module_data = pyclbr.readmodule_ex(module)
for key in module_data.keys():
# set the decorator for the class methods
if isinstance(module_data[key], pyclbr.Class):
clz = importutils.import_class("%s.%s" % (module, key))
for method, func in inspect.getmembers(clz, inspect.ismethod):
setattr(clz, method,
decorator("%s.%s.%s" % (module, key, method), func))
# set the decorator for the function
if isinstance(module_data[key], pyclbr.Function):
func = importutils.import_class("%s.%s" % (module, key))
setattr(sys.modules[module], key,
decorator("%s.%s" % (module, key), func))
def convert_to_list_dict(lst, label):
"""Convert a value or list into a list of dicts"""
if not lst:
return None
if not isinstance(lst, list):
lst = [lst]
return [{label: x} for x in lst]
def timefunc(func):
"""Decorator that logs how long a particular function took to execute"""
@functools.wraps(func)
def inner(*args, **kwargs):
start_time = time.time()
try:
return func(*args, **kwargs)
finally:
total_time = time.time() - start_time
LOG.debug(_("timefunc: '%(name)s' took %(total_time).2f secs") %
dict(name=func.__name__, total_time=total_time))
return inner
def generate_glance_url():
"""Generate the URL to glance."""
return "%s://%s:%d" % (CONF.glance_protocol, CONF.glance_host,
CONF.glance_port)
def generate_image_url(image_ref):
"""Generate an image URL from an image_ref."""
return "%s/images/%s" % (generate_glance_url(), image_ref)
@contextlib.contextmanager
def remove_path_on_error(path):
"""Protect code that wants to operate on PATH atomically.
Any exception will cause PATH to be removed.
"""
try:
yield
except Exception:
with excutils.save_and_reraise_exception():
delete_if_exists(path)
def make_dev_path(dev, partition=None, base='/dev'):
"""Return a path to a particular device.
>>> make_dev_path('xvdc')
/dev/xvdc
>>> make_dev_path('xvdc', 1)
/dev/xvdc1
"""
path = os.path.join(base, dev)
if partition:
path += str(partition)
return path
def total_seconds(td):
"""Local total_seconds implementation for compatibility with python 2.6"""
if hasattr(td, 'total_seconds'):
return td.total_seconds()
else:
return ((td.days * 86400 + td.seconds) * 10 ** 6 +
td.microseconds) / 10.0 ** 6
def sanitize_hostname(hostname):
"""Return a hostname which conforms to RFC-952 and RFC-1123 specs."""
if isinstance(hostname, unicode):
hostname = hostname.encode('latin-1', 'ignore')
hostname = re.sub('[ _]', '-', hostname)
hostname = re.sub('[^\w.-]+', '', hostname)
hostname = hostname.lower()
hostname = hostname.strip('.-')
return hostname
def read_cached_file(filename, cache_info, reload_func=None):
"""Read from a file if it has been modified.
:param cache_info: dictionary to hold opaque cache.
:param reload_func: optional function to be called with data when
file is reloaded due to a modification.
:returns: data from file
"""
mtime = os.path.getmtime(filename)
if not cache_info or mtime != cache_info.get('mtime'):
LOG.debug(_("Reloading cached file %s") % filename)
with open(filename) as fap:
cache_info['data'] = fap.read()
cache_info['mtime'] = mtime
if reload_func:
reload_func(cache_info['data'])
return cache_info['data']
def file_open(*args, **kwargs):
"""Open file
see built-in file() documentation for more details
Note: The reason this is kept in a separate module is to easily
be able to provide a stub module that doesn't alter system
state at all (for unit tests)
"""
return file(*args, **kwargs)
def hash_file(file_like_object):
"""Generate a hash for the contents of a file."""
checksum = hashlib.sha1()
for chunk in iter(lambda: file_like_object.read(32768), b''):
checksum.update(chunk)
return checksum.hexdigest()
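# NOTE(editor): illustrative sketch, not part of the original module; any
# file-like object with a read() method works, including an in-memory buffer.
def _example_hash_file():  # pragma: no cover
    import StringIO
    return hash_file(StringIO.StringIO('some data'))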
@contextlib.contextmanager
def temporary_mutation(obj, **kwargs):
"""Temporarily set the attr on a particular object to a given value then
revert when finished.
One use of this is to temporarily set the read_deleted flag on a context
object:
with temporary_mutation(context, read_deleted="yes"):
do_something_that_needed_deleted_objects()
"""
NOT_PRESENT = object()
old_values = {}
for attr, new_value in kwargs.items():
old_values[attr] = getattr(obj, attr, NOT_PRESENT)
setattr(obj, attr, new_value)
try:
yield
finally:
for attr, old_value in old_values.items():
if old_value is NOT_PRESENT:
del obj[attr]
else:
setattr(obj, attr, old_value)
def generate_mac_address():
"""Generate an Ethernet MAC address."""
# NOTE(vish): We would prefer to use 0xfe here to ensure that linux
# bridge mac addresses don't change, but it appears to
# conflict with libvirt, so we use the next highest octet
# that has the unicast and locally administered bits set
# properly: 0xfa.
# Discussion: https://bugs.launchpad.net/nova/+bug/921838
mac = [0xfa, 0x16, 0x3e,
random.randint(0x00, 0xff),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff)]
return ':'.join(map(lambda x: "%02x" % x, mac))
def read_file_as_root(file_path):
"""Secure helper to read file as root."""
try:
out, _err = execute('cat', file_path, run_as_root=True)
return out
except exception.ProcessExecutionError:
raise exception.FileNotFound(file_path=file_path)
@contextlib.contextmanager
def temporary_chown(path, owner_uid=None):
"""Temporarily chown a path.
    :param owner_uid: UID of temporary owner (defaults to current user)
"""
if owner_uid is None:
owner_uid = os.getuid()
orig_uid = os.stat(path).st_uid
if orig_uid != owner_uid:
execute('chown', owner_uid, path, run_as_root=True)
try:
yield
finally:
if orig_uid != owner_uid:
execute('chown', orig_uid, path, run_as_root=True)
@contextlib.contextmanager
def tempdir(**kwargs):
tmpdir = tempfile.mkdtemp(**kwargs)
try:
yield tmpdir
finally:
try:
shutil.rmtree(tmpdir)
except OSError, e:
LOG.error(_('Could not remove tmpdir: %s'), str(e))
def strcmp_const_time(s1, s2):
"""Constant-time string comparison.
    :param s1: the first string
    :param s2: the second string
:return: True if the strings are equal.
This function takes two strings and compares them. It is intended to be
used when doing a comparison for authentication purposes to help guard
against timing attacks.
"""
if len(s1) != len(s2):
return False
result = 0
for (a, b) in zip(s1, s2):
result |= ord(a) ^ ord(b)
return result == 0
def walk_class_hierarchy(clazz, encountered=None):
"""Walk class hierarchy, yielding most derived classes first"""
if not encountered:
encountered = []
for subclass in clazz.__subclasses__():
if subclass not in encountered:
encountered.append(subclass)
# drill down to leaves first
for subsubclass in walk_class_hierarchy(subclass, encountered):
yield subsubclass
yield subclass
class UndoManager(object):
"""Provides a mechanism to facilitate rolling back a series of actions
when an exception is raised.
"""
def __init__(self):
self.undo_stack = []
def undo_with(self, undo_func):
self.undo_stack.append(undo_func)
def _rollback(self):
for undo_func in reversed(self.undo_stack):
undo_func()
def rollback_and_reraise(self, msg=None, **kwargs):
"""Rollback a series of actions then re-raise the exception.
.. note:: (sirp) This should only be called within an
exception handler.
"""
with excutils.save_and_reraise_exception():
if msg:
LOG.exception(msg, **kwargs)
self._rollback()
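# NOTE(editor): illustrative usage sketch, not part of the original module;
# the failing step and the registered cleanup below are hypothetical.
def _example_undo_manager():  # pragma: no cover
    undo_mgr = UndoManager()
    try:
        undo_mgr.undo_with(lambda: None)  # register cleanup for a step
        raise RuntimeError('later step failed')
    except Exception:
        # rolls back registered steps in reverse order, then re-raises
        undo_mgr.rollback_and_reraise(msg=_('Provisioning failed'))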
def mkfs(fs, path, label=None):
"""Format a file or block device
    :param fs: Filesystem type (examples include 'swap', 'ext3', 'ext4',
               'btrfs', etc.)
:param path: Path to file or block device to format
:param label: Volume label to use
"""
if fs == 'swap':
args = ['mkswap']
else:
args = ['mkfs', '-t', fs]
    # add -F to force non-interactive execution on a non-block device.
if fs in ('ext3', 'ext4'):
args.extend(['-F'])
if label:
if fs in ('msdos', 'vfat'):
label_opt = '-n'
else:
label_opt = '-L'
args.extend([label_opt, label])
args.append(path)
execute(*args)
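# NOTE(editor): illustrative worked example, not part of the original module:
# mkfs('ext4', '/dev/sdb1', 'data') runs "mkfs -t ext4 -F -L data /dev/sdb1",
# while mkfs('swap', '/dev/sdb2') runs "mkswap /dev/sdb2".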
def last_bytes(file_like_object, num):
"""Return num bytes from the end of the file, and remaining byte count.
:param file_like_object: The file to read
:param num: The number of bytes to return
    :returns: (data, remaining)
"""
try:
file_like_object.seek(-num, os.SEEK_END)
except IOError, e:
if e.errno == 22:
file_like_object.seek(0, os.SEEK_SET)
else:
raise
remaining = file_like_object.tell()
return (file_like_object.read(), remaining)
| apache-2.0 | -4,965,048,132,937,103,000 | 31.104515 | 79 | 0.587963 | false |
larsbutler/swift | swift/obj/expirer.py | 1 | 12641 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from six.moves import urllib
from random import random
from time import time
from os.path import join
from swift import gettext_ as _
import hashlib
from eventlet import sleep, Timeout
from eventlet.greenpool import GreenPool
from swift.common.daemon import Daemon
from swift.common.internal_client import InternalClient, UnexpectedResponse
from swift.common.utils import get_logger, dump_recon_cache, split_path
from swift.common.http import HTTP_NOT_FOUND, HTTP_CONFLICT, \
HTTP_PRECONDITION_FAILED
from swift.container.reconciler import direct_delete_container_entry
MAX_OBJECTS_TO_CACHE = 100000
class ObjectExpirer(Daemon):
"""
Daemon that queries the internal hidden expiring_objects_account to
discover objects that need to be deleted.
:param conf: The daemon configuration.
"""
def __init__(self, conf, logger=None, swift=None):
self.conf = conf
self.logger = logger or get_logger(conf, log_route='object-expirer')
self.interval = int(conf.get('interval') or 300)
self.expiring_objects_account = \
(conf.get('auto_create_account_prefix') or '.') + \
(conf.get('expiring_objects_account_name') or 'expiring_objects')
conf_path = conf.get('__file__') or '/etc/swift/object-expirer.conf'
request_tries = int(conf.get('request_tries') or 3)
self.swift = swift or InternalClient(
conf_path, 'Swift Object Expirer', request_tries)
self.report_interval = int(conf.get('report_interval') or 300)
self.report_first_time = self.report_last_time = time()
self.report_objects = 0
self.recon_cache_path = conf.get('recon_cache_path',
'/var/cache/swift')
self.rcache = join(self.recon_cache_path, 'object.recon')
self.concurrency = int(conf.get('concurrency', 1))
if self.concurrency < 1:
raise ValueError("concurrency must be set to at least 1")
self.processes = int(self.conf.get('processes', 0))
self.process = int(self.conf.get('process', 0))
self.reclaim_age = int(conf.get('reclaim_age', 86400 * 7))
def report(self, final=False):
"""
        Emits a log line report of the progress so far, or the final progress
        if final=True.
:param final: Set to True for the last report once the expiration pass
has completed.
"""
if final:
elapsed = time() - self.report_first_time
self.logger.info(_('Pass completed in %(time)ds; '
'%(objects)d objects expired') % {
'time': elapsed, 'objects': self.report_objects})
dump_recon_cache({'object_expiration_pass': elapsed,
'expired_last_pass': self.report_objects},
self.rcache, self.logger)
elif time() - self.report_last_time >= self.report_interval:
elapsed = time() - self.report_first_time
self.logger.info(_('Pass so far %(time)ds; '
'%(objects)d objects expired') % {
'time': elapsed, 'objects': self.report_objects})
self.report_last_time = time()
def iter_cont_objs_to_expire(self):
"""
Yields (container, obj) tuples to be deleted
"""
obj_cache = {}
cnt = 0
all_containers = set()
for c in self.swift.iter_containers(self.expiring_objects_account):
container = str(c['name'])
timestamp = int(container)
if timestamp > int(time()):
break
all_containers.add(container)
for o in self.swift.iter_objects(self.expiring_objects_account,
container):
obj = o['name'].encode('utf8')
timestamp, actual_obj = obj.split('-', 1)
timestamp = int(timestamp)
if timestamp > int(time()):
break
try:
cust_account, cust_cont, cust_obj = \
split_path('/' + actual_obj, 3, 3, True)
cache_key = '%s/%s' % (cust_account, cust_cont)
except ValueError:
cache_key = None
if self.processes > 0:
obj_process = int(
hashlib.md5('%s/%s' % (container, obj)).
hexdigest(), 16)
if obj_process % self.processes != self.process:
continue
if cache_key not in obj_cache:
obj_cache[cache_key] = []
obj_cache[cache_key].append((container, obj))
cnt += 1
if cnt > MAX_OBJECTS_TO_CACHE:
while obj_cache:
for key in obj_cache.keys():
if obj_cache[key]:
yield obj_cache[key].pop()
cnt -= 1
else:
del obj_cache[key]
while obj_cache:
for key in obj_cache.keys():
if obj_cache[key]:
yield obj_cache[key].pop()
else:
del obj_cache[key]
for container in all_containers:
yield (container, None)
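    # NOTE(editor): illustrative example of the queue naming convention
    # consumed above (not part of the original module): containers in the
    # hidden account are named after delete-at timestamps (e.g. '1357016400')
    # and entries look like '1357016415-AUTH_test/photos/img.jpg', which
    # split into the timestamp 1357016415 and the actual object path
    # AUTH_test/photos/img.jpg.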
def run_once(self, *args, **kwargs):
"""
Executes a single pass, looking for objects to expire.
:param args: Extra args to fulfill the Daemon interface; this daemon
has no additional args.
:param kwargs: Extra keyword args to fulfill the Daemon interface; this
daemon accepts processes and process keyword args.
These will override the values from the config file if
provided.
"""
self.get_process_values(kwargs)
pool = GreenPool(self.concurrency)
containers_to_delete = set([])
self.report_first_time = self.report_last_time = time()
self.report_objects = 0
try:
self.logger.debug('Run begin')
containers, objects = \
self.swift.get_account_info(self.expiring_objects_account)
self.logger.info(_('Pass beginning; '
'%(containers)s possible containers; '
'%(objects)s possible objects') % {
'containers': containers, 'objects': objects})
for container, obj in self.iter_cont_objs_to_expire():
containers_to_delete.add(container)
if not obj:
continue
timestamp, actual_obj = obj.split('-', 1)
timestamp = int(timestamp)
if timestamp > int(time()):
break
pool.spawn_n(
self.delete_object, actual_obj, timestamp,
container, obj)
pool.waitall()
for container in containers_to_delete:
try:
self.swift.delete_container(
self.expiring_objects_account,
container,
acceptable_statuses=(2, HTTP_NOT_FOUND, HTTP_CONFLICT))
except (Exception, Timeout) as err:
self.logger.exception(
_('Exception while deleting container %(container)s '
'%(err)s') % {'container': container,
'err': str(err)})
self.logger.debug('Run end')
self.report(final=True)
except (Exception, Timeout):
self.logger.exception(_('Unhandled exception'))
def run_forever(self, *args, **kwargs):
"""
Executes passes forever, looking for objects to expire.
:param args: Extra args to fulfill the Daemon interface; this daemon
has no additional args.
:param kwargs: Extra keyword args to fulfill the Daemon interface; this
daemon has no additional keyword args.
"""
sleep(random() * self.interval)
while True:
begin = time()
try:
self.run_once(*args, **kwargs)
except (Exception, Timeout):
self.logger.exception(_('Unhandled exception'))
elapsed = time() - begin
if elapsed < self.interval:
sleep(random() * (self.interval - elapsed))
def get_process_values(self, kwargs):
"""
Sets self.processes and self.process from the kwargs if those
values exist, otherwise, leaves those values as they were set in
the config file.
:param kwargs: Keyword args passed into the run_forever(), run_once()
methods. They have values specified on the command
line when the daemon is run.
"""
if kwargs.get('processes') is not None:
self.processes = int(kwargs['processes'])
if kwargs.get('process') is not None:
self.process = int(kwargs['process'])
if self.process < 0:
raise ValueError(
'process must be an integer greater than or equal to 0')
if self.processes < 0:
raise ValueError(
'processes must be an integer greater than or equal to 0')
if self.processes and self.process >= self.processes:
raise ValueError(
'process must be less than or equal to processes')
def delete_object(self, actual_obj, timestamp, container, obj):
start_time = time()
try:
try:
self.delete_actual_object(actual_obj, timestamp)
except UnexpectedResponse as err:
if err.resp.status_int not in {HTTP_NOT_FOUND,
HTTP_PRECONDITION_FAILED}:
raise
if float(timestamp) > time() - self.reclaim_age:
# we'll have to retry the DELETE later
raise
self.pop_queue(container, obj)
self.report_objects += 1
self.logger.increment('objects')
except (Exception, Timeout) as err:
self.logger.increment('errors')
self.logger.exception(
_('Exception while deleting object %(container)s %(obj)s'
' %(err)s') % {'container': container,
'obj': obj, 'err': str(err)})
self.logger.timing_since('timing', start_time)
self.report()
def pop_queue(self, container, obj):
"""
Issue a delete object request to the container for the expiring object
queue entry.
"""
direct_delete_container_entry(self.swift.container_ring,
self.expiring_objects_account,
container, obj)
def delete_actual_object(self, actual_obj, timestamp):
"""
Deletes the end-user object indicated by the actual object name given
'<account>/<container>/<object>' if and only if the X-Delete-At value
of the object is exactly the timestamp given.
:param actual_obj: The name of the end-user object to delete:
'<account>/<container>/<object>'
:param timestamp: The timestamp the X-Delete-At value must match to
perform the actual delete.
"""
path = '/v1/' + urllib.parse.quote(actual_obj.lstrip('/'))
self.swift.make_request('DELETE', path,
{'X-If-Delete-At': str(timestamp),
'X-Timestamp': str(timestamp)},
(2,))
| apache-2.0 | -1,666,015,285,613,331,700 | 40.445902 | 79 | 0.537616 | false |
Rctue/nao-lib | gestures/sta.py | 1 | 3412 | names = list()
times = list()
keys = list()
names.append("HeadYaw")
times.append([ 2.00000])
keys.append([ [ 0.00000, [ 3, -0.66667, 0.00000], [ 3, 0.00000, 0.00000]]])
names.append("HeadPitch")
times.append([ 2.00000])
keys.append([ [ -0.30000, [ 3, -0.66667, 0.00000], [ 3, 0.00000, 0.00000]]])
names.append("LShoulderPitch")
times.append([ 2.00000])
keys.append([ [ 1.39626, [ 3, -0.66667, 0.00000], [ 3, 0.00000, 0.00000]]])
names.append("LShoulderRoll")
times.append([ 2.00000])
keys.append([ [ 0.34907, [ 3, -0.66667, 0.00000], [ 3, 0.00000, 0.00000]]])
names.append("LElbowYaw")
times.append([ 2.00000])
keys.append([ [ -1.39626, [ 3, -0.66667, 0.00000], [ 3, 0.00000, 0.00000]]])
names.append("LElbowRoll")
times.append([ 2.00000])
keys.append([ [ -1.04720, [ 3, -0.66667, 0.00000], [ 3, 0.00000, 0.00000]]])
names.append("LWristYaw")
times.append([ 2.00000])
keys.append([ [ 0.00000, [ 3, -0.66667, 0.00000], [ 3, 0.00000, 0.00000]]])
names.append("LHand")
times.append([ 2.00000])
keys.append([ [ 0.00000, [ 3, -0.66667, 0.00000], [ 3, 0.00000, 0.00000]]])
names.append("RShoulderPitch")
times.append([ 2.00000])
keys.append([ [ 1.39626, [ 3, -0.66667, 0.00000], [ 3, 0.00000, 0.00000]]])
names.append("RShoulderRoll")
times.append([ 2.00000])
keys.append([ [ -0.34907, [ 3, -0.66667, 0.00000], [ 3, 0.00000, 0.00000]]])
names.append("RElbowYaw")
times.append([ 2.00000])
keys.append([ [ 1.39626, [ 3, -0.66667, 0.00000], [ 3, 0.00000, 0.00000]]])
names.append("RElbowRoll")
times.append([ 2.00000])
keys.append([ [ 1.04720, [ 3, -0.66667, 0.00000], [ 3, 0.00000, 0.00000]]])
names.append("RWristYaw")
times.append([ 2.00000])
keys.append([ [ 0.00000, [ 3, -0.66667, 0.00000], [ 3, 0.00000, 0.00000]]])
names.append("RHand")
times.append([ 2.00000])
keys.append([ [ 0.00000, [ 3, -0.66667, 0.00000], [ 3, 0.00000, 0.00000]]])
names.append("LHipYawPitch")
times.append([ 2.00000])
keys.append([ [ 0.00000, [ 3, -0.66667, 0.00000], [ 3, 0.00000, 0.00000]]])
names.append("LHipRoll")
times.append([ 2.00000])
keys.append([ [ 0.00000, [ 3, -0.66667, 0.00000], [ 3, 0.00000, 0.00000]]])
names.append("LHipPitch")
times.append([ 2.00000])
keys.append([ [ -0.43633, [ 3, -0.66667, 0.00000], [ 3, 0.00000, 0.00000]]])
names.append("LKneePitch")
times.append([ 2.00000])
keys.append([ [ 0.69813, [ 3, -0.66667, 0.00000], [ 3, 0.00000, 0.00000]]])
names.append("LAnklePitch")
times.append([ 2.00000])
keys.append([ [ -0.34907, [ 3, -0.66667, 0.00000], [ 3, 0.00000, 0.00000]]])
names.append("LAnkleRoll")
times.append([ 2.00000])
keys.append([ [ 0.00000, [ 3, -0.66667, 0.00000], [ 3, 0.00000, 0.00000]]])
names.append("RHipRoll")
times.append([ 2.00000])
keys.append([ [ 0.00000, [ 3, -0.66667, 0.00000], [ 3, 0.00000, 0.00000]]])
names.append("RHipPitch")
times.append([ 2.00000])
keys.append([ [ -0.43633, [ 3, -0.66667, 0.00000], [ 3, 0.00000, 0.00000]]])
names.append("RKneePitch")
times.append([ 2.00000])
keys.append([ [ 0.69813, [ 3, -0.66667, 0.00000], [ 3, 0.00000, 0.00000]]])
names.append("RAnklePitch")
times.append([ 2.00000])
keys.append([ [ -0.34907, [ 3, -0.66667, 0.00000], [ 3, 0.00000, 0.00000]]])
names.append("RAnkleRoll")
times.append([ 2.00000])
keys.append([ [ 0.00000, [ 3, -0.66667, 0.00000], [ 3, 0.00000, 0.00000]]])
try:
motion = ALProxy("ALMotion")
moveId = motion.post.angleInterpolationBezier(names, times, keys);
except BaseException, err:
pass
| gpl-2.0 | 7,349,790,136,719,622,000 | 30.018182 | 76 | 0.622509 | false |
krivenko/som | python/som.py | 1 | 1706 | ##############################################################################
#
# SOM: Stochastic Optimization Method for Analytic Continuation
#
# Copyright (C) 2016-2020 Igor Krivenko <[email protected]>
#
# SOM is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# SOM is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# SOM. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
Main module of SOM
"""
from core import SomCore
import numpy as np
class Som(SomCore):
"""Stochastic Optimization Method"""
def __init__(self, g, s = None, kind = "FermionGf", norms = np.array([])):
if s is None:
s = g.copy()
s.data[:,Ellipsis] = np.eye(s.target_shape[0])
if isinstance(norms,float) or isinstance(norms,int):
norms = norms * np.ones((g.target_shape[0],))
SomCore.__init__(self, g, s, kind, norms)
def count_good_solutions(hist, upper_lim = 1):
"""
Given a histogram of objective function values,
count the number of solutions with D/D_{min} <= 1 + upper_lim
"""
d_max = hist.limits[0] * (1 + upper_lim)
return int(sum(c for n, c in enumerate(hist.data) if hist.mesh_point(n) <= d_max))
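# NOTE(editor): illustrative usage sketch, not part of the original module;
# g_iw stands for a TRIQS Green's function and is an assumption here.
def _example_som_usage(g_iw):
    cont = Som(g_iw, kind="FermionGf", norms=1.0)
    # the returned object exposes the SomCore interface for the actual
    # continuation run
    return cont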
| gpl-3.0 | -1,575,309,946,060,850,000 | 36.911111 | 86 | 0.611958 | false |
atumanov/ray | python/ray/exceptions.py | 1 | 3236 | import os
import colorama
try:
import setproctitle
except ImportError:
setproctitle = None
class RayError(Exception):
"""Super class of all ray exception types."""
pass
class RayTaskError(RayError):
"""Indicates that a task threw an exception during execution.
If a task throws an exception during execution, a RayTaskError is stored in
the object store for each of the task's outputs. When an object is
retrieved from the object store, the Python method that retrieved it checks
to see if the object is a RayTaskError and if it is then an exception is
thrown propagating the error message.
Attributes:
function_name (str): The name of the function that failed and produced
the RayTaskError.
traceback_str (str): The traceback from the exception.
"""
def __init__(self, function_name, traceback_str):
"""Initialize a RayTaskError."""
if setproctitle:
self.proctitle = setproctitle.getproctitle()
else:
self.proctitle = "ray_worker"
self.pid = os.getpid()
self.host = os.uname()[1]
self.function_name = function_name
self.traceback_str = traceback_str
assert traceback_str is not None
def __str__(self):
"""Format a RayTaskError as a string."""
lines = self.traceback_str.split("\n")
out = []
in_worker = False
for line in lines:
if line.startswith("Traceback "):
out.append("{}{}{} (pid={}, host={})".format(
colorama.Fore.CYAN, self.proctitle, colorama.Fore.RESET,
self.pid, self.host))
elif in_worker:
in_worker = False
elif "ray/worker.py" in line or "ray/function_manager.py" in line:
in_worker = True
else:
out.append(line)
return "\n".join(out)
class RayWorkerError(RayError):
"""Indicates that the worker died unexpectedly while executing a task."""
def __str__(self):
return "The worker died unexpectedly while executing this task."
class RayActorError(RayError):
"""Indicates that the actor died unexpectedly before finishing a task.
This exception could happen either because the actor process dies while
executing a task, or because a task is submitted to a dead actor.
"""
def __str__(self):
return "The actor died unexpectedly before finishing this task."
class UnreconstructableError(RayError):
"""Indicates that an object is lost and cannot be reconstructed.
    Note: this exception only happens for actor objects. If the actor's
    current state is past the object's creating task, the actor cannot
    re-run the task to reconstruct the object.
Attributes:
object_id: ID of the object.
"""
def __init__(self, object_id):
self.object_id = object_id
def __str__(self):
return ("Object {} is lost (either evicted or explicitly deleted) and "
+ "cannot be reconstructed.").format(self.object_id.hex())
RAY_EXCEPTION_TYPES = [
RayError,
RayTaskError,
RayWorkerError,
RayActorError,
UnreconstructableError,
]
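# NOTE(editor): illustrative sketch, not part of the original module. These
# exceptions typically surface on the driver when results are fetched,
# e.g. (assuming the usual ray.get() call pattern):
#
#     try:
#         result = ray.get(object_id)
#     except RayTaskError as exc:
#         log.warning("remote task failed:\n%s", exc)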
| apache-2.0 | -4,839,456,202,281,432,000 | 29.819048 | 79 | 0.635352 | false |
mysociety/barnetplanning | alerts/forms.py | 1 | 2895 | import urllib2
import simplejson
from django import forms
from django.contrib.localflavor.uk.forms import UKPostcodeField
from models import Alert
# Due to a bug in UKPostcodeField, can't override error message. This is
# fixed in: http://code.djangoproject.com/ticket/12017
# So remove this extra class when we have a recent enough Django.
class MyUKPostcodeField(UKPostcodeField):
default_error_messages = {
'invalid': 'We need your complete UK postcode.'
}
widget = forms.TextInput(attrs={'size':'8'})
class AlertForm(forms.ModelForm):
email = forms.EmailField(label='Your email address', error_messages={'required': 'Please enter your email address.'})
postcode = MyUKPostcodeField(required=False)
ward_mapit_id = forms.TypedChoiceField(required=False, coerce=int, initial=None)
def __init__(self, *args, **kwargs):
super(AlertForm, self).__init__(*args, **kwargs)
self.fields['radius'].label = 'If you chose a postcode, how far around your postcode would you like to receive alerts for?'
        # Because radius is not compulsory on the model, choices puts in a blank row for leaving
        # it out. We don't want that, hence the [1:]
self.fields['radius'].widget = forms.RadioSelect(choices=self.fields['radius'].choices[1:])
# Make a dictionary of ward name to id
mapit_response = urllib2.urlopen("http://mapit.mysociety.org/area/2489/children.json")
mapit_data = simplejson.load(mapit_response)
ward_choices = [(int(value), mapit_data[value]['name']) for value in mapit_data]
ward_choices.sort(key=lambda x: x[1])
# FIXME - at some point in the future, should work out why None doesn't work here,
# and get rid of the clean_ward_mapit_id method.
ward_choices.insert(0, (-1, 'Select'))
self.fields['ward_mapit_id'].choices = ward_choices
self.fields['ward_mapit_id'].label = 'Ward'
def clean_ward_mapit_id(self):
"""We can't use None directly in the form, as it gets stringified into 'None'.
Instead, we use -1 as the signifier of nothing chosen, and turn it into None here."""
ward_id = self.cleaned_data['ward_mapit_id']
if ward_id == -1:
return None
else:
return ward_id
def clean(self):
cleaned_data = super(AlertForm, self).clean()
postcode = cleaned_data.get('postcode')
ward_mapit_id = cleaned_data.get('ward_mapit_id')
if postcode and ward_mapit_id:
raise forms.ValidationError('You cannot enter both a postcode and a ward.')
if not postcode and not ward_mapit_id:
raise forms.ValidationError('Please enter a postcode or a ward.')
return cleaned_data
class Meta:
model = Alert
fields = ('postcode', 'ward_mapit_id', 'email', 'radius')
| agpl-3.0 | -3,994,539,111,728,237,600 | 40.357143 | 131 | 0.656995 | false |
SHA2017-badge/micropython-esp32 | tests/basics/struct2.py | 1 | 1284 | # test ustruct with a count specified before the type
try:
import ustruct as struct
except:
try:
import struct
except ImportError:
print("SKIP")
raise SystemExit
print(struct.calcsize('0s'))
print(struct.unpack('0s', b''))
print(struct.pack('0s', b'123'))
print(struct.calcsize('2s'))
print(struct.unpack('2s', b'12'))
print(struct.pack('2s', b'123'))
print(struct.calcsize('2H'))
print(struct.unpack('<2H', b'1234'))
print(struct.pack('<2H', 258, 515))
print(struct.calcsize('0s1s0H2H'))
print(struct.unpack('<0s1s0H2H', b'01234'))
print(struct.pack('<0s1s0H2H', b'abc', b'abc', 258, 515))
# check that unknown types raise an exception
try:
struct.unpack('z', b'1')
except:
print('Exception')
try:
struct.pack('z', (b'1',))
except:
print('Exception')
try:
struct.calcsize('0z')
except:
print('Exception')
# check that a count without a type specifier raises an exception
try:
struct.calcsize('1')
except:
print('Exception')
try:
struct.pack('1')
except:
print('Exception')
try:
struct.pack_into('1', bytearray(4), 0, 'xx')
except:
print('Exception')
try:
struct.unpack('1', 'xx')
except:
print('Exception')
try:
struct.unpack_from('1', 'xx')
except:
print('Exception')
| mit | -4,925,401,632,700,900,000 | 17.608696 | 65 | 0.641745 | false |
jiaphuan/models | research/slim/nets/mobilenet/mobilenet.py | 1 | 16178 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Mobilenet Base Class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import copy
import os
import contextlib2
import tensorflow as tf
slim = tf.contrib.slim
@slim.add_arg_scope
def apply_activation(x, name=None, activation_fn=None):
return activation_fn(x, name=name) if activation_fn else x
def _fixed_padding(inputs, kernel_size, rate=1):
"""Pads the input along the spatial dimensions independently of input size.
Pads the input such that if it was used in a convolution with 'VALID' padding,
the output would have the same dimensions as if the unpadded input was used
in a convolution with 'SAME' padding.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
rate: An integer, rate for atrous convolution.
Returns:
output: A tensor of size [batch, height_out, width_out, channels] with the
input, either intact (if kernel_size == 1) or padded (if kernel_size > 1).
"""
kernel_size_effective = [kernel_size[0] + (kernel_size[0] - 1) * (rate - 1),
kernel_size[0] + (kernel_size[0] - 1) * (rate - 1)]
pad_total = [kernel_size_effective[0] - 1, kernel_size_effective[1] - 1]
pad_beg = [pad_total[0] // 2, pad_total[1] // 2]
pad_end = [pad_total[0] - pad_beg[0], pad_total[1] - pad_beg[1]]
padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg[0], pad_end[0]],
[pad_beg[1], pad_end[1]], [0, 0]])
return padded_inputs
def _make_divisible(v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
@contextlib.contextmanager
def _set_arg_scope_defaults(defaults):
"""Sets arg scope defaults for all items present in defaults.
Args:
defaults: dictionary mapping function to default_dict
Yields:
context manager
"""
with contextlib2.ExitStack() as stack:
_ = [
stack.enter_context(slim.arg_scope(func, **default_arg))
for func, default_arg in defaults.items()
]
yield
@slim.add_arg_scope
def depth_multiplier(output_params,
multiplier,
divisible_by=8,
min_depth=8,
**unused_kwargs):
if 'num_outputs' not in output_params:
return
d = output_params['num_outputs']
output_params['num_outputs'] = _make_divisible(d * multiplier, divisible_by,
min_depth)
_Op = collections.namedtuple('Op', ['op', 'params', 'multiplier_func'])
def op(opfunc, **params):
multiplier = params.pop('multiplier_transorm', depth_multiplier)
return _Op(opfunc, params=params, multiplier_func=multiplier)
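# NOTE(editor): illustrative sketch, not part of the original module: a
# minimal conv_defs structure of the shape consumed by mobilenet_base() below.
# The layer choices are made up for illustration; the real definitions live in
# mobilenet_v1/mobilenet_v2.
def _example_conv_defs():  # pragma: no cover
  """Return a minimal, made-up conv_defs dict of the shape used below."""
  return dict(
      defaults={},
      overrides={},
      spec=[
          op(slim.conv2d, stride=2, num_outputs=32, kernel_size=[3, 3]),
          op(slim.conv2d, stride=1, num_outputs=64, kernel_size=[3, 3]),
      ])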
@slim.add_arg_scope
def mobilenet_base( # pylint: disable=invalid-name
inputs,
conv_defs,
multiplier=1.0,
final_endpoint=None,
output_stride=None,
use_explicit_padding=False,
scope=None,
is_training=False):
"""Mobilenet base network.
Constructs a network from inputs to the given final endpoint. By default
  the network is constructed in inference mode. To create the network
in training mode use:
with slim.arg_scope(mobilenet.training_scope()):
logits, endpoints = mobilenet_base(...)
Args:
inputs: a tensor of shape [batch_size, height, width, channels].
conv_defs: A list of op(...) layers specifying the net architecture.
multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
    final_endpoint: The name of the last layer, for early termination.
      For V1-based networks the last layer is "layer_14"; for V2 it is
      "layer_20".
output_stride: An integer that specifies the requested ratio of input to
output spatial resolution. If not None, then we invoke atrous convolution
if necessary to prevent the network from reducing the spatial resolution
of the activation maps. Allowed values are 1 or any even number, excluding
zero. Typical values are 8 (accurate fully convolutional mode), 16
(fast fully convolutional mode), and 32 (classification mode).
      NOTE- output_stride relies on all subsequent operators to support dilated
operators via "rate" parameter. This might require wrapping non-conv
operators to operate properly.
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
scope: optional variable scope.
is_training: How to setup batch_norm and other ops. Note: most of the time
this does not need be set directly. Use mobilenet.training_scope() to set
up training instead. This parameter is here for backward compatibility
only. It is safe to set it to the value matching
training_scope(is_training=...). It is also safe to explicitly set
      it to False, even if there is an outer training_scope set to training.
(The network will be built in inference mode).
Returns:
tensor_out: output tensor.
end_points: a set of activations for external use, for example summaries or
losses.
Raises:
ValueError: depth_multiplier <= 0, or the target output_stride is not
allowed.
"""
if multiplier <= 0:
raise ValueError('multiplier is not greater than zero.')
# Set conv defs defaults and overrides.
conv_defs_defaults = conv_defs.get('defaults', {})
conv_defs_overrides = conv_defs.get('overrides', {})
if use_explicit_padding:
conv_defs_overrides = copy.deepcopy(conv_defs_overrides)
conv_defs_overrides[
(slim.conv2d, slim.separable_conv2d)] = {'padding': 'VALID'}
if output_stride is not None:
if output_stride == 0 or (output_stride > 1 and output_stride % 2):
raise ValueError('Output stride must be None, 1 or a multiple of 2.')
# a) Set the tensorflow scope
# b) set padding to default: note we might consider removing this
# since it is also set by mobilenet_scope
# c) set all defaults
# d) set all extra overrides.
with _scope_all(scope, default_scope='Mobilenet'), \
slim.arg_scope([slim.batch_norm], is_training=is_training), \
_set_arg_scope_defaults(conv_defs_defaults), \
_set_arg_scope_defaults(conv_defs_overrides):
# The current_stride variable keeps track of the output stride of the
# activations, i.e., the running product of convolution strides up to the
# current network layer. This allows us to invoke atrous convolution
# whenever applying the next convolution would result in the activations
# having output stride larger than the target output_stride.
current_stride = 1
# The atrous convolution rate parameter.
rate = 1
net = inputs
# Insert default parameters before the base scope which includes
# any custom overrides set in mobilenet.
end_points = {}
scopes = {}
for i, opdef in enumerate(conv_defs['spec']):
params = dict(opdef.params)
opdef.multiplier_func(params, multiplier)
stride = params.get('stride', 1)
if output_stride is not None and current_stride == output_stride:
# If we have reached the target output_stride, then we need to employ
# atrous convolution with stride=1 and multiply the atrous rate by the
# current unit's stride for use in subsequent layers.
layer_stride = 1
layer_rate = rate
rate *= stride
else:
layer_stride = stride
layer_rate = 1
current_stride *= stride
# Update params.
params['stride'] = layer_stride
# Only insert rate to params if rate > 1.
if layer_rate > 1:
params['rate'] = layer_rate
# Set padding
if use_explicit_padding:
if 'kernel_size' in params:
net = _fixed_padding(net, params['kernel_size'], layer_rate)
else:
params['use_explicit_padding'] = True
end_point = 'layer_%d' % (i + 1)
try:
net = opdef.op(net, **params)
except Exception:
print('Failed to create op %i: %r params: %r' % (i, opdef, params))
raise
end_points[end_point] = net
scope = os.path.dirname(net.name)
scopes[scope] = end_point
if final_endpoint is not None and end_point == final_endpoint:
break
# Add all tensors that end with 'output' to
# endpoints
for t in net.graph.get_operations():
scope = os.path.dirname(t.name)
bn = os.path.basename(t.name)
if scope in scopes and t.name.endswith('output'):
end_points[scopes[scope] + '/' + bn] = t.outputs[0]
return net, end_points
@contextlib.contextmanager
def _scope_all(scope, default_scope=None):
with tf.variable_scope(scope, default_name=default_scope) as s,\
tf.name_scope(s.original_name_scope):
yield s
@slim.add_arg_scope
def mobilenet(inputs,
num_classes=1001,
prediction_fn=slim.softmax,
reuse=None,
scope='Mobilenet',
base_only=False,
**mobilenet_args):
"""Mobilenet model for classification, supports both V1 and V2.
Note: default mode is inference, use mobilenet.training_scope to create
training network.
Args:
inputs: a tensor of shape [batch_size, height, width, channels].
num_classes: number of predicted classes. If 0 or None, the logits layer
is omitted and the input features to the logits layer (before dropout)
are returned instead.
prediction_fn: a function to get predictions out of logits
(default softmax).
reuse: whether or not the network and its variables should be reused. To be
      able to reuse, 'scope' must be given.
scope: Optional variable_scope.
base_only: if True will only create the base of the network (no pooling
and no logits).
**mobilenet_args: passed to mobilenet_base verbatim.
- conv_defs: list of conv defs
- multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
- output_stride: will ensure that the last layer has at most total stride.
If the architecture calls for more stride than that provided
(e.g. output_stride=16, but the architecture has 5 stride=2 operators),
it will replace output_stride with fractional convolutions using Atrous
Convolutions.
Returns:
logits: the pre-softmax activations, a tensor of size
[batch_size, num_classes]
end_points: a dictionary from components of the network to the corresponding
activation tensor.
Raises:
ValueError: Input rank is invalid.
"""
is_training = mobilenet_args.get('is_training', False)
input_shape = inputs.get_shape().as_list()
if len(input_shape) != 4:
raise ValueError('Expected rank 4 input, was: %d' % len(input_shape))
with tf.variable_scope(scope, 'Mobilenet', reuse=reuse) as scope:
inputs = tf.identity(inputs, 'input')
net, end_points = mobilenet_base(inputs, scope=scope, **mobilenet_args)
if base_only:
return net, end_points
net = tf.identity(net, name='embedding')
with tf.variable_scope('Logits'):
net = global_pool(net)
end_points['global_pool'] = net
if not num_classes:
return net, end_points
net = slim.dropout(net, scope='Dropout', is_training=is_training)
# 1 x 1 x num_classes
# Note: legacy scope name.
logits = slim.conv2d(
net,
num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
biases_initializer=tf.zeros_initializer(),
scope='Conv2d_1c_1x1')
logits = tf.squeeze(logits, [1, 2])
logits = tf.identity(logits, name='output')
end_points['Logits'] = logits
if prediction_fn:
end_points['Predictions'] = prediction_fn(logits, 'Predictions')
return logits, end_points
def global_pool(input_tensor, pool_op=tf.nn.avg_pool):
"""Applies avg pool to produce 1x1 output.
  NOTE: This function is functionally equivalent to reduce_mean, but it has
  a baked-in average pool, which has better support across hardware.
Args:
input_tensor: input tensor
pool_op: pooling op (avg pool is default)
Returns:
a tensor batch_size x 1 x 1 x depth.
"""
shape = input_tensor.get_shape().as_list()
if shape[1] is None or shape[2] is None:
kernel_size = tf.convert_to_tensor(
[1, tf.shape(input_tensor)[1],
tf.shape(input_tensor)[2], 1])
else:
kernel_size = [1, shape[1], shape[2], 1]
output = pool_op(
input_tensor, ksize=kernel_size, strides=[1, 1, 1, 1], padding='VALID')
# Recover output shape, for unknown shape.
output.set_shape([None, 1, 1, None])
return output
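# Minimal sketch of global_pool usage (assumes `features` is a 4-D feature map,
# e.g. the output of mobilenet_base):
#   pooled = global_pool(features)   # -> [batch_size, 1, 1, depth]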
def training_scope(is_training=True,
weight_decay=0.00004,
stddev=0.09,
dropout_keep_prob=0.8,
bn_decay=0.997):
"""Defines Mobilenet training scope.
Usage:
with tf.contrib.slim.arg_scope(mobilenet.training_scope()):
logits, endpoints = mobilenet_v2.mobilenet(input_tensor)
     # the network created will be trainable with dropout/batch norm
# initialized appropriately.
Args:
is_training: if set to False this will ensure that all customizations are
set to non-training mode. This might be helpful for code that is reused
across both training/evaluation, but most of the time training_scope with
value False is not needed.
weight_decay: The weight decay to use for regularizing the model.
stddev: Standard deviation for initialization, if negative uses xavier.
dropout_keep_prob: dropout keep probability
bn_decay: decay for the batch norm moving averages.
Returns:
An argument scope to use via arg_scope.
"""
# Note: do not introduce parameters that would change the inference
# model here (for example whether to use bias), modify conv_def instead.
batch_norm_params = {
'is_training': is_training,
'decay': bn_decay,
}
if stddev < 0:
weight_intitializer = slim.initializers.xavier_initializer()
else:
weight_intitializer = tf.truncated_normal_initializer(stddev=stddev)
# Set weight_decay for weights in Conv and FC layers.
with slim.arg_scope(
[slim.conv2d, slim.fully_connected, slim.separable_conv2d],
weights_initializer=weight_intitializer,
normalizer_fn=slim.batch_norm), \
slim.arg_scope([mobilenet_base, mobilenet], is_training=is_training),\
slim.arg_scope([slim.batch_norm], **batch_norm_params), \
slim.arg_scope([slim.dropout], is_training=is_training,
keep_prob=dropout_keep_prob), \
slim.arg_scope([slim.conv2d], \
weights_regularizer=slim.l2_regularizer(weight_decay)), \
slim.arg_scope([slim.separable_conv2d], weights_regularizer=None) as s:
return s
| apache-2.0 | 7,967,390,960,214,579,000 | 36.710956 | 80 | 0.665657 | false |
Autodesk/molecular-design-toolkit | moldesign/interfaces/nbo_interface.py | 1 | 9611 | from __future__ import print_function, absolute_import, division
from future.builtins import *
from future import standard_library
standard_library.install_aliases()
# Copyright 2017 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import moldesign as mdt
from .. import units as u
from .. import utils
SIGMA_UTF = u"\u03C3"
PI_UTF = u"\u03C0"
def run_nbo(mol, requests=('nlmo', 'nbo'),
image='nbo',
engine=None):
wfn = mol.wfn
inputs = {'in.47': make_nbo_input_file(mol, requests)}
command = 'gennbo.i4.exe in.47'
engine = utils.if_not_none(engine, mdt.compute.config.get_engine())
imagename = mdt.compute.get_image_path(image)
job = engine.launch(imagename,
command,
inputs=inputs,
name="nbo, %s" % mol.name)
mdt.helpers.display_log(job.get_display_object(), "nbo, %s"%mol.name)
job.wait()
parsed_data = parse_nbo(job.get_output('FILE.10'),
len(mol.wfn.aobasis))
for orbtype, data in parsed_data.items():
if orbtype[0] == 'P': # copy data from the orthogonal orbitals
orthdata = parsed_data[orbtype[1:]]
for key in 'bond_names iatom jatom stars bondnums num_bonded_atoms'.split():
data[key] = orthdata[key]
data.occupations = [None for orb in data.coeffs]
add_orbitals(mol, wfn, data, orbtype)
wfn._nbo_job = job
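# Example usage (sketch; assumes `mol` is a moldesign Molecule whose
# wavefunction has already been computed by an electronic-structure job):
#   run_nbo(mol, requests=('nlmo', 'nbo'))
#   # the parsed NBO/NLMO orbitals are attached to mol.wfn via add_orbitals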
def add_orbitals(mol, wfn, orbdata, orbtype):
orbs = []
for i in range(len(orbdata.coeffs)):
bond = None
atoms = [mol.atoms[orbdata.iatom[i] - 1]]
if orbdata.bond_names[i] == 'RY':
bname = '%s Ryd*' % atoms[0].name
nbotype = 'rydberg'
utf_name = bname
elif orbdata.bond_names[i] == 'LP':
bname = '%s lone pair' % atoms[0].name
nbotype = 'lone pair'
utf_name = bname
elif orbdata.bond_names[i] == 'LV':
bname = '%s lone vacancy' % atoms[0].name
nbotype = 'lone vacancy'
utf_name = bname
elif orbdata.num_bonded_atoms[i] == 1:
bname = '%s Core' % atoms[0].name
nbotype = 'core'
utf_name = bname
else:
atoms.append(mol.atoms[orbdata.jatom[i] - 1])
bond = mdt.Bond(*atoms)
if orbdata.bondnums[i] == 1: # THIS IS NOT CORRECT
nbotype = 'sigma'
utf_type = SIGMA_UTF
else:
nbotype = 'pi'
utf_type = PI_UTF
bname = '%s%s (%s - %s)' % (nbotype, orbdata.stars[i],
atoms[0].name, atoms[1].name)
utf_name = '%s%s (%s - %s)' % (utf_type, orbdata.stars[i],
atoms[0].name, atoms[1].name)
name = '%s %s' % (bname, orbtype)
orbs.append(mdt.Orbital(orbdata.coeffs[i],
wfn=wfn, occupation=orbdata.occupations[i],
atoms=atoms, name=name,
nbotype=nbotype,
bond=bond,
unicode_name=utf_name,
_data=orbdata))
return wfn.add_orbitals(orbs, orbtype=orbtype)
def make_nbo_input_file(mol, requests):
"""
:param mol:
:type mol: moldesign.molecules.Molecule
:return:
"""
# Units: angstroms, hartrees
wfn = mol.wfn
orbs = wfn.molecular_orbitals
nbofile = []
# TODO: check for open shell wfn (OPEN keyword)
# TODO: check normalization, orthogonalization
nbofile.append(" $GENNBO BODM NATOMS=%d NBAS=%d $END" %
(mol.num_atoms, len(wfn.aobasis)))
commands = ['NBOSUM']
for r in requests:
commands.append('AO%s=W10' % r.upper())
if r[0] != 'P': commands.append('%s' % r.upper())
nbofile.append('$NBO %s $END' % (' '.join(commands)))
nbofile.append("$COORD\n %s" % mol.name)
for iatom, atom in enumerate(mol.atoms):
#TODO: deal with pseudopotential electrons
x, y, z = list(map(repr, atom.position.value_in(u.angstrom)))
nbofile.append("%d %d %s %s %s" % (atom.atnum, atom.atnum,
x, y, z))
nbofile.append("$END")
nbofile.append("$BASIS")
nbofile.append(' CENTER = ' +
' '.join(str(1+bfn.atom.index) for bfn in wfn.aobasis))
nbofile.append(" LABEL = " +
' '.join(str(AOLABELS[bfn.orbtype]) for bfn in wfn.aobasis))
nbofile.append('$END')
#TODO: deal with CI wavefunctions ($WF keyword)
nbofile.append('$OVERLAP')
append_matrix(nbofile, wfn.aobasis.overlaps)
nbofile.append('$END')
nbofile.append('$DENSITY')
append_matrix(nbofile, wfn.density_matrix)
nbofile.append('$END')
return '\n '.join(nbofile)
def parse_nbo(f, nbasis):
lines = f.__iter__()
parsed = {}
while True:
try:
l = next(lines)
except StopIteration:
break
fields = l.split()
if fields[1:5] == 'in the AO basis:'.split():
orbname = fields[0]
assert orbname[-1] == 's'
orbname = orbname[:-1]
next(lines)
if orbname[0] == 'P': # these are pre-orthogonal orbitals, it only prints the coefficients
coeffs = _parse_wrapped_matrix(lines, nbasis)
parsed[orbname] = utils.DotDict(coeffs=np.array(coeffs))
else: # there's more complete information available
parsed[orbname] = read_orbital_set(lines, nbasis)
return parsed
def read_orbital_set(lineiter, nbasis):
# First, get the actual matrix
mat = _parse_wrapped_matrix(lineiter, nbasis)
    # Next, occupation numbers
occupations = list(map(float,_get_wrapped_separated_vals(lineiter, nbasis)))
# Next, a line of things that always appear to be ones (for spin orbitals maybe?)
oneline = _get_wrapped_separated_vals(lineiter, nbasis)
for x in oneline: assert x == '1'
# next is number of atoms involved in the bond
num_bonded_atoms = list(map(int, _get_wrapped_separated_vals(lineiter, nbasis)))
bond_names = _get_wrapped_separated_vals(lineiter, nbasis)
# Next indicates whether real or virtual
stars = _get_wrapped_column_vals(lineiter, nbasis)
for s in stars: assert (s == '' or s == '*')
# number of bonds between this pair of atoms
bondnums = list(map(int, _get_wrapped_separated_vals(lineiter, nbasis)))
# first atom index (1-based)
iatom = list(map(int, _get_wrapped_separated_vals(lineiter, nbasis)))
jatom = list(map(int, _get_wrapped_separated_vals(lineiter, nbasis)))
# The rest appears to be 0 most of the time ...
return utils.DotDict(coeffs=np.array(mat),
iatom=iatom, jatom=jatom, bondnums=bondnums,
bond_names=bond_names,
num_bonded_atoms=num_bonded_atoms,
stars=stars, occupations=occupations)
def _parse_wrapped_matrix(lineiter, nbasis):
mat = []
for i in range(nbasis):
currline = list(map(float, _get_wrapped_separated_vals(lineiter, nbasis)))
assert len(currline) == nbasis
mat.append(currline)
return mat
def _get_wrapped_separated_vals(lineiter, nbasis):
vals = []
while True:
l = next(lineiter)
vals.extend(l.split())
if len(vals) == nbasis:
break
assert len(vals) < nbasis
return vals
def _get_wrapped_column_vals(lineiter, nbasis):
vals = []
while True:
        l = next(lineiter)[1:]
lenl = len(l)
for i in range(20):
if lenl <= 3*i + 1: break
vals.append(l[3*i: 3*i + 3].strip())
if len(vals) == nbasis:
break
assert len(vals) < nbasis
return vals
def append_matrix(l, mat):
for row in mat:
icol = 0
while icol < len(row):
l.append(' ' + ' '.join(map(repr, row[icol:icol + 6])))
icol += 6
AOLABELS = {'s': 1, 'px': 101, 'py': 102, 'pz': 103,
"dxx": 201, "dxy": 202, "dxz": 203, "dyy": 204, "dyz": 205, "dzz": 206,
"fxxx": 301, "fxxy": 302, "fxxz": 303, "fxyy": 304, "fxyz": 305,
"fxzz": 306, "fyyy": 307, "fyyz": 308, "fyzz": 309, "fzzz": 310,
"gxxxx": 401, "gxxxy": 402, "gxxxz": 403, "gxxyy": 404, "gxxyz": 405,
"gxxzz": 406, "gxyyy": 407, "gxyyz": 408, "gxyzz": 409, "gxzzz": 410,
"gyyyy": 411, "gyyyz": 412, "gyyzz": 413, "gyzzz": 414, "gzzzz": 415, # end of cartesian
# start of spherical:
'p(x)': 151, 'p(y)': 152, 'p(z)': 153,
"d(xy)": 251, "d(xz)": 252, "d(yz)": 253, "d(x2-y2)": 254, "d(z2)": 255,
"f(z(5z2-3r2))": 351, "f(x(5z2-r2))": 352, "f(y(5z2-r2))": 353, "f(z(x2-y2))": 354, "f(xyz)": 355,
"f(x(x2-3y2))": 356, "f(y(3x2-y2))": 357}
| apache-2.0 | 3,648,654,696,899,533,000 | 35.405303 | 110 | 0.553949 | false |
wufangjie/leetcode | 187. Repeated DNA Sequences.py | 1 | 1241 | class Solution(object):
def findRepeatedDnaSequences(self, s):
"""
:type s: str
:rtype: List[str]
"""
more_than_once, once = set(), set()
for i in range(len(s) - 9):
t = s[i:i+10]
if t not in more_than_once:
if t in once:
more_than_once.add(t)
else:
once.add(t)
return list(more_than_once)
# if overlap is not permit
# more_than_once, once = {}, {}
# for i in range(len(s) - 9):
# t = s[i:i+10]
# if t not in more_than_once:
# if t in once:
# more_than_once[t] = [once[t], i]
# else:
# once[t] = i
# else:
# more_than_once[t][1] = i
# return list(k for k, v in more_than_once.items() if v[1] - v[0] >= 10)
if __name__ == '__main__':
assert sorted(Solution().findRepeatedDnaSequences("AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT")) == ['AAAAACCCCC', 'CCCCCAAAAA']
assert Solution().findRepeatedDnaSequences('AAAAAAAAAAA') == ['AAAAAAAAAA']
# assert Solution().findRepeatedDnaSequences('AAAAAAAAAAAAAAAAAAAA') == ['AAAAAAAAAA']
| gpl-3.0 | -132,350,777,096,766,640 | 36.606061 | 122 | 0.48751 | false |
ging/keystone | keystone/contrib/keystone_scim/routers.py | 1 | 11710 | #
# Copyright 2014 Telefonica Investigacion y Desarrollo, S.A.U
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""WSGI Routers for the SCIM API."""
from keystone.common import wsgi
import controllers
class ScimRouter(wsgi.ExtensionRouter):
PATH_PREFIX = '/OS-SCIM'
def add_routes(self, mapper):
user_controller = controllers.ScimUserV3Controller()
role_controller = controllers.ScimRoleV3Controller()
group_controller = controllers.ScimGroupV3Controller()
scim_info_controller = controllers.ScimInfoController()
org_controller = controllers.ScimOrganizationV3Controller()
# Users v1.1
mapper.connect(self.PATH_PREFIX + '/v1/Users',
controller=user_controller,
action='list_users',
conditions=dict(method=['GET']))
mapper.connect(self.PATH_PREFIX + '/v1/Users',
controller=user_controller,
action='create_user',
conditions=dict(method=['POST']))
mapper.connect(self.PATH_PREFIX + '/v1/Users/{user_id}',
controller=user_controller,
action='get_user',
conditions=dict(method=['GET']))
mapper.connect(self.PATH_PREFIX + '/v1/Users/{user_id}',
controller=user_controller,
action='patch_user',
conditions=dict(method=['PATCH']))
mapper.connect(self.PATH_PREFIX + '/v1/Users/{user_id}',
controller=user_controller,
action='put_user',
conditions=dict(method=['PUT']))
mapper.connect(self.PATH_PREFIX + '/v1/Users/{user_id}',
controller=user_controller,
action='delete_user',
conditions=dict(method=['DELETE']))
# Users /v2
mapper.connect(self.PATH_PREFIX + '/v2/Users',
controller=user_controller,
action='list_users',
conditions=dict(method=['GET']))
mapper.connect(self.PATH_PREFIX + '/v2/Users',
controller=user_controller,
action='create_user',
conditions=dict(method=['POST']))
mapper.connect(self.PATH_PREFIX + '/v2/Users/{user_id}',
controller=user_controller,
action='get_user',
conditions=dict(method=['GET']))
mapper.connect(self.PATH_PREFIX + '/v2/Users/{user_id}',
controller=user_controller,
action='patch_user',
conditions=dict(method=['PATCH']))
mapper.connect(self.PATH_PREFIX + '/v2/Users/{user_id}',
controller=user_controller,
action='put_user',
conditions=dict(method=['PUT']))
mapper.connect(self.PATH_PREFIX + '/v2/Users/{user_id}',
controller=user_controller,
action='delete_user',
conditions=dict(method=['DELETE']))
# Roles v1.1
mapper.connect(self.PATH_PREFIX + '/v1/Roles',
controller=role_controller,
action='scim_list_roles',
conditions=dict(method=['GET']))
mapper.connect(self.PATH_PREFIX + '/v1/Roles',
controller=role_controller,
action='scim_create_role',
conditions=dict(method=['POST']))
mapper.connect(self.PATH_PREFIX + '/v1/Roles/{role_id}',
controller=role_controller,
action='scim_get_role',
conditions=dict(method=['GET']))
mapper.connect(self.PATH_PREFIX + '/v1/Roles/{role_id}',
controller=role_controller,
action='scim_patch_role',
conditions=dict(method=['PATCH']))
mapper.connect(self.PATH_PREFIX + '/v1/Roles/{role_id}',
controller=role_controller,
action='scim_put_role',
conditions=dict(method=['PUT']))
mapper.connect(self.PATH_PREFIX + '/v1/Roles/{role_id}',
controller=role_controller,
action='scim_delete_role',
conditions=dict(method=['DELETE']))
# Roles /v2
mapper.connect(self.PATH_PREFIX + '/v2/Roles',
controller=role_controller,
action='scim_list_roles',
conditions=dict(method=['GET']))
mapper.connect(self.PATH_PREFIX + '/v2/Roles',
controller=role_controller,
action='scim_create_role',
conditions=dict(method=['POST']))
mapper.connect(self.PATH_PREFIX + '/v2/Roles/{role_id}',
controller=role_controller,
action='scim_get_role',
conditions=dict(method=['GET']))
mapper.connect(self.PATH_PREFIX + '/v2/Roles/{role_id}',
controller=role_controller,
action='scim_patch_role',
conditions=dict(method=['PATCH']))
mapper.connect(self.PATH_PREFIX + '/v2/Roles/{role_id}',
controller=role_controller,
action='scim_put_role',
conditions=dict(method=['PUT']))
mapper.connect(self.PATH_PREFIX + '/v2/Roles/{role_id}',
controller=role_controller,
action='scim_delete_role',
conditions=dict(method=['DELETE']))
# Groups v1.1
mapper.connect(self.PATH_PREFIX + '/v1/Groups',
controller=group_controller,
action='list_groups',
conditions=dict(method=['GET']))
mapper.connect(self.PATH_PREFIX + '/v1/Groups',
controller=group_controller,
action='create_group',
conditions=dict(method=['POST']))
mapper.connect(self.PATH_PREFIX + '/v1/Groups/{group_id}',
controller=group_controller,
action='get_group',
conditions=dict(method=['GET']))
mapper.connect(self.PATH_PREFIX + '/v1/Groups/{group_id}',
controller=group_controller,
action='patch_group',
conditions=dict(method=['PATCH']))
mapper.connect(self.PATH_PREFIX + '/v1/Groups/{group_id}',
controller=group_controller,
action='put_group',
conditions=dict(method=['PUT']))
mapper.connect(self.PATH_PREFIX + '/v1/Groups/{group_id}',
controller=group_controller,
action='delete_group',
conditions=dict(method=['DELETE']))
# Groups
mapper.connect(self.PATH_PREFIX + '/v2/Groups',
controller=group_controller,
action='list_groups',
conditions=dict(method=['GET']))
mapper.connect(self.PATH_PREFIX + '/v2/Groups',
controller=group_controller,
action='create_group',
conditions=dict(method=['POST']))
mapper.connect(self.PATH_PREFIX + '/v2/Groups/{group_id}',
controller=group_controller,
action='get_group',
conditions=dict(method=['GET']))
mapper.connect(self.PATH_PREFIX + '/v2/Groups/{group_id}',
controller=group_controller,
action='patch_group',
conditions=dict(method=['PATCH']))
mapper.connect(self.PATH_PREFIX + '/v2/Groups/{group_id}',
controller=group_controller,
action='put_group',
conditions=dict(method=['PUT']))
mapper.connect(self.PATH_PREFIX + '/v2/Groups/{group_id}',
controller=group_controller,
action='delete_group',
conditions=dict(method=['DELETE']))
# SCIM Info
mapper.connect(self.PATH_PREFIX + '/v1/ServiceProviderConfigs',
controller=scim_info_controller,
action='scim_get_service_provider_configs',
conditions=dict(method=['GET']))
mapper.connect(self.PATH_PREFIX + '/v1/Schemas',
controller=scim_info_controller,
action='scim_get_schemas',
conditions=dict(method=['GET']))
mapper.connect(self.PATH_PREFIX + '/v2/ServiceProviderConfigs',
controller=scim_info_controller,
action='scim_get_service_provider_configs',
conditions=dict(method=['GET']))
mapper.connect(self.PATH_PREFIX + '/v2/Schemas',
controller=scim_info_controller,
action='scim_get_schemas',
conditions=dict(method=['GET']))
# Organizations
mapper.connect(self.PATH_PREFIX + '/v2/Organizations',
controller=org_controller,
action='list_organizations',
conditions=dict(method=['GET']))
mapper.connect(self.PATH_PREFIX + '/v2/Organizations',
controller=org_controller,
action='create_organization',
conditions=dict(method=['POST']))
mapper.connect(self.PATH_PREFIX + '/v2/Organizations/{organization_id}',
controller=org_controller,
action='get_organization',
conditions=dict(method=['GET']))
mapper.connect(self.PATH_PREFIX + '/v2/Organizations/{organization_id}',
controller=org_controller,
action='patch_organization',
conditions=dict(method=['PATCH']))
mapper.connect(self.PATH_PREFIX + '/v2/Organizations/{organization_id}',
controller=org_controller,
action='put_organization',
conditions=dict(method=['PUT']))
mapper.connect(self.PATH_PREFIX + '/v2/Organizations/{organization_id}',
controller=org_controller,
action='delete_organization',
conditions=dict(method=['DELETE']))
| apache-2.0 | 2,083,376,882,307,337,700 | 40.378092 | 80 | 0.514944 | false |
openid/python-openid | openid/yadis/manager.py | 1 | 6142 | from __future__ import unicode_literals
class YadisServiceManager(object):
"""Holds the state of a list of selected Yadis services, managing
storing it in a session and iterating over the services in order."""
def __init__(self, starting_url, yadis_url, services, session_key):
# The URL that was used to initiate the Yadis protocol
self.starting_url = starting_url
# The URL after following redirects (the identifier)
self.yadis_url = yadis_url
# List of service elements
self.services = list(services)
self.session_key = session_key
# Reference to the current service object
self._current = None
def __len__(self):
"""How many untried services remain?"""
return len(self.services)
def __iter__(self):
return self
def next(self):
"""Return the next service
self.current() will continue to return that service until the
next call to this method."""
try:
self._current = self.services.pop(0)
except IndexError:
raise StopIteration
else:
return self._current
def current(self):
"""Return the current service.
Returns None if there are no services left.
"""
return self._current
def forURL(self, url):
return url in [self.starting_url, self.yadis_url]
def started(self):
"""Has the first service been returned?"""
return self._current is not None
def store(self, session):
"""Store this object in the session, by its session key."""
session[self.session_key] = self
class Discovery(object):
"""State management for discovery.
High-level usage pattern is to call .getNextService(discover) in
order to find the next available service for this user for this
    session. Once a request completes, call .cleanup() to clean up the
session state.
@ivar session: a dict-like object that stores state unique to the
requesting user-agent. This object must be able to store
serializable objects.
@ivar url: the URL that is used to make the discovery request
@ivar session_key_suffix: The suffix that will be used to identify
this object in the session object.
"""
DEFAULT_SUFFIX = 'auth'
PREFIX = '_yadis_services_'
def __init__(self, session, url, session_key_suffix=None):
"""Initialize a discovery object"""
self.session = session
self.url = url
if session_key_suffix is None:
session_key_suffix = self.DEFAULT_SUFFIX
self.session_key_suffix = session_key_suffix
def getNextService(self, discover):
"""Return the next authentication service for the pair of
user_input and session. This function handles fallback.
@param discover: a callable that takes a URL and returns a
list of services
@type discover: six.text_type -> [service]
@return: the next available service
"""
manager = self.getManager()
if manager is not None and not manager:
self.destroyManager()
if not manager:
yadis_url, services = discover(self.url)
manager = self.createManager(services, yadis_url)
if manager:
service = manager.next()
manager.store(self.session)
else:
service = None
return service
def cleanup(self, force=False):
"""Clean up Yadis-related services in the session and return
the most-recently-attempted service from the manager, if one
exists.
@param force: True if the manager should be deleted regardless
of whether it's a manager for self.url.
@return: current service endpoint object or None if there is
no current service
"""
manager = self.getManager(force=force)
if manager is not None:
service = manager.current()
self.destroyManager(force=force)
else:
service = None
return service
# Lower-level methods
def getSessionKey(self):
"""Get the session key for this starting URL and suffix
@return: The session key
@rtype: six.text_type
"""
return self.PREFIX + self.session_key_suffix
def getManager(self, force=False):
"""Extract the YadisServiceManager for this object's URL and
suffix from the session.
@param force: True if the manager should be returned
regardless of whether it's a manager for self.url.
@return: The current YadisServiceManager, if it's for this
URL, or else None
"""
manager = self.session.get(self.getSessionKey())
if (manager is not None and (manager.forURL(self.url) or force)):
return manager
else:
return None
def createManager(self, services, yadis_url=None):
"""Create a new YadisService Manager for this starting URL and
suffix, and store it in the session.
@raises KeyError: When I already have a manager.
@return: A new YadisServiceManager or None
"""
key = self.getSessionKey()
if self.getManager():
raise KeyError('There is already a %r manager for %r' %
(key, self.url))
if not services:
return None
manager = YadisServiceManager(self.url, yadis_url, services, key)
manager.store(self.session)
return manager
def destroyManager(self, force=False):
"""Delete any YadisServiceManager with this starting URL and
suffix from the session.
If there is no service manager or the service manager is for a
different URL, it silently does nothing.
@param force: True if the manager should be deleted regardless
of whether it's a manager for self.url.
"""
if self.getManager(force=force) is not None:
key = self.getSessionKey()
del self.session[key]
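# Typical flow (sketch; `discover` is any callable mapping a URL to
# (yadis_url, services), and `session`/`claimed_url` are assumed to exist):
#   disco = Discovery(session, claimed_url)
#   service = disco.getNextService(discover)   # next service to try, or None
#   ... attempt the request against `service` ...
#   disco.cleanup()                             # drop the stored manager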
| apache-2.0 | 8,008,034,754,780,183,000 | 30.020202 | 73 | 0.620156 | false |
mojodna/debian-mapnik | plugins/input/osm/build.py | 1 | 1631 | #
# This file is part of Mapnik (c++ mapping toolkit)
#
# Copyright (C) 2007 Artem Pavlenko, Jean-Francois Doyon
#
# Mapnik is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# $Id$
Import ('env')
prefix = env['PREFIX']
plugin_env = env.Clone()
osm_src = Split(
"""
osmparser.cpp
osm.cpp
osm_datasource.cpp
osm_featureset.cpp
dataset_deliverer.cpp
basiccurl.cpp
"""
)
libraries = [ 'xml2' ]
libraries.append('curl')
libraries.append('mapnik')
libraries.append(env['ICU_LIB_NAME'])
input_plugin = plugin_env.SharedLibrary('../osm', source=osm_src, SHLIBPREFIX='', SHLIBSUFFIX='.input', LIBS=libraries, LINKFLAGS=env['CUSTOM_LDFLAGS'])
# if the plugin links to libmapnik ensure it is built first
Depends(input_plugin, env.subst('../../../src/%s' % env['MAPNIK_LIB_NAME']))
if 'uninstall' not in COMMAND_LINE_TARGETS:
env.Install(env['MAPNIK_INPUT_PLUGINS_DEST'], input_plugin)
env.Alias('install', env['MAPNIK_INPUT_PLUGINS_DEST'])
| lgpl-2.1 | 7,205,425,582,168,794,000 | 30.980392 | 152 | 0.72103 | false |
viniciuschiele/central | central/config/dynamodb.py | 1 | 4852 | """
DynamoDB config implementation.
"""
from collections import Mapping
from ..compat import string_types
from ..exceptions import LibraryRequiredError
from ..structures import IgnoreCaseDict
from ..utils import make_ignore_case
from .core import BaseDataConfig
try:
import boto3
from boto3.dynamodb.conditions import Key
except ImportError:
boto3 = None
__all__ = [
'DynamoDBConfig',
]
class DynamoDBConfig(BaseDataConfig):
"""
A DynamoDB configuration based on `BaseDataConfig`.
The library boto3 must be installed.
Example usage:
.. code-block:: python
import boto3
from central.config.dynamodb import DynamoDBConfig
dynamodb = boto3.resource('dynamodb')
config = DynamoDBConfig(dynamodb, 'configurations')
config.load()
value = config.get('key')
:param client: The boto S3 resource.
:param str table_name: The DynamoDB table name.
:param str context_attribute: The attribute containing the context.
this is the primary key attribute when you are using primary key and sort key.
:param str context_value: The value to filter in the context attribute.
If None, no filter is applied.
:param str key_attribute: The attribute containing the keys.
:param str value_attribute: The attribute containing the values.
"""
def __init__(self, client, table_name,
context_attribute='context', context_value=None,
key_attribute='key', value_attribute='value'):
if boto3 is None:
raise LibraryRequiredError('boto3', 'https://pypi.python.org/pypi/boto3')
super(DynamoDBConfig, self).__init__()
if client is None:
raise TypeError('client cannot be None')
if not isinstance(table_name, string_types):
raise TypeError('table_name must be a str')
if not isinstance(context_attribute, string_types):
raise TypeError('context_attribute must be a str')
if context_value is not None and not isinstance(context_value, string_types):
raise TypeError('context_value must be a str')
if not isinstance(key_attribute, string_types):
raise TypeError('key_attribute must be a str')
if not isinstance(value_attribute, string_types):
raise TypeError('value_attribute must be a str')
self._client = client
self._table_name = table_name
self._context_attribute = context_attribute
self._context_value = context_value
self._key_attribute = key_attribute
self._value_attribute = value_attribute
@property
def table_name(self):
"""
Get the DynamoDB table name.
:return str: The table name.
"""
return self._table_name
@property
def context_attribute(self):
"""
Get the context attribute name.
This is the primary key attribute when you are using primary key and sort key.
:return str: The context attribute name.
"""
return self._context_attribute
@property
def context_value(self):
"""
Get the value to filter in the context attribute.
:return str: The value to filter in the context attribute.
"""
return self._context_value
@property
def key_attribute(self):
"""
Get the attribute containing the keys.
:return str: The attribute containing the keys.
"""
return self._key_attribute
@property
def value_attribute(self):
"""
Get the attribute containing the values.
:return str: The attribute containing the values.
"""
return self._value_attribute
def load(self):
"""
Load the configuration stored in the DynamoDB.
"""
data = IgnoreCaseDict()
table = self._client.Table(self._table_name)
last_evaluated_key = None
while True:
if last_evaluated_key is None:
kwargs = {}
else:
kwargs = {'ExclusiveStartKey': last_evaluated_key}
if self._context_value is None:
response = table.scan(**kwargs)
else:
key_exp = Key(self._context_attribute).eq(self._context_value)
response = table.query(KeyConditionExpression=key_exp, **kwargs)
for item in response['Items']:
key = item[self._key_attribute]
value = item[self._value_attribute]
if isinstance(value, Mapping):
value = make_ignore_case(value)
data[key] = value
if 'LastEvaluatedKey' not in response:
break
last_evaluated_key = response['LastEvaluatedKey']
self._data = data
| mit | -6,548,755,572,922,770,000 | 28.406061 | 86 | 0.612531 | false |
dudulianangang/vps | EneConsTest.py | 1 | 5969 | import sdf
import matplotlib.pyplot as plt
import numpy as np
import matplotlib as mpl
plt.style.use('seaborn-white')
# plt.rcParams['font.family'] = 'sans-serif'
# plt.rcParams['font.sans-serif'] = 'Tahoma'
# # plt.rcParams['font.monospace'] = 'Ubuntu Mono'
plt.rcParams['font.size'] = 16
# plt.rcParams['axes.labelsize'] = 10
# plt.rcParams['axes.labelweight'] = 'bold'
# plt.rcParams['xtick.labelsize'] = 8
# plt.rcParams['ytick.labelsize'] = 8
# plt.rcParams['legend.fontsize'] = 10
# plt.rcParams['figure.titlesize'] = 12
# constants for normalization
n0 = 1.8e20
me = 9.1e-31
qe = 1.6e-19
ep = 8.9e-12
c = 3e8
wp = np.sqrt(n0*qe*qe/me/ep)
ld = c/wp
e0 = me*c*wp/qe
b0 = e0/c
tt = 1/wp
ts = 50*5
te = 1500
pct = 100
en0 = me*c**2
en1 = 0.5*ep*ld**2
# simulation domain
nx = 3500
ny = 3500
lx = 3500
ly = 3500
# figure domain (set by grid)
grid_min_x = 0
grid_max_x = nx
grid_min_y = 0
grid_max_y = ny
Gx = np.linspace(0,lx,nx)
Gy = np.linspace(0,ly,ny)
gx = Gx[grid_min_x:grid_max_x+1]
gy = Gy[grid_min_y:grid_max_y+1]
# figure parameters
# fs = 24
jetcmap = plt.cm.get_cmap("rainbow", 9)  # generate a rainbow colormap with 9 values
jet_vals = jetcmap(np.arange(9)) #extract those values as an array
jet_vals[0] = [1.0, 1, 1.0, 1] #change the first value
newcmap = mpl.colors.LinearSegmentedColormap.from_list("newjet", jet_vals)
# define array
EneBmE = np.ones(7)
EneBmI = np.ones(7)
EneBgE = np.ones(7)
EneBgI = np.ones(7)
sex = np.ones(7)
sey = np.ones(7)
sez = np.ones(7)
sbx = np.ones(7)
sby = np.ones(7)
sbz = np.ones(7)
TpeC1 = np.ones(7)
TpeS1 = np.ones(7)
TfeC1 = np.ones(7)
TfeS1 = np.ones(7)
TpeC2 = np.ones(7)
TpeS2 = np.ones(7)
TfeC2 = np.ones(7)
TfeS2 = np.ones(7)
TeC1 = np.ones(7)
TeS1 = np.ones(7)
TeC2 = np.ones(7)
TeS2 = np.ones(7)
time = np.ones(7)
# plot function
file = '/Volumes/yaowp2016/'
folder = 'nj'
for i in range(7):
ii = i*5
time[i] = i*ts
fname = file+folder+'/6'+str(ii).zfill(4)+'.sdf'
datafile = sdf.read(fname)
GamBmE = datafile.Particles_Gamma_subset_ele1_ele_bm.data
GamBmI = datafile.Particles_Gamma_subset_ion1_ion_bm.data
GamBgE = datafile.Particles_Gamma_subset_ele1_ele_e.data
GamBgI = datafile.Particles_Gamma_subset_ion1_ion_e.data
WgtBmE = datafile.Particles_Weight_subset_ele1_ele_bm.data
WgtBmI = datafile.Particles_Weight_subset_ion1_ion_bm.data
WgtBgE = datafile.Particles_Weight_subset_ele1_ele_e.data
WgtBgI = datafile.Particles_Weight_subset_ion1_ion_e.data
EneBmE[i] = np.sum((GamBmE-1)*en0*np.mean(WgtBmE))*pct
EneBmI[i] = np.sum((GamBmI-1)*en0*np.mean(WgtBmI))*pct
EneBgE[i] = np.sum((GamBgE-1)*en0*np.mean(WgtBgE))*pct
EneBgI[i] = np.sum((GamBgI-1)*en0*np.mean(WgtBgI))*pct
fname = file+folder+'/'+str(ii).zfill(4)+'.sdf'
datafile = sdf.read(fname)
Ex = datafile.Electric_Field_Ex.data
Ey = datafile.Electric_Field_Ey.data
Ez = datafile.Electric_Field_Ez.data
Bx = datafile.Magnetic_Field_Bx.data*c
By = datafile.Magnetic_Field_By.data*c
Bz = datafile.Magnetic_Field_Bz.data*c
sex[i] = np.sum(Ex**2)*en1
sey[i] = np.sum(Ey**2)*en1
sez[i] = np.sum(Ez**2)*en1
sbx[i] = np.sum(Bx**2)*en1
sby[i] = np.sum(By**2)*en1
sbz[i] = np.sum(Bz**2)*en1
TpeC1[i] = EneBmE[i]+EneBmI[i]+EneBgE[i]+EneBgI[i]
TfeC1[i] = sex[i]+sey[i]+sez[i]+sbx[i]+sby[i]+sbz[i]
TfeS1[i] = datafile.Total_Field_Energy_in_Simulation__J_.data
TpeS1[i] = datafile.Total_Particle_Energy_in_Simulation__J_.data
folder = 'nj_non'
for i in range(7):
ii = i*5
time[i] = i*ts
fname = file+folder+'/6'+str(ii).zfill(4)+'.sdf'
datafile = sdf.read(fname)
GamBmE = datafile.Particles_Gamma_subset_ele1_ele_bm.data
GamBmI = datafile.Particles_Gamma_subset_ion1_ion_bm.data
GamBgE = datafile.Particles_Gamma_subset_ele1_ele_e.data
GamBgI = datafile.Particles_Gamma_subset_ion1_ion_e.data
WgtBmE = datafile.Particles_Weight_subset_ele1_ele_bm.data
WgtBmI = datafile.Particles_Weight_subset_ion1_ion_bm.data
WgtBgE = datafile.Particles_Weight_subset_ele1_ele_e.data
WgtBgI = datafile.Particles_Weight_subset_ion1_ion_e.data
EneBmE[i] = np.sum((GamBmE-1)*en0*np.mean(WgtBmE))*pct
EneBmI[i] = np.sum((GamBmI-1)*en0*np.mean(WgtBmI))*pct
EneBgE[i] = np.sum((GamBgE-1)*en0*np.mean(WgtBgE))*pct
EneBgI[i] = np.sum((GamBgI-1)*en0*np.mean(WgtBgI))*pct
fname = file+folder+'/'+str(ii).zfill(4)+'.sdf'
datafile = sdf.read(fname)
Ex = datafile.Electric_Field_Ex.data
Ey = datafile.Electric_Field_Ey.data
Ez = datafile.Electric_Field_Ez.data
Bx = datafile.Magnetic_Field_Bx.data*c
By = datafile.Magnetic_Field_By.data*c
Bz = datafile.Magnetic_Field_Bz.data*c
sex[i] = np.sum(Ex**2)*en1
sey[i] = np.sum(Ey**2)*en1
sez[i] = np.sum(Ez**2)*en1
sbx[i] = np.sum(Bx**2)*en1
sby[i] = np.sum(By**2)*en1
sbz[i] = np.sum(Bz**2)*en1
TpeC2[i] = EneBmE[i]+EneBmI[i]+EneBgE[i]+EneBgI[i]
TfeC2[i] = sex[i]+sey[i]+sez[i]+sbx[i]+sby[i]+sbz[i]
TfeS2[i] = datafile.Total_Field_Energy_in_Simulation__J_.data
TpeS2[i] = datafile.Total_Particle_Energy_in_Simulation__J_.data
TeC1 = TpeC1+TfeC1
TeS1 = TpeS1+TfeS1
TeC2 = TpeC2+TfeC2
TeS2 = TpeS2+TfeS2
np.save('tpec1.npy', TpeC1)
np.save('tpes1.npy', TpeS1)
np.save('tfec1.npy', TfeC1)
np.save('tfes1.npy', TfeS1)
np.save('tpec2.npy', TpeC2)
np.save('tpes2.npy', TpeS2)
np.save('tfec2.npy', TfeC2)
np.save('tfes2.npy', TfeS2)
np.save('tec1.npy', TeC1)
np.save('tes1.npy', TeS1)
np.save('tec2.npy', TeC2)
np.save('tes2.npy', TeS2)
# plt.figure(figsize=(8,5))
# ax = plt.subplot()
# ax.plot(time, TpeC1,'r-', lw=2, label='tbc-cal')
# ax.plot(time, TpeS1,'r--', lw=2, label='tbc-sys')
# ax.plot(time, TpeC2,'b-', lw=2, label='pbc-cal')
# ax.plot(time, TpeS2,'b--', lw=2, label='pbc-sys')
# plt.xlabel('time($\omega_{pe}^{-1}$)',fontsize=24)
# plt.ylabel('energy($J$)',fontsize=24)
# plt.legend(loc='best', numpoints=1, fancybox=True)
# plt.title('total system energy',fontsize=32,fontstyle='normal')
# plt.show()
# plt.savefig(file+folder+'/plots/'+'TotalEnergyComp.png',bbox_inches='tight') # n means normalized
# plt.close()
| apache-2.0 | -2,299,900,135,137,136,400 | 26.892523 | 100 | 0.675658 | false |
Adamssss/projectEuler | pb347.py | 1 | 1198 | import math
import time
t1 = time.time()
N = 10000000
prime = []
def primeSieve(n):
global prime
n = (n+1)//2
p = [True]*(n)
i = 1
prime.append(2)
while i < n:
if p[i]:
t = 2*i+1
prime.append(t)
p[i] = False
j = 2*i*i+2*i
while j < n:
p[j] = False
j += t
i += 1
return prime
primeSieve(N//2 + 100)
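# Project Euler 347: M(p, q, N) is the largest number <= N whose only prime
# factors are exactly p and q; S(N) sums M(p, q, N) over all distinct pairs.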
def S(n):
result = 0
for i in range(0,len(prime)-1):
if prime[i]*prime[i] > n:
break
for j in range(i+1,len(prime)):
if prime[i]*prime[j] > n:
break
result += M(prime[i],prime[j],n)
return result
def M(p,q,n):
if p*q > n:
return 0
m = p*q
r = m*Mh(p,q,n//m)
#print(p,q,n,r)
return r
def Mh(p,q,n):
if p > n and q > n:
return 1
t = 1
c = 0
while t <= n:
t= p*t
c += 1
t = t//p
c -= 1
m = t
while c > 0:
t = t//p
c -= 1
if t*q <= n:
t = t*q
if t > m:
m = t
return m
print(S(N))
print("time:",time.time()-t1)
| mit | 9,094,591,799,074,366,000 | 14.558442 | 44 | 0.3798 | false |
otfbot/otfbot | otfbot/lib/chatMod.py | 1 | 4027 | # This file is part of OtfBot.
#
# OtfBot is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# OtfBot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OtfBot; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# (c) 2005 - 2010 by Alexander Schier
# (c) 2006 - 2010 by Robert Weidlich
""" contains a abstract class for a Bot-module """
from pluginSupport import Plugin
class chatMod(Plugin):
"""
    This class mainly documents the available callbacks.
    Some methods are helpers for common tasks, e.g. kicked calls userLeft,
    so a plugin implementing only userLeft will still notice that a kicked
    user left the channel. A plugin that also implements kicked can handle
    it independently of userLeft, because kicked is then overwritten.
"""
def __init__(self, bot):
self.bot = bot
def auth(self, user):
"""check the authorisation of the user"""
pass
def joined(self, channel):
"""we have joined a channel"""
pass
def command(self, user, channel, command, options):
"""a command message received"""
pass
def query(self, user, channel, msg):
"""a private message received"""
pass
def msg(self, user, channel, msg):
"""message received"""
pass
def connectionMade(self):
"""made connection to server"""
pass
def connectionLost(self, reason):
"""lost connection to server"""
pass
def signedOn(self):
"""successfully signed on"""
pass
def left(self, channel):
"""we have left a channel"""
pass
def noticed(self, user, channel, msg):
"""we got a notice"""
pass
def action(self, user, channel, msg):
"""action (/me) received"""
pass
def modeChanged(self, user, channel, set, modes, args):
"""mode changed"""
pass
def kickedFrom(self, channel, kicker, message):
"""someone kicked the bot"""
self.left(channel)
def userKicked(self, kickee, channel, kicker, message):
"""someone kicked someone else"""
self.userLeft(kickee, channel)
def userJoined(self, user, channel):
"""a user joined the channel"""
pass
def userJoinedMask(self, user, channel):
pass
def userLeft(self, user, channel):
"""a user left the channel"""
pass
def userQuit(self, user, quitMessage):
"""a user disconnect from the network"""
pass
def yourHost(self, info):
"""info about your host"""
pass
def userRenamed(self, oldname, newname):
"""a user changed the nick"""
pass
def topicUpdated(self, user, channel, newTopic):
"""a user changed the topic of a channel"""
pass
def irc_unknown(self, prefix, command, params):
"""an IRC-Message, which is not handle by twisted was received"""
pass
def stop(self):
"""called, when the bot is stopped, or the module is reloaded"""
pass
def reload(self):
"""called to reload the settings of the module"""
pass
def start(self):
"""called to start the work of the module
put your initialization stuff in here insteadof __init__
"""
pass
def sendLine(self, line):
pass
def lineReceived(self, line):
pass
def ctcpQuery(self, user, channel, messages):
""" called for ctcp queries
"""
pass
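# Minimal plugin sketch (assumption: otfbot's plugin loader instantiates
# chatMod subclasses; only plain Python is used below, real bot API calls
# are intentionally left out):
# class exampleMod(chatMod):
#     def start(self):
#         self.channels = []
#     def joined(self, channel):
#         self.channels.append(channel)  # remember which channels we joined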
| gpl-2.0 | 6,027,862,804,107,587,000 | 26.026846 | 76 | 0.619568 | false |
nikolas/raven-python | conftest.py | 1 | 1947 | from django.conf import settings
import os.path
import sys
collect_ignore = []
if sys.version_info[0] > 2:
collect_ignore.append("tests/contrib/flask")
if sys.version_info[1] == 2:
collect_ignore.append("tests/handlers/logbook")
try:
import gevent
except ImportError:
collect_ignore.append("tests/transport/gevent")
try:
import web
except ImportError:
collect_ignore.append("tests/contrib/webpy")
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.sessions',
'django.contrib.sites',
# Included to fix Disqus' test Django which solves IntegrityMessage case
'django.contrib.contenttypes',
'raven.contrib.django',
'tests.contrib.django',
]
use_djcelery = True
try:
import djcelery
INSTALLED_APPS.append('djcelery')
except ImportError:
use_djcelery = False
def pytest_configure(config):
where_am_i = os.path.dirname(os.path.abspath(__file__))
if not settings.configured:
settings.configure(
DATABASE_ENGINE='sqlite3',
DATABASES={
'default': {
'NAME': ':memory:',
'ENGINE': 'django.db.backends.sqlite3',
'TEST_NAME': ':memory:',
},
},
DATABASE_NAME=':memory:',
TEST_DATABASE_NAME=':memory:',
INSTALLED_APPS=INSTALLED_APPS,
ROOT_URLCONF='',
DEBUG=False,
SITE_ID=1,
BROKER_HOST="localhost",
BROKER_PORT=5672,
BROKER_USER="guest",
BROKER_PASSWORD="guest",
BROKER_VHOST="/",
SENTRY_ALLOW_ORIGIN='*',
CELERY_ALWAYS_EAGER=True,
TEMPLATE_DEBUG=True,
PROJECT_ROOT=where_am_i,
TEMPLATE_DIRS=[os.path.join(where_am_i, 'tests', 'contrib', 'django', 'templates')],
ALLOWED_HOSTS=['*'],
)
| bsd-3-clause | -4,972,678,971,291,255,000 | 24.96 | 96 | 0.57319 | false |
tisnik/fabric8-analytics-common | integration-tests/features/steps/jobs_api.py | 1 | 14506 | """Tests for jobs API endpoints."""
import os
import requests
import uuid
from behave import given, then, when
from src.attribute_checks import check_timestamp, check_job_token_attributes
from src.parsing import parse_token_clause
from src.authorization_tokens import jobs_api_authorization
@given('Jobs debug API is running')
def running_jobs_debug_api(context):
"""Wait for the job debug REST API to be available."""
if not context.is_jobs_debug_api_running(context):
context.wait_for_jobs_debug_api_service(context, 60)
@when('I access jobs API {url:S}')
def jobs_api_url(context, url):
"""Access the jobs service API using the HTTP GET method."""
context.response = requests.get(context.jobs_api_url + url)
@when('I access jobs API {url:S} with authorization token')
def jobs_api_url_with_authorization_token(context, url):
"""Access the jobs service API using the HTTP GET method."""
context.response = requests.get(context.jobs_api_url + url,
headers=jobs_api_authorization(context))
@when('I read list of jobs')
@when('I read list of jobs with type {type}')
@when('I read list of jobs {token} authorization token')
@when('I read list of jobs with type {type} {token} authorization token')
def list_of_jobs(context, type=None, token=None):
"""Read list of jobs via job API."""
endpoint = job_endpoint(context)
if type is not None:
endpoint += "?job_type=" + type
use_token = parse_token_clause(token)
if use_token:
context.response = requests.get(endpoint, headers=jobs_api_authorization(context))
else:
context.response = requests.get(endpoint)
def check_all_report_attributes(report):
"""Check all report attributes."""
attributes = ["analyses", "analyses_finished", "analyses_finished_unique",
"analyses_unfinished", "analyses_unique", "packages",
"packages_finished", "versions"]
for attribute in attributes:
assert attribute in report
assert int(report[attribute]) >= 0
@then('I should see proper analyses report')
def check_job_debug_analyses_report(context):
"""Check the analyses report returned by job API."""
json_data = context.response.json()
assert json_data is not None
assert "now" in json_data
check_timestamp(json_data["now"])
assert "report" in json_data
report = json_data["report"]
check_all_report_attributes(report)
def flow_sheduling_endpoint(context, state, job_id=None):
"""Return URL to flow-scheduling with the given state and job ID."""
if job_id:
return "{jobs_api_url}api/v1/jobs/flow-scheduling?state={state}&job_id={job_id}".\
format(jobs_api_url=context.jobs_api_url, state=state, job_id=job_id)
else:
return "{jobs_api_url}api/v1/jobs/flow-scheduling?state={state}".\
format(jobs_api_url=context.jobs_api_url, state=state)
def job_metadata_filename(metadata):
"""Construct relative filename to job metadata."""
return "data/{metadata}".format(metadata=metadata)
def job_endpoint(context, job_id=None):
"""Return URL for given job id that can be used to job state manipulation."""
url = "{jobs_api_url}api/v1/jobs".format(
jobs_api_url=context.jobs_api_url)
if job_id is not None:
url = "{url}/{job_id}".format(url=url, job_id=job_id)
return url
def send_json_file_to_job_api(context, endpoint, filename, use_token):
"""Send the given file to the selected job API endpoints.
If the use_token is set, send the 'auth-token' header with the token taken
from the context environment.
"""
if use_token:
headers = jobs_api_authorization(context)
context.response = context.send_json_file(endpoint, filename, headers)
else:
context.response = context.send_json_file(endpoint, filename)
@when("I post a job metadata {metadata} with state {state}")
@when("I post a job metadata {metadata} with state {state} {token} authorization token")
def perform_post_job(context, metadata, state, token="without"):
"""Perform API call to create a new job using the provided metadata.
The token parameter can be set to 'with', 'without', or 'using'.
"""
filename = job_metadata_filename(metadata)
endpoint = flow_sheduling_endpoint(context, state)
use_token = parse_token_clause(token)
send_json_file_to_job_api(context, endpoint, filename, use_token)
def get_unique_job_id(context, job_id):
"""Return unique job ID consisting of generated UUID and actual ID."""
if 'job_id_prefix' in context:
return "{uuid}_{job_id}".format(uuid=context.job_id_prefix, job_id=job_id)
else:
return job_id
@when("I post a job metadata {metadata} with job id {job_id} and state {state}")
@when("I post a job metadata {metadata} with job id {job_id} and state {state} {token} "
"authorization token")
def perform_post_job_with_state(context, metadata, job_id, state, token="without"):
"""Perform API call to create a new job.
The new job is created using the provided metadata and set a job
to given state. The token parameter can be set to 'with', 'without', or
'using'.
"""
filename = job_metadata_filename(metadata)
job_id = get_unique_job_id(context, job_id)
endpoint = flow_sheduling_endpoint(context, state, job_id)
use_token = parse_token_clause(token)
send_json_file_to_job_api(context, endpoint, filename, use_token)
@when("I delete job without id")
@when("I delete job without id {token} authorization token")
@when("I delete job with id {job_id}")
@when("I delete job with id {job_id} {token} authorization token")
def delete_job(context, job_id=None, token="without"):
"""Perform API call to delete a job with given ID."""
job_id = get_unique_job_id(context, job_id)
endpoint = job_endpoint(context, job_id)
use_token = parse_token_clause(token)
if use_token:
context.response = requests.delete(endpoint, headers=jobs_api_authorization(context))
else:
context.response = requests.delete(endpoint)
@when("I set status for job with id {job_id} to {status}")
@when("I set status for job with id {job_id} to {status} {token} authorization token")
def set_job_status(context, job_id, status, token="without"):
"""Perform API call to set job status."""
endpoint = job_endpoint(context, job_id)
url = "{endpoint}?state={status}".format(endpoint=endpoint, status=status)
use_token = parse_token_clause(token)
if use_token:
context.response = requests.put(url, headers=jobs_api_authorization(context))
else:
context.response = requests.put(url)
@when("I reset status for the job service")
@when("I set status for job service to {status}")
@when("I set status for job service to {status} {token} authorization token")
def set_job_service_status(context, status=None, token="without"):
"""Perform API call to set or reset job service status."""
url = "{jobs_api_url}api/v1/service/state".format(
jobs_api_url=context.jobs_api_url)
use_token = parse_token_clause(token)
if status is not None:
url = "{url}?state={status}".format(url=url, status=status)
if use_token:
context.response = requests.put(url, headers=jobs_api_authorization(context))
else:
context.response = requests.put(url)
@when("I clean all failed jobs")
@when("I clean all failed jobs {token} authorization token")
def clean_all_failed_jobs(context, token="without"):
"""Perform API call to clean up all failed jobs."""
url = "{url}api/v1/jobs/clean-failed".format(url=context.jobs_api_url)
use_token = parse_token_clause(token)
if use_token:
context.response = requests.delete(url, headers=jobs_api_authorization(context))
else:
context.response = requests.delete(url)
@when('I logout from the job service')
@when('I logout from the job service {token} authorization token')
def logout_from_the_jobs_service(context, token='without'):
"""Call API to logout from the job service."""
url = "{jobs_api_url}api/v1/logout".format(
jobs_api_url=context.jobs_api_url)
use_token = parse_token_clause(token)
if use_token:
headers = jobs_api_authorization(context)
        context.response = requests.put(url, headers=headers)
else:
context.response = requests.put(url)
@when('I access the job service endpoint to generate token')
def job_service_generate_token(context):
"""Generate token for the job service."""
url = "{jobs_api_url}api/v1/generate-token".format(
jobs_api_url=context.jobs_api_url)
context.response = requests.get(url)
@then('I should be redirected to {url}')
def check_redirection(context, url):
"""Check the response with redirection."""
assert context.response is not None
assert context.response.history is not None
assert context.response.url is not None
assert context.response.url.startswith(url)
@when("I ask for analyses report for ecosystem {ecosystem}")
@when("I ask for analyses report for ecosystem {ecosystem} {token} authorization token")
@when("I ask for analyses report for ecosystem {ecosystem} from date {from_date} {token} "
"authorization token")
@when("I ask for analyses report for ecosystem {ecosystem} to date {to_date} {token} "
"authorization token")
@when("I ask for analyses report for ecosystem {ecosystem} between dates {from_date} {to_date} "
"{token} authorization token")
def access_analyses_report(context, ecosystem, from_date=None, to_date=None, token="without"):
"""Perform API call to get analyses report for selected ecosystem."""
use_token = parse_token_clause(token)
url = "{url}api/v1/debug/analyses-report?ecosystem={ecosystem}".format(
url=context.jobs_api_url, ecosystem=ecosystem)
if from_date is not None:
url += "&from_date=" + from_date
if to_date is not None:
url += "&to_date=" + to_date
if use_token:
headers = jobs_api_authorization(context)
context.response = requests.get(url, headers=headers)
else:
context.response = requests.get(url)
def get_jobs_count(context):
"""Return job count read from the JSON response."""
jsondata = context.response.json()
jobs = jsondata['jobs']
assert jobs is not None
return jsondata['jobs_count']
@then('I should see {num:d} jobs')
def check_jobs_count(context, num):
"""Check the number of jobs."""
jobs_count = get_jobs_count(context)
assert jobs_count == num
@then('I should see N jobs')
def check_jobs(context):
"""Check and remember the number of jobs."""
jobs_count = get_jobs_count(context)
context.jobs_count = jobs_count
@then('I should see N+{num:d} jobs')
def check_jobs_count_plus_one(context, num):
"""Check the relative jobs count and remember the number of jobs."""
assert context.jobs_count is not None, \
"Please use 'I should see N jobs' test step first"
old_jobs_count = context.jobs_count
jobs_count = get_jobs_count(context)
expected = old_jobs_count + num
assert jobs_count == expected, "Expected %d jobs, but %d found instead" % \
(expected, jobs_count)
# remember the new number
context.jobs_count = jobs_count
def get_job_by_id(jobs, job_id):
"""Find the job by its ID."""
return next((job for job in jobs if job["job_id"] == job_id), None)
def check_job_state(job, state):
"""Check the state of given job."""
assert job is not None
assert job["state"] is not None
assert job["state"] == state
@then('I should find job with ID {job_id}')
@then('I should find job with ID {job_id} and state {state}')
def find_job(context, job_id, state=None):
"""Check the job ID existence.
Check if job with given ID is returned from the service and optionally if
the job status has expected value.
"""
jsondata = context.response.json()
jobs = jsondata['jobs']
job_id = get_unique_job_id(context, job_id)
job_ids = [job["job_id"] for job in jobs]
assert job_id in job_ids
if state is not None:
job = get_job_by_id(jobs, job_id)
check_job_state(job, state)
@then('I should not find job with ID {job_id}')
def should_not_find_job_by_id(context, job_id):
"""Check if job with given ID does not exist."""
jsondata = context.response.json()
jobs = jsondata['jobs']
job_id = get_unique_job_id(context, job_id)
job_ids = [job["job_id"] for job in jobs]
assert job_id not in job_ids
@when('I acquire job API authorization token')
def acquire_jobs_api_authorization_token(context):
"""Acquite the job API authorization token from the environment variable."""
context.jobs_api_token = os.environ.get("JOB_API_TOKEN")
# TODO: authorization via GitHub?
def check_token_attributes(token):
"""Check if given token has all required attributes."""
assert "token" in token
assert "rate" in token
assert "resources" in token
def check_token_name(token):
"""Check token name."""
resources = token["resources"]
token_names = ["core", "graphql", "search"]
for token_name in token_names:
assert token_name in resources
check_job_token_attributes(resources[token_name])
@then('I should see proper information about job API tokens')
def check_job_api_tokens_information(context):
"""Check the tokens information returned by job API."""
json_data = context.response.json()
assert json_data is not None
assert "tokens" in json_data
tokens = json_data["tokens"]
assert len(tokens) > 0
for token in tokens:
check_token_attributes(token)
rate_token = token["rate"]
check_job_token_attributes(rate_token)
check_token_name(token)
@when('I generate unique job ID prefix')
def generate_job_id_prefix(context):
"""Generate unique job ID prefix."""
context.job_id_prefix = uuid.uuid1()
@when("I perform kerberized {method} request to {url}")
def perform_kerberized_request(context, method, url):
"""Call REST API on coreapi-server."""
command = "curl -s -X {method} --negotiate -u : " + \
"http://coreapi-server:5000{url}".format(method=method, url=url)
context.kerb_request = \
context.exec_command_in_container(context.client, context.container,
command)
| apache-2.0 | -6,514,030,452,853,019,000 | 35.539043 | 96 | 0.674411 | false |
pleoni/game-of-life | plot/old/test_perf_mpi/life_perf_compilers.py | 1 | 1863 | import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from numpy import *
import sys
import datetime
datafile1="life_host_icc.out"
datafile2="life_host_gnu.out"
datafile3="life_host_pgi.out"
if len(sys.argv) > 1:
datafile=sys.argv[1]
plotfile="compilers_perf_eurora.png"
data1 = loadtxt(datafile1)
data2 = loadtxt(datafile2)
data3 = loadtxt(datafile3)
today = datetime.date.today()
fig = plt.figure() # apre una nuova figura
top = fig.add_subplot(211)
bottom = fig.add_subplot(212)
############# TOP
ICC_C1000 = data1[where((data1[:,0]==1) & (data1[:,5]==1000) ),:][0] # mpi 1 - Comp 1000
ICC_C0 = data1[where((data1[:,0]==1) & (data1[:,5]==0) ),:][0] # mpi 1 - comp 0
GNU_C1000 = data2[where((data2[:,0]==1) & (data2[:,5]==1000) ),:][0] # mpi 1 - Comp 1000
GNU_C0 = data2[where((data2[:,0]==1) & (data2[:,5]==0) ),:][0] # mpi 1 - comp 0
PGI_C1000 = data3[where((data3[:,0]==1) & (data3[:,5]==1000) ),:][0] # mpi 1 - Comp 1000
PGI_C0 = data3[where((data3[:,0]==1) & (data3[:,5]==0) ),:][0] # mpi 1 - comp 0
top.set_title(str(today) + ' life_hpc2 on eurora - NCOMP=1000')
top.grid()
top.set_xlabel('Lattice Size')
top.set_ylabel('time')
#top.set_yscale('log')
#top.legend()
top.plot(ICC_C1000[:,3],ICC_C1000[:,8],'-xr',GNU_C1000[:,3],GNU_C1000[:,8],'-xg',PGI_C1000[:,3],PGI_C1000[:,8],'-xc');
top.legend(('icc','gnu','pgi'), loc = 'upper left', shadow = False, prop={'size':9})
############# BOTTOM
bottom.set_title(str(today) + ' life_hpc2 on eurora - NCOMP=0')
bottom.grid()
bottom.set_xlabel('Lattice size')
bottom.set_ylabel('time')
bottom.plot(ICC_C0[:,3],ICC_C0[:,8],'-xr',GNU_C0[:,3],GNU_C0[:,8],'-xg',PGI_C0[:,3],PGI_C0[:,8],'-xc');
bottom.legend(('icc','gnu','pgi'), loc = 'upper left', shadow = False, prop={'size':9})
plt.subplots_adjust(hspace=0.5)
plt.savefig(plotfile)
#plt.show()
| gpl-2.0 | 7,832,986,248,728,866,000 | 27.661538 | 118 | 0.609769 | false |
IdeaSolutionsOnline/ERP4R | core/objs/sai_pesquisacont.py | 1 | 4546 | # !/usr/bin/env python3
# -*- encoding: utf-8 -*-
"""
ERP+
"""
__author__ = ['António Anacleto', 'Jair Medina']
__credits__ = []
__version__ = "1.0"
__maintainer__ = ['António Anacleto', 'Jair Medina']
__status__ = "Development"
__model_name__= 'sai_pesquisacont.SaiPesquisacont'
#import base_models#auth,
from orm import *
from form import *
class SaiPesquisacont (Model, View):
def __init__(self, **kargs):
Model.__init__(self, **kargs)
self.__name__ = 'sai_pesquisacont'
self.__title__= 'Por Contribuinte'
self.__model_name__ = __model_name__
self.__list_edit_mode__ = 'edit'
self.__db_mode__ = 'None'
self.__workflow__ = (
'estado', {'Rascunho':['Confirmar'], 'Confirmado':['Imprimir','Exportar']}
)
self.__workflow_auth__ = {
'Confirmar':['Gestor'],
'Rascunho':['Gestor'],
'Exportar':['All'],
'Imprimir':['All'],
}
self.__no_edit__ = [
('estado', ['Confirmado','Impresso'])
]
self.__auth__ = {
'read':['All'],
'write':['Técnico DNRE','Gestor DNRE' ],
'create':['Administrador'],
'delete':['Administrador'],
'full_access':['Administrador']
}
self.nu_nif = integer_field(view_order=1, name='NIF', size=80)
self.cliente = boolean_field(view_order = 2, name = 'Cliente?', default = False)
self.fornecedor = boolean_field(view_order = 3, name = 'Fornecedor?', default = True)
self.estado = info_field(view_order=4, name ='Estado', default='Confirmado', hidden=True, nolabel=True,)
def prepare_data(self):
nu_nif = bottle.request.forms.get('nu_nif')
        descricao = 'Alertas e Infrações de um Contribuinte'
cliente = bottle.request.forms.get('cliente')
record = {}
#print(nu_nif, cliente)
if cliente == 'False' :
sql="""select nu_nif, nm_contribuinte, dt_periodo, nu_nif_anexo, nm_contribuinte_anexo,nu_factura,dt_factura,vl_factura,vl_liquidado,validar_iva, nif_valido,declarado,info_valido from anexo_cli_out_13 where nu_nif= '{nif}' and validar_iva =1 or nu_nif='{nif}' and nif_valido = false or nu_nif='{nif}' and declarado = false or nu_nif='{nif}' and info_valido = false ORDER BY dt_periodo DESC""".format(nif=nu_nif)
data = run_sql(sql)
for i in data:
record['contribuinte']= i['nm_contribuinte']
break
record['sql2']=sql
record['nu_nif'] = nu_nif
record['lines'] = data
record['nome'] ='Cliente'
record['descricao'] = descricao
return record
else:
sql="""select nu_nif, nm_contribuinte, dt_periodo, nu_nif_anexo, nm_contribuinte_anexo,nu_factura,dt_factura,vl_factura,vl_dedutivel,validar_iva, nif_valido,declarado,info_valido from anexo_for_out_13 where nu_nif= '{nif}' and validar_iva =1 or nu_nif='{nif}' and nif_valido = false or nu_nif='{nif}' and declarado = false or nu_nif='{nif}' and info_valido = false ORDER BY dt_periodo DESC""".format(nif=nu_nif)
data = run_sql(sql)
for i in data:
record['contribuinte']= i['nm_contribuinte']
break
record['sql2']=sql
record['nu_nif'] = nu_nif
record['lines'] = data
record['nome'] ='Fornecedor'
record['descricao'] = descricao
return record
def Imprimir(self, key, window_id):
record = self.prepare_data()
if record['nome'] == 'Fornecedor':
template = 'sai_contribuintefor'
return Report(record=record, report_template=template).show()
else:
template = 'sai_contribuintecli'
return Report(record=record, report_template=template).show()
def Exportar(self, key, window_id):
x=self.prepare_data()
#record = get_records_to_print(key=key, model=self)
#print (record, key)
sql = x['sql2'] #record['sql']
print(sql, 'noooooooooo Exportar')
# variaveis = record['linha_sql_report']
# if variaveis:
# variaveis_dict = {}
# for variavel in variaveis:
# variaveis_dict[variavel['variavel']] = variavel['valor']
# sql = sql.format(**variaveis_dict)
result = run_sql(sql)
return data_to_csv(result, self, 'Gravar')
#253298121
| mit | -1,430,141,858,512,203,300 | 35.328 | 426 | 0.558027 | false |
macarthur-lab/xbrowse | breakpoint_search/urls.py | 1 | 1099 | from django.conf.urls import include, url
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.contrib import admin
from django.conf import settings
import xbrowse_server.base.views.igv_views
import xbrowse_server.base.views.family_group_views
import xbrowse_server.base.views.reference_views
import xbrowse_server.phenotips.views
import xbrowse_server.gene_lists.urls
import xbrowse_server.staff.urls
import django.contrib.admindocs.urls
import django.views.static
import xbrowse_server.api.urls
from breakpoint_search.views import breakpoint_search, breakpoints,\
project_breakpoint
#import seqr.urls
admin.autodiscover()
urlpatterns = [
# Breakpoint search
url(r'^project/(?P<project_id>[\w.|-]+)/family/(?P<family_id>[\w.|-]+)/breakpoint-search', breakpoint_search, name='breakpoint_search'),
url(r'^project/(?P<project_id>[\w.|-]+)/family/(?P<family_id>[\w.|-]+)/breakpoints', breakpoints, name='breakpoints'),
url(r'^project/(?P<project_id>[\w.|-]+)/breakpoint/(?P<breakpoint_id>[\w.|-]+)', project_breakpoint, name='project_breakpoint'),
]
| agpl-3.0 | 1,139,833,521,849,080,300 | 38.25 | 140 | 0.753412 | false |
aberklotz/crimereport | parser/crimeparser/spiders/test_police_spider.py | 1 | 5741 | from unittest import TestCase
from scrapy.http import Request, HtmlResponse
from conftest import RESOURCE_DIR
from crimeparser.spiders.police_spider import PoliceSpider
class TestPoliceSpider(TestCase):
def test_parse_id(self):
response = self.fake_response("sample.html",
"https://www.polizei.sachsen.de/de/MI_2019_63764.htm")
spider = PoliceSpider()
report = next(spider.parse_report(response))
self.assertEqual("MI_2019_63764", report["id"])
def test_parse_number(self):
response = self.fake_response("sample.html",
"https://www.polizei.sachsen.de/de/MI_2019_63764.htm")
spider = PoliceSpider()
report = next(spider.parse_report(response))
self.assertEqual(206, report["number"])
def test_parse_year(self):
response = self.fake_response("sample.html",
"https://www.polizei.sachsen.de/de/MI_2019_63764.htm")
spider = PoliceSpider()
report = next(spider.parse_report(response))
self.assertEqual(2019, report["year"])
def test_parse_crimes_length(self):
response = self.fake_response("sample.html",
"https://www.polizei.sachsen.de/de/MI_2019_63764.htm")
spider = PoliceSpider()
report = next(spider.parse_report(response))
crimes = report["crimes"]
self.assertEqual(10, len(crimes))
def test_parse_crimes_title(self):
response = self.fake_response("sample.html",
"https://www.polizei.sachsen.de/de/MI_2019_63764.htm")
spider = PoliceSpider()
report = next(spider.parse_report(response))
crime = report["crimes"][0]
self.assertEqual("Wohnungseinbruch", crime["title"])
def test_parse_crimes_title2(self):
response = self.fake_response("sample2.html",
"https://www.polizei.sachsen.de/de/MI_2019_63837.htm")
spider = PoliceSpider()
report = next(spider.parse_report(response))
crime = report["crimes"][1]
self.assertEqual("Infostand beschädigt – Mann leicht verletzt", crime["title"])
crime = report["crimes"][7]
self.assertEqual("Verkehrsunfall", crime["title"])
def test_parse_crimes_time(self):
response = self.fake_response("sample.html",
"https://www.polizei.sachsen.de/de/MI_2019_63764.htm")
spider = PoliceSpider()
report = next(spider.parse_report(response))
crime = report["crimes"][0]
self.assertEqual("05.04.2019, 08.00 Uhr bis 12.30 Uhr", crime["time"])
def test_parse_crimes_place(self):
response = self.fake_response("sample.html",
"https://www.polizei.sachsen.de/de/MI_2019_63764.htm")
spider = PoliceSpider()
report = next(spider.parse_report(response))
crime = report["crimes"][0]
self.assertEqual("Dresden-Strehlen, Otto-Dix-Ring", crime["place"])
def test_parse_crimes_content(self):
response = self.fake_response("sample.html",
"https://www.polizei.sachsen.de/de/MI_2019_63764.htm")
spider = PoliceSpider()
report = next(spider.parse_report(response))
crime = report["crimes"][0]
content = crime["content"]
self.assertTrue(content.startswith("Unbekannte"))
self.assertTrue(content.endswith("beziffert."))
def test_parse_crimes_content_include_appeal_for_witnesses(self):
response = self.fake_response("sample2.html",
"https://www.polizei.sachsen.de/de/MI_2019_63837.htm")
spider = PoliceSpider()
report = next(spider.parse_report(response))
crime = report["crimes"][0]
content = crime["content"]
self.assertTrue(content.startswith("Gestern Nachmittag"))
self.assertTrue(content.endswith("entgegen. (ml)"))
def test_parse_crimes_content_exclude_appendix(self):
response = self.fake_response("sample2.html",
"https://www.polizei.sachsen.de/de/MI_2019_63837.htm")
spider = PoliceSpider()
report = next(spider.parse_report(response))
crime = report["crimes"][7]
content = crime["content"]
self.assertTrue(content.startswith("Am Dienstagnachmittag kam"))
self.assertTrue(content.endswith("von rund 7.100 Euro. (lr)"))
def test_parse_crimes_region_dresden(self):
response = self.fake_response("sample.html",
"https://www.polizei.sachsen.de/de/MI_2019_63764.htm")
spider = PoliceSpider()
report = next(spider.parse_report(response))
crime = report["crimes"][0]
self.assertEqual("Landeshauptstadt Dresden", crime["region"])
def test_parse_crimes_region_meissen(self):
response = self.fake_response("sample.html",
"https://www.polizei.sachsen.de/de/MI_2019_63764.htm")
spider = PoliceSpider()
report = next(spider.parse_report(response))
crime = report["crimes"][7]
self.assertEqual("Landkreis Meißen", crime["region"])
def fake_response(self, file_name, url):
request = Request(url=url)
file_path = RESOURCE_DIR.joinpath(file_name)
file_content = open(file_path, 'r').read()
return HtmlResponse(url=url,
request=request,
body=file_content,
encoding="UTF-8")
| mit | -4,545,190,108,183,718,000 | 36.496732 | 92 | 0.587764 | false |
DistrictDataLabs/yellowbrick | yellowbrick/classifier/rocauc.py | 1 | 29053 | # yellowbrick.classifier.rocauc
# Implements visual ROC/AUC curves for classification evaluation.
#
# Author: Rebecca Bilbro
# Author: Benjamin Bengfort
# Author: Neal Humphrey
# Created: Tue May 03 18:15:42 2017 -0400
#
# Copyright (C) 2016 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: rocauc.py [5388065] [email protected] $
"""
Implements visual ROC/AUC curves for classification evaluation.
"""
##########################################################################
## Imports
##########################################################################
import numpy as np
from sklearn.metrics import auc, roc_curve
from sklearn.preprocessing import label_binarize
from sklearn.utils.multiclass import type_of_target
from yellowbrick.exceptions import ModelError
from yellowbrick.style.palettes import LINE_COLOR
from yellowbrick.exceptions import YellowbrickValueError
from yellowbrick.classifier.base import ClassificationScoreVisualizer
# Dictionary keys for ROCAUC
MACRO = "macro"
MICRO = "micro"
# Target Type Constants
BINARY = "binary"
MULTICLASS = "multiclass"
##########################################################################
## ROCAUC Visualizer
##########################################################################
class ROCAUC(ClassificationScoreVisualizer):
"""
Receiver Operating Characteristic (ROC) curves are a measure of a
classifier's predictive quality that compares and visualizes the tradeoff
between the models' sensitivity and specificity. The ROC curve displays
the true positive rate on the Y axis and the false positive rate on the
X axis on both a global average and per-class basis. The ideal point is
therefore the top-left corner of the plot: false positives are zero and
true positives are one.
This leads to another metric, area under the curve (AUC), a computation
of the relationship between false positives and true positives. The higher
the AUC, the better the model generally is. However, it is also important
to inspect the "steepness" of the curve, as this describes the
maximization of the true positive rate while minimizing the false positive
rate. Generalizing "steepness" usually leads to discussions about
convexity, which we do not get into here.
Parameters
----------
estimator : estimator
A scikit-learn estimator that should be a classifier. If the model is
not a classifier, an exception is raised. If the internal model is not
fitted, it is fit when the visualizer is fitted, unless otherwise specified
by ``is_fitted``.
ax : matplotlib Axes, default: None
The axes to plot the figure on. If not specified the current axes will be
used (or generated if required).
micro : bool, default: True
Plot the micro-averages ROC curve, computed from the sum of all true
positives and false positives across all classes. Micro is not defined
for binary classification problems with estimators with only a
decision_function method.
macro : bool, default: True
Plot the macro-averages ROC curve, which simply takes the average of
curves across all classes. Macro is not defined for binary
classification problems with estimators with only a decision_function
method.
per_class : bool, default: True
Plot the ROC curves for each individual class. This should be set
to false if only the macro or micro average curves are required. For true
binary classifiers, setting per_class=False will plot the positive class
ROC curve, and per_class=True will use ``1-P(1)`` to compute the curve of
the negative class if only a decision_function method exists on the estimator.
binary : bool, default: False
This argument quickly resets the visualizer for true binary classification
by updating the micro, macro, and per_class arguments to False (do not use
in conjunction with those other arguments). Note that this is not a true
hyperparameter to the visualizer, it just collects other parameters into
a single, simpler argument.
    classes : list of str, default: None
The class labels to use for the legend ordered by the index of the sorted
classes discovered in the ``fit()`` method. Specifying classes in this
manner is used to change the class names to a more specific format or
to label encoded integer classes. Some visualizers may also use this
field to filter the visualization for specific classes. For more advanced
usage specify an encoder rather than class labels.
encoder : dict or LabelEncoder, default: None
A mapping of classes to human readable labels. Often there is a mismatch
between desired class labels and those contained in the target variable
passed to ``fit()`` or ``score()``. The encoder disambiguates this mismatch
ensuring that classes are labeled correctly in the visualization.
is_fitted : bool or str, default="auto"
Specify if the wrapped estimator is already fitted. If False, the estimator
will be fit when the visualizer is fit, otherwise, the estimator will not be
modified. If "auto" (default), a helper method will check if the estimator
is fitted before fitting it again.
force_model : bool, default: False
Do not check to ensure that the underlying estimator is a classifier. This
will prevent an exception when the visualizer is initialized but may result
in unexpected or unintended behavior.
kwargs : dict
Keyword arguments passed to the visualizer base classes.
Attributes
----------
classes_ : ndarray of shape (n_classes,)
The class labels observed while fitting.
class_count_ : ndarray of shape (n_classes,)
Number of samples encountered for each class during fitting.
score_ : float
An evaluation metric of the classifier on test data produced when
``score()`` is called. This metric is between 0 and 1 -- higher scores are
generally better. For classifiers, this score is usually accuracy, but
if micro or macro is specified this returns an F1 score.
target_type_ : string
Specifies if the detected classification target was binary or multiclass.
Notes
-----
ROC curves are typically used in binary classification, and in fact the
Scikit-Learn ``roc_curve`` metric is only able to perform metrics for
binary classifiers. As a result it is necessary to binarize the output or
to use one-vs-rest or one-vs-all strategies of classification. The
visualizer does its best to handle multiple situations, but exceptions can
arise from unexpected models or outputs.
Another important point is the relationship of class labels specified on
initialization to those drawn on the curves. The classes are not used to
constrain ordering or filter curves; the ROC computation happens on the
unique values specified in the target vector to the ``score`` method. To
ensure the best quality visualization, do not use a LabelEncoder for this
and do not pass in class labels.
.. seealso::
http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html
.. todo:: Allow the class list to filter the curves on the visualization.
Examples
--------
>>> from yellowbrick.classifier import ROCAUC
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.model_selection import train_test_split
>>> data = load_data("occupancy")
>>> features = ["temp", "relative humidity", "light", "C02", "humidity"]
>>> X_train, X_test, y_train, y_test = train_test_split(X, y)
>>> oz = ROCAUC(LogisticRegression())
>>> oz.fit(X_train, y_train)
>>> oz.score(X_test, y_test)
>>> oz.show()
"""
def __init__(
self,
estimator,
ax=None,
micro=True,
macro=True,
per_class=True,
binary=False,
classes=None,
encoder=None,
is_fitted="auto",
force_model=False,
**kwargs
):
super(ROCAUC, self).__init__(
estimator,
ax=ax,
classes=classes,
encoder=encoder,
is_fitted=is_fitted,
force_model=force_model,
**kwargs
)
# Set the visual parameters for ROCAUC
# NOTE: the binary flag breaks our API since it's really just a meta parameter
# for micro, macro, and per_class. We knew this going in, but did it anyway.
self.binary = binary
if self.binary:
self.micro = False
self.macro = False
self.per_class = False
else:
self.micro = micro
self.macro = macro
self.per_class = per_class
def fit(self, X, y=None):
"""
Fit the classification model.
"""
# The target determines what kind of estimator is fit
ttype = type_of_target(y)
if ttype.startswith(MULTICLASS):
self.target_type_ = MULTICLASS
elif ttype.startswith(BINARY):
self.target_type_ = BINARY
else:
raise YellowbrickValueError(
(
"{} does not support target type '{}', "
"please provide a binary or multiclass single-output target"
).format(self.__class__.__name__, ttype)
)
# Fit the model and return self
return super(ROCAUC, self).fit(X, y)
def score(self, X, y=None):
"""
Generates the predicted target values using the Scikit-Learn
estimator.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n
An array or series of target or class values
Returns
-------
score_ : float
Global accuracy unless micro or macro scores are requested.
"""
# Call super to check if fitted and to compute self.score_
# NOTE: this sets score to the base score if neither macro nor micro
super(ROCAUC, self).score(X, y)
# Compute the predictions for the test data
y_pred = self._get_y_scores(X)
if self.target_type_ == BINARY:
# For binary, per_class must be True to draw micro/macro curves
if (self.micro or self.macro) and not self.per_class:
                raise ModelError(
                    "no curves will be drawn; "
                    "set per_class=True or micro=False and macro=False."
                )
# For binary, if predictions are returned in shape (n,), micro and macro
# curves are not defined
if (self.micro or self.macro) and len(y_pred.shape) == 1:
raise ModelError(
"no curves will be drawn; set binary=True.",
)
if self.target_type_ == MULTICLASS:
# If it's multiclass classification, at least one of micro, macro, or
# per_class must be True
if not self.micro and not self.macro and not self.per_class:
raise YellowbrickValueError(
"no curves will be drawn; specify micro, macro, or per_class"
)
# Classes may be label encoded so only use what's in y to compute.
# The self.classes_ attribute will be used as names for labels.
classes = np.unique(y)
n_classes = len(classes)
# Store the false positive rate, true positive rate and curve info.
self.fpr = dict()
self.tpr = dict()
self.roc_auc = dict()
# If the decision is binary draw only ROC curve for the positive class
if self.target_type_ is BINARY and not self.per_class:
# In this case predict_proba returns an array of shape (n, 2) which
# specifies the probabilities of both the negative and positive classes.
if len(y_pred.shape) == 2 and y_pred.shape[1] == 2:
self.fpr[BINARY], self.tpr[BINARY], _ = roc_curve(y, y_pred[:, 1])
else:
# decision_function returns array of shape (n,), so plot it directly
self.fpr[BINARY], self.tpr[BINARY], _ = roc_curve(y, y_pred)
self.roc_auc[BINARY] = auc(self.fpr[BINARY], self.tpr[BINARY])
# Per-class binary decisions may have to have the negative class curve computed
elif self.target_type_ is BINARY and self.per_class:
# draw a curve for class 1 (the positive class)
if len(y_pred.shape) == 2 and y_pred.shape[1] == 2:
# predict_proba returns array of shape (n, 2), so use
# probability of class 1 to compute ROC
self.fpr[1], self.tpr[1], _ = roc_curve(y, y_pred[:, 1])
else:
# decision_function returns array of shape (n,)
self.fpr[1], self.tpr[1], _ = roc_curve(y, y_pred)
self.roc_auc[1] = auc(self.fpr[1], self.tpr[1])
# draw a curve for class 0 (the negative class)
if len(y_pred.shape) == 2 and y_pred.shape[1] == 2:
# predict_proba returns array of shape (n, 2), so use
# probability of class 0 to compute ROC
self.fpr[0], self.tpr[0], _ = roc_curve(1 - y, y_pred[:, 0])
else:
# decision_function returns array of shape (n,).
# To draw a ROC curve for class 0 we swap the classes 0 and 1 in y
# and reverse classifiers predictions y_pred.
self.fpr[0], self.tpr[0], _ = roc_curve(1 - y, -y_pred)
self.roc_auc[0] = auc(self.fpr[0], self.tpr[0])
else:
# Otherwise compute the ROC curve and ROC area for each class
for i, c in enumerate(classes):
self.fpr[i], self.tpr[i], _ = roc_curve(y, y_pred[:, i], pos_label=c)
self.roc_auc[i] = auc(self.fpr[i], self.tpr[i])
# Compute micro average
if self.micro:
self._score_micro_average(y, y_pred, classes, n_classes)
# Compute macro average
if self.macro:
self._score_macro_average(n_classes)
# Draw the Curves
self.draw()
# Set score to micro average if specified
if self.micro:
self.score_ = self.roc_auc[MICRO]
# Set score to macro average if not micro
if self.macro:
self.score_ = self.roc_auc[MACRO]
return self.score_
def draw(self):
"""
Renders ROC-AUC plot.
Called internally by score, possibly more than once
Returns
-------
ax : the axis with the plotted figure
"""
colors = self.class_colors_[0 : len(self.classes_)]
n_classes = len(colors)
# If it's a binary decision, plot the single ROC curve
if self.target_type_ == BINARY and not self.per_class:
self.ax.plot(
self.fpr[BINARY],
self.tpr[BINARY],
label="ROC for binary decision, AUC = {:0.2f}".format(
self.roc_auc[BINARY]
),
)
# If per-class plotting is requested, plot ROC curves for each class
if self.per_class:
for i, color in zip(range(n_classes), colors):
self.ax.plot(
self.fpr[i],
self.tpr[i],
color=color,
label="ROC of class {}, AUC = {:0.2f}".format(
self.classes_[i], self.roc_auc[i]
),
)
# If requested, plot the ROC curve for the micro average
if self.micro:
self.ax.plot(
self.fpr[MICRO],
self.tpr[MICRO],
linestyle="--",
color=self.class_colors_[len(self.classes_) - 1],
label="micro-average ROC curve, AUC = {:0.2f}".format(
self.roc_auc["micro"]
),
)
# If requested, plot the ROC curve for the macro average
if self.macro:
self.ax.plot(
self.fpr[MACRO],
self.tpr[MACRO],
linestyle="--",
color=self.class_colors_[len(self.classes_) - 1],
label="macro-average ROC curve, AUC = {:0.2f}".format(
self.roc_auc["macro"]
),
)
# Plot the line of no discrimination to compare the curve to.
self.ax.plot([0, 1], [0, 1], linestyle=":", c=LINE_COLOR)
return self.ax
def finalize(self, **kwargs):
"""
Sets a title and axis labels of the figures and ensures the axis limits
are scaled between the valid ROCAUC score values.
Parameters
----------
kwargs: generic keyword arguments.
Notes
-----
Generally this method is called from show and not directly by the user.
"""
# Set the title and add the legend
self.set_title("ROC Curves for {}".format(self.name))
self.ax.legend(loc="lower right", frameon=True)
# Set the limits for the ROC/AUC (always between 0 and 1)
self.ax.set_xlim([0.0, 1.0])
self.ax.set_ylim([0.0, 1.0])
# Set x and y axis labels
self.ax.set_ylabel("True Positive Rate")
self.ax.set_xlabel("False Positive Rate")
def _get_y_scores(self, X):
"""
The ``roc_curve`` metric requires target scores that can either be the
probability estimates of the positive class, confidence values or non-
thresholded measure of decisions (as returned by "decision_function").
This method computes the scores by resolving the estimator methods
        that retrieve these values.
.. todo:: implement confidence values metric.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features -- generally the test data
that is associated with y_true values.
"""
# The resolution order of scoring functions
attrs = ("predict_proba", "decision_function")
# Return the first resolved function
for attr in attrs:
try:
method = getattr(self.estimator, attr, None)
if method:
return method(X)
except AttributeError:
# Some Scikit-Learn estimators have both probability and
# decision functions but override __getattr__ and raise an
# AttributeError on access.
# Note that because of the ordering of our attrs above,
# estimators with both will *only* ever use probability.
continue
# If we've gotten this far, raise an error
raise ModelError(
"ROCAUC requires estimators with predict_proba or "
"decision_function methods."
)
def _score_micro_average(self, y, y_pred, classes, n_classes):
"""
Compute the micro average scores for the ROCAUC curves.
"""
# Convert y to binarized array for micro and macro scores
y = label_binarize(y, classes=classes)
if n_classes == 2:
y = np.hstack((1 - y, y))
# Compute micro-average
self.fpr[MICRO], self.tpr[MICRO], _ = roc_curve(y.ravel(), y_pred.ravel())
self.roc_auc[MICRO] = auc(self.fpr[MICRO], self.tpr[MICRO])
def _score_macro_average(self, n_classes):
"""
Compute the macro average scores for the ROCAUC curves.
"""
# Gather all FPRs
all_fpr = np.unique(np.concatenate([self.fpr[i] for i in range(n_classes)]))
avg_tpr = np.zeros_like(all_fpr)
# Compute the averages per class
for i in range(n_classes):
avg_tpr += np.interp(all_fpr, self.fpr[i], self.tpr[i])
# Finalize the average
avg_tpr /= n_classes
# Store the macro averages
self.fpr[MACRO] = all_fpr
self.tpr[MACRO] = avg_tpr
self.roc_auc[MACRO] = auc(self.fpr[MACRO], self.tpr[MACRO])
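# Note on the two averaging strategies implemented above: _score_micro_average
# pools every (sample, class) decision into a single binary problem by ravelling
# the binarized targets and the score matrix, so classes with more samples weigh
# more heavily; _score_macro_average instead interpolates each per-class curve
# onto a common grid of false positive rates and averages the true positive
# rates, giving every class equal weight regardless of support.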
##########################################################################
## Quick method for ROCAUC
##########################################################################
def roc_auc(
estimator,
X_train,
y_train,
X_test=None,
y_test=None,
ax=None,
micro=True,
macro=True,
per_class=True,
binary=False,
classes=None,
encoder=None,
is_fitted="auto",
force_model=False,
show=True,
**kwargs
):
"""ROCAUC
Receiver Operating Characteristic (ROC) curves are a measure of a
classifier's predictive quality that compares and visualizes the tradeoff
between the models' sensitivity and specificity. The ROC curve displays
the true positive rate on the Y axis and the false positive rate on the
X axis on both a global average and per-class basis. The ideal point is
therefore the top-left corner of the plot: false positives are zero and
true positives are one.
This leads to another metric, area under the curve (AUC), a computation
of the relationship between false positives and true positives. The higher
the AUC, the better the model generally is. However, it is also important
to inspect the "steepness" of the curve, as this describes the
maximization of the true positive rate while minimizing the false positive
rate. Generalizing "steepness" usually leads to discussions about
convexity, which we do not get into here.
Parameters
----------
estimator : estimator
A scikit-learn estimator that should be a classifier. If the model is
not a classifier, an exception is raised. If the internal model is not
fitted, it is fit when the visualizer is fitted, unless otherwise specified
by ``is_fitted``.
X_train : array-like, 2D
The table of instance data or independent variables that describe the outcome of
the dependent variable, y. Used to fit the visualizer and also to score the
visualizer if test splits are not specified.
y_train : array-like, 2D
The vector of target data or the dependent variable predicted by X. Used to fit
the visualizer and also to score the visualizer if test splits not specified.
X_test: array-like, 2D, default: None
The table of instance data or independent variables that describe the outcome of
the dependent variable, y. Used to score the visualizer if specified.
y_test: array-like, 1D, default: None
The vector of target data or the dependent variable predicted by X.
Used to score the visualizer if specified.
ax : matplotlib Axes, default: None
The axes to plot the figure on. If not specified the current axes will be
used (or generated if required).
micro : bool, default: True
Plot the micro-averages ROC curve, computed from the sum of all true
positives and false positives across all classes. Micro is not defined
for binary classification problems with estimators with only a
decision_function method.
macro : bool, default: True
Plot the macro-averages ROC curve, which simply takes the average of
curves across all classes. Macro is not defined for binary
classification problems with estimators with only a decision_function
method.
per_class : bool, default: True
Plot the ROC curves for each individual class. This should be set
to false if only the macro or micro average curves are required. For true
binary classifiers, setting per_class=False will plot the positive class
ROC curve, and per_class=True will use ``1-P(1)`` to compute the curve of
the negative class if only a decision_function method exists on the estimator.
binary : bool, default: False
This argument quickly resets the visualizer for true binary classification
by updating the micro, macro, and per_class arguments to False (do not use
in conjunction with those other arguments). Note that this is not a true
hyperparameter to the visualizer, it just collects other parameters into
a single, simpler argument.
    classes : list of str, default: None
The class labels to use for the legend ordered by the index of the sorted
classes discovered in the ``fit()`` method. Specifying classes in this
manner is used to change the class names to a more specific format or
to label encoded integer classes. Some visualizers may also use this
field to filter the visualization for specific classes. For more advanced
usage specify an encoder rather than class labels.
encoder : dict or LabelEncoder, default: None
A mapping of classes to human readable labels. Often there is a mismatch
between desired class labels and those contained in the target variable
passed to ``fit()`` or ``score()``. The encoder disambiguates this mismatch
ensuring that classes are labeled correctly in the visualization.
is_fitted : bool or str, default="auto"
Specify if the wrapped estimator is already fitted. If False, the estimator
will be fit when the visualizer is fit, otherwise, the estimator will not be
modified. If "auto" (default), a helper method will check if the estimator
is fitted before fitting it again.
force_model : bool, default: False
Do not check to ensure that the underlying estimator is a classifier. This
will prevent an exception when the visualizer is initialized but may result
in unexpected or unintended behavior.
show: bool, default: True
If True, calls ``show()``, which in turn calls ``plt.show()`` however you cannot
call ``plt.savefig`` from this signature, nor ``clear_figure``. If False, simply
calls ``finalize()``
kwargs : dict
Keyword arguments passed to the visualizer base classes.
Notes
-----
ROC curves are typically used in binary classification, and in fact the
Scikit-Learn ``roc_curve`` metric is only able to perform metrics for
binary classifiers. As a result it is necessary to binarize the output or
to use one-vs-rest or one-vs-all strategies of classification. The
visualizer does its best to handle multiple situations, but exceptions can
arise from unexpected models or outputs.
Another important point is the relationship of class labels specified on
initialization to those drawn on the curves. The classes are not used to
constrain ordering or filter curves; the ROC computation happens on the
unique values specified in the target vector to the ``score`` method. To
ensure the best quality visualization, do not use a LabelEncoder for this
and do not pass in class labels.
.. seealso:: https://bit.ly/2IORWO2
.. todo:: Allow the class list to filter the curves on the visualization.
Examples
--------
>>> from yellowbrick.classifier import ROCAUC
>>> from sklearn.linear_model import LogisticRegression
>>> data = load_data("occupancy")
>>> features = ["temp", "relative humidity", "light", "C02", "humidity"]
>>> X = data[features].values
>>> y = data.occupancy.values
>>> roc_auc(LogisticRegression(), X, y)
Returns
-------
viz : ROCAUC
Returns the fitted, finalized visualizer object
"""
# Instantiate the visualizer
visualizer = ROCAUC(
estimator=estimator,
ax=ax,
micro=micro,
macro=macro,
per_class=per_class,
binary=binary,
classes=classes,
encoder=encoder,
is_fitted=is_fitted,
force_model=force_model,
**kwargs
)
# Fit and transform the visualizer (calls draw)
visualizer.fit(X_train, y_train, **kwargs)
# Scores the visualizer with X_test and y_test if provided,
# X_train, y_train if not provided
if X_test is not None and y_test is not None:
visualizer.score(X_test, y_test)
else:
visualizer.score(X_train, y_train)
if show:
visualizer.show()
else:
visualizer.finalize()
# Return the visualizer
return visualizer
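# Minimal usage sketch (an illustrative addition, not part of the original
# module): exercises the quick method above on synthetic data. The dataset
# shape and the choice of estimator are assumptions made for demonstration.
if __name__ == "__main__":
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression
    from sklearn.model_selection import train_test_split
    X_demo, y_demo = make_classification(
        n_samples=500, n_classes=3, n_informative=4, random_state=0
    )
    X_tr, X_te, y_tr, y_te = train_test_split(X_demo, y_demo, random_state=0)
    # Draws the micro, macro and per-class ROC curves and shows the figure.
    roc_auc(LogisticRegression(max_iter=1000), X_tr, y_tr, X_test=X_te, y_test=y_te)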
| apache-2.0 | 9,541,486,833,438,028 | 39.073103 | 88 | 0.620521 | false |
linkslice/ZenPacks.community.EMCIsilon | ZenPacks/community/EMCIsilon/modeler/plugins/community/snmp/EMCIsilonDiskPerfs.py | 1 | 1526 | from Products.DataCollector.plugins.CollectorPlugin import (
SnmpPlugin, GetTableMap,
)
class EMCIsilonDiskPerfs(SnmpPlugin):
relname = 'emcisilon_diskperfs'
modname = 'ZenPacks.community.EMCIsilon.EMCIsilonDiskPerf'
snmpGetTableMaps = (
GetTableMap(
'diskPerfTable', '.1.3.6.1.4.1.12124.2.2.52.1', {
'.1': 'diskPerfBay',
'.2': 'diskPerfDeviceName',
'.3': 'diskperfOpsPerSecond',
'.4': 'diskperfInBitsPerSecond',
'.5': 'diskperfOutBitsPerSecond',
}
),
)
def process(self, device, results, log):
emcisilon_diskperfs = results[1].get('diskPerfTable', {})
rm = self.relMap()
for snmpindex, row in emcisilon_diskperfs.items():
name = row.get('diskPerfDeviceName')
if not name:
log.warn('Skipping empty disk perf stats')
continue
log.debug('found disk perf stats: %s at %s', name, snmpindex.strip('.'))
rm.append(self.objectMap({
'id': self.prepId(name),
'title': name,
'snmpindex': snmpindex.strip('.'),
'disk_perf_ops_per_second': row.get('diskperfOpsPerSecond'),
'disk_perf_in_bits_per_second': row.get('diskperfInBitsPerSecond'),
'disk_perf_out_bits_per_second': row.get('diskperfOutBitsPerSecond'),
}))
log.debug(rm)
return rm
| mit | 513,000,661,527,704,640 | 34.488372 | 85 | 0.542595 | false |
ellak-monades-aristeias/enhydris | enhydris/hcore/migrations/0006_offset_help.py | 1 | 2090 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('hcore', '0005_remove_is_active'),
]
operations = [
migrations.AlterField(
model_name='timeseries',
name='timestamp_offset_minutes',
field=models.IntegerField(help_text='If unsure, set this to zero. It indicates the difference of what is shown from what is meant. For example, if for an hourly time series it is -5, then 2015-10-14 11:00 means the interval from 2015-10-14 09:55 to 2015-10-14 10:55. -1440 is common for daily time series.', null=True, blank=True),
),
migrations.AlterField(
model_name='timeseries',
name='timestamp_offset_months',
field=models.SmallIntegerField(help_text='If unsure, set this to 1 for monthly, 12 for annual, and zero otherwise. For a monthly time series, an offset of -475 minutes and 1 month means that 2003-11-01 00:00 (normally shown as 2003-11) denotes the interval 2003-10-31 18:05 to 2003-11-30 18:05.', null=True, blank=True),
),
migrations.AlterField(
model_name='timeseries',
name='timestamp_rounding_minutes',
field=models.PositiveIntegerField(help_text='For an hourly time series whose timestamps end in :00, set this to zero; if they end in :12, set it to 12. For a ten-minute time series with timestamps ending in :12, :22, :32, etc., set it to 2. For daily ending at 08:00, set it to 480. Leave empty if timestamps are irregular.', null=True, blank=True),
),
migrations.AlterField(
model_name='timeseries',
name='timestamp_rounding_months',
field=models.PositiveSmallIntegerField(help_text='Set this to zero, except for annual time series, indicating the difference from January; for example, set it to 9 if the timestamps use a hydrological year starting in October. Leave empty if timestamps are irregular.', null=True, blank=True),
),
]
| agpl-3.0 | -1,065,025,794,875,866,500 | 60.470588 | 362 | 0.673206 | false |
crackcell/www-entity-mining | software/game/17173.com/crawler.py | 1 | 1873 | #!/usr/bin/env python
# -*- encoding: utf-8; indent-tabs-mode: nil -*-
"""
crawler
~~~~~~~
Crawls the 17173.com game list page and extracts game titles.
:copyright: (c) 2015 Menglong TAN.
"""
import os
import sys
import re
import urllib2
import time
import BeautifulSoup
import logging
logger = logging.getLogger(__file__)
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
fmt = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
ch.setFormatter(fmt)
logger.addHandler(ch)
class Star(object):
def __init__(self):
self.name = ""
self.gender = ""
self.nation = ""
self.birth = ""
self.horoscope = ""
self.height = ""
def __repr__(self):
return "%s\t%s\t%s\t%s\t%s\t%s" % (self.name, self.gender, self.nation,
self.birth, self.horoscope,
self.height)
def extract_list(url):
headers = {"User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9.1) Gecko/20090624 Firefox/3.5"}
req = urllib2.Request(url, headers=headers)
resp = None
try:
resp = urllib2.urlopen(req)
except urllib2.HTTPError, e:
print "Error Code:", e.code
return
except urllib2.URLError, e:
print "Error Reason:", e.reason
return
soup = BeautifulSoup.BeautifulSoup(resp.read())
games = []
cnt = 0
for html in soup.findAll("a", attrs={"class":"link"}):
games.append(str(html.contents[0]))
cnt += 1
logger.info("progress: %d", cnt)
return games
if __name__ == "__main__":
list_url = "http://www.17173.com/zq/all.shtml"
f = open("games.dat", "w+")
logger.info("progress")
games = extract_list(list_url)
for game in games:
f.write(game + "\n")
f.flush()
f.close()
| bsd-3-clause | -3,796,389,506,530,348,000 | 23.973333 | 116 | 0.566471 | false |
Wonjuseo/Project101 | others/sine_RNN.py | 1 | 4425 | import tensorflow as tf
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
def sin(x, T=100):
return np.sin(2.0*np.pi*x/T)
def problem(T=100,ampl=0.05):
x = np.arange(0,2*T+1)
noise = ampl*np.random.uniform(low=-1.0,high=1.0,size=len(x))
return sin(x) + noise
class EarlyStopping():
def __init__(self,patience=0,verbose=0):
self._step = 0
self._loss = float('inf')
self.patience = patience
self.verbose = verbose
def validate(self,loss):
if self._loss <loss:
self._step+=1
if self._step>self.patience:
if self.verbose:
print('early stopping')
return True
else:
self._step = 0
self._loss = loss
return False
def inference(x,n_batch,maxlen=None,n_hidden=None,n_out=None):
def weight_variable(shape):
initial = tf.truncated_normal(shape,stddev=0.01)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.zeros(shape,dtype=tf.float32)
return tf.Variable(initial)
cell = tf.contrib.rnn.GRUCell(n_hidden)
initial_state = cell.zero_state(n_batch,tf.float32)
state = initial_state
outputs= []
with tf.variable_scope('RNN'):
for t in range(maxlen):
if t>0:
tf.get_variable_scope().reuse_variables()
(cell_output,state) = cell(x[:,t,:],state)
outputs.append(cell_output)
output = outputs[-1]
V = weight_variable([n_hidden,n_out])
c = bias_variable([n_out])
y = tf.matmul(output,V)+c
return y
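# Note: `inference` unrolls a single GRU cell by hand for `maxlen` time steps,
# reusing the cell variables after the first step via
# tf.get_variable_scope().reuse_variables(), and maps the final hidden state to
# a single output through the learned linear layer (V, c).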
def loss(y,t):
mse = tf.reduce_mean(tf.square(y-t))
return mse
def training(loss):
optimizer = tf.train.AdamOptimizer(learning_rate=0.001,beta1=0.9,beta2=0.999)
train_step = optimizer.minimize(loss)
return train_step
T=100
sine_data = problem(T)
length = 2*T
maxlen = 25
data = []
target = []
for i in range(0,length-maxlen+1):
data.append(sine_data[i:i+maxlen])
target.append(sine_data[i+maxlen])
X = np.array(data).reshape(len(data),maxlen,1) # 1 dimension
Y = np.array(target).reshape(len(data),1)
X = np.zeros((len(data),maxlen,1),dtype=float)
Y = np.zeros((len(data),1),dtype=float)
for i, seq in enumerate(data):
for t, value in enumerate(seq):
X[i,t,0] = value
Y[i,0] = target[i]
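# Each training example is a sliding window of `maxlen` consecutive samples of
# the noisy sine wave, and its target is the single value that follows the
# window, so the network learns one-step-ahead prediction.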
train_data = int(len(data)*0.9)
test_data = len(data)-train_data
X_train, X_test, Y_train, Y_test = train_test_split(X,Y,test_size=test_data)
n_in = len(X[0][0])
n_hidden = 20
n_out = len(Y[0])
x = tf.placeholder(tf.float32,shape=[None,maxlen,n_in])
t = tf.placeholder(tf.float32,shape=[None,n_out])
n_batch = tf.placeholder(tf.int32)
y = inference(x,n_batch,maxlen=maxlen,n_hidden=n_hidden,n_out=n_out)
loss_fun = loss(y,t)
train_step = training(loss_fun)
epochs = 500
batch_size = 10
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
n_batches = train_data//batch_size
early_stopping = EarlyStopping(patience=10,verbose=1)
history = {'val_loss':[],'val_acc':[]}
for epoch in range(epochs):
X_, Y_ = shuffle(X_train,Y_train)
for i in range(n_batches):
start = i*batch_size
end = start + batch_size
sess.run(train_step,feed_dict={x:X_[start:end],t:Y_[start:end],n_batch:batch_size})
val_loss = loss_fun.eval(session=sess,feed_dict={x:X_test,t:Y_test,n_batch:test_data})
history['val_loss'].append(val_loss)
print('epochs:',epoch,'validation_loss:',val_loss)
#if early_stopping.validate(val_loss):
# break
truncate = maxlen
Z = X[:1]
original = [sine_data[i] for i in range(maxlen)]
predicted = [None for i in range(maxlen)]
for i in range(length-maxlen+1):
z_=Z[-1:]
y_=y.eval(session=sess,feed_dict={x:Z[-1:],n_batch:1})
sequence_ = np.concatenate((z_.reshape(maxlen,n_in)[1:],y_),axis=0).reshape(1,maxlen,n_in)
Z = np.append(Z,sequence_,axis=0)
predicted.append(y_.reshape(-1))
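# Closed-loop generation: starting from the first true window of the series,
# each one-step prediction is appended to the end of the window and fed back as
# the next input, so `predicted` (after the first `maxlen` placeholder entries)
# is produced entirely from the model's own outputs.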
import matplotlib.pyplot as plt
plt.rc('font',family='serif')
plt.figure()
plt.plot(problem(T,ampl=0),linestyle='dotted',color='#aaaaaa')
plt.plot(original,linestyle='dashed',color='black')
plt.plot(predicted,color='black')
plt.show()
| apache-2.0 | -1,230,086,603,572,883,200 | 24.981707 | 94 | 0.608588 | false |
JungeAlexander/cocoscore | ci/bootstrap.py | 1 | 2124 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import os
import sys
from os.path import abspath
from os.path import dirname
from os.path import exists
from os.path import join
if __name__ == "__main__":
base_path = dirname(dirname(abspath(__file__)))
print("Project path: {0}".format(base_path))
env_path = join(base_path, ".tox", "bootstrap")
if sys.platform == "win32":
bin_path = join(env_path, "Scripts")
else:
bin_path = join(env_path, "bin")
if not exists(env_path):
import subprocess
print("Making bootstrap env in: {0} ...".format(env_path))
try:
subprocess.check_call(["virtualenv", env_path])
except subprocess.CalledProcessError:
subprocess.check_call([sys.executable, "-m", "virtualenv", env_path])
print("Installing `jinja2` into bootstrap environment...")
subprocess.check_call([join(bin_path, "pip"), "install", "jinja2"])
python_executable = join(bin_path, "python")
if not os.path.samefile(python_executable, sys.executable):
print("Re-executing with: {0}".format(python_executable))
os.execv(python_executable, [python_executable, __file__])
import jinja2
import subprocess
jinja = jinja2.Environment(
loader=jinja2.FileSystemLoader(join(base_path, "ci", "templates")),
trim_blocks=True,
lstrip_blocks=True,
keep_trailing_newline=True
)
tox_environments = [
line.strip()
# WARNING: 'tox' must be installed globally or in the project's virtualenv
for line in subprocess.check_output(['tox', '--listenvs'], universal_newlines=True).splitlines()
]
tox_environments = [line for line in tox_environments if line not in ['clean', 'report', 'docs', 'check']]
for name in os.listdir(join("ci", "templates")):
with open(join(base_path, name), "w") as fh:
fh.write(jinja.get_template(name).render(tox_environments=tox_environments))
print("Wrote {}".format(name))
print("DONE.")
| mit | 5,956,504,451,291,047,000 | 35.62069 | 110 | 0.636064 | false |