| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 2–1.05M | stringlengths 5–104 | stringlengths 4–251 | stringclasses 1 value | stringclasses 15 values | int32 2–1.05M |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2009 Edgewall Software
# Copyright (C) 2007 Alec Thomas <[email protected]>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Alec Thomas <[email protected]>
from fnmatch import fnmatch
from itertools import groupby
import os
from trac.core import *
from trac.config import Option
from trac.perm import PermissionSystem, IPermissionPolicy
ConfigObj = None
try:
from configobj import ConfigObj
except ImportError:
pass
class AuthzPolicy(Component):
"""Permission policy using an authz-like configuration file.
Refer to SVN documentation for syntax of the authz file. Groups are
supported.
As the fine-grained permissions brought by this permission policy are
often used to complement the other permission policies (like the
`DefaultPermissionPolicy`), there's no need to redefine all the
permissions here. Only additional rights or restrictions should be added.
=== Installation ===
Note that this plugin requires the `configobj` package:
http://www.voidspace.org.uk/python/configobj.html
You should be able to install it by doing a simple `easy_install configobj`.
Enabling this policy requires listing it in `trac.ini`:
{{{
[trac]
permission_policies = AuthzPolicy, DefaultPermissionPolicy
[authz_policy]
authz_file = conf/authzpolicy.conf
}}}
This means that the `AuthzPolicy` permissions will be checked first, and
only if no rule is found will the `DefaultPermissionPolicy` be used.
=== Configuration ===
The `authzpolicy.conf` file is a `.ini` style configuration file.
- Each section of the config is a glob pattern used to match against a
Trac resource descriptor. These descriptors are in the form:
{{{
<realm>:<id>@<version>[/<realm>:<id>@<version> ...]
}}}
Resources are ordered left to right, from parent to child. If any
component is inapplicable, `*` is substituted. If the version pattern is
not specified explicitly, the all-versions pattern (`@*`) is added implicitly.
Example: Match the WikiStart page
{{{
[wiki:*]
[wiki:WikiStart*]
[wiki:WikiStart@*]
[wiki:WikiStart]
}}}
Example: Match the attachment `wiki:WikiStart@117/attachment/FOO.JPG@*`
on WikiStart
{{{
[wiki:*]
[wiki:WikiStart*]
[wiki:WikiStart@*]
[wiki:WikiStart@*/attachment/*]
[wiki:WikiStart@117/attachment/FOO.JPG]
}}}
- Sections are checked against the current Trac resource '''IN ORDER''' of
appearance in the configuration file. '''ORDER IS CRITICAL'''.
- Once a section matches, the current username is matched, '''IN ORDER''',
against the keys of the section. If a key is prefixed with a `@`, it is
treated as a group. If a key is prefixed with a `!`, the permission is
denied rather than granted. The username will match any of 'anonymous',
'authenticated', <username> or '*', using normal Trac permission rules.
Example configuration:
{{{
[groups]
administrators = athomas
[*/attachment:*]
* = WIKI_VIEW, TICKET_VIEW
[wiki:WikiStart@*]
@administrators = WIKI_ADMIN
anonymous = WIKI_VIEW
* = WIKI_VIEW
# Deny access to page templates
[wiki:PageTemplates/*]
* =
# Match everything else
[*]
@administrators = TRAC_ADMIN
anonymous = BROWSER_VIEW, CHANGESET_VIEW, FILE_VIEW, LOG_VIEW,
MILESTONE_VIEW, POLL_VIEW, REPORT_SQL_VIEW, REPORT_VIEW, ROADMAP_VIEW,
SEARCH_VIEW, TICKET_CREATE, TICKET_MODIFY, TICKET_VIEW, TIMELINE_VIEW,
WIKI_CREATE, WIKI_MODIFY, WIKI_VIEW
# Give authenticated users some extra permissions
authenticated = REPO_SEARCH, XML_RPC
}}}
"""
implements(IPermissionPolicy)
authz_file = Option('authz_policy', 'authz_file', None,
'Location of authz policy configuration file.')
authz = None
authz_mtime = None
# IPermissionPolicy methods
def check_permission(self, action, username, resource, perm):
if ConfigObj is None:
self.log.error('configobj package not found')
return None
if self.authz_file and (not self.authz_mtime or
os.path.getmtime(self.get_authz_file()) > self.authz_mtime):
self.parse_authz()
resource_key = self.normalise_resource(resource)
self.log.debug('Checking %s on %s', action, resource_key)
permissions = self.authz_permissions(resource_key, username)
if permissions is None:
return None # no match, can't decide
elif permissions == ['']:
return False # all actions are denied
# FIXME: expand all permissions once for all
ps = PermissionSystem(self.env)
for deny, perms in groupby(permissions,
key=lambda p: p.startswith('!')):
if deny and action in ps.expand_actions([p[1:] for p in perms]):
return False # action is explicitly denied
elif action in ps.expand_actions(perms):
return True # action is explicitly granted
return None # no match for action, can't decide
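# Illustrative sketch (not part of the original module): how the groupby-based
# loop above walks an ordered permission list in runs of denied ('!'-prefixed)
# and granted entries. Expansion of meta-permissions via PermissionSystem is
# left out, and the permission names are hypothetical.
from itertools import groupby

def _sketch_check(action, permissions):
    for deny, perms in groupby(permissions, key=lambda p: p.startswith('!')):
        perms = list(perms)
        if deny and action in [p[1:] for p in perms]:
            return False  # action is explicitly denied
        elif not deny and action in perms:
            return True  # action is explicitly granted
    return None  # no match for action, can't decide

assert _sketch_check('TICKET_MODIFY', ['!TICKET_MODIFY', 'TICKET_VIEW']) is False
assert _sketch_check('TICKET_VIEW', ['!TICKET_MODIFY', 'TICKET_VIEW']) is True
assert _sketch_check('WIKI_ADMIN', ['!TICKET_MODIFY', 'TICKET_VIEW']) is None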
# Internal methods
def get_authz_file(self):
f = self.authz_file
return f if os.path.isabs(f) else os.path.join(self.env.path, f)
def parse_authz(self):
self.env.log.debug('Parsing authz security policy %s' %
self.get_authz_file())
self.authz = ConfigObj(self.get_authz_file())
self.groups_by_user = {}
for group, users in self.authz.get('groups', {}).iteritems():
if isinstance(users, basestring):
users = [users]
for user in users:
self.groups_by_user.setdefault(user, set()).add('@' + group)
self.authz_mtime = os.path.getmtime(self.get_authz_file())
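# Illustrative sketch (hypothetical [groups] data, plain Python 3): parse_authz()
# above inverts the [groups] section into a user -> set-of-'@group' mapping,
# which authz_permissions() consults when matching section keys.
groups = {'administrators': ['athomas', 'jdoe'], 'qa': 'jdoe'}
groups_by_user = {}
for group, users in groups.items():
    if isinstance(users, str):
        users = [users]
    for user in users:
        groups_by_user.setdefault(user, set()).add('@' + group)
assert groups_by_user['jdoe'] == {'@administrators', '@qa'}
assert groups_by_user['athomas'] == {'@administrators'}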
def normalise_resource(self, resource):
def flatten(resource):
if not resource or not (resource.realm or resource.id):
return []
# XXX Due to the mixed functionality in resource we can end up with
# ticket, ticket:1, ticket:1@10. This code naively collapses all
# subsets of the parent resource into one. eg. ticket:1@10
parent = resource.parent
while parent and (resource.realm == parent.realm or \
(resource.realm == parent.realm and resource.id == parent.id)):
parent = parent.parent
if parent:
parent = flatten(parent)
else:
parent = []
return parent + ['%s:%s@%s' % (resource.realm or '*',
resource.id or '*',
resource.version or '*')]
return '/'.join(flatten(resource))
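# Illustrative sketch (hypothetical values): normalise_resource() flattens a
# resource chain into a single key like the one below; authz_permissions()
# (next method) then matches that key against each section glob with fnmatch,
# appending '@*' when the section has no version part.
from fnmatch import fnmatch

resource_key = 'wiki:WikiStart@*/attachment:FOO.JPG@*'
section = 'wiki:WikiStart@*/attachment:*'  # as it would appear in authzpolicy.conf
assert fnmatch(resource_key, section + '@*')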
def authz_permissions(self, resource_key, username):
# TODO: Handle permission negation in sections. eg. "if in this
# ticket, remove TICKET_MODIFY"
valid_users = ['*', 'anonymous']
if username and username != 'anonymous':
valid_users = ['*', 'authenticated', username]
for resource_section in [a for a in self.authz.sections
if a != 'groups']:
resource_glob = resource_section
if '@' not in resource_glob:
resource_glob += '@*'
if fnmatch(resource_key, resource_glob):
section = self.authz[resource_section]
for who, permissions in section.iteritems():
if who in valid_users or \
who in self.groups_by_user.get(username, []):
self.env.log.debug('%s matched section %s for user %s'
% (resource_key, resource_glob, username))
if isinstance(permissions, basestring):
return [permissions]
else:
return permissions
return None
| dokipen/trac | tracopt/perm/authz_policy.py | Python | bsd-3-clause | 8,731 |
from __future__ import unicode_literals
from datetime import timedelta
import logging
import os
import re
import time
from django.conf import settings
from django.db import models
from django.db.models import Q
from django.utils.crypto import get_random_string
from django.utils.encoding import python_2_unicode_compatible
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
import requests
from mama_cas.compat import Session
from mama_cas.exceptions import InvalidProxyCallback
from mama_cas.exceptions import InvalidRequest
from mama_cas.exceptions import InvalidService
from mama_cas.exceptions import InvalidTicket
from mama_cas.exceptions import UnauthorizedServiceProxy
from mama_cas.exceptions import ValidationError
from mama_cas.request import SingleSignOutRequest
from mama_cas.services import get_logout_url
from mama_cas.services import logout_allowed
from mama_cas.services import service_allowed
from mama_cas.services import proxy_allowed
from mama_cas.services import proxy_callback_allowed
from mama_cas.utils import add_query_params
from mama_cas.utils import clean_service_url
from mama_cas.utils import is_scheme_https
from mama_cas.utils import match_service
logger = logging.getLogger(__name__)
class TicketManager(models.Manager):
def create_ticket(self, ticket=None, **kwargs):
"""
Create a new ``Ticket``. Additional arguments are passed to the
``create()`` function. Return the newly created ``Ticket``.
"""
if not ticket:
ticket = self.create_ticket_str()
if 'service' in kwargs:
kwargs['service'] = clean_service_url(kwargs['service'])
if 'expires' not in kwargs:
expires = now() + timedelta(seconds=self.model.TICKET_EXPIRE)
kwargs['expires'] = expires
t = self.create(ticket=ticket, **kwargs)
logger.debug("Created %s %s" % (t.name, t.ticket))
return t
def create_ticket_str(self, prefix=None):
"""
Generate a sufficiently opaque ticket string to ensure the ticket is
not guessable. If a prefix is provided, prepend it to the string.
"""
if not prefix:
prefix = self.model.TICKET_PREFIX
return "%s-%d-%s" % (prefix, int(time.time()),
get_random_string(length=self.model.TICKET_RAND_LEN))
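# Illustrative sketch (hypothetical values, assuming the default TICKET_RAND_LEN
# of 32): the strings produced above look like
# '<PREFIX>-<unix timestamp>-<random characters>', which is exactly the shape
# that Ticket.TICKET_RE (defined further down) accepts.
import re

example = 'ST-1419442540-' + 'a' * 32
assert re.match(r'^[A-Z]{2,3}-[0-9]{10,}-[a-zA-Z0-9]{32}$', example)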
def validate_ticket(self, ticket, service, renew=False, require_https=False):
"""
Given a ticket string and service identifier, validate the
corresponding ``Ticket``. If validation succeeds, return the
``Ticket``. If validation fails, raise an appropriate error.
If ``renew`` is ``True``, ``ServiceTicket`` validation will
only succeed if the ticket was issued from the presentation
of the user's primary credentials.
If ``require_https`` is ``True``, ``ServiceTicket`` validation
will only succeed if the service URL scheme is HTTPS.
"""
if not ticket:
raise InvalidRequest("No ticket string provided")
if not self.model.TICKET_RE.match(ticket):
raise InvalidTicket("Ticket string %s is invalid" % ticket)
try:
t = self.get(ticket=ticket)
except self.model.DoesNotExist:
raise InvalidTicket("Ticket %s does not exist" % ticket)
if t.is_consumed():
raise InvalidTicket("%s %s has already been used" %
(t.name, ticket))
if t.is_expired():
raise InvalidTicket("%s %s has expired" % (t.name, ticket))
if not service:
raise InvalidRequest("No service identifier provided")
if require_https and not is_scheme_https(service):
raise InvalidService("Service %s is not HTTPS" % service)
if not service_allowed(service):
raise InvalidService("Service %s is not a valid %s URL" %
(service, t.name))
try:
if not match_service(t.service, service):
raise InvalidService("%s %s for service %s is invalid for "
"service %s" % (t.name, ticket, t.service, service))
except AttributeError:
pass
try:
if renew and not t.is_primary():
raise InvalidTicket("%s %s was not issued via primary "
"credentials" % (t.name, ticket))
except AttributeError:
pass
logger.debug("Validated %s %s" % (t.name, ticket))
return t
def delete_invalid_tickets(self):
"""
Delete consumed or expired ``Ticket``s that are not referenced
by other ``Ticket``s. Invalid tickets are no longer valid for
authentication and can be safely deleted.
A custom management command is provided that executes this method
on all applicable models by running ``manage.py cleanupcas``.
"""
for ticket in self.filter(Q(consumed__isnull=False) |
Q(expires__lte=now())).order_by('-expires'):
try:
ticket.delete()
except models.ProtectedError:
pass
def consume_tickets(self, user):
"""
Consume all valid ``Ticket``s for a specified user. This is run
when the user logs out to ensure all issued tickets are no longer
valid for future authentication attempts.
"""
for ticket in self.filter(user=user, consumed__isnull=True,
expires__gt=now()):
ticket.consume()
@python_2_unicode_compatible
class Ticket(models.Model):
"""
``Ticket`` is an abstract base class implementing common methods
and fields for CAS tickets.
"""
TICKET_EXPIRE = getattr(settings, 'MAMA_CAS_TICKET_EXPIRE', 90)
TICKET_RAND_LEN = getattr(settings, 'MAMA_CAS_TICKET_RAND_LEN', 32)
TICKET_RE = re.compile("^[A-Z]{2,3}-[0-9]{10,}-[a-zA-Z0-9]{%d}$" % TICKET_RAND_LEN)
ticket = models.CharField(_('ticket'), max_length=255, unique=True)
user = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('user'),
on_delete=models.CASCADE)
expires = models.DateTimeField(_('expires'))
consumed = models.DateTimeField(_('consumed'), null=True)
objects = TicketManager()
class Meta:
abstract = True
def __str__(self):
return self.ticket
@property
def name(self):
return self._meta.verbose_name
def consume(self):
"""
Consume a ``Ticket`` by populating the ``consumed`` field with
the current datetime. A consumed ``Ticket`` is invalid for future
authentication attempts.
"""
self.consumed = now()
self.save()
def is_consumed(self):
"""
Check a ``Ticket``'s consumed state, consuming it in the process.
"""
if self.consumed is None:
self.consume()
return False
return True
def is_expired(self):
"""
Check a ``Ticket``'s expired state. Return ``True`` if the ticket is
expired, and ``False`` otherwise.
"""
return self.expires <= now()
class ServiceTicketManager(TicketManager):
def request_sign_out(self, user):
"""
Send a single logout request to each service accessed by a
specified user. This is called at logout when single logout
is enabled.
If requests-futures is installed, asynchronous requests will
be sent. Otherwise, synchronous requests will be sent.
"""
session = Session()
for ticket in self.filter(user=user, consumed__gte=user.last_login):
ticket.request_sign_out(session=session)
class ServiceTicket(Ticket):
"""
(3.1) A ``ServiceTicket`` is used by the client as a credential to
obtain access to a service. It is obtained upon a client's presentation
of credentials and a service identifier to /login.
"""
TICKET_PREFIX = 'ST'
service = models.CharField(_('service'), max_length=255)
primary = models.BooleanField(_('primary'), default=False)
objects = ServiceTicketManager()
class Meta:
verbose_name = _('service ticket')
verbose_name_plural = _('service tickets')
def is_primary(self):
"""
Check the credential origin for a ``ServiceTicket``. If the ticket was
issued from the presentation of the user's primary credentials,
return ``True``, otherwise return ``False``.
"""
if self.primary:
return True
return False
def request_sign_out(self, session=requests):
"""
Send a POST request to the ``ServiceTicket``'s logout URL to
request sign-out.
"""
if logout_allowed(self.service):
request = SingleSignOutRequest(context={'ticket': self})
url = get_logout_url(self.service) or self.service
session.post(url, data={'logoutRequest': request.render_content()})
logger.info("Single sign-out request sent to %s" % url)
class ProxyTicket(Ticket):
"""
(3.2) A ``ProxyTicket`` is used by a service as a credential to obtain
access to a back-end service on behalf of a client. It is obtained upon
a service's presentation of a ``ProxyGrantingTicket`` and a service
identifier.
"""
TICKET_PREFIX = 'PT'
service = models.CharField(_('service'), max_length=255)
granted_by_pgt = models.ForeignKey('ProxyGrantingTicket',
verbose_name=_('granted by proxy-granting ticket'),
on_delete=models.CASCADE)
class Meta:
verbose_name = _('proxy ticket')
verbose_name_plural = _('proxy tickets')
class ProxyGrantingTicketManager(TicketManager):
def create_ticket(self, service, pgturl, **kwargs):
"""
When a ``pgtUrl`` parameter is provided to ``/serviceValidate`` or
``/proxyValidate``, attempt to create a new ``ProxyGrantingTicket``.
If validation succeeds, create and return the ``ProxyGrantingTicket``.
If validation fails, return ``None``.
"""
pgtid = self.create_ticket_str()
pgtiou = self.create_ticket_str(prefix=self.model.IOU_PREFIX)
try:
self.validate_callback(service, pgturl, pgtid, pgtiou)
except ValidationError as e:
logger.warning("%s %s" % (e.code, e))
return None
else:
# pgtUrl validation succeeded, so create a new PGT with the
# previously generated ticket strings
return super(ProxyGrantingTicketManager, self).create_ticket(ticket=pgtid, iou=pgtiou, **kwargs)
def validate_callback(self, service, pgturl, pgtid, pgtiou):
"""Verify the provided proxy callback URL."""
if not proxy_allowed(service):
raise UnauthorizedServiceProxy("%s is not authorized to use proxy authentication" % service)
if not is_scheme_https(pgturl):
raise InvalidProxyCallback("Proxy callback %s is not HTTPS" % pgturl)
if not proxy_callback_allowed(service, pgturl):
raise InvalidProxyCallback("%s is not an authorized proxy callback URL" % pgturl)
# Verify that the SSL certificate is valid
verify = os.environ.get('REQUESTS_CA_BUNDLE', True)
try:
requests.get(pgturl, verify=verify, timeout=5)
except requests.exceptions.SSLError:
raise InvalidProxyCallback("SSL certificate validation failed for proxy callback %s" % pgturl)
except requests.exceptions.RequestException as e:
raise InvalidProxyCallback(e)
# Callback certificate appears valid, so send the ticket strings
pgturl = add_query_params(pgturl, {'pgtId': pgtid, 'pgtIou': pgtiou})
try:
response = requests.get(pgturl, verify=verify, timeout=5)
except requests.exceptions.RequestException as e:
raise InvalidProxyCallback(e)
try:
response.raise_for_status()
except requests.exceptions.HTTPError as e:
raise InvalidProxyCallback("Proxy callback %s returned %s" % (pgturl, e))
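# Illustrative sketch (hypothetical values, Python 3): after the certificate
# check above, the callback URL is fetched once more with both ticket strings
# attached as query parameters; add_query_params is assumed to behave roughly
# like a plain urlencode append.
from urllib.parse import urlencode

pgturl = 'https://www.example.com/pgtcallback'
params = {'pgtId': 'PGT-1419442540-' + 'a' * 32,
          'pgtIou': 'PGTIOU-1419442540-' + 'b' * 32}
print(pgturl + '?' + urlencode(params))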
class ProxyGrantingTicket(Ticket):
"""
(3.3) A ``ProxyGrantingTicket`` is used by a service to obtain proxy
tickets for obtaining access to a back-end service on behalf of a
client. It is obtained upon validation of a ``ServiceTicket`` or a
``ProxyTicket``.
"""
TICKET_PREFIX = 'PGT'
IOU_PREFIX = 'PGTIOU'
TICKET_EXPIRE = getattr(settings, 'SESSION_COOKIE_AGE')
iou = models.CharField(_('iou'), max_length=255, unique=True)
granted_by_st = models.ForeignKey(ServiceTicket, null=True, blank=True,
on_delete=models.PROTECT,
verbose_name=_('granted by service ticket'))
granted_by_pt = models.ForeignKey(ProxyTicket, null=True, blank=True,
on_delete=models.PROTECT,
verbose_name=_('granted by proxy ticket'))
objects = ProxyGrantingTicketManager()
class Meta:
verbose_name = _('proxy-granting ticket')
verbose_name_plural = _('proxy-granting tickets')
def is_consumed(self):
"""Check a ``ProxyGrantingTicket``'s consumed state."""
return self.consumed is not None
| orbitvu/django-mama-cas | mama_cas/models.py | Python | bsd-3-clause | 13,586 |
"""
Model and manager used by the two-step (sign up, then activate)
workflow. If you're not using that workflow, you don't need to have
'registration' in your INSTALLED_APPS.
This is provided primarily for backwards-compatibility with existing
installations; new installs of django-registration should look into
the HMAC activation workflow in registration.backends.hmac, which
provides a two-step process but requires no models or storage of the
activation key.
"""
import datetime
import hashlib
import re
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import models
from django.db import transaction
from django.template.loader import render_to_string
from django.utils.crypto import get_random_string
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
SHA1_RE = re.compile('^[a-f0-9]{40}$')
class RegistrationManager(models.Manager):
"""
Custom manager for the ``RegistrationProfile`` model.
The methods defined here provide shortcuts for account creation
and activation (including generation and emailing of activation
keys), and for cleaning out expired inactive accounts.
"""
def activate_user(self, activation_key):
"""
Validate an activation key and activate the corresponding
``User`` if valid.
If the key is valid and has not expired, return the ``User``
after activating.
If the key is not valid or has expired, return ``False``.
If the key is valid but the ``User`` is already active,
return ``False``.
To prevent reactivation of an account which has been
deactivated by site administrators, the activation key is
reset to the string constant ``RegistrationProfile.ACTIVATED``
after successful activation.
"""
# Make sure the key we're trying conforms to the pattern of a
# SHA1 hash; if it doesn't, no point trying to look it up in
# the database.
if SHA1_RE.search(activation_key):
try:
profile = self.get(activation_key=activation_key)
except self.model.DoesNotExist:
return False
if not profile.activation_key_expired():
user = profile.user
user.is_active = True
user.save()
profile.activation_key = self.model.ACTIVATED
profile.save()
return user
return False
def create_inactive_user(self, username, email, password,
site, send_email=True):
"""
Create a new, inactive ``User``, generate a
``RegistrationProfile`` and email its activation key to the
``User``, returning the new ``User``.
By default, an activation email will be sent to the new
user. To disable this, pass ``send_email=False``.
"""
User = get_user_model()
user_kwargs = {
User.USERNAME_FIELD: username,
'email': email,
'password': password,
}
new_user = User.objects.create_user(**user_kwargs)
new_user.is_active = False
new_user.save()
registration_profile = self.create_profile(new_user)
if send_email:
registration_profile.send_activation_email(site)
return new_user
create_inactive_user = transaction.atomic(create_inactive_user)
def create_profile(self, user):
"""
Create a ``RegistrationProfile`` for a given
``User``, and return the ``RegistrationProfile``.
The activation key for the ``RegistrationProfile`` will be a
SHA1 hash, generated from a combination of the ``User``'s
username and a random salt.
"""
User = get_user_model()
username = str(getattr(user, User.USERNAME_FIELD))
hash_input = (get_random_string(5) + username).encode('utf-8')
activation_key = hashlib.sha1(hash_input).hexdigest()
return self.create(user=user,
activation_key=activation_key)
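# Illustrative sketch (hypothetical salt and username): the activation key is a
# plain hex SHA1 digest, so it always satisfies SHA1_RE defined at module level.
import hashlib
import re

hash_input = ('fR9Xz' + 'alice').encode('utf-8')  # 5-char random salt + username
activation_key = hashlib.sha1(hash_input).hexdigest()
assert len(activation_key) == 40
assert re.match('^[a-f0-9]{40}$', activation_key)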
@transaction.atomic
def delete_expired_users(self):
"""
Remove expired instances of ``RegistrationProfile`` and their
associated ``User``s.
Accounts to be deleted are identified by searching for
instances of ``RegistrationProfile`` with expired activation
keys, and then checking to see if their associated ``User``
instances have the field ``is_active`` set to ``False``; any
``User`` who is both inactive and has an expired activation
key will be deleted.
It is recommended that this method be executed regularly as
part of your routine site maintenance; this application
provides a custom management command which will call this
method, accessible as ``manage.py cleanupregistration``.
Regularly clearing out accounts which have never been
activated serves two useful purposes:
1. It alleviates the occasional need to reset a
``RegistrationProfile`` and/or re-send an activation email
when a user does not receive or does not act upon the
initial activation email; since the account will be
deleted, the user will be able to simply re-register and
receive a new activation key.
2. It prevents the possibility of a malicious user registering
one or more accounts and never activating them (thus
denying the use of those usernames to anyone else); since
those accounts will be deleted, the usernames will become
available for use again.
If you have a troublesome ``User`` and wish to disable their
account while keeping it in the database, simply delete the
associated ``RegistrationProfile``; an inactive ``User`` which
does not have an associated ``RegistrationProfile`` will not
be deleted.
"""
for profile in self.all():
if profile.activation_key_expired():
user = profile.user
if not user.is_active:
profile.delete()
user.delete()
@python_2_unicode_compatible
class RegistrationProfile(models.Model):
"""
A simple profile which stores an activation key for use during
user account registration.
Generally, you will not want to interact directly with instances
of this model; the provided manager includes methods
for creating and activating new accounts, as well as for cleaning
out accounts which have never been activated.
While it is possible to use this model as the value of the
``AUTH_PROFILE_MODULE`` setting, it's not recommended that you do
so. This model's sole purpose is to store data temporarily during
account registration and activation.
"""
ACTIVATED = u"ALREADY_ACTIVATED"
user = models.OneToOneField(settings.AUTH_USER_MODEL,
verbose_name=_('user'))
activation_key = models.CharField(_('activation key'), max_length=40)
objects = RegistrationManager()
class Meta:
verbose_name = _('registration profile')
verbose_name_plural = _('registration profiles')
def __str__(self):
return "Registration information for %s" % self.user
def activation_key_expired(self):
"""
Determine whether this ``RegistrationProfile``'s activation
key has expired, returning a boolean -- ``True`` if the key
has expired.
Key expiration is determined by a two-step process:
1. If the user has already activated, the key will have been
reset to the string constant ``ACTIVATED``. Re-activating
is not permitted, and so this method returns ``True`` in
this case.
2. Otherwise, the date the user signed up is incremented by
the number of days specified in the setting
``ACCOUNT_ACTIVATION_DAYS`` (which should be the number of
days after signup during which a user is allowed to
activate their account); if the result is less than or
equal to the current date, the key has expired and this
method returns ``True``.
"""
expiration_date = datetime.timedelta(
days=settings.ACCOUNT_ACTIVATION_DAYS
)
return self.activation_key == self.ACTIVATED or \
(self.user.date_joined + expiration_date <= timezone.now())
activation_key_expired.boolean = True
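# Illustrative sketch (hypothetical dates and settings): the expiry check above
# is plain date arithmetic -- the signup date plus ACCOUNT_ACTIVATION_DAYS
# compared against the current time.
import datetime

account_activation_days = 7  # stand-in for settings.ACCOUNT_ACTIVATION_DAYS
date_joined = datetime.datetime(2015, 1, 1, 12, 0)
now = datetime.datetime(2015, 1, 10, 12, 0)
assert date_joined + datetime.timedelta(days=account_activation_days) <= now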
def send_activation_email(self, site):
"""
Send an activation email to the user associated with this
``RegistrationProfile``.
The activation email will make use of two templates:
``registration/activation_email_subject.txt``
This template will be used for the subject line of the
email. Because it is used as the subject line of an email,
this template's output **must** be only a single line of
text; output longer than one line will be forcibly joined
into only a single line.
``registration/activation_email.txt``
This template will be used for the body of the email.
These templates will each receive the following context
variables:
``activation_key``
The activation key for the new account.
``expiration_days``
The number of days remaining during which the account may
be activated.
``site``
An object representing the site on which the user
registered; depending on whether ``django.contrib.sites``
is installed, this may be an instance of either
``django.contrib.sites.models.Site`` (if the sites
application is installed) or
``django.contrib.sites.models.RequestSite`` (if
not). Consult the documentation for the Django sites
framework for details regarding these objects' interfaces.
"""
ctx_dict = {'activation_key': self.activation_key,
'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS,
'site': site}
subject = render_to_string('registration/activation_email_subject.txt',
ctx_dict)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
message = render_to_string('registration/activation_email.txt',
ctx_dict)
self.user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
| tdruez/django-registration | registration/models.py | Python | bsd-3-clause | 10,792 |
from flask import Flask
from . import config
from . import ElaborateCharts
app = Flask(__name__)
app.config['SECRET_KEY'] = config.SECRET_KEY
charts = ElaborateCharts(app)
if __name__ == '__main__':
app.run(host='127.0.0.1', debug=True)
| Perlence/elaborate-lastfm-charts | elaboratecharts/app.py | Python | bsd-3-clause | 246 |
import glob
import os
from .. import *
@skip_if('java' not in test_features, 'skipping java tests')
@skip_if_backend('msbuild')
class TestJava(IntegrationTest):
def __init__(self, *args, **kwargs):
super().__init__(os.path.join('languages', 'java'), install=True,
*args, **kwargs)
def test_build(self):
self.build('program.jar')
for i in glob.glob('*.class*'):
os.remove(i)
self.assertOutput(['java', '-jar', 'program.jar'],
'hello from java!\n')
def test_install(self):
self.build('install')
self.assertDirectory(self.installdir, [
os.path.join(self.libdir, 'program.jar'),
])
os.chdir(self.srcdir)
cleandir(self.builddir)
self.assertOutput(
['java', '-jar', os.path.join(self.libdir, 'program.jar')],
'hello from java!\n'
)
@skip_if('gcj' not in test_features, 'skipping gcj tests')
class TestGcj(IntegrationTest):
def __init__(self, *args, **kwargs):
super().__init__(os.path.join('languages', 'java'),
extra_env={'JAVAC': os.getenv('GCJ', 'gcj')},
*args, **kwargs)
def test_build(self):
self.build('program')
self.assertOutput([executable('program')], 'hello from java!\n')
@skip_if('java' not in test_features, 'skipping java tests')
@skip_if_backend('msbuild')
class TestJavaLibrary(IntegrationTest):
def __init__(self, *args, **kwargs):
super().__init__(os.path.join('languages', 'java_library'),
install=True, *args, **kwargs)
def test_build(self):
self.build('program.jar')
for i in glob.glob('*.class*'):
os.remove(i)
self.assertOutput(['java', '-jar', 'program.jar'],
'hello from library!\n')
def test_install(self):
self.build('install')
self.assertDirectory(self.installdir, [
os.path.join(self.libdir, 'lib.jar'),
os.path.join(self.libdir, 'program.jar'),
])
os.chdir(self.srcdir)
cleandir(self.builddir)
self.assertOutput(
['java', '-jar', os.path.join(self.libdir, 'program.jar')],
'hello from library!\n'
)
def test_package(self):
self.build('install')
self.configure(
srcdir=os.path.join('languages', 'java_package'), installdir=None,
extra_env={'CLASSPATH': os.path.join(self.libdir, '*')}
)
self.build()
self.assertOutput(['java', '-jar', 'program.jar'],
'hello from library!\n')
| jimporter/bfg9000 | test/integration/languages/test_java.py | Python | bsd-3-clause | 2,704 |
#!/usr/bin/env python
from __future__ import print_function
import re
import ast
import subprocess
import sys
from optparse import OptionParser
DEBUG = False
CONFIRM_STEPS = False
DRY_RUN = False
def skip_step():
"""
Asks for user's response whether to run a step. Default is yes.
:return: boolean
"""
global CONFIRM_STEPS
if CONFIRM_STEPS:
choice = raw_input("--- Confirm step? (y/N) [y] ")
if choice.lower() == 'n':
return True
return False
def run_step(*args):
"""
Prints out the command and asks if it should be run.
If yes (default), runs it.
:param args: list of strings (command and args)
"""
global DRY_RUN
cmd = args
print(' '.join(cmd))
if skip_step():
print('--- Skipping...')
elif DRY_RUN:
print('--- Pretending to run...')
else:
subprocess.check_output(cmd)
def version(version_file):
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open(version_file, 'rb') as f:
ver = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
return ver
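# Illustrative sketch (hypothetical file contents): version() only needs a line
# of the form __version__ = '<literal>'; the right-hand side is captured by the
# regex and evaluated as a Python literal.
import ast
import re

_version_re = re.compile(r'__version__\s+=\s+(.*)')
contents = "# pgcli\n__version__ = '1.0.0'\n"
assert str(ast.literal_eval(_version_re.search(contents).group(1))) == '1.0.0'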
def commit_for_release(version_file, ver):
run_step('git', 'reset')
run_step('git', 'add', version_file)
run_step('git', 'commit', '--message', 'Releasing version %s' % ver)
def create_git_tag(tag_name):
run_step('git', 'tag', '-s', '-m', tag_name, tag_name)
def create_source_tarball():
run_step('python', 'setup.py', 'sdist')
def upload_source_tarball():
run_step('python', 'setup.py', 'sdist', 'upload')
def push_to_github():
run_step('git', 'push', 'origin', 'master')
def push_tags_to_github():
run_step('git', 'push', '--tags', 'origin')
if __name__ == '__main__':
if DEBUG:
subprocess.check_output = lambda x: x
ver = version('pgcli/__init__.py')
print('Releasing Version:', ver)
parser = OptionParser()
parser.add_option(
"-c", "--confirm-steps", action="store_true", dest="confirm_steps",
default=False, help=("Confirm every step. If the step is not "
"confirmed, it will be skipped.")
)
parser.add_option(
"-d", "--dry-run", action="store_true", dest="dry_run",
default=False, help="Print out, but not actually run any steps."
)
popts, pargs = parser.parse_args()
CONFIRM_STEPS = popts.confirm_steps
DRY_RUN = popts.dry_run
choice = raw_input('Are you sure? (y/N) [n] ')
if choice.lower() != 'y':
sys.exit(1)
commit_for_release('pgcli/__init__.py', ver)
create_git_tag('v%s' % ver)
create_source_tarball()
push_to_github()
push_tags_to_github()
upload_source_tarball()
| koljonen/pgcli | release.py | Python | bsd-3-clause | 2,731 |
from configurations import values
from . import common, databases, email
from .. import __version__
class Raven(object):
"""Report uncaught exceptions to the Sentry server."""
INSTALLED_APPS = common.Common.INSTALLED_APPS + ('raven.contrib.django.raven_compat',)
RAVEN_CONFIG = {
'dsn': values.URLValue(environ_name='RAVEN_CONFIG_DSN'),
'release': __version__,
}
class Sentry404(Raven):
"""Log 404 events to the Sentry server."""
MIDDLEWARE_CLASSES = (
'raven.contrib.django.raven_compat.middleware.Sentry404CatchMiddleware',
) + common.Common.MIDDLEWARE_CLASSES
class Public(email.Email, databases.Databases, common.Common):
"""General settings for public projects."""
SECRET_KEY = values.SecretValue()
CSRF_COOKIE_HTTPONLY = True
SECURE_BROWSER_XSS_FILTER = True
SECURE_CONTENT_TYPE_NOSNIFF = True
X_FRAME_OPTIONS = 'DENY'
SILENCED_SYSTEM_CHECKS = values.ListValue([])
class Stage(Public):
"""Settings for staging server."""
pass
class SSL(object):
"""Settings for SSL."""
SECURE_SSL_HOST = values.Value('www.example.com')
SECURE_SSL_REDIRECT = True
class Prod(Public, SSL):
"""Settings for production server."""
pass
| CodeforLeipzig/fog | fog/config/settings/public.py | Python | bsd-3-clause | 1,254 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# Copyright (C) 2009-2010 Nicolas P. Rougier
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as np
import OpenGL.GL as gl
import texture, shader, colormap, color
class Image(object):
''' '''
def __init__(self, Z, format=None, cmap=colormap.IceAndFire, vmin=None, vmax=None,
interpolation='nearest', origin='lower', lighted=False,
gridsize=(0.0,0.0,0.0), elevation = 0.0):
''' Creates a texture from numpy array.
Parameters:
-----------
Z : numpy array
Z may be a float32 or uint8 array with following shapes:
* M
* MxN
* MxNx[1,2,3,4]
format: [None | 'A' | 'LA' | 'RGB' | 'RGBA']
Specify the texture format to use. Most of times it is possible to
find it automatically but there are a few cases where it not
possible to decide. For example an array with shape (M,3) can be
considered as 2D alpha texture of size (M,3) or a 1D RGB texture of
size (M,).
interpolation: 'nearest', 'bilinear' or 'bicubic'
Interpolation method.
vmin: scalar
Minimal representable value.
vmax: scalar
Maximal representable value.
origin: 'lower' or 'upper'
Place the [0,0] index of the array in the upper left or lower left
corner.
'''
self._lut = None
self._interpolation = interpolation
self._lighted = lighted
self._gridsize = gridsize
self._elevation = elevation
self._texture = texture.Texture(Z)
self._origin = origin
self._vmin = vmin
self._vmax = vmax
self._data = Z
self.cmap = cmap # This takes care of actual build
self._shader = None
self.build()
def build(self):
''' Build shader '''
interpolation = self._interpolation
gridsize = self._gridsize
elevation = self._elevation
lighted = self._lighted
cmap = self._cmap
self._shader = None
# Source format is RGB or RGBA, no need of a colormap
if self._texture.src_format in [gl.GL_RGB,gl.GL_RGBA]:
if interpolation == 'bicubic':
self._shader = shader.Bicubic(False, lighted=lighted, gridsize=gridsize, elevation=elevation)
elif interpolation == 'bilinear':
self._shader = shader.Bilinear(False, lighted=lighted, gridsize=gridsize, elevation=elevation)
else:
self._shader = None
# Source format is not RGB or RGBA
else:
if cmap:
if interpolation == 'bicubic':
self._shader = shader.Bicubic(True, lighted=lighted, gridsize=gridsize, elevation=elevation)
elif interpolation == 'bilinear':
self._shader = shader.Bilinear(True, lighted=lighted, gridsize=gridsize, elevation=elevation)
else:
self._shader = shader.Nearest(True, lighted=lighted, gridsize=gridsize, elevation=elevation)
else:
if interpolation == 'bicubic':
self._shader = shader.Bicubic(False, lighted=lighted, gridsize=gridsize, elevation=elevation)
elif interpolation == 'bilinear':
self._shader = shader.Bilinear(False, lighted=lighted, gridsize=gridsize, elevation=elevation)
else:
self._shader = None
self.update()
@property
def shape(self):
''' Underlying array shape. '''
return self._data.shape
@property
def data(self):
''' Underlying array '''
return self._data
@property
def texture(self):
''' Underlying texture '''
return self._texture
@property
def shader(self):
''' Currently active shader '''
return self._shader
@property
def format(self):
''' Array representation format (string). '''
format = self._texture.src_format
if format == gl.GL_ALPHA:
return 'A'
elif format == gl.GL_LUMINANCE_ALPHA:
return 'LA'
elif format == gl.GL_RGB:
return 'RGB'
elif format == gl.GL_RGBA:
return 'RGBA'
def _get_cmap(self):
return self._cmap
def _set_cmap(self, cmap):
self._cmap = cmap
colors = self.cmap.LUT['rgb'][1:].flatten().view((np.float32,3))
self._lut = texture.Texture(colors)
cmap = property(_get_cmap, _set_cmap,
doc=''' Colormap to be used to represent the array. ''')
def _get_elevation(self):
return self._elevation
def _set_elevation(self, elevation):
# Do we need to re-build shader ?
if not (elevation*self._elevation):
self._elevation = elevation
self.build()
elif self._shader:
self._elevation = elevation
self._shader._elevation = elevation
elevation = property(_get_elevation, _set_elevation,
doc=''' Image elevation. ''')
def _get_origin(self):
return self._origin
def _set_origin(self, origin):
self._origin = origin
origin = property(_get_origin, _set_origin,
doc=''' Place the [0,0] index of the array in the upper
left or lower left corner. ''')
def _get_lighted(self):
return self._lighted
def _set_lighted(self, lighted):
self._lighted = lighted
self.build()
lighted = property(_get_lighted, _set_lighted,
doc=''' Indicate whether image is ligthed. ''')
def _get_interpolation(self):
return self._interpolation
def _set_interpolation(self, interpolation):
self._interpolation = interpolation
self.build()
interpolation = property(_get_interpolation, _set_interpolation,
doc=''' Interpolation method. ''')
def _get_vmin(self):
return self._vmin
def _set_vmin(self, vmin):
self._vmin = vmin
vmin = property(_get_vmin, _set_vmin,
doc=''' Minimal representable value. ''')
def _get_vmax(self):
return self._vmax
def _set_vmax(self, vmax):
self._vmax = vmax
vmax = property(_get_vmax, _set_vmax,
doc=''' Maximal representable value. ''')
def _get_gridsize(self):
return self._gridsize
def _get_gridsize_x(self):
return self._gridsize[0]
def _get_gridsize_y(self):
return self._gridsize[1]
def _get_gridsize_z(self):
return self._gridsize[2]
def _set_gridsize(self, gridsize):
# Do we need to re-build shader ?
x,y,z = gridsize
x,y,z = max(0,x),max(0,y),max(0,z)
_x,_y,_z = self._gridsize
self._gridsize = x,y,z
if not (x+y+z)*(_x+_y+_z) and (x+y+z)+(_x+_y+_z):
self.build()
elif self._shader:
self._shader._gridsize = x,y,z
def _set_gridsize_x(self, x):
self.gridsize = (max(0,x), self._gridsize[1], self._gridsize[2])
def _set_gridsize_y(self, y):
self.gridsize = (self._gridsize[0], max(0,y), self._gridsize[2])
def _set_gridsize_z(self, z):
self.gridsize = (self._gridsize[0], self._gridsize[1], max(0,z))
gridsize = property(_get_gridsize, _set_gridsize,
doc=''' Image grid (x,y,z). ''')
def update(self):
''' Data update. '''
if self.vmin is None:
vmin = self.data.min()
else:
vmin = self.vmin
if self.vmax is None:
vmax = self._data.max()
else:
vmax = self.vmax
if vmin == vmax:
vmin, vmax = 0, 1
if self._lut:
s = self._lut.width
self._texture.update(bias = 1.0/(s-1)-vmin*((s-3.1)/(s-1))/(vmax-vmin),
scale = ((s-3.1)/(s-1))/(vmax-vmin))
else:
self._texture.update(bias=-vmin/(vmax-vmin),scale=1.0/(vmax-vmin))
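# Illustrative sketch (hypothetical vmin/vmax, and assuming the texture applies
# value * scale + bias): the no-LUT branch above simply remaps [vmin, vmax]
# onto [0, 1].
vmin, vmax = -1.0, 3.0
scale = 1.0 / (vmax - vmin)
bias = -vmin / (vmax - vmin)
assert abs(vmin * scale + bias - 0.0) < 1e-9
assert abs(vmax * scale + bias - 1.0) < 1e-9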
def blit(self, x, y, w, h):
''' Blit array onto active framebuffer. '''
if self._shader:
self._shader.bind(self.texture,self._lut)
if self.origin == 'lower':
t=0,1
else:
t=1,0
gl.glColor(1,1,1,1)
self._texture.blit(x,y,w,h,t=t)
if self._shader:
self._shader.unbind()
| davidcox/glumpy | glumpy/image.py | Python | bsd-3-clause | 8,858 |
#!/usr/bin/env python -u
# encoding: utf-8
#
# Copyright (c) 2012, Peter Hillerström <[email protected]>
# All rights reserved. This software is licensed under 3-clause BSD license.
#
# For the full copyright and license information, please view the LICENSE
# file that was distributed with this source code.
import pytest
from prism.grep import pattern, search
def log_lines():
return [
"[Sun Apr 08 12:51:52 2012] [notice] Digest: done",
"[Mon Jul 11 09:26:13 2011] Error: [client ::1] File does not exist: /Library/WebServer/Documents/favicon.ico",
]
def test_search():
for line in log_lines():
assert search(line), "Regexp pattern '{0}' didn't match line '{1}'".format(pattern, line)
| peterhil/prism | prism/test/grep_test.py | Python | bsd-3-clause | 738 |
# -*- coding: utf-8 -*-
"""
Web interface controller for the bot
:copyright: (c) 2013 by Pavel Lyashkov.
:license: BSD, see LICENSE for more details.
"""
import re
import os
from flask import Flask, Blueprint, abort, request, make_response, url_for, render_template
from web import app
from web import cache
api = Blueprint('api', __name__)
@api.route('/index', methods=['GET'])
def index():
return '1'  # Flask view functions must return a string or Response object, not an int
| shaiban/flask-btce | web/views/api.py | Python | bsd-3-clause | 442 |
"""
This module contains a class, :class:`Query`, that was implemented to provide
users with means to programmatically query the
`ACS Zeropoints Calculator <https://acszeropoints.stsci.edu>`_.
The API works by submitting requests to the
ACS Zeropoints Calculator referenced above and hence, it is only valid for ACS
specific instruments (HRC, SBC, or WFC).
The API can be used in two ways by specifying either a
``(date, detector, filter)`` combination or just a ``(date, detector)``
combination. In the first case, the query
will return the zeropoint information for the specific filter and detector at
specified date. In the second case, the query will return the zeropoint
information for all the filters for the desired detector at the specified date.
In either case, the result will be an ``astropy.table.QTable`` where each column
is an ``astropy.units.quantity.Quantity`` object with the appropriate units attached.
Examples
--------
Retrieve the zeropoint information for all the filters on 2016-04-01 for WFC:
>>> from acstools import acszpt
>>> date = '2016-04-01'
>>> detector = 'WFC'
>>> q = acszpt.Query(date=date, detector=detector)
>>> zpt_table = q.fetch()
>>> print(zpt_table)
FILTER PHOTPLAM PHOTFLAM STmag VEGAmag ABmag
Angstrom erg / (Angstrom cm2 s) mag(ST) mag mag(AB)
str6 float64 float64 float64 float64 float64
------ -------- ---------------------- ------- ------- -------
F435W 4329.2 3.148e-19 25.155 25.763 25.665
F475W 4746.2 1.827e-19 25.746 26.149 26.056
F502N 5023.0 5.259e-18 22.098 22.365 22.285
F550M 5581.5 3.99e-19 24.898 24.825 24.856
F555W 5360.9 1.963e-19 25.667 25.713 25.713
F606W 5922.0 7.811e-20 26.668 26.405 26.498
F625W 6312.0 1.188e-19 26.213 25.735 25.904
F658N 6584.0 1.97e-18 23.164 22.381 22.763
F660N 6599.4 5.156e-18 22.119 21.428 21.714
F775W 7693.2 9.954e-20 26.405 25.272 25.667
F814W 8045.0 7.046e-20 26.78 25.517 25.944
F850LP 9033.2 1.52e-19 25.945 24.332 24.858
F892N 8914.8 1.502e-18 23.458 21.905 22.4
Retrieve the zeropoint information for the F435W filter on 2016-04-01 for WFC:
>>> from acstools import acszpt
>>> date = '2016-04-01'
>>> detector = 'WFC'
>>> filt = 'F435W'
>>> q = acszpt.Query(date=date, detector=detector, filter=filt)
>>> zpt_table = q.fetch()
>>> print(zpt_table)
FILTER PHOTPLAM PHOTFLAM STmag VEGAmag ABmag
Angstrom erg / (Angstrom cm2 s) mag(ST) mag mag(AB)
------ -------- ---------------------- ------- ------- -------
F435W 4329.2 3.148e-19 25.155 25.763 25.665
Retrieve the zeropoint information for the F435W filter for WFC at multiple dates:
>>> from acstools import acszpt
>>> dates = ['2004-10-13', '2011-04-01', '2014-01-17', '2018-05-23']
>>> queries = []
>>> for date in dates:
... q = acszpt.Query(date=date, detector='WFC', filt='F435W')
... zpt_table = q.fetch()
... # Each object has a zpt_table attribute, so we save the instance
... queries.append(q)
>>> for q in queries:
... print(q.date, q.zpt_table['PHOTFLAM'][0], q.zpt_table['STmag'][0])
2004-10-13 3.074e-19 erg / (Angstrom cm2 s) 25.181 mag(ST)
2011-04-01 3.138e-19 erg / (Angstrom cm2 s) 25.158 mag(ST)
2014-01-17 3.144e-19 erg / (Angstrom cm2 s) 25.156 mag(ST)
2018-05-23 3.152e-19 erg / (Angstrom cm2 s) 25.154 mag(ST)
>>> type(queries[0].zpt_table['PHOTFLAM'])
astropy.units.quantity.Quantity
"""
import datetime as dt
import logging
import os
from urllib.request import urlopen
from urllib.error import URLError
import astropy.units as u
from astropy.table import QTable
from bs4 import BeautifulSoup
import numpy as np
__taskname__ = "acszpt"
__author__ = "Nathan Miles"
__version__ = "1.0"
__vdate__ = "22-Jan-2019"
__all__ = ['Query']
# Initialize the logger
logging.basicConfig()
LOG = logging.getLogger(f'{__taskname__}.Query')
LOG.setLevel(logging.INFO)
class Query:
"""Class used to interface with the ACS Zeropoints Calculator API.
Parameters
----------
date : str
Input date in the following ISO format, YYYY-MM-DD.
detector : {'HRC', 'SBC', 'WFC'}
One of the three channels on ACS: HRC, SBC, or WFC.
filt : str or `None`, optional
One of valid filters for the chosen detector. If no filter is supplied,
all of the filters for the chosen detector will be used:
* HRC:
F220W, F250W, F330W,
F344N, F435W, F475W,
F502N, F550M, F555W,
F606W, F625W, F658N, F660N,
F775W, F814W, F850LP, F892N
* WFC:
F435W, F475W,
F502N, F550M, F555W,
F606W, F625W, F658N, F660N,
F775W, F814W, F850LP, F892N
* SBC:
F115LP, F122M, F125LP,
F140LP, F150LP, F165LP
"""
def __init__(self, date, detector, filt=None):
# Set the attributes
self._date = date
self._detector = detector.upper()
self._filt = filt
self.valid_filters = {
'WFC': ['F435W', 'F475W', 'F502N', 'F550M',
'F555W', 'F606W', 'F625W', 'F658N',
'F660N', 'F775W', 'F814W', 'F850LP', 'F892N'],
'HRC': ['F220W', 'F250W', 'F330W', 'F344N',
'F435W', 'F475W', 'F502N', 'F550M',
'F555W', 'F606W', 'F625W', 'F658N',
'F660N', 'F775W', 'F814W', 'F850LP', 'F892N'],
'SBC': ['F115LP', 'F122M', 'F125LP',
'F140LP', 'F150LP', 'F165LP']
}
self._zpt_table = None
# Set the private attributes
if filt is None:
self._url = ('https://acszeropoints.stsci.edu/results_all/?'
f'date={self.date}&detector={self.detector}')
else:
self._filt = filt.upper()
self._url = ('https://acszeropoints.stsci.edu/results_single/?'
f'date1={self.date}&detector={self.detector}'
f'&{self.detector}_filter={self.filt}')
# ACS Launch Date
self._acs_installation_date = dt.datetime(2002, 3, 7)
# The farthest date in future that the component and throughput files
# are valid for. If input date is larger, extrapolation is not valid.
self._extrapolation_date = dt.datetime(2021, 12, 31)
self._msg_div = '-' * 79
self._valid_detectors = ['HRC', 'SBC', 'WFC']
self._response = None
self._failed = False
self._data_units = {
'FILTER': u.dimensionless_unscaled,
'PHOTPLAM': u.angstrom,
'PHOTFLAM': u.erg / u.cm ** 2 / u.second / u.angstrom,
'STmag': u.STmag,
'VEGAmag': u.mag,
'ABmag': u.ABmag
}
self._block_size = len(self._data_units)
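# Illustrative sketch (hypothetical inputs): the single-filter query URL
# assembled in __init__ above ends up looking like this.
date, detector, filt = '2016-04-01', 'WFC', 'F435W'
url = ('https://acszeropoints.stsci.edu/results_single/?'
       f'date1={date}&detector={detector}'
       f'&{detector}_filter={filt}')
# url == 'https://acszeropoints.stsci.edu/results_single/?date1=2016-04-01&detector=WFC&WFC_filter=F435W'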
@property
def date(self):
"""The user supplied date. (str)"""
return self._date
@property
def detector(self):
"""The user supplied detector. (str)"""
return self._detector
@property
def filt(self):
"""The user supplied filter, if one was given. (str or `None`)"""
return self._filt
@property
def zpt_table(self):
"""The results returned by the ACS Zeropoint Calculator. (`astropy.table.QTable`)"""
return self._zpt_table
def _check_inputs(self):
"""Check the inputs to ensure they are valid.
Returns
-------
status : bool
True if all inputs are valid, False if one is not.
"""
valid_detector = True
valid_filter = True
valid_date = True
# Determine the submitted detector is valid
if self.detector not in self._valid_detectors:
msg = (f'{self.detector} is not a valid detector option.\n'
'Please choose one of the following:\n'
f'{os.linesep.join(self._valid_detectors)}\n'
f'{self._msg_div}')
LOG.error(msg)
valid_detector = False
# Determine if the submitted filter is valid
if (self.filt is not None and valid_detector and
self.filt not in self.valid_filters[self.detector]):
msg = (f'{self.filt} is not a valid filter for {self.detector}\n'
'Please choose one of the following:\n'
f'{os.linesep.join(self.valid_filters[self.detector])}\n'
f'{self._msg_div}')
LOG.error(msg)
valid_filter = False
# Determine if the submitted date is valid
date_check = self._check_date()
if date_check is not None:
LOG.error(f'{date_check}\n{self._msg_div}')
valid_date = False
if not valid_detector or not valid_filter or not valid_date:
return False
return True
def _check_date(self, fmt='%Y-%m-%d'):
"""Convenience method for determining if the input date is valid.
Parameters
----------
fmt : str
The format of the date string. The default is ``%Y-%m-%d``, which
corresponds to ``YYYY-MM-DD``.
Returns
-------
status : str or `None`
If the date is valid, returns `None`. If the date is invalid,
returns a message explaining the issue.
"""
result = None
try:
dt_obj = dt.datetime.strptime(self.date, fmt)
except ValueError:
result = f'{self.date} does not match YYYY-MM-DD format'
else:
if dt_obj < self._acs_installation_date:
result = ('The observation date cannot occur '
'before ACS was installed '
f'({self._acs_installation_date.strftime(fmt)})')
elif dt_obj > self._extrapolation_date:
result = ('The observation date cannot occur after the '
'maximum allowable date, '
f'{self._extrapolation_date.strftime(fmt)}. '
'Extrapolations of the '
'instrument throughput after this date lead to '
'high uncertainties and are therefore invalid.')
finally:
return result
def _submit_request(self):
"""Submit a request to the ACS Zeropoint Calculator.
If an exception is raised during the request, an error message is
given. Otherwise, the response is saved in the corresponding
attribute.
"""
if not self._url.startswith('http'):
raise ValueError(f'Invalid URL {self._url}')
try:
self._response = urlopen(self._url) # nosec
except URLError as e:
msg = (f'{repr(e)}\n{self._msg_div}\nThe query failed! '
'Please check your inputs. '
'If the error persists, submit a ticket to the '
'ACS Help Desk at hsthelp.stsci.edu with the error message '
'displayed above.')
LOG.error(msg)
self._failed = True
else:
self._failed = False
def _parse_and_format(self):
""" Parse and format the results returned by the ACS Zeropoint Calculator.
Using ``beautifulsoup4``, find all the ``<tb> </tb>`` tags present in
the response. Format the results into an astropy.table.QTable with
corresponding units and assign it to the zpt_table attribute.
"""
soup = BeautifulSoup(self._response.read(), 'html.parser')
# Grab all elements in the table returned by the ZPT calc.
td = soup.find_all('td')
# Remove the units attached to PHOTFLAM and PHOTPLAM column names.
td = [val.text.split(' ')[0] for val in td]
# Turn the single list into a 2-D numpy array
data = np.reshape(td,
(int(len(td) / self._block_size), self._block_size))
# Create the QTable, note that sometimes self._response will be empty
# even though the return was successful; hence the try/except to catch
# any potential index errors. Provide the user with a message and
# set the zpt_table to None.
try:
tab = QTable(data[1:, :],
names=data[0],
dtype=[str, float, float, float, float, float])
except IndexError as e:
msg = (f'{repr(e)}\n{self._msg_div}\n'
'There was an issue parsing the request. '
'Try resubmitting the query. If this issue persists, please '
'submit a ticket to the Help Desk at '
'https://stsci.service-now.com/hst')
LOG.info(msg)
self._zpt_table = None
else:
# If and only if no exception was raised, attach the units to each
# column of the QTable. Note we skip the FILTER column because
# Quantity objects in astropy must be numerical (i.e. not str)
for col in tab.colnames:
if col.lower() == 'filter':
continue
tab[col].unit = self._data_units[col]
self._zpt_table = tab
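# Illustrative sketch (values copied from the module docstring above): the flat
# list of <td> strings returned by the calculator is reshaped into rows of
# self._block_size (six) entries, one row per filter plus the header row.
import numpy as np

td = ['FILTER', 'PHOTPLAM', 'PHOTFLAM', 'STmag', 'VEGAmag', 'ABmag',
      'F435W', '4329.2', '3.148e-19', '25.155', '25.763', '25.665']
data = np.reshape(td, (len(td) // 6, 6))
assert data.shape == (2, 6)
assert list(data[1]) == ['F435W', '4329.2', '3.148e-19', '25.155', '25.763', '25.665']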
def fetch(self):
"""Submit the request to the ACS Zeropoints Calculator.
This method will:
* submit the request
* parse the response
* format the results into a table with the correct units
Returns
-------
tab : `astropy.table.QTable` or `None`
If the request was successful, returns a table; otherwise, `None`.
"""
LOG.info('Checking inputs...')
valid_inputs = self._check_inputs()
if valid_inputs:
LOG.info(f'Submitting request to {self._url}')
self._submit_request()
if self._failed:
return
LOG.info('Parsing the response and formatting the results...')
self._parse_and_format()
return self.zpt_table
LOG.error('Please fix the incorrect input(s)')
| jhunkeler/acstools | acstools/acszpt.py | Python | bsd-3-clause | 14,427 |
from django.conf import settings
from django.db import models
from django.contrib.auth.models import User
from django.utils.translation import gettext_lazy as _
from churchill.apps.core.models import BaseModel
from churchill.apps.currencies.services import get_default_currency_id
class StatsCalculationStrategy(models.TextChoices):
LAST_SHOT = "LAST_SHOT", _("From the last shot")
WEEKLY = "WEEKLY", _("Weekly")
MONTHLY = "MONTHLY", _("Monthly")
ALL_TIME = "ALL_TIME", _("For the all time")
class Profile(BaseModel):
user = models.OneToOneField(
User,
on_delete=models.CASCADE,
primary_key=True,
related_name="profile",
)
image = models.FileField(
upload_to=settings.PROFILE_IMAGE_DIRECTORY, null=True, blank=True
)
language = models.CharField(
max_length=5,
blank=True,
default=settings.LANGUAGE_CODE,
choices=settings.LANGUAGES,
)
currency = models.ForeignKey(
"currencies.Currency",
related_name="profiles",
on_delete=models.DO_NOTHING,
blank=True,
default=get_default_currency_id,
)
next_day_offset = models.IntegerField(
blank=True,
default=settings.NEXT_DAY_OFFSET,
help_text=_("Offset in hours for the next day"),
)
avg_consumption = models.IntegerField(
blank=True,
default=settings.AVG_ALCOHOL_CONSUMPTION,
help_text=_("Average alcohol consumption in ml per year"),
)
avg_price = models.DecimalField(
max_digits=5,
decimal_places=2,
blank=True,
default=settings.AVG_ALCOHOL_PRICE,
help_text=_("Average alcohol price for 1000 ml"),
)
stats_calculation_strategy = models.CharField(
max_length=20,
choices=StatsCalculationStrategy.choices,
default=StatsCalculationStrategy.MONTHLY,
)
verification_token = models.CharField(max_length=16, null=True, blank=True)
def __str__(self):
return self.user.email
| manti-by/Churchill | churchill/apps/profiles/models.py | Python | bsd-3-clause | 2,037 |
# import argcomplete
# import httplib
# import logging
# import simplejson
# import sys
# import urllib2
# from time import strftime, localtime
# from conpaas.core import https
# from .base import BaseClient
# from .config import config
# from .service import ServiceCmd
# MODES = ['DEMO', 'REAL']
# TASKFARM_MNG_PORT = 8475
# def http_jsonrpc_post(hostname, uri, method, port=TASKFARM_MNG_PORT, params=None):
# """Perform a plain HTTP JSON RPC post (for task farming)"""
# if params is None:
# params = {}
# url = "http://%s:%s%s" % (hostname, port, uri)
# data = simplejson.dumps({'method': method,
# 'params': params,
# 'jsonrpc': '2.0',
# 'id': 1,
# })
# req = urllib2.Request(url, data, {'Content-Type': 'application/json'})
# res = urllib2.urlopen(req).read()
# return res
# def http_file_upload_post(host, uri, port=TASKFARM_MNG_PORT, params=None, files=None):
# """Perform a plain HTTP file upload post (for task farming)"""
# if params is None:
# params = {}
# if files is None:
# files = []
# content_type, body = https.client._encode_multipart_formdata(params, files)
# h = httplib.HTTP(host, port)
# h.putrequest('POST', uri)
# h.putheader('content-type', content_type)
# h.putheader('content-length', str(len(body)))
# h.endheaders()
# h.send(body)
# _errcode, _errmsg, _headers = h.getreply()
# return h.file.read()
# class TaskFarmCmd(ServiceCmd):
# def __init__(self, parser, client):
# self.initial_expected_state = 'RUNNING'
# ServiceCmd.__init__(self, parser, client, "taskfarm", ['node'],
# "TaskFarm service sub-commands help")
# self._add_get_mode()
# self._add_set_mode()
# self._add_upload()
# self._add_select_schedule()
# def call_manager(self, app_id, service_id, method, data=None):
# """TaskFarm peculiarities:
# 1) it works via plain HTTP
# 2) it uses port 8475
# 3) the 'shutdown' method is called 'terminate_workers'
# 4) it accepts only POST requests
# 5) it does not have to be started or stopped
# """
# if data is None:
# data = {}
# if method == "shutdown":
# method = "terminate_workers"
# service = self.client.service_dict(app_id, service_id)
# res = http_jsonrpc_post(service['application']['manager'], '/', method, params=data)
# try:
# data = simplejson.loads(res[1])
# except ValueError:
# data = simplejson.loads(res)
# return data.get('result', data)
# def _add_start(self):
# """
# TaskFarm does not have to be started.
# Overrides ServiceCmd._add_start().
# """
# pass
# def _add_stop(self):
# """
# TaskFarm does not have to be stopped.
# Overrides ServiceCmd._add_stop()
# """
# pass
# def _print_res(self, res):
# resres = res['result']
# if 'error' in resres:
# self.client.error("%s" % resres['error'])
# elif 'message' in resres:
# print "%s" % resres['message']
# else:
# print "%s" % res
# # ======= get_mode
# def _add_get_mode(self):
# subparser = self.add_parser('get_mode', help="get TaskFarm mode")
# subparser.set_defaults(run_cmd=self.get_mode, parser=subparser)
# subparser.add_argument('app_name_or_id',
# help="Name or identifier of an application")
# subparser.add_argument('serv_name_or_id',
# help="Name or identifier of a service")
# def get_mode(self, args):
# app_id, service_id = self.check_service(args.app_name_or_id, args.serv_name_or_id)
# mode = self.get_string_mode(app_id, service_id)
# print "%s" % mode
# def get_string_mode(self, app_id, service_id):
# res = self.call_manager(app_id, service_id, "get_service_info")
# return res['mode']
# # ======= set_mode
# def _add_set_mode(self):
# subparser = self.add_parser('set_mode', help="set TaskFarm mode")
# subparser.set_defaults(run_cmd=self.set_mode, parser=subparser)
# subparser.add_argument('app_name_or_id',
# help="Name or identifier of an application")
# subparser.add_argument('serv_name_or_id',
# help="Name or identifier of a service")
# subparser.add_argument('mode', choices=MODES, help="mode")
# def set_mode(self, args):
# app_id, service_id = self.check_service(args.app_name_or_id, args.serv_name_or_id)
# old_mode = self.get_string_mode(app_id, service_id)
# if old_mode != 'NA':
# res = {'result': {'error': 'ERROR: mode is already set to %s' % old_mode}}
# else:
# res = self.call_manager(app_id, service_id, "set_service_mode", [args.mode])
# self._print_res(res)
# # ========== upload bag of tasks
# def _add_upload(self):
# subparser = self.add_parser('upload_bot', help="upload bag of tasks")
# subparser.set_defaults(run_cmd=self.upload_bag_of_tasks,
# parser=subparser)
# subparser.add_argument('app_name_or_id',
# help="Name or identifier of an application")
# subparser.add_argument('serv_name_or_id',
# help="Name or identifier of a service")
# subparser.add_argument('filename',
# help="file containing the bag of tasks")
# subparser.add_argument('location',
# help="XtreemFS location, e.g., 192.168.122.1/uc3")
# def upload_bag_of_tasks(self, args):
# app_id, service_id = self.check_service(args.app_name_or_id, args.serv_name_or_id)
# mode = self.get_string_mode(app_id, service_id)
# if mode == 'NA':
# res = {'result': {'error': 'ERROR: to upload bag of task, first specify run mode.'}}
# else:
# service = self.client.service_dict(app_id, service_id)
# params = {'uriLocation': args.location,
# 'method': 'start_sampling'}
# filecontents = open(args.filename).read()
# res = http_file_upload_post(service['application']['manager'], '/', params=params,
# files=[('botFile', args.filename, filecontents)])
# res = simplejson.loads(res)
# self._print_res(res)
# # ========= select_schedule
# def _add_select_schedule(self):
#         subparser = self.add_parser('select_schedule', help="select a schedule")
# subparser.set_defaults(run_cmd=self.select_schedule, parser=subparser)
# subparser.add_argument('app_name_or_id',
# help="Name or identifier of an application")
# subparser.add_argument('serv_name_or_id',
# help="Name or identifier of a service")
# subparser.add_argument('schedule', type=int, help="schedule identifier")
# def _select_schedule(self, args):
# app_id, service_id = self.check_service(args.app_name_or_id, args.serv_name_or_id)
#         mode = self.get_string_mode(app_id, service_id)
# if mode == 'NA':
# return {'result': {'error': 'ERROR: to select a schedule, first specify run mode DEMO or REAL, then upload a bag of tasks '}}
# # check schedule availability
# res = self.call_manager(app_id, service_id, "get_service_info")
# if res['noCompletedTasks'] == 0:
# return {'message': "No schedule available yet: try again later..."}
# if res['state'] != 'RUNNING':
# return {'message': "Busy %s: try again later..." % res['phase']}
# sres = self.call_manager(app_id, service_id, "get_sampling_results")
# sdata = simplejson.loads(sres)
# if 'timestamp' in sdata:
# # Sampling is ready, check if bag is ready, or if we have to choose a schedule
# ts = sdata['timestamp']
# print strftime("Bag sampled on %a %d %b %Y at %H:%M:%S %Z", localtime(ts / 1000))
# if 'schedules' in sdata:
# #sch = sdata['schedules']
# #ss = simplejson.dumps(sch)
# # print "schedules: ", ss
# numscheds = len(sdata['schedules'])
# if numscheds == 0:
# return {'result': {'message': "Bag finished during sampling phase"}}
# if res['noTotalTasks'] == res['noCompletedTasks']:
# return {'result': {'message': "Taskfarm already finished"}}
# # check schedule selection
# if (args.schedule < 1) or (args.schedule > numscheds):
# return {'result': {'error': "ERROR: select schedule in interval [1..%d]" % numscheds}}
# # start execution
# # "{"method":"start_execution","params":["1371729870918","2"],"jsonrpc":"2.0","id":1}"
# res = self.call_manager(app_id, service_id, "start_execution", [ts, args.schedule - 1])
# return {'result': res}
# def select_schedule(self, args):
# res = self._select_schedule(args)
# self._print_res(res)
# def main():
# logger = logging.getLogger(__name__)
# console = logging.StreamHandler()
# formatter = logging.Formatter('%(levelname)s - %(message)s')
# console.setFormatter(formatter)
# logger.addHandler(console)
# cmd_client = BaseClient(logger)
# parser, argv = config('Manage ConPaaS PHP services.', logger)
# _serv_cmd = TaskFarmCmd(parser, cmd_client)
# argcomplete.autocomplete(parser)
# args = parser.parse_args(argv)
# cmd_client.set_config(args.director_url, args.username, args.password,
# args.debug)
# try:
# args.run_cmd(args)
# except:
# e = sys.exc_info()[1]
# sys.stderr.write("ERROR: %s\n" % e)
# sys.exit(1)
# if __name__ == '__main__':
# main()
| ConPaaS-team/conpaas | cps-tools/src/cps_tools/taskfarm.py | Python | bsd-3-clause | 10,346 |
__authors__ = ""
__copyright__ = "(c) 2014, pymal"
__license__ = "BSD License"
__contact__ = "Name Of Current Guardian of this file <email@address>"
USER_AGENT = 'api-indiv-0829BA2B33942A4A5E6338FE05EFB8A1'
HOST_NAME = "http://myanimelist.net"
DEBUG = False
RETRY_NUMBER = 4
RETRY_SLEEP = 1
SHORT_SITE_FORMAT_TIME = '%b %Y'
LONG_SITE_FORMAT_TIME = '%b %d, %Y'
MALAPPINFO_FORMAT_TIME = "%Y-%m-%d"
MALAPPINFO_NONE_TIME = "0000-00-00"
MALAPI_FORMAT_TIME = "%Y%m%d"
MALAPI_NONE_TIME = "00000000"
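# Illustrative helper (not part of the original module): parsing a MAL date
# string with the format constants above; the sentinel "none" values map to None.
def _example_parse_mal_date(value, fmt=MALAPPINFO_FORMAT_TIME):
    from datetime import datetime
    if value in (MALAPPINFO_NONE_TIME, MALAPI_NONE_TIME):
        return None
    return datetime.strptime(value, fmt)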
| pymal-developers/pymal | pymal/consts.py | Python | bsd-3-clause | 496 |
#!/usr/bin/env python
#
# Program: $Id: $
# Author: Robert Beverly <[email protected]>
# Description: Experimental tracebox warts parser
import sys
import struct
import dpkt
import socket
from sc_warts import *
if dpkt.__version__ == '1.8':
print "Upgrade dpkt"
sys.exit(-1)
TRACEBOXTYPE = 0x0c
def dict_diff(a, b):
diff = dict()
for k in a:
if k in b:
if b[k] != a[k]:
diff[k] = (a[k],b[k])
return diff
# return set(a.items()) ^ set(b.items())
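# Illustrative example (not part of the original tool): dict_diff returns, for
# every key present in both dicts whose value changed, a (before, after) pair.
def _example_dict_diff_usage():
  before = {'IP::TTL': 64, 'TCP::DPort': 80}
  after = {'IP::TTL': 63, 'TCP::DPort': 80}
  return dict_diff(before, after)  # -> {'IP::TTL': (64, 63)}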
class WartsTraceBoxReader(WartsReader):
def __init__(self, wartsfile, verbose=False):
super(WartsTraceBoxReader, self).__init__(wartsfile, verbose)
def next(self):
while True:
obj = self.next_object()
if not obj:
return (False, False)
if (obj.typ == TRACEBOXTYPE):
return (obj.flags, obj.pkts)
def next_object(self):
# read warts object header
self.header = self.fd.read(8)
# sanity check
if len(self.header) != 8:
return None
(magic, typ, length) = struct.unpack('!HHI', self.header)
if self.verbose:
print "Magic: %02X Obj: %02X Len: %02x" % (magic, typ, length)
assert(magic == obj_type['MAGIC'])
# read remainder of object
data = self.fd.read(length)
if typ == obj_type['LIST']:
return WartsList(data, verbose=self.verbose)
elif typ == obj_type['CYCLESTART']:
return WartsCycle(data, verbose=self.verbose)
elif typ == obj_type['CYCLE']:
return WartsCycle(data, verbose=self.verbose)
elif typ == obj_type['CYCLE_STOP']:
return WartsCycleStop(data, verbose=self.verbose)
elif typ == TRACEBOXTYPE:
return WartsTraceBox(data, verbose=self.verbose)
else:
print "Unsupported object: %02x Len: %d" % (typ, length)
assert False
class WartsTraceBox(WartsBaseObject):
def __init__(self, data, verbose=False):
super(WartsTraceBox, self).__init__(TRACEBOXTYPE, verbose)
self.data = data
self.flagdata = data
self.pkts = []
self.flag_defines = [
('listid', unpack_uint32_t),
('cycleid', unpack_uint32_t),
('userid', unpack_uint32_t),
('srcaddr', self.unpack_address),
('dstaddr', self.unpack_address),
('sport', unpack_uint16_t),
('dport', unpack_uint16_t),
('start', read_timeval),
('result', unpack_uint16_t),
('rtt', unpack_uint8_t),
('qtype', unpack_uint8_t),
('udp', unpack_uint8_t),
('printmode', unpack_uint8_t),
('pktc16', unpack_uint16_t),
('pktc', unpack_uint32_t),
]
flag_bytes = self.read_flags()
if self.verbose:
print "TB Params:", self.flags
offset = flag_bytes
for i in range(self.flags['pktc']):
pkt = WartsTraceBoxPkt(data[offset:], self.referenced_address, self.verbose)
self.pkts.append(pkt.flags)
offset+=pkt.flag_bytes
if self.verbose: print "Pkt %d: %s" % (i+1, pkt.flags)
class WartsTraceBoxPkt(WartsBaseObject):
def __init__(self, data, refs, verbose=False):
super(WartsTraceBoxPkt, self).__init__(TRACEBOXTYPE, verbose)
self.update_ref(refs)
self.flagdata = data
self.flag_defines = [
('dir', unpack_uint8_t),
('time', read_timeval),
('len', unpack_uint16_t),
('data', self.read_pass),
]
self.flag_bytes = self.read_flags()
datalen = self.flags['len']
self.flags['data'] = self.read_tracebox_pkt(data[self.flag_bytes:self.flag_bytes+datalen])
self.flag_bytes += self.flags['len']
def read_pass(self, b):
return ("pass", 0)
def read_tracebox_pkt(self, data):
fields = dict()
ip = dpkt.ip.IP(data)
fields['hop'] = socket.inet_ntoa(ip.src)
if ip.p == dpkt.ip.IP_PROTO_ICMP:
# This is a reply from a hop
fields['hop'] = socket.inet_ntoa(ip.src)
icmp = ip.data
#print "ICMP quote:", icmp.type, icmp.code, "LEN:", len(icmp.data.data)
# icmp.data is type dpkt.icmp.TimeExceed
# so, icmp.data.data is a dpkt.ip.IP
ip = icmp.data.data
fields['IP::Version'] = ip.v
fields['IP::IHL'] = ip.hl
dscp = (ip.tos & 0xFC) >> 2
ecn = (ip.tos & 0x03)
fields['IP::DiffServicesCP'] = hex(dscp)
fields['IP::ECN'] = hex(ecn)
    fields['IP::Length'] = hex(ip.len)
    fields['IP::ID'] = ip.id
    flags = (ip.df << 1) + ip.mf  # DF is bit 1, MF is bit 0 of the IP flags field
    fields['IP::Flags'] = hex(flags)
    fields['IP::FragmentOffset'] = ip.offset
    fields['IP::TTL'] = ip.ttl
fields['IP::Protocol'] = ip.p
fields['IP::Checksum'] = hex(ip.sum)
fields['IP::SourceAddr'] = socket.inet_ntoa(ip.src)
fields['IP::DestAddr'] = socket.inet_ntoa(ip.dst)
if ip.p == dpkt.ip.IP_PROTO_TCP:
tcp = ip.data
if not isinstance(tcp, dpkt.tcp.TCP):
#print "Partial quote!"
z = struct.pack('12sB',ip.data,0x50) + struct.pack('7B',*([0]*7))
tcp = dpkt.tcp.TCP(z)
#print type(tcp)
if len(ip.data) >= 4:
fields['TCP::SPort'] = hex(tcp.sport)
fields['TCP::DPort'] = hex(tcp.dport)
if len(ip.data) >= 8:
fields['TCP::SeqNumber'] = hex(tcp.seq)
if len(ip.data) >= 12:
fields['TCP::AckNumber'] = hex(tcp.ack)
if len(ip.data) >= 16:
fields['TCP::Offset'] = hex(tcp.off)
fields['TCP::Flags'] = hex(tcp.flags)
fields['TCP::Window'] = hex(tcp.win)
if len(ip.data) == 20:
fields['TCP::Checksum'] = hex(tcp.sum)
fields['TCP::UrgentPtr'] = hex(tcp.urp)
if len(ip.data) >= 20:
if len(tcp.opts) > 0:
opts = dpkt.tcp.parse_opts(tcp.opts)
for o,d in opts:
if o == dpkt.tcp.TCP_OPT_EOL:
fields['TCP::OPT_EOL'] = d
elif o == dpkt.tcp.TCP_OPT_NOP:
fields['TCP::OPT_NOP'] = d
elif o == dpkt.tcp.TCP_OPT_MSS:
fields['TCP::OPT_MSS'] = d
elif o == dpkt.tcp.TCP_OPT_WSCALE:
fields['TCP::OPT_WSCALE'] = d
elif o == dpkt.tcp.TCP_OPT_SACKOK:
fields['TCP::OPT_SACKOK'] = d
elif o == dpkt.tcp.TCP_OPT_SACK:
fields['TCP::OPT_SACK'] = d
elif o == dpkt.tcp.TCP_OPT_TIMESTAMP:
fields['TCP::OPT_TIMESTAMP'] = d
return fields
if __name__ == "__main__":
assert len(sys.argv) == 2
w = WartsTraceBoxReader(sys.argv[1], verbose=False)
while True:
(flags, pkts) = w.next()
if flags == False: break
print "tracebox from %s to %s (result: %d)" % (flags['srcaddr'], flags['dstaddr'], flags['result'])
last_tx = None
last_tx_ts = 0
i = 0
for pkt in pkts:
ts = pkt['time'] - flags['start']
if pkt['dir'] == 1: #TX
#print " TX at %1.3f:" % (ts)
if last_tx != None:
i+=1
print " %d: *" % (i)
last_tx = pkt['data']
last_tx_ts = pkt['time']
else: #RX
#print " RX at %1.3f:" % (ts)
i+=1
rtt = (pkt['time'] - last_tx_ts)*1000.0
if last_tx:
diff = dict_diff(last_tx, pkt['data'])
print " %d: %s RTT:%1.3f: %s" % (i, pkt['data']['hop'], rtt, " ".join(diff.keys()))
last_tx = None
| cmand/scamper | sc_tracebox.py | Python | bsd-3-clause | 7,053 |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "ConstantTrend", cycle_length = 12, transform = "None", sigma = 0.0, exog_count = 100, ar_order = 12); | antoinecarme/pyaf | tests/artificial/transf_None/trend_ConstantTrend/cycle_12/ar_12/test_artificial_32_None_ConstantTrend_12_12_100.py | Python | bsd-3-clause | 265 |
from django.db import models
from django.db.models import Q
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth import get_user_model
from django.contrib.auth.models import UserManager, Permission, AnonymousUser
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import smart_text
from django.utils.translation import ugettext as _
from django.conf import settings
from django.utils.six import text_type
from userena import settings as userena_settings
from userena.utils import generate_sha1, get_profile_model, get_datetime_now, \
get_user_profile
from userena import signals as userena_signals
from guardian.shortcuts import assign_perm, get_perms
import re
SHA1_RE = re.compile('^[a-f0-9]{40}$')
ASSIGNED_PERMISSIONS = {
'profile':
(('view_profile', 'Can view profile'),
('change_profile', 'Can change profile'),
('delete_profile', 'Can delete profile')),
'user':
(('change_user', 'Can change user'),
('delete_user', 'Can delete user'))
}
class UserenaManager(UserManager):
""" Extra functionality for the Userena model. """
def create_user(self, username, email, password, active=False,
send_email=True, pending_activation=False, first_name="", last_name="", organization = ""):
"""
A simple wrapper that creates a new :class:`User`.
:param username:
String containing the username of the new user.
:param email:
String containing the email address of the new user.
:param password:
String containing the password for the new user.
:param active:
Boolean that defines if the user requires activation by clicking
on a link in an e-mail. Defaults to ``False``.
:param send_email:
Boolean that defines if the user should be sent an email. You could
set this to ``False`` when you want to create a user in your own
code, but don't want the user to activate through email.
:return: :class:`User` instance representing the new user.
"""
new_user = get_user_model().objects.create_user(
username, email, password)
new_user.is_active = active
new_user.first_name = first_name
new_user.last_name = last_name
new_user.save()
userena_profile = self.create_userena_profile(new_user)
# All users have an empty profile
profile_model = get_profile_model()
try:
new_profile = new_user.emif_profile
except profile_model.DoesNotExist:
new_profile = profile_model(user=new_user)
new_profile.save(using=self._db)
# Give permissions to view and change profile
for perm in ASSIGNED_PERMISSIONS['profile']:
assign_perm(perm[0], new_user, get_user_profile(user=new_user))
# Give permissions to view and change itself
for perm in ASSIGNED_PERMISSIONS['user']:
assign_perm(perm[0], new_user, new_user)
userena_profile = self.create_userena_profile(new_user)
if send_email:
if pending_activation:
userena_profile.send_pending_activation_email(organization=organization)
else:
userena_profile.send_activation_email()
return new_user
def create_userena_profile(self, user):
"""
Creates an :class:`UserenaSignup` instance for this user.
:param user:
Django :class:`User` instance.
:return: The newly created :class:`UserenaSignup` instance.
"""
if isinstance(user.username, text_type):
user.username = smart_text(user.username)
salt, activation_key = generate_sha1(user.username)
try:
profile = self.get(user=user)
except self.model.DoesNotExist:
profile = self.create(user=user,
activation_key=activation_key)
return profile
def reissue_activation(self, activation_key):
"""
Creates a new ``activation_key`` resetting activation timeframe when
users let the previous key expire.
:param activation_key:
String containing the secret SHA1 activation key.
"""
try:
userena = self.get(activation_key=activation_key)
except self.model.DoesNotExist:
return False
try:
salt, new_activation_key = generate_sha1(userena.user.username)
userena.activation_key = new_activation_key
userena.save(using=self._db)
userena.user.date_joined = get_datetime_now()
userena.user.save(using=self._db)
userena.send_activation_email()
return True
except Exception:
return False
def activate_user(self, activation_key):
"""
Activate an :class:`User` by supplying a valid ``activation_key``.
        If the key is valid and a user is found, activates the user and
        returns it. Also sends the ``activation_complete`` signal.
:param activation_key:
String containing the secret SHA1 for a valid activation.
:return:
The newly activated :class:`User` or ``False`` if not successful.
"""
if SHA1_RE.search(activation_key):
try:
userena = self.get(activation_key=activation_key)
except self.model.DoesNotExist:
return False
if not userena.user.is_active:
if not userena.activation_key_expired():
is_active = True
user = userena.user
user.is_active = is_active
userena.activation_key = userena_settings.USERENA_ACTIVATED
userena.save(using=self._db)
user.save(using=self._db)
# Send the activation_complete signal
userena_signals.activation_complete.send(sender=None,
user=user)
return user
return False
def check_expired_activation(self, activation_key):
"""
Check if ``activation_key`` is still valid.
Raises a ``self.model.DoesNotExist`` exception if key is not present or
``activation_key`` is not a valid string
:param activation_key:
String containing the secret SHA1 for a valid activation.
:return:
            True if the key has expired, False if still valid.
"""
if SHA1_RE.search(activation_key):
userena = self.get(activation_key=activation_key)
return userena.activation_key_expired()
raise self.model.DoesNotExist
def reject_user(self, activation_key):
if SHA1_RE.search(activation_key):
try:
userena = self.get(activation_key=activation_key)
except self.model.DoesNotExist:
return False
if not userena.user.is_active and not userena.activation_key_expired():
user = userena.user
user.userena_signup.activation_key = userena_settings.USERENA_ACTIVATION_REJECTED
user.userena_signup.send_rejection_email()
user.is_active = False
user.userena_signup.save()
user.save()
return True
return False
def confirm_email(self, confirmation_key):
"""
Confirm an email address by checking a ``confirmation_key``.
A valid ``confirmation_key`` will set the newly wanted e-mail
address as the current e-mail address. Returns the user after
success or ``False`` when the confirmation key is
invalid. Also sends the ``confirmation_complete`` signal.
:param confirmation_key:
String containing the secret SHA1 that is used for verification.
:return:
The verified :class:`User` or ``False`` if not successful.
"""
if SHA1_RE.search(confirmation_key):
try:
userena = self.get(email_confirmation_key=confirmation_key,
email_unconfirmed__isnull=False)
except self.model.DoesNotExist:
return False
else:
user = userena.user
old_email = user.email
user.email = userena.email_unconfirmed
userena.email_unconfirmed, userena.email_confirmation_key = '',''
userena.save(using=self._db)
user.save(using=self._db)
# Send the confirmation_complete signal
userena_signals.confirmation_complete.send(sender=None,
user=user,
old_email=old_email)
return user
return False
def delete_expired_users(self):
"""
        Checks for expired users and deletes the ``User`` associated with
it. Skips if the user ``is_staff``.
:return: A list containing the deleted users.
"""
deleted_users = []
for user in get_user_model().objects.filter(is_staff=False,
is_active=False):
if user.userena_signup.activation_key_expired():
deleted_users.append(user)
user.delete()
return deleted_users
def check_permissions(self):
"""
Checks that all permissions are set correctly for the users.
        :return: A tuple of (changed_permissions, changed_users, warnings) listing what was corrected.
"""
# Variable to supply some feedback
changed_permissions = []
changed_users = []
warnings = []
# Check that all the permissions are available.
for model, perms in ASSIGNED_PERMISSIONS.items():
if model == 'profile':
model_obj = get_profile_model()
else: model_obj = get_user_model()
model_content_type = ContentType.objects.get_for_model(model_obj)
for perm in perms:
try:
Permission.objects.get(codename=perm[0],
content_type=model_content_type)
except Permission.DoesNotExist:
changed_permissions.append(perm[1])
Permission.objects.create(name=perm[1],
codename=perm[0],
content_type=model_content_type)
# it is safe to rely on settings.ANONYMOUS_USER_NAME since it is a
# requirement of django-guardian
for user in get_user_model().objects.exclude(username=settings.ANONYMOUS_USER_NAME):
try:
user_profile = get_user_profile(user=user)
except ObjectDoesNotExist:
warnings.append(_("No profile found for %(username)s") \
% {'username': user.username})
else:
all_permissions = get_perms(user, user_profile) + get_perms(user, user)
for model, perms in ASSIGNED_PERMISSIONS.items():
if model == 'profile':
perm_object = get_user_profile(user=user)
else: perm_object = user
for perm in perms:
if perm[0] not in all_permissions:
assign_perm(perm[0], user, perm_object)
changed_users.append(user)
return (changed_permissions, changed_users, warnings)
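# Illustrative sketch (not part of the original manager): how create_user above
# is typically driven from a signup view. `signup_manager` is expected to be the
# manager attached to the UserenaSignup model (i.e. UserenaSignup.objects, which
# lives in userena.models); `form` is a hypothetical validated signup form.
def _example_signup(signup_manager, form):
    return signup_manager.create_user(
        username=form.cleaned_data['username'],
        email=form.cleaned_data['email'],
        password=form.cleaned_data['password1'],
        active=False,      # require activation via the e-mailed key
        send_email=True,   # send the activation e-mail built above
    )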
class UserenaBaseProfileManager(models.Manager):
""" Manager for :class:`UserenaProfile` """
def get_visible_profiles(self, user=None):
"""
Returns all the visible profiles available to this user.
For now keeps it simple by just applying the cases when a user is not
        active, a user has its profile closed to everyone, or a user only
allows registered users to view their profile.
:param user:
A Django :class:`User` instance.
:return:
All profiles that are visible to this user.
"""
profiles = self.all()
filter_kwargs = {'user__is_active': True}
profiles = profiles.filter(**filter_kwargs)
if user and isinstance(user, AnonymousUser):
profiles = profiles.exclude(Q(privacy='closed') | Q(privacy='registered'))
else: profiles = profiles.exclude(Q(privacy='closed'))
return profiles
| bioinformatics-ua/django-userena | userena/managers.py | Python | bsd-3-clause | 12,823 |
from django import forms
from django.core import validators
from comperio.accounts.models import cUser, Settings, cGroup
from django.core.validators import email_re
import random, datetime, sha
MIN_PASSWORD_LENGTH = 6
class LoginForm(forms.Form):
"""account login form"""
username = forms.CharField(widget=forms.TextInput(attrs={'class':'span-5', 'placeholder':'username', 'tabindex':'1'}), help_text="username or email")
password = forms.CharField(widget=forms.PasswordInput(attrs={'class':'span-5 char_count','placeholder':'password', 'tabindex':'2'}))
class RegistrationForm(forms.Form):
"""user registration form"""
def check_consent(val):
"""check if the user has agreed to the consent form"""
return val
username = forms.CharField(widget=forms.TextInput(attrs={'class':'span-5', 'placeholder':'username'}), max_length=30)
email = forms.CharField(widget=forms.TextInput(attrs={'class':'span-5','placeholder':'email'}), max_length=60, validators=[validators.validate_email])
password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class':'span-5 char_count','placeholder':'password'}), max_length=60, validators=[validators.MinLengthValidator(MIN_PASSWORD_LENGTH)])
password2 = forms.CharField(widget=forms.PasswordInput(attrs={'class':'span-5 char_count','placeholder':'verify password'}), max_length=60, validators=[validators.MinLengthValidator(MIN_PASSWORD_LENGTH)])
consent = forms.BooleanField(widget=forms.CheckboxInput() , label="I have read and understood the above consent form")
honeypot = forms.CharField(widget=forms.HiddenInput(), required=False)
def isValidHuman(self, new_data):
"""check if the user is human"""
return new_data['honeypot'] == ""
def isValidUsername(self, new_data):
"""check if the username is valid"""
if not cUser.objects.filter(username=new_data['username']):
return True
return False
def isValidEmail(self, new_data):
"""check if the email is unique"""
# TODO: email is ok if same
if not cUser.objects.filter(email=new_data['email']):
return True
return False
# TODO: display specific error messages on the form
# TODO: form is not passing field errors.
def isValidPassword(self, new_data):
"""
        check that both passwords meet the minimum length requirement
"""
if len(new_data['password1']) < MIN_PASSWORD_LENGTH or len(new_data['password2']) < MIN_PASSWORD_LENGTH:
return False
return True
def PasswordsMatch(self, new_data):
"""check if the passwords match"""
if new_data['password1'] == new_data['password2']:
return True
return False
def save(self, new_data):
"""create a new inactive user from the form data"""
# make sure email is unique
if new_data['consent'] == False:
raise forms.ValidationError(u'You must agree to the consent form')
try:
duplicate = cUser.objects.get(email=new_data['email'])
except cUser.DoesNotExist:
# make sure we have a valid email
if email_re.search(new_data['email']):
# Build the activation key for their account
salt = sha.new(str(random.random())).hexdigest()[:5]
activation_key = sha.new(salt+new_data['username']).hexdigest()
key_expires = datetime.datetime.today() + datetime.timedelta(2)
u = cUser.objects.create(username=new_data['username'],
email=new_data['email'],
activation_key=activation_key,
key_expires=key_expires,
)
u.set_password(new_data['password1'])
u.is_active=False
u.save()
return u
# invalid email
raise forms.ValidationError(u'invalid email')
    # duplicate user or bad email
raise forms.ValidationError(u'email already in use')
return None
class EditAccountForm(forms.Form):
"""user registration form"""
username = forms.CharField(widget=forms.TextInput(attrs={'class':'span-5', 'placeholder':'username'}), max_length=30)
email = forms.CharField(widget=forms.TextInput(attrs={'class':'span-5','placeholder':'email'}), max_length=60, validators=[validators.validate_email])
password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class':'span-5 char_count','placeholder':'password'}), max_length=60, validators=[validators.MinLengthValidator(MIN_PASSWORD_LENGTH)], required=False)
password2 = forms.CharField(widget=forms.PasswordInput(attrs={'class':'span-5 char_count','placeholder':'verify password'}), max_length=60, validators=[validators.MinLengthValidator(MIN_PASSWORD_LENGTH)], required=False)
def isValidUsername(self, new_data):
"""check if the username is valid"""
# TODO: username ok if same
if not cUser.objects.filter(username=new_data['username']):
return True
return False
# TODO: display specific error messages on the form
# TODO: form is not passing field errors.
def isValidPassword(self, new_data):
"""
        check that a non-empty password meets the minimum length requirement
"""
if new_data['password1'] != '':
if len(new_data['password1']) < MIN_PASSWORD_LENGTH or len(new_data['password2']) < MIN_PASSWORD_LENGTH:
return False
return True
def isValidEmail(self, new_data):
"""check if the email is unique"""
# TODO: email is ok if same
if not cUser.objects.filter(email=new_data['email']):
return True
return False
def PasswordsMatch(self, new_data):
"""check if the passwords match"""
if new_data['password1'] == new_data['password2']:
return True
return False
def update(self, request, u):
"""update an existing user from the form data"""
# make sure email is unique
new_data = request.POST.copy()
if u.email != new_data['email']:
try:
duplicate = cUser.objects.get(email=new_data['email'])
raise forms.ValidationError(u'email is not available')
except cUser.DoesNotExist:
u.email = new_data['email']
if u.username != new_data['username']:
try:
duplicate = cUser.objects.get(username=new_data['username'])
raise forms.ValidationError(u'username is not available')
except cUser.DoesNotExist:
u.username = new_data['username']
if new_data['password1'] != '':
u.set_password(new_data['password1'])
u.save()
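# Illustrative sketch (not part of the original forms): the activation-key
# scheme used by RegistrationForm.save above, shown as a standalone helper.
def _example_make_activation_key(username):
    salt = sha.new(str(random.random())).hexdigest()[:5]
    activation_key = sha.new(salt + username).hexdigest()
    key_expires = datetime.datetime.today() + datetime.timedelta(2)
    return activation_key, key_expires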
class CreateGroupForm(forms.Form):
"""create a new user group"""
title = forms.CharField(widget=forms.TextInput(attrs={'class':'span-10 title',}), max_length=100)
description = forms.CharField(widget=forms.Textarea(attrs={'class':'span-10 description-textarea',}), max_length=1000, required=False)
#
#
# Hierarchical: only managers can send invites (individual and mass)
# overview page of students/minions
# can view code quality and comment ratio.
#
# Peer: all members can send invites, only managers can send mass invites
#
#
type = forms.CharField(widget=forms.Select(choices=cGroup.types), required=False)
visibility = forms.CharField(widget=forms.Select(choices=cGroup.visibility_types), required=False)
open_registration = forms.CharField(widget=forms.CheckboxInput(), help_text="Open registration allows anyone to request group membership")
class SettingsForm(forms.ModelForm):
"""profile settings form"""
class Meta:
model = Settings
exclude = ("user",)
# TODO: Should we allow users to change their username?
#
#class EditProfileForm(forms.ModelForm):
# """edit profile details"""
# username = forms.CharField(widget=forms.TextInput(attrs={'class':'span-5', 'placeholder':'username'}), max_length=30)
# email = forms.CharField(widget=forms.TextInput(attrs={'class':'span-5','placeholder':'email'}), max_length=30, validators=[validators.validate_email])
# password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class':'span-5 char_count','placeholder':'password'}), max_length=60, validators=[validators.MinLengthValidator(MIN_PASSWORD_LENGTH)])
# password2 = forms.CharField(widget=forms.PasswordInput(attrs={'class':'span-5 char_count','placeholder':'verify password'}), max_length=60, validators=[validators.MinLengthValidator(MIN_PASSWORD_LENGTH)])
#
| neutrinog/Comperio | comperio/accounts/forms.py | Python | bsd-3-clause | 8,980 |
# -*- coding: utf-8 -*-
from classytags.arguments import Argument, MultiValueArgument
from classytags.core import Options, Tag
from classytags.helpers import InclusionTag
from classytags.parser import Parser
from cms.models import Page, Placeholder as PlaceholderModel
from cms.plugin_rendering import render_plugins, render_placeholder
from cms.plugins.utils import get_plugins
from cms.utils import get_language_from_request
from cms.utils.moderator import get_cmsplugin_queryset, get_page_queryset
from cms.utils.placeholder import validate_placeholder_name
from django import template
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.cache import cache
from django.core.mail import mail_managers
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from itertools import chain
import re
register = template.Library()
def get_site_id(site):
if site:
if isinstance(site, Site):
site_id = site.id
elif isinstance(site, int) or (isinstance(site, basestring) and site.isdigit()):
site_id = int(site)
else:
site_id = settings.SITE_ID
else:
site_id = settings.SITE_ID
return site_id
def has_permission(page, request):
return page.has_change_permission(request)
register.filter(has_permission)
CLEAN_KEY_PATTERN = re.compile(r'[^a-zA-Z0-9_-]')
def _clean_key(key):
return CLEAN_KEY_PATTERN.sub('-', key)
def _get_cache_key(name, page_lookup, lang, site_id):
if isinstance(page_lookup, Page):
page_key = str(page_lookup.pk)
else:
page_key = str(page_lookup)
page_key = _clean_key(page_key)
return name+'__page_lookup:'+page_key+'_site:'+str(site_id)+'_lang:'+str(lang)
def _get_page_by_untyped_arg(page_lookup, request, site_id):
"""
The `page_lookup` argument can be of any of the following types:
- Integer: interpreted as `pk` of the desired page
- String: interpreted as `reverse_id` of the desired page
- `dict`: a dictionary containing keyword arguments to find the desired page
(for instance: `{'pk': 1}`)
- `Page`: you can also pass a Page object directly, in which case there will be no database lookup.
- `None`: the current page will be used
"""
if page_lookup is None:
return request.current_page
if isinstance(page_lookup, Page):
return page_lookup
if isinstance(page_lookup, basestring):
page_lookup = {'reverse_id': page_lookup}
elif isinstance(page_lookup, (int, long)):
page_lookup = {'pk': page_lookup}
elif not isinstance(page_lookup, dict):
raise TypeError('The page_lookup argument can be either a Dictionary, Integer, Page, or String.')
page_lookup.update({'site': site_id})
try:
return get_page_queryset(request).get(**page_lookup)
except Page.DoesNotExist:
site = Site.objects.get_current()
subject = _('Page not found on %(domain)s') % {'domain':site.domain}
body = _("A template tag couldn't find the page with lookup arguments `%(page_lookup)s\n`. "
"The URL of the request was: http://%(host)s%(path)s") \
% {'page_lookup': repr(page_lookup), 'host': site.domain, 'path': request.path}
if settings.DEBUG:
raise Page.DoesNotExist(body)
else:
if settings.SEND_BROKEN_LINK_EMAILS:
mail_managers(subject, body, fail_silently=True)
return None
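# Illustrative sketch (not part of the original template tags): the lookup forms
# accepted by _get_page_by_untyped_arg above. `request` is assumed to be a normal
# Django request that has passed through the CMS middleware; the pk 42 and the
# reverse_id 'homepage' are hypothetical example values.
def _example_page_lookups(request):
    site_id = get_site_id(None)
    by_pk = _get_page_by_untyped_arg(42, request, site_id)
    by_reverse_id = _get_page_by_untyped_arg('homepage', request, site_id)
    by_kwargs = _get_page_by_untyped_arg({'reverse_id': 'homepage'}, request, site_id)
    current = _get_page_by_untyped_arg(None, request, site_id)
    return by_pk, by_reverse_id, by_kwargs, current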
class PageUrl(InclusionTag):
template = 'cms/content.html'
name = 'page_url'
options = Options(
Argument('page_lookup'),
Argument('lang', required=False, default=None),
Argument('site', required=False, default=None),
)
def get_context(self, context, page_lookup, lang, site):
site_id = get_site_id(site)
request = context.get('request', False)
if not request:
return {'content': ''}
if request.current_page == "dummy":
return {'content': ''}
if lang is None:
lang = get_language_from_request(request)
cache_key = _get_cache_key('page_url', page_lookup, lang, site_id)+'_type:absolute_url'
url = cache.get(cache_key)
if not url:
page = _get_page_by_untyped_arg(page_lookup, request, site_id)
if page:
url = page.get_absolute_url(language=lang)
cache.set(cache_key, url, settings.CMS_CACHE_DURATIONS['content'])
if url:
return {'content': url}
return {'content': ''}
register.tag(PageUrl)
register.tag('page_id_url', PageUrl)
def _get_placeholder(current_page, page, context, name):
placeholder_cache = getattr(current_page, '_tmp_placeholders_cache', {})
if page.pk in placeholder_cache:
return placeholder_cache[page.pk].get(name, None)
placeholder_cache[page.pk] = {}
placeholders = page.placeholders.all()
for placeholder in placeholders:
placeholder_cache[page.pk][placeholder.slot] = placeholder
current_page._tmp_placeholders_cache = placeholder_cache
return placeholder_cache[page.pk].get(name, None)
def get_placeholder_content(context, request, current_page, name, inherit):
pages = [current_page]
if inherit:
pages = chain([current_page], current_page.get_cached_ancestors(ascending=True))
for page in pages:
placeholder = _get_placeholder(current_page, page, context, name)
if placeholder is None:
continue
if not get_plugins(request, placeholder):
continue
content = render_placeholder(placeholder, context, name)
if content:
return content
placeholder = _get_placeholder(current_page, current_page, context, name)
return render_placeholder(placeholder, context, name)
class PlaceholderParser(Parser):
def parse_blocks(self):
for bit in getattr(self.kwargs['extra_bits'], 'value', self.kwargs['extra_bits']):
if getattr(bit, 'value', bit.var.value) == 'or':
return super(PlaceholderParser, self).parse_blocks()
return
class PlaceholderOptions(Options):
def get_parser_class(self):
return PlaceholderParser
class Placeholder(Tag):
"""
This template node is used to output page content and
is also used in the admin to dynamically generate input fields.
eg: {% placeholder "placeholder_name" %}
{% placeholder "sidebar" inherit %}
{% placeholder "footer" inherit or %}
<a href="/about/">About us</a>
{% endplaceholder %}
Keyword arguments:
name -- the name of the placeholder
width -- additional width attribute (integer) which gets added to the plugin context
                 (deprecated, use `{% with 320 as width %}{% placeholder "foo" %}{% endwith %}`)
inherit -- optional argument which if given will result in inheriting
the content of the placeholder with the same name on parent pages
or -- optional argument which if given will make the template tag a block
tag whose content is shown if the placeholder is empty
"""
name = 'placeholder'
options = PlaceholderOptions(
Argument('name', resolve=False),
MultiValueArgument('extra_bits', required=False, resolve=False),
blocks=[
('endplaceholder', 'nodelist'),
]
)
def render_tag(self, context, name, extra_bits, nodelist=None):
validate_placeholder_name(name)
width = None
inherit = False
for bit in extra_bits:
if bit == 'inherit':
inherit = True
elif bit.isdigit():
width = int(bit)
import warnings
warnings.warn(
"The width parameter for the placeholder tag is deprecated.",
DeprecationWarning
)
if not 'request' in context:
return ''
request = context['request']
if width:
context.update({'width': width})
page = request.current_page
if not page or page == 'dummy':
return ''
content = get_placeholder_content(context, request, page, name, inherit)
if not content and nodelist:
return nodelist.render(context)
return content
def get_name(self):
return self.kwargs['name'].var.value.strip('"').strip("'")
register.tag(Placeholder)
class PageAttribute(Tag):
"""
This template node is used to output attribute from a page such
as its title or slug.
Synopsis
{% page_attribute "field-name" %}
{% page_attribute "field-name" page_lookup %}
Example
{# Output current page's page_title attribute: #}
{% page_attribute "page_title" %}
{# Output page_title attribute of the page with reverse_id "the_page": #}
{% page_attribute "page_title" "the_page" %}
{# Output slug attribute of the page with pk 10: #}
{% page_attribute "slug" 10 %}
Keyword arguments:
field-name -- the name of the field to output. Use one of:
- title
- menu_title
- page_title
- slug
- meta_description
- meta_keywords
page_lookup -- lookup argument for Page, if omitted field-name of current page is returned.
See _get_page_by_untyped_arg() for detailed information on the allowed types and their interpretation
for the page_lookup argument.
"""
name = 'page_attribute'
options = Options(
Argument('name', resolve=False),
Argument('page_lookup', required=False, default=None)
)
valid_attributes = [
"title",
"slug",
"meta_description",
"meta_keywords",
"page_title",
"menu_title"
]
def render_tag(self, context, name, page_lookup):
if not 'request' in context:
return ''
name = name.lower()
request = context['request']
lang = get_language_from_request(request)
page = _get_page_by_untyped_arg(page_lookup, request, get_site_id(None))
if page == "dummy":
return ''
if page and name in self.valid_attributes:
f = getattr(page, "get_%s" % name)
return f(language=lang, fallback=True)
return ''
register.tag(PageAttribute)
class CleanAdminListFilter(InclusionTag):
template = 'admin/filter.html'
name = 'clean_admin_list_filter'
options = Options(
Argument('cl'),
Argument('spec'),
)
def get_context(self, context, cl, spec):
choices = sorted(list(spec.choices(cl)), key=lambda k: k['query_string'])
query_string = None
unique_choices = []
for choice in choices:
if choice['query_string'] != query_string:
unique_choices.append(choice)
query_string = choice['query_string']
return {'title': spec.title(), 'choices' : unique_choices}
def _show_placeholder_for_page(context, placeholder_name, page_lookup, lang=None,
site=None, cache_result=True):
"""
Shows the content of a page with a placeholder name and given lookup
arguments in the given language.
This is useful if you want to have some more or less static content that is
shared among many pages, such as a footer.
See _get_page_by_untyped_arg() for detailed information on the allowed types
and their interpretation for the page_lookup argument.
"""
validate_placeholder_name(placeholder_name)
request = context.get('request', False)
site_id = get_site_id(site)
if not request:
return {'content': ''}
if lang is None:
lang = get_language_from_request(request)
content = None
if cache_result:
base_key = _get_cache_key('_show_placeholder_for_page', page_lookup, lang, site_id)
cache_key = _clean_key('%s_placeholder:%s' % (base_key, placeholder_name))
content = cache.get(cache_key)
if not content:
page = _get_page_by_untyped_arg(page_lookup, request, site_id)
if not page:
return {'content': ''}
try:
placeholder = page.placeholders.get(slot=placeholder_name)
except PlaceholderModel.DoesNotExist:
if settings.DEBUG:
raise
return {'content': ''}
baseqs = get_cmsplugin_queryset(request)
plugins = baseqs.filter(
placeholder=placeholder,
language=lang,
placeholder__slot__iexact=placeholder_name,
parent__isnull=True
).order_by('position').select_related()
c = render_plugins(plugins, context, placeholder)
content = "".join(c)
if cache_result:
cache.set(cache_key, content, settings.CMS_CACHE_DURATIONS['content'])
if content:
return {'content': mark_safe(content)}
return {'content': ''}
class ShowPlaceholderById(InclusionTag):
template = 'cms/content.html'
name = 'show_placeholder_by_id'
options = Options(
Argument('placeholder_name'),
Argument('reverse_id'),
Argument('lang', required=False, default=None),
Argument('site', required=False, default=None),
)
def get_context(self, *args, **kwargs):
return _show_placeholder_for_page(**self.get_kwargs(*args, **kwargs))
def get_kwargs(self, context, placeholder_name, reverse_id, lang, site):
return {
'context': context,
'placeholder_name': placeholder_name,
'page_lookup': reverse_id,
'lang': lang,
'site': site
}
register.tag(ShowPlaceholderById)
register.tag('show_placeholder', ShowPlaceholderById)
class ShowUncachedPlaceholderById(ShowPlaceholderById):
name = 'show_uncached_placeholder_by_id'
def get_kwargs(self, *args, **kwargs):
kwargs = super(ShowUncachedPlaceholderById, self).get_kwargs(*args, **kwargs)
kwargs['cache_result'] = False
return kwargs
register.tag(ShowUncachedPlaceholderById)
register.tag('show_uncached_placeholder', ShowUncachedPlaceholderById)
class CMSToolbar(InclusionTag):
template = 'cms/toolbar/toolbar.html'
name = 'cms_toolbar'
def render(self, context):
request = context.get('request', None)
if not request:
return ''
toolbar = getattr(request, 'toolbar', None)
if not toolbar:
return ''
if not toolbar.show_toolbar:
return ''
return super(CMSToolbar, self).render(context)
def get_context(self, context):
context['CMS_TOOLBAR_CONFIG'] = context['request'].toolbar.as_json(context)
return context
register.tag(CMSToolbar)
| VillageAlliance/django-cms | cms/templatetags/cms_tags.py | Python | bsd-3-clause | 14,804 |
import hashlib
import json
import os
import uuid
from django import forms
from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from django.core.serializers.json import DjangoJSONEncoder
from django.db import models
from django.shortcuts import redirect
from django.template.response import TemplateResponse
from modelcluster.contrib.taggit import ClusterTaggableManager
from modelcluster.fields import ParentalKey, ParentalManyToManyField
from modelcluster.models import ClusterableModel
from taggit.managers import TaggableManager
from taggit.models import ItemBase, TagBase, TaggedItemBase
from wagtail.admin.edit_handlers import (
FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel,
TabbedInterface)
from wagtail.admin.forms import WagtailAdminPageForm
from wagtail.admin.mail import send_mail
from wagtail.contrib.forms.forms import FormBuilder
from wagtail.contrib.forms.models import (
FORM_FIELD_CHOICES, AbstractEmailForm, AbstractFormField, AbstractFormSubmission)
from wagtail.contrib.forms.views import SubmissionsListView
from wagtail.contrib.settings.models import BaseSetting, register_setting
from wagtail.contrib.sitemaps import Sitemap
from wagtail.contrib.table_block.blocks import TableBlock
from wagtail.core.blocks import CharBlock, RawHTMLBlock, RichTextBlock, StructBlock
from wagtail.core.fields import RichTextField, StreamField
from wagtail.core.models import Orderable, Page, PageManager, PageQuerySet, Task
from wagtail.documents.edit_handlers import DocumentChooserPanel
from wagtail.documents.models import AbstractDocument, Document
from wagtail.images.blocks import ImageChooserBlock
from wagtail.images.edit_handlers import ImageChooserPanel
from wagtail.images.models import AbstractImage, AbstractRendition, Image
from wagtail.search import index
from wagtail.snippets.edit_handlers import SnippetChooserPanel
from wagtail.snippets.models import register_snippet
from wagtail.utils.decorators import cached_classmethod
from .forms import FormClassAdditionalFieldPageForm, ValidatedPageForm
EVENT_AUDIENCE_CHOICES = (
('public', "Public"),
('private', "Private"),
)
COMMON_PANELS = (
FieldPanel('slug'),
FieldPanel('seo_title'),
FieldPanel('show_in_menus'),
FieldPanel('search_description'),
)
# Link fields
class LinkFields(models.Model):
link_external = models.URLField("External link", blank=True)
link_page = models.ForeignKey(
'wagtailcore.Page',
null=True,
blank=True,
related_name='+',
on_delete=models.CASCADE
)
link_document = models.ForeignKey(
'wagtaildocs.Document',
null=True,
blank=True,
related_name='+',
on_delete=models.CASCADE
)
@property
def link(self):
if self.link_page:
return self.link_page.url
elif self.link_document:
return self.link_document.url
else:
return self.link_external
panels = [
FieldPanel('link_external'),
PageChooserPanel('link_page'),
DocumentChooserPanel('link_document'),
]
class Meta:
abstract = True
# Carousel items
class CarouselItem(LinkFields):
image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
embed_url = models.URLField("Embed URL", blank=True)
caption = models.CharField(max_length=255, blank=True)
panels = [
ImageChooserPanel('image'),
FieldPanel('embed_url'),
FieldPanel('caption'),
MultiFieldPanel(LinkFields.panels, "Link"),
]
class Meta:
abstract = True
# Related links
class RelatedLink(LinkFields):
title = models.CharField(max_length=255, help_text="Link title")
panels = [
FieldPanel('title'),
MultiFieldPanel(LinkFields.panels, "Link"),
]
class Meta:
abstract = True
# Simple page
class SimplePage(Page):
content = models.TextField()
content_panels = [
FieldPanel('title', classname="full title"),
FieldPanel('content'),
]
def get_admin_display_title(self):
return "%s (simple page)" % super().get_admin_display_title()
# Page with Excluded Fields when copied
class PageWithExcludedCopyField(Page):
content = models.TextField()
# Exclude this field from being copied
special_field = models.CharField(
blank=True, max_length=255, default='Very Special')
exclude_fields_in_copy = ['special_field']
content_panels = [
FieldPanel('title', classname="full title"),
FieldPanel('special_field'),
FieldPanel('content'),
]
class PageWithOldStyleRouteMethod(Page):
"""
Prior to Wagtail 0.4, the route() method on Page returned an HttpResponse
rather than a Page instance. As subclasses of Page may override route,
we need to continue accepting this convention (albeit as a deprecated API).
"""
content = models.TextField()
template = 'tests/simple_page.html'
def route(self, request, path_components):
return self.serve(request)
# File page
class FilePage(Page):
file_field = models.FileField()
FilePage.content_panels = [
FieldPanel('title', classname="full title"),
FieldPanel('file_field'),
]
# Event page
class EventPageCarouselItem(Orderable, CarouselItem):
page = ParentalKey('tests.EventPage', related_name='carousel_items', on_delete=models.CASCADE)
class EventPageRelatedLink(Orderable, RelatedLink):
page = ParentalKey('tests.EventPage', related_name='related_links', on_delete=models.CASCADE)
class EventPageSpeakerAward(Orderable, models.Model):
speaker = ParentalKey('tests.EventPageSpeaker', related_name='awards', on_delete=models.CASCADE)
name = models.CharField("Award name", max_length=255)
date_awarded = models.DateField(null=True, blank=True)
panels = [
FieldPanel('name'),
FieldPanel('date_awarded'),
]
class EventPageSpeaker(Orderable, LinkFields, ClusterableModel):
page = ParentalKey('tests.EventPage', related_name='speakers', related_query_name='speaker', on_delete=models.CASCADE)
first_name = models.CharField("Name", max_length=255, blank=True)
last_name = models.CharField("Surname", max_length=255, blank=True)
image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
@property
def name_display(self):
return self.first_name + " " + self.last_name
panels = [
FieldPanel('first_name'),
FieldPanel('last_name'),
ImageChooserPanel('image'),
MultiFieldPanel(LinkFields.panels, "Link"),
InlinePanel('awards', label="Awards"),
]
class EventCategory(models.Model):
name = models.CharField("Name", max_length=255)
def __str__(self):
return self.name
# Override the standard WagtailAdminPageForm to add validation on start/end dates
# that appears as a non-field error
class EventPageForm(WagtailAdminPageForm):
def clean(self):
cleaned_data = super().clean()
# Make sure that the event starts before it ends
start_date = cleaned_data['date_from']
end_date = cleaned_data['date_to']
if start_date and end_date and start_date > end_date:
raise ValidationError('The end date must be after the start date')
return cleaned_data
class EventPage(Page):
date_from = models.DateField("Start date", null=True)
date_to = models.DateField(
"End date",
null=True,
blank=True,
help_text="Not required if event is on a single day"
)
time_from = models.TimeField("Start time", null=True, blank=True)
time_to = models.TimeField("End time", null=True, blank=True)
audience = models.CharField(max_length=255, choices=EVENT_AUDIENCE_CHOICES)
location = models.CharField(max_length=255)
body = RichTextField(blank=True)
cost = models.CharField(max_length=255)
signup_link = models.URLField(blank=True)
feed_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
categories = ParentalManyToManyField(EventCategory, blank=True)
search_fields = [
index.SearchField('get_audience_display'),
index.SearchField('location'),
index.SearchField('body'),
index.FilterField('url_path'),
]
password_required_template = 'tests/event_page_password_required.html'
base_form_class = EventPageForm
EventPage.content_panels = [
FieldPanel('title', classname="full title"),
FieldPanel('date_from'),
FieldPanel('date_to'),
FieldPanel('time_from'),
FieldPanel('time_to'),
FieldPanel('location'),
FieldPanel('audience'),
FieldPanel('cost'),
FieldPanel('signup_link'),
InlinePanel('carousel_items', label="Carousel items"),
FieldPanel('body', classname="full"),
InlinePanel('speakers', label="Speakers", heading="Speaker lineup"),
InlinePanel('related_links', label="Related links"),
FieldPanel('categories'),
# InlinePanel related model uses `pk` not `id`
InlinePanel('head_counts', label='Head Counts'),
]
EventPage.promote_panels = [
MultiFieldPanel(COMMON_PANELS, "Common page configuration"),
ImageChooserPanel('feed_image'),
]
class HeadCountRelatedModelUsingPK(models.Model):
"""Related model that uses a custom primary key (pk) not id"""
custom_id = models.AutoField(primary_key=True)
event_page = ParentalKey(
EventPage,
on_delete=models.CASCADE,
related_name='head_counts'
)
head_count = models.IntegerField()
panels = [FieldPanel('head_count')]
# Override the standard WagtailAdminPageForm to add field that is not in model
# so that we can test additional potential issues like comparing versions
class FormClassAdditionalFieldPage(Page):
location = models.CharField(max_length=255)
body = RichTextField(blank=True)
content_panels = [
FieldPanel('title', classname="full title"),
FieldPanel('location'),
FieldPanel('body'),
FieldPanel('code'), # not in model, see set base_form_class
]
base_form_class = FormClassAdditionalFieldPageForm
# Just to be able to test multi table inheritance
class SingleEventPage(EventPage):
excerpt = models.TextField(
max_length=255,
blank=True,
null=True,
help_text="Short text to describe what is this action about"
)
# Give this page model a custom URL routing scheme
def get_url_parts(self, request=None):
url_parts = super().get_url_parts(request=request)
if url_parts is None:
return None
else:
site_id, root_url, page_path = url_parts
return (site_id, root_url, page_path + 'pointless-suffix/')
def route(self, request, path_components):
if path_components == ['pointless-suffix']:
# treat this as equivalent to a request for this page
return super().route(request, [])
else:
# fall back to default routing rules
return super().route(request, path_components)
def get_admin_display_title(self):
return "%s (single event)" % super().get_admin_display_title()
SingleEventPage.content_panels = [FieldPanel('excerpt')] + EventPage.content_panels
# "custom" sitemap object
class EventSitemap(Sitemap):
pass
# Event index (has a separate AJAX template, and a custom template context)
class EventIndex(Page):
intro = RichTextField(blank=True)
ajax_template = 'tests/includes/event_listing.html'
def get_events(self):
return self.get_children().live().type(EventPage)
def get_paginator(self):
return Paginator(self.get_events(), 4)
def get_context(self, request, page=1):
# Pagination
paginator = self.get_paginator()
try:
events = paginator.page(page)
except PageNotAnInteger:
events = paginator.page(1)
except EmptyPage:
events = paginator.page(paginator.num_pages)
# Update context
context = super().get_context(request)
context['events'] = events
return context
def route(self, request, path_components):
if self.live and len(path_components) == 1:
try:
return self.serve(request, page=int(path_components[0]))
except (TypeError, ValueError):
pass
return super().route(request, path_components)
def get_static_site_paths(self):
# Get page count
page_count = self.get_paginator().num_pages
# Yield a path for each page
for page in range(page_count):
yield '/%d/' % (page + 1)
# Yield from superclass
for path in super().get_static_site_paths():
yield path
def get_sitemap_urls(self, request=None):
# Add past events url to sitemap
return super().get_sitemap_urls(request=request) + [
{
'location': self.full_url + 'past/',
'lastmod': self.latest_revision_created_at
}
]
def get_cached_paths(self):
return super().get_cached_paths() + [
'/past/'
]
EventIndex.content_panels = [
FieldPanel('title', classname="full title"),
FieldPanel('intro', classname="full"),
]
class FormField(AbstractFormField):
page = ParentalKey('FormPage', related_name='form_fields', on_delete=models.CASCADE)
class FormPage(AbstractEmailForm):
def get_context(self, request):
context = super().get_context(request)
context['greeting'] = "hello world"
return context
# This is redundant (SubmissionsListView is the default view class), but importing
# SubmissionsListView in this models.py helps us to confirm that this recipe
# https://docs.wagtail.io/en/stable/reference/contrib/forms/customisation.html#customise-form-submissions-listing-in-wagtail-admin
# works without triggering circular dependency issues -
# see https://github.com/wagtail/wagtail/issues/6265
submissions_list_view_class = SubmissionsListView
FormPage.content_panels = [
FieldPanel('title', classname="full title"),
InlinePanel('form_fields', label="Form fields"),
MultiFieldPanel([
FieldPanel('to_address', classname="full"),
FieldPanel('from_address', classname="full"),
FieldPanel('subject', classname="full"),
], "Email")
]
# FormPage with a non-HTML extension
class JadeFormField(AbstractFormField):
page = ParentalKey('JadeFormPage', related_name='form_fields', on_delete=models.CASCADE)
class JadeFormPage(AbstractEmailForm):
template = "tests/form_page.jade"
JadeFormPage.content_panels = [
FieldPanel('title', classname="full title"),
InlinePanel('form_fields', label="Form fields"),
MultiFieldPanel([
FieldPanel('to_address', classname="full"),
FieldPanel('from_address', classname="full"),
FieldPanel('subject', classname="full"),
], "Email")
]
# Form page that redirects to a different page
class RedirectFormField(AbstractFormField):
page = ParentalKey('FormPageWithRedirect', related_name='form_fields', on_delete=models.CASCADE)
class FormPageWithRedirect(AbstractEmailForm):
thank_you_redirect_page = models.ForeignKey(
'wagtailcore.Page',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
)
def get_context(self, request):
context = super(FormPageWithRedirect, self).get_context(request)
context['greeting'] = "hello world"
return context
def render_landing_page(self, request, form_submission=None, *args, **kwargs):
"""
        Renders the landing page, or redirects to the thank_you_redirect_page if one is set.
"""
if self.thank_you_redirect_page:
return redirect(self.thank_you_redirect_page.url, permanent=False)
return super(FormPageWithRedirect, self).render_landing_page(request, form_submission, *args, **kwargs)
FormPageWithRedirect.content_panels = [
FieldPanel('title', classname="full title"),
PageChooserPanel('thank_you_redirect_page'),
InlinePanel('form_fields', label="Form fields"),
MultiFieldPanel([
FieldPanel('to_address', classname="full"),
FieldPanel('from_address', classname="full"),
FieldPanel('subject', classname="full"),
], "Email")
]
# FormPage with a custom FormSubmission
class FormPageWithCustomSubmission(AbstractEmailForm):
"""
This Form page:
        * Has a custom submission model
        * Has a custom related_name (see `FormFieldWithCustomSubmission.page`)
        * Saves a reference to a user
        * Doesn't render the HTML form if a submission for the current user already exists
"""
intro = RichTextField(blank=True)
thank_you_text = RichTextField(blank=True)
def get_context(self, request, *args, **kwargs):
context = super().get_context(request)
context['greeting'] = "hello world"
return context
def get_form_fields(self):
return self.custom_form_fields.all()
def get_data_fields(self):
data_fields = [
('useremail', 'User email'),
]
data_fields += super().get_data_fields()
return data_fields
def get_submission_class(self):
return CustomFormPageSubmission
def process_form_submission(self, form):
form_submission = self.get_submission_class().objects.create(
form_data=json.dumps(form.cleaned_data, cls=DjangoJSONEncoder),
page=self, user=form.user
)
if self.to_address:
addresses = [x.strip() for x in self.to_address.split(',')]
content = '\n'.join([x[1].label + ': ' + str(form.data.get(x[0])) for x in form.fields.items()])
send_mail(self.subject, content, addresses, self.from_address,)
# process_form_submission should now return the created form_submission
return form_submission
def serve(self, request, *args, **kwargs):
if self.get_submission_class().objects.filter(page=self, user__pk=request.user.pk).exists():
return TemplateResponse(
request,
self.template,
self.get_context(request)
)
return super().serve(request, *args, **kwargs)
FormPageWithCustomSubmission.content_panels = [
FieldPanel('title', classname="full title"),
FieldPanel('intro', classname="full"),
InlinePanel('custom_form_fields', label="Form fields"),
FieldPanel('thank_you_text', classname="full"),
MultiFieldPanel([
FieldPanel('to_address', classname="full"),
FieldPanel('from_address', classname="full"),
FieldPanel('subject', classname="full"),
], "Email")
]
class FormFieldWithCustomSubmission(AbstractFormField):
page = ParentalKey(FormPageWithCustomSubmission, on_delete=models.CASCADE, related_name='custom_form_fields')
class CustomFormPageSubmission(AbstractFormSubmission):
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
def get_data(self):
form_data = super().get_data()
form_data.update({
'useremail': self.user.email,
})
return form_data
# Custom form page with custom submission listing view and form submission
class FormFieldForCustomListViewPage(AbstractFormField):
page = ParentalKey(
'FormPageWithCustomSubmissionListView',
related_name='form_fields',
on_delete=models.CASCADE
)
class FormPageWithCustomSubmissionListView(AbstractEmailForm):
"""Form Page with customised submissions listing view"""
intro = RichTextField(blank=True)
thank_you_text = RichTextField(blank=True)
def get_submissions_list_view_class(self):
from .views import CustomSubmissionsListView
return CustomSubmissionsListView
def get_submission_class(self):
return CustomFormPageSubmission
def get_data_fields(self):
data_fields = [
('useremail', 'User email'),
]
data_fields += super().get_data_fields()
return data_fields
content_panels = [
FieldPanel('title', classname="full title"),
FieldPanel('intro', classname="full"),
InlinePanel('form_fields', label="Form fields"),
FieldPanel('thank_you_text', classname="full"),
MultiFieldPanel([
FieldPanel('to_address', classname="full"),
FieldPanel('from_address', classname="full"),
FieldPanel('subject', classname="full"),
], "Email")
]
# FormPage with a custom FormBuilder
EXTENDED_CHOICES = FORM_FIELD_CHOICES + (('ipaddress', 'IP Address'),)
class ExtendedFormField(AbstractFormField):
"""Override the field_type field with extended choices."""
page = ParentalKey(
'FormPageWithCustomFormBuilder',
related_name='form_fields',
on_delete=models.CASCADE)
field_type = models.CharField(
verbose_name='field type', max_length=16, choices=EXTENDED_CHOICES)
class CustomFormBuilder(FormBuilder):
"""
A custom FormBuilder that has an 'ipaddress' field with
customised create_singleline_field with shorter max_length
"""
def create_singleline_field(self, field, options):
options['max_length'] = 120 # usual default is 255
return forms.CharField(**options)
def create_ipaddress_field(self, field, options):
return forms.GenericIPAddressField(**options)
class FormPageWithCustomFormBuilder(AbstractEmailForm):
"""
A Form page that has a custom form builder and uses a custom
form field model with additional field_type choices.
"""
form_builder = CustomFormBuilder
content_panels = [
FieldPanel('title', classname="full title"),
InlinePanel('form_fields', label="Form fields"),
MultiFieldPanel([
FieldPanel('to_address', classname="full"),
FieldPanel('from_address', classname="full"),
FieldPanel('subject', classname="full"),
], "Email")
]
# Snippets
class AdvertPlacement(models.Model):
page = ParentalKey('wagtailcore.Page', related_name='advert_placements', on_delete=models.CASCADE)
advert = models.ForeignKey('tests.Advert', related_name='+', on_delete=models.CASCADE)
colour = models.CharField(max_length=255)
class AdvertTag(TaggedItemBase):
content_object = ParentalKey('Advert', related_name='tagged_items', on_delete=models.CASCADE)
class Advert(ClusterableModel):
url = models.URLField(null=True, blank=True)
text = models.CharField(max_length=255)
tags = TaggableManager(through=AdvertTag, blank=True)
panels = [
FieldPanel('url'),
FieldPanel('text'),
FieldPanel('tags'),
]
def __str__(self):
return self.text
register_snippet(Advert)
class AdvertWithCustomPrimaryKey(ClusterableModel):
advert_id = models.CharField(max_length=255, primary_key=True)
url = models.URLField(null=True, blank=True)
text = models.CharField(max_length=255)
panels = [
FieldPanel('url'),
FieldPanel('text'),
]
def __str__(self):
return self.text
register_snippet(AdvertWithCustomPrimaryKey)
class AdvertWithCustomUUIDPrimaryKey(ClusterableModel):
advert_id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
url = models.URLField(null=True, blank=True)
text = models.CharField(max_length=255)
panels = [
FieldPanel('url'),
FieldPanel('text'),
]
def __str__(self):
return self.text
register_snippet(AdvertWithCustomUUIDPrimaryKey)
class AdvertWithTabbedInterface(models.Model):
url = models.URLField(null=True, blank=True)
text = models.CharField(max_length=255)
something_else = models.CharField(max_length=255)
advert_panels = [
FieldPanel('url'),
FieldPanel('text'),
]
other_panels = [
FieldPanel('something_else'),
]
edit_handler = TabbedInterface([
ObjectList(advert_panels, heading='Advert'),
ObjectList(other_panels, heading='Other'),
])
def __str__(self):
return self.text
class Meta:
ordering = ('text',)
register_snippet(AdvertWithTabbedInterface)
class StandardIndex(Page):
""" Index for the site """
parent_page_types = [Page]
# A custom panel setup where all Promote fields are placed in the Content tab instead;
# we use this to test that the 'promote' tab is left out of the output when empty
StandardIndex.content_panels = [
FieldPanel('title', classname="full title"),
FieldPanel('seo_title'),
FieldPanel('slug'),
InlinePanel('advert_placements', label="Adverts"),
]
StandardIndex.promote_panels = []
class StandardChild(Page):
pass
# Test overriding edit_handler with a custom one
StandardChild.edit_handler = TabbedInterface([
ObjectList(StandardChild.content_panels, heading='Content'),
ObjectList(StandardChild.promote_panels, heading='Promote'),
ObjectList(StandardChild.settings_panels, heading='Settings', classname='settings'),
ObjectList([], heading='Dinosaurs'),
], base_form_class=WagtailAdminPageForm)
class BusinessIndex(Page):
""" Can be placed anywhere, can only have Business children """
subpage_types = ['tests.BusinessChild', 'tests.BusinessSubIndex']
class BusinessSubIndex(Page):
""" Can be placed under BusinessIndex, and have BusinessChild children """
# BusinessNowherePage is 'incorrectly' added here as a possible child.
# The rules on BusinessNowherePage prevent it from being a child here though.
subpage_types = ['tests.BusinessChild', 'tests.BusinessNowherePage']
parent_page_types = ['tests.BusinessIndex', 'tests.BusinessChild']
class BusinessChild(Page):
""" Can only be placed under Business indexes, no children allowed """
subpage_types = []
parent_page_types = ['tests.BusinessIndex', BusinessSubIndex]
class BusinessNowherePage(Page):
""" Not allowed to be placed anywhere """
parent_page_types = []
class TaggedPageTag(TaggedItemBase):
content_object = ParentalKey('tests.TaggedPage', related_name='tagged_items', on_delete=models.CASCADE)
class TaggedPage(Page):
tags = ClusterTaggableManager(through=TaggedPageTag, blank=True)
TaggedPage.content_panels = [
FieldPanel('title', classname="full title"),
FieldPanel('tags'),
]
class SingletonPage(Page):
@classmethod
def can_create_at(cls, parent):
# You can only create one of these!
return super(SingletonPage, cls).can_create_at(parent) \
and not cls.objects.exists()
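# Illustrative note (not part of the original test app): once a single
# SingletonPage instance exists anywhere, can_create_at() returns False for
# every parent, so no further instances can be created through the admin;
# SingletonPageViaMaxCount below achieves the same effect via max_count = 1.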
class SingletonPageViaMaxCount(Page):
max_count = 1
class PageChooserModel(models.Model):
page = models.ForeignKey('wagtailcore.Page', help_text='help text', on_delete=models.CASCADE)
class EventPageChooserModel(models.Model):
page = models.ForeignKey('tests.EventPage', help_text='more help text', on_delete=models.CASCADE)
class SnippetChooserModel(models.Model):
advert = models.ForeignKey(Advert, help_text='help text', on_delete=models.CASCADE)
panels = [
SnippetChooserPanel('advert'),
]
class SnippetChooserModelWithCustomPrimaryKey(models.Model):
advertwithcustomprimarykey = models.ForeignKey(AdvertWithCustomPrimaryKey, help_text='help text', on_delete=models.CASCADE)
panels = [
SnippetChooserPanel('advertwithcustomprimarykey'),
]
class CustomImage(AbstractImage):
caption = models.CharField(max_length=255, blank=True)
fancy_caption = RichTextField(blank=True)
not_editable_field = models.CharField(max_length=255, blank=True)
admin_form_fields = Image.admin_form_fields + (
'caption',
'fancy_caption',
)
class Meta:
unique_together = [
('title', 'collection')
]
class CustomRendition(AbstractRendition):
image = models.ForeignKey(CustomImage, related_name='renditions', on_delete=models.CASCADE)
class Meta:
unique_together = (
('image', 'filter_spec', 'focal_point_key'),
)
# Custom image model with a required field
class CustomImageWithAuthor(AbstractImage):
author = models.CharField(max_length=255)
admin_form_fields = Image.admin_form_fields + (
'author',
)
class CustomRenditionWithAuthor(AbstractRendition):
image = models.ForeignKey(CustomImageWithAuthor, related_name='renditions', on_delete=models.CASCADE)
class Meta:
unique_together = (
('image', 'filter_spec', 'focal_point_key'),
)
class CustomDocument(AbstractDocument):
description = models.TextField(blank=True)
fancy_description = RichTextField(blank=True)
admin_form_fields = Document.admin_form_fields + (
'description',
'fancy_description'
)
class Meta:
unique_together = [
('title', 'collection')
]
class StreamModel(models.Model):
body = StreamField([
('text', CharBlock()),
('rich_text', RichTextBlock()),
('image', ImageChooserBlock()),
])
class ExtendedImageChooserBlock(ImageChooserBlock):
"""
Example of Block with custom get_api_representation method.
    If the request has an 'extended' query param, it returns a dict of id and title;
    otherwise it returns the default value.
"""
def get_api_representation(self, value, context=None):
image_id = super().get_api_representation(value, context=context)
if 'request' in context and context['request'].query_params.get('extended', False):
return {
'id': image_id,
'title': value.title
}
return image_id
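# Illustrative example only (id and title values are made up): for an API
# request carrying ?extended=1, an image block rendered through
# ExtendedImageChooserBlock is serialised as {'id': 42, 'title': 'A picture'}
# rather than the bare id 42 produced by the default representation.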
class StreamPage(Page):
body = StreamField([
('text', CharBlock()),
('rich_text', RichTextBlock()),
('image', ExtendedImageChooserBlock()),
('product', StructBlock([
('name', CharBlock()),
('price', CharBlock()),
])),
('raw_html', RawHTMLBlock()),
])
api_fields = ('body',)
content_panels = [
FieldPanel('title'),
StreamFieldPanel('body'),
]
preview_modes = []
class DefaultStreamPage(Page):
body = StreamField([
('text', CharBlock()),
('rich_text', RichTextBlock()),
('image', ImageChooserBlock()),
], default='')
content_panels = [
FieldPanel('title'),
StreamFieldPanel('body'),
]
class MTIBasePage(Page):
is_creatable = False
class Meta:
verbose_name = "MTI Base page"
class MTIChildPage(MTIBasePage):
# Should be creatable by default, no need to set anything
pass
class AbstractPage(Page):
class Meta:
abstract = True
@register_setting
class TestSetting(BaseSetting):
title = models.CharField(max_length=100)
email = models.EmailField(max_length=50)
@register_setting
class ImportantPages(BaseSetting):
sign_up_page = models.ForeignKey(
'wagtailcore.Page', related_name="+", null=True, on_delete=models.SET_NULL)
general_terms_page = models.ForeignKey(
'wagtailcore.Page', related_name="+", null=True, on_delete=models.SET_NULL)
privacy_policy_page = models.ForeignKey(
'wagtailcore.Page', related_name="+", null=True, on_delete=models.SET_NULL)
@register_setting(icon="tag")
class IconSetting(BaseSetting):
pass
class NotYetRegisteredSetting(BaseSetting):
pass
@register_setting
class FileUploadSetting(BaseSetting):
file = models.FileField()
class BlogCategory(models.Model):
name = models.CharField(unique=True, max_length=80)
class BlogCategoryBlogPage(models.Model):
category = models.ForeignKey(BlogCategory, related_name="+", on_delete=models.CASCADE)
page = ParentalKey('ManyToManyBlogPage', related_name='categories', on_delete=models.CASCADE)
panels = [
FieldPanel('category'),
]
class ManyToManyBlogPage(Page):
"""
A page type with two different kinds of M2M relation.
We don't formally support these, but we don't want them to cause
hard breakages either.
"""
body = RichTextField(blank=True)
adverts = models.ManyToManyField(Advert, blank=True)
blog_categories = models.ManyToManyField(
BlogCategory, through=BlogCategoryBlogPage, blank=True)
# make first_published_at editable on this page model
settings_panels = Page.settings_panels + [
FieldPanel('first_published_at'),
]
class OneToOnePage(Page):
"""
A Page containing a O2O relation.
"""
body = RichTextBlock(blank=True)
page_ptr = models.OneToOneField(Page, parent_link=True,
related_name='+', on_delete=models.CASCADE)
class GenericSnippetPage(Page):
"""
A page containing a reference to an arbitrary snippet (or any model for that matter)
linked by a GenericForeignKey
"""
snippet_content_type = models.ForeignKey(ContentType, on_delete=models.SET_NULL, null=True)
snippet_object_id = models.PositiveIntegerField(null=True)
snippet_content_object = GenericForeignKey('snippet_content_type', 'snippet_object_id')
class CustomImageFilePath(AbstractImage):
def get_upload_to(self, filename):
"""Create a path that's file-system friendly.
By hashing the file's contents we guarantee an equal distribution
of files within our root directories. This also gives us a
better chance of uploading images with the same filename, but
different contents - this isn't guaranteed as we're only using
the first three characters of the checksum.
"""
original_filepath = super().get_upload_to(filename)
folder_name, filename = original_filepath.split(os.path.sep)
        # Ensure that we consume the entire file; we can't guarantee that
        # the stream has not been partially (or entirely) consumed by
        # another process
original_position = self.file.tell()
self.file.seek(0)
hash256 = hashlib.sha256()
while True:
data = self.file.read(256)
if not data:
break
hash256.update(data)
checksum = hash256.hexdigest()
self.file.seek(original_position)
return os.path.join(folder_name, checksum[:3], filename)
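# A minimal, self-contained sketch (not part of the original test app) of the
# same content-hash bucketing used by CustomImageFilePath.get_upload_to()
# above: the first three hex characters of the SHA-256 digest of the file
# contents become an intermediate directory. The helper name is made up.
def _example_hashed_upload_path(folder_name, filename, content):
    import hashlib
    checksum = hashlib.sha256(content).hexdigest()
    return os.path.join(folder_name, checksum[:3], filename)
# e.g. _example_hashed_upload_path('original_images', 'photo.jpg', b'...')
# returns something like 'original_images/a1b/photo.jpg', where the 'a1b'
# prefix depends on the file contents.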
class CustomPageQuerySet(PageQuerySet):
def about_spam(self):
return self.filter(title__contains='spam')
CustomManager = PageManager.from_queryset(CustomPageQuerySet)
class CustomManagerPage(Page):
objects = CustomManager()
class MyBasePage(Page):
"""
A base Page model, used to set site-wide defaults and overrides.
"""
objects = CustomManager()
class Meta:
abstract = True
class MyCustomPage(MyBasePage):
pass
class ValidatedPage(Page):
foo = models.CharField(max_length=255)
base_form_class = ValidatedPageForm
content_panels = Page.content_panels + [
FieldPanel('foo'),
]
class DefaultRichTextFieldPage(Page):
body = RichTextField()
content_panels = [
FieldPanel('title', classname="full title"),
FieldPanel('body'),
]
class DefaultRichBlockFieldPage(Page):
body = StreamField([
('rich_text', RichTextBlock()),
])
content_panels = Page.content_panels + [
StreamFieldPanel('body')
]
class CustomRichTextFieldPage(Page):
body = RichTextField(editor='custom')
content_panels = [
FieldPanel('title', classname="full title"),
FieldPanel('body'),
]
class CustomRichBlockFieldPage(Page):
body = StreamField([
('rich_text', RichTextBlock(editor='custom')),
])
content_panels = [
FieldPanel('title', classname="full title"),
StreamFieldPanel('body'),
]
class RichTextFieldWithFeaturesPage(Page):
body = RichTextField(features=['quotation', 'embed', 'made-up-feature'])
content_panels = [
FieldPanel('title', classname="full title"),
FieldPanel('body'),
]
# a page that only contains RichTextField within an InlinePanel,
# to test that the inline child's form media gets pulled through
class SectionedRichTextPageSection(Orderable):
page = ParentalKey('tests.SectionedRichTextPage', related_name='sections', on_delete=models.CASCADE)
body = RichTextField()
panels = [
FieldPanel('body')
]
class SectionedRichTextPage(Page):
content_panels = [
FieldPanel('title', classname="full title"),
InlinePanel('sections')
]
class InlineStreamPageSection(Orderable):
page = ParentalKey('tests.InlineStreamPage', related_name='sections', on_delete=models.CASCADE)
body = StreamField([
('text', CharBlock()),
('rich_text', RichTextBlock()),
('image', ImageChooserBlock()),
])
panels = [
StreamFieldPanel('body')
]
class InlineStreamPage(Page):
content_panels = [
FieldPanel('title', classname="full title"),
InlinePanel('sections')
]
class TableBlockStreamPage(Page):
table = StreamField([('table', TableBlock())])
content_panels = [StreamFieldPanel('table')]
class UserProfile(models.Model):
# Wagtail's schema must be able to coexist alongside a custom UserProfile model
user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
favourite_colour = models.CharField(max_length=255)
class PanelSettings(TestSetting):
panels = [
FieldPanel('title')
]
class TabbedSettings(TestSetting):
edit_handler = TabbedInterface([
ObjectList([
FieldPanel('title')
], heading='First tab'),
ObjectList([
FieldPanel('email')
], heading='Second tab'),
])
class AlwaysShowInMenusPage(Page):
show_in_menus_default = True
# test for AddField migrations on StreamFields using various default values
class AddedStreamFieldWithoutDefaultPage(Page):
body = StreamField([
('title', CharBlock())
])
class AddedStreamFieldWithEmptyStringDefaultPage(Page):
body = StreamField([
('title', CharBlock())
], default='')
class AddedStreamFieldWithEmptyListDefaultPage(Page):
body = StreamField([
('title', CharBlock())
], default=[])
# test customising edit handler definitions on a per-request basis
class PerUserContentPanels(ObjectList):
def _replace_children_with_per_user_config(self):
self.children = self.instance.basic_content_panels
if self.request.user.is_superuser:
self.children = self.instance.superuser_content_panels
self.children = [
child.bind_to(model=self.model, instance=self.instance,
request=self.request, form=self.form)
for child in self.children]
def on_instance_bound(self):
# replace list of children when both instance and request are available
if self.request:
self._replace_children_with_per_user_config()
else:
super().on_instance_bound()
def on_request_bound(self):
# replace list of children when both instance and request are available
if self.instance:
self._replace_children_with_per_user_config()
else:
super().on_request_bound()
class PerUserPageMixin:
basic_content_panels = []
superuser_content_panels = []
@cached_classmethod
def get_edit_handler(cls):
tabs = []
if cls.basic_content_panels and cls.superuser_content_panels:
tabs.append(PerUserContentPanels(heading='Content'))
if cls.promote_panels:
tabs.append(ObjectList(cls.promote_panels,
heading='Promote'))
if cls.settings_panels:
tabs.append(ObjectList(cls.settings_panels,
heading='Settings',
classname='settings'))
edit_handler = TabbedInterface(tabs,
base_form_class=cls.base_form_class)
return edit_handler.bind_to(model=cls)
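# Illustrative note (not part of the original test app): on SecretPage below,
# a superuser editing the page gets superuser_content_panels (including
# 'secret_data') in the Content tab, while other users only get
# basic_content_panels, because PerUserContentPanels swaps its children based
# on request.user once both the instance and the request are bound.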
class SecretPage(PerUserPageMixin, Page):
boring_data = models.TextField()
secret_data = models.TextField()
basic_content_panels = Page.content_panels + [
FieldPanel('boring_data'),
]
superuser_content_panels = basic_content_panels + [
FieldPanel('secret_data'),
]
class SimpleParentPage(Page):
# `BusinessIndex` has been added to bring it in line with other tests
subpage_types = ['tests.SimpleChildPage', BusinessIndex]
class SimpleChildPage(Page):
# `Page` has been added to bring it in line with other tests
parent_page_types = ['tests.SimpleParentPage', Page]
max_count_per_parent = 1
class PersonPage(Page):
first_name = models.CharField(
max_length=255,
verbose_name='First Name',
)
last_name = models.CharField(
max_length=255,
verbose_name='Last Name',
)
content_panels = Page.content_panels + [
MultiFieldPanel([
FieldPanel('first_name'),
FieldPanel('last_name'),
], 'Person'),
InlinePanel('addresses', label='Address'),
]
class Meta:
verbose_name = 'Person'
verbose_name_plural = 'Persons'
class Address(index.Indexed, ClusterableModel, Orderable):
address = models.CharField(
max_length=255,
verbose_name='Address',
)
tags = ClusterTaggableManager(
through='tests.AddressTag',
blank=True,
)
person = ParentalKey(
to='tests.PersonPage',
related_name='addresses',
verbose_name='Person'
)
panels = [
FieldPanel('address'),
FieldPanel('tags'),
]
class Meta:
verbose_name = 'Address'
verbose_name_plural = 'Addresses'
class AddressTag(TaggedItemBase):
content_object = ParentalKey(
to='tests.Address',
on_delete=models.CASCADE,
related_name='tagged_items'
)
class RestaurantPage(Page):
tags = ClusterTaggableManager(through='tests.TaggedRestaurant', blank=True)
content_panels = Page.content_panels + [
FieldPanel('tags'),
]
class RestaurantTag(TagBase):
free_tagging = False
class Meta:
verbose_name = "Tag"
verbose_name_plural = "Tags"
class TaggedRestaurant(ItemBase):
tag = models.ForeignKey(
RestaurantTag, related_name="tagged_restaurants", on_delete=models.CASCADE
)
content_object = ParentalKey(
to='tests.RestaurantPage',
on_delete=models.CASCADE,
related_name='tagged_items'
)
class SimpleTask(Task):
pass
| takeflight/wagtail | wagtail/tests/testapp/models.py | Python | bsd-3-clause | 43,462 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import base
from misc import GetPageInfo
from models import PageIdentifier
from category import GetSubcategoryInfos
from revisions import GetCurrentContent, GetPageRevisionInfos
from meta import GetSourceInfo
def test_unicode_title():
get_beyonce = GetCurrentContent("Beyoncé Knowles")
assert get_beyonce()
def test_coercion_basic():
pid = PageIdentifier(title='Africa', page_id=123, ns=4, source='enwp')
get_subcats = GetSubcategoryInfos(pid)
assert get_subcats.input_param == 'Category:Africa'
def test_web_request():
url = 'http://upload.wikimedia.org/wikipedia/commons/d/d2/Mcgregor.jpg'
get_photo = base.WebRequestOperation(url)
res = get_photo()
text = res[0]
assert len(text) == 16408
def test_get_html():
get_africa_html = base.GetPageHTML('Africa')
res = get_africa_html()
text = res[0]
assert len(text) > 350000
def test_missing_revisions():
get_revs = GetPageRevisionInfos('Coffee_lololololol')
rev_list = get_revs()
'''
Should return 'missing' and negative pageid
'''
assert len(rev_list) == 0
def test_get_meta():
get_source_info = GetSourceInfo()
meta = get_source_info()
assert meta
def test_client_passed_to_subops():
# This tests whether the client object given to the initial operation
# is passed to its sub-operations.
# Use just enough titles to force multiplexing so that we can get
# sub ops to test.
titles = ['a'] * (base.DEFAULT_QUERY_LIMIT.get_limit() + 1)
client = base.MockClient()
op = GetPageInfo(titles, client=client)
assert id(op.subop_queues[0].peek().client) == id(client)
| mahmoud/wapiti | wapiti/operations/test_basic.py | Python | bsd-3-clause | 1,717 |
import math
from numba import njit
from tardis.montecarlo.montecarlo_numba import (
njit_dict_no_parallel,
)
import tardis.montecarlo.montecarlo_numba.numba_config as nc
from tardis.montecarlo.montecarlo_numba.numba_config import (
C_SPEED_OF_LIGHT,
MISS_DISTANCE,
SIGMA_THOMSON,
CLOSE_LINE_THRESHOLD,
)
from tardis.montecarlo.montecarlo_numba.utils import MonteCarloException
from tardis.montecarlo.montecarlo_numba.r_packet import print_r_packet_properties
@njit(**njit_dict_no_parallel)
def calculate_distance_boundary(r, mu, r_inner, r_outer):
"""
Calculate distance to shell boundary in cm.
Parameters
----------
r : float
radial coordinate of the RPacket
mu : float
cosine of the direction of movement
r_inner : float
inner radius of current shell
r_outer : float
outer radius of current shell
"""
delta_shell = 0
if mu > 0.0:
# direction outward
distance = math.sqrt(r_outer * r_outer + ((mu * mu - 1.0) * r * r)) - (
r * mu
)
delta_shell = 1
else:
# going inward
check = r_inner * r_inner + (r * r * (mu * mu - 1.0))
if check >= 0.0:
# hit inner boundary
distance = -r * mu - math.sqrt(check)
delta_shell = -1
else:
# miss inner boundary
distance = math.sqrt(
r_outer * r_outer + ((mu * mu - 1.0) * r * r)
) - (r * mu)
delta_shell = 1
return distance, delta_shell
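# Worked example (illustrative values only, not part of the original module):
# a packet at r = 1.0e15 cm moving radially outward (mu = 1.0) in a shell with
# r_outer = 2.0e15 cm gives distance = r_outer - r = 1.0e15 cm and
# delta_shell = +1, i.e. the packet next crosses the outer shell boundary.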
@njit(**njit_dict_no_parallel)
def calculate_distance_line(
r_packet, comov_nu, is_last_line, nu_line, time_explosion
):
"""
Calculate distance until RPacket is in resonance with the next line
Parameters
----------
r_packet : tardis.montecarlo.montecarlo_numba.r_packet.RPacket
comov_nu : float
comoving frequency at the CURRENT position of the RPacket
is_last_line : bool
return MISS_DISTANCE if at the end of the line list
nu_line : float
line to check the distance to
time_explosion : float
time since explosion in seconds
    Returns
    -------
    distance : float
        distance (in cm) until the packet is in resonance with nu_line
    """
nu = r_packet.nu
if is_last_line:
return MISS_DISTANCE
nu_diff = comov_nu - nu_line
# for numerical reasons, if line is too close, we set the distance to 0.
if abs(nu_diff / nu) < CLOSE_LINE_THRESHOLD:
nu_diff = 0.0
if nu_diff >= 0:
distance = (nu_diff / nu) * C_SPEED_OF_LIGHT * time_explosion
else:
raise MonteCarloException(
"nu difference is less than 0.0"
)
if nc.ENABLE_FULL_RELATIVITY:
return calculate_distance_line_full_relativity(
nu_line, nu, time_explosion, r_packet
)
return distance
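# Worked example (illustrative values only, not part of the original module),
# for the non-relativistic branch: with comov_nu = 1.01e15 Hz,
# nu_line = 1.00e15 Hz, packet nu = 1.0e15 Hz and time_explosion = 1.0e6 s,
# the resonance distance is (nu_diff / nu) * c * time_explosion
# = 0.01 * 2.998e10 cm/s * 1.0e6 s, i.e. about 3.0e14 cm.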
@njit(**njit_dict_no_parallel)
def calculate_distance_line_full_relativity(
nu_line, nu, time_explosion, r_packet
):
# distance = - mu * r + (ct - nu_r * nu_r * sqrt(ct * ct - (1 + r * r * (1 - mu * mu) * (1 + pow(nu_r, -2))))) / (1 + nu_r * nu_r);
nu_r = nu_line / nu
ct = C_SPEED_OF_LIGHT * time_explosion
distance = -r_packet.mu * r_packet.r + (
ct
- nu_r
* nu_r
* math.sqrt(
ct * ct
- (
1
+ r_packet.r
* r_packet.r
* (1 - r_packet.mu * r_packet.mu)
* (1 + 1.0 / (nu_r * nu_r))
)
)
) / (1 + nu_r * nu_r)
return distance
@njit(**njit_dict_no_parallel)
def calculate_distance_electron(electron_density, tau_event):
"""
Calculate distance to Thomson Scattering
Parameters
----------
electron_density : float
tau_event : float
"""
# add full_relativity here
return tau_event / (electron_density * SIGMA_THOMSON)
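# Worked example (illustrative values only, not part of the original module):
# with tau_event = 1.0, electron_density = 1.0e9 cm^-3 and a Thomson cross
# section of about 6.65e-25 cm^2, the distance to the next electron
# scattering is 1.0 / (1.0e9 * 6.65e-25), i.e. about 1.5e15 cm.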
| tardis-sn/tardis | tardis/montecarlo/montecarlo_numba/calculate_distances.py | Python | bsd-3-clause | 3,846 |
"""URI API
This file contains the part of the Blaze API dealing with URIs (the
"URI API"). In Blaze, persistence is provided by means of this URI API,
which allows specifying a "location" for an array as a URI.
The URI API allows:
- saving existing arrays to a URI.
- loading an array into memory from a URI.
- opening a URI as an array.
- dropping the contents of a given URI.
"""
from __future__ import absolute_import, division, print_function
import os
import warnings
from datashape import to_numpy, to_numpy_dtype
import blz
from ..py2help import urlparse
from ..datadescriptor import (BLZDataDescriptor, CSVDataDescriptor,
JSONDataDescriptor, HDF5DataDescriptor)
from ..objects.array import Array
# ----------------------------------------------------------------------
# Some helper functions to workaround quirks
# XXX A big hack for some quirks in current datashape. The next function deals
# with the cases where the shape is not present, as in 'float32'
def _to_numpy(ds):
res = to_numpy(ds)
res = res if type(res) is tuple else ((), to_numpy_dtype(ds))
return res
class Storage(object):
"""
Storage(uri, mode='a', permanent=True)
Class to host parameters for persistence properties.
Parameters
----------
uri : string
The URI where the data set will be stored.
mode : string ('r'ead, 'a'ppend)
The mode for creating/opening the storage.
permanent : bool
Whether this file should be permanent or not.
Examples
--------
>>> store = Storage('blz-store.blz')
"""
SUPPORTED_FORMATS = ('json', 'csv', 'blz', 'hdf5')
@property
def uri(self):
"""The URI for the data set."""
return self._uri
@property
def mode(self):
"""The mode for opening the storage."""
return self._mode
@property
def format(self):
"""The format used for storage."""
return self._format
@property
def permanent(self):
"""Whether this file should be permanent or not."""
return self._permanent
@property
def path(self):
"""Returns a blz path for a given uri."""
return self._path
def __init__(self, uri, mode='a', permanent=True, format=None):
if not isinstance(uri, str):
raise ValueError("`uri` must be a string.")
self._uri = uri
self._format = self._path = ""
self._set_format_and_path_from_uri(uri, format)
self._mode = mode
if not permanent:
raise ValueError(
"`permanent` set to False is not supported yet.")
self._permanent = permanent
def __repr__(self):
args = ["uri=%s" % self._uri, "mode=%s" % self._mode]
return '%s(%s)' % (self.__class__.__name__, ', '.join(args))
def _set_format_and_path_from_uri(self, uri, format=None):
"""Parse the uri into the format and path"""
up = urlparse.urlparse(self._uri)
if up.scheme in self.SUPPORTED_FORMATS:
warnings.warn("Blaze no longer uses file type in network protocol field of the uri. "
"Please use format kwarg.", DeprecationWarning)
self._path = up.netloc + up.path
if os.name == 'nt' and len(up.scheme) == 1:
# This is a workaround for raw windows paths like
# 'C:/x/y/z.csv', for which urlparse parses 'C' as
# the scheme and '/x/y/z.csv' as the path.
self._path = uri
if not self._path:
            raise ValueError("Unable to extract path from uri: %s" % uri)
_, extension = os.path.splitext(self._path)
extension = extension.strip('.')
# Support for deprecated format in url network scheme
format_from_up = None
if up.scheme in self.SUPPORTED_FORMATS:
format_from_up = up.scheme
        if format and format_from_up and format != format_from_up:
raise ValueError("URI scheme and file format do not match. Given uri: %s, format: %s" %
(up.geturl(), format))
# find actual format
if format:
self._format = format
elif format_from_up:
self._format = format_from_up
elif extension:
self._format = extension
else:
raise ValueError("Cannot determine format from: %s" % uri)
if self._format not in self.SUPPORTED_FORMATS:
raise ValueError("`format` '%s' is not supported." % self._format)
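# Illustrative examples only (file names are made up, not part of the original
# module): Storage('data/events.csv') resolves to format 'csv' and path
# 'data/events.csv'; Storage('events', format='blz') takes the format from the
# keyword argument; Storage('events') raises ValueError because no format can
# be determined from the missing extension.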
def _persist_convert(persist):
if not isinstance(persist, Storage):
if isinstance(persist, str):
persist = Storage(persist)
else:
raise ValueError('persist argument must be either a'
'URI string or Storage object')
return persist
# ----------------------------------------------------------------------
# The actual API specific for persistence.
# Only BLZ, HDF5, CSV and JSON formats are supported currently.
def from_blz(persist, **kwargs):
"""Open an existing persistent BLZ array.
Parameters
----------
persist : a Storage instance
The Storage instance specifies, among other things, path of
where the array is stored.
kwargs : a dictionary
Put here different parameters depending on the format.
Returns
-------
out: a concrete blaze array.
"""
persist = _persist_convert(persist)
d = blz.barray(rootdir=persist.path, **kwargs)
dd = BLZDataDescriptor(d)
return Array(dd)
def from_csv(persist, **kwargs):
"""Open an existing persistent CSV array.
Parameters
----------
persist : a Storage instance
The Storage instance specifies, among other things, path of
where the array is stored.
kwargs : a dictionary
Put here different parameters depending on the format.
Returns
-------
out: a concrete blaze array.
"""
persist = _persist_convert(persist)
dd = CSVDataDescriptor(persist.path, **kwargs)
return Array(dd)
def from_json(persist, **kwargs):
"""Open an existing persistent JSON array.
Parameters
----------
persist : a Storage instance
The Storage instance specifies, among other things, path of
where the array is stored.
kwargs : a dictionary
Put here different parameters depending on the format.
Returns
-------
out: a concrete blaze array.
"""
persist = _persist_convert(persist)
dd = JSONDataDescriptor(persist.path, **kwargs)
return Array(dd)
def from_hdf5(persist, **kwargs):
"""Open an existing persistent HDF5 array.
Parameters
----------
persist : a Storage instance
The Storage instance specifies, among other things, path of
where the array is stored.
kwargs : a dictionary
Put here different parameters depending on the format.
Returns
-------
out: a concrete blaze array.
"""
persist = _persist_convert(persist)
dd = HDF5DataDescriptor(persist.path, **kwargs)
return Array(dd)
def drop(persist):
"""Remove a persistent storage."""
persist = _persist_convert(persist)
if persist.format == 'blz':
from shutil import rmtree
rmtree(persist.path)
elif persist.format in ('csv', 'json', 'hdf5'):
import os
os.unlink(persist.path)
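# Illustrative usage sketch (not part of the original module; the file name
# and data are made up):
#
#   store = Storage('blz-store.blz', mode='a')  # describe where the data lives
#   arr = from_blz(store)                       # open the BLZ dataset as an Array
#   drop(store)                                 # remove the persistent storage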
| mwiebe/blaze | blaze/io/storage.py | Python | bsd-3-clause | 7,420 |
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''
'''
from __future__ import with_statement
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
from pyglet.app.base import PlatformEventLoop
from pyglet.libs.darwin import *
class CocoaEventLoop(PlatformEventLoop):
def __init__(self):
super(CocoaEventLoop, self).__init__()
# Prepare the default application.
NSApplication.sharedApplication()
# Create an autorelease pool for menu creation and finishLaunching
pool = NSAutoreleasePool.alloc().init()
self._create_application_menu()
# The value for the ApplicationPolicy is 0 as opposed to the
# constant name NSApplicationActivationPolicyRegular, as it
# doesn't appear to be in the bridge support in Apple's pyObjC
# as of OS X 10.6.7
NSApp().setActivationPolicy_(0)
NSApp().finishLaunching()
NSApp().activateIgnoringOtherApps_(True)
# Then get rid of the pool when we're done.
del pool
def _create_application_menu(self):
# Sets up a menu and installs a "quit" item so that we can use
# Command-Q to exit the application.
# See http://cocoawithlove.com/2010/09/minimalist-cocoa-programming.html
# This could also be done much more easily with a NIB.
menubar = NSMenu.alloc().init()
appMenuItem = NSMenuItem.alloc().init()
menubar.addItem_(appMenuItem)
NSApp().setMainMenu_(menubar)
appMenu = NSMenu.alloc().init()
processName = NSProcessInfo.processInfo().processName()
hideItem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(
"Hide " + processName, "hide:", "h")
appMenu.addItem_(hideItem)
appMenu.addItem_(NSMenuItem.separatorItem())
quitItem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(
"Quit " + processName, "terminate:", "q")
appMenu.addItem_(quitItem)
appMenuItem.setSubmenu_(appMenu)
def start(self):
pass
def step(self, timeout=None):
# Create an autorelease pool for this iteration.
pool = NSAutoreleasePool.alloc().init()
# Determine the timeout date.
if timeout is None:
# Using distantFuture as untilDate means that nextEventMatchingMask
# will wait until the next event comes along.
timeout_date = NSDate.distantFuture()
else:
timeout_date = NSDate.dateWithTimeIntervalSinceNow_(timeout)
# Retrieve the next event (if any). We wait for an event to show up
# and then process it, or if timeout_date expires we simply return.
# We only process one event per call of step().
self._is_running.set()
event = NSApp().nextEventMatchingMask_untilDate_inMode_dequeue_(
NSAnyEventMask, timeout_date, NSDefaultRunLoopMode, True)
# Dispatch the event (if any).
if event is not None:
event_type = event.type()
if event_type != NSApplicationDefined:
# Send out event as normal. Responders will still receive
# keyUp:, keyDown:, and flagsChanged: events.
NSApp().sendEvent_(event)
# Resend key events as special pyglet-specific messages
# which supplant the keyDown:, keyUp:, and flagsChanged: messages
# because NSApplication translates multiple key presses into key
# equivalents before sending them on, which means that some keyUp:
# messages are never sent for individual keys. Our pyglet-specific
# replacements ensure that we see all the raw key presses & releases.
# We also filter out key-down repeats since pyglet only sends one
# on_key_press event per key press.
if event_type == NSKeyDown and not event.isARepeat():
NSApp().sendAction_to_from_("pygletKeyDown:", None, event)
elif event_type == NSKeyUp:
NSApp().sendAction_to_from_("pygletKeyUp:", None, event)
elif event_type == NSFlagsChanged:
NSApp().sendAction_to_from_("pygletFlagsChanged:", None, event)
NSApp().updateWindows()
did_time_out = False
else:
did_time_out = True
self._is_running.clear()
# Destroy the autorelease pool used for this step.
del pool
return did_time_out
def stop(self):
pass
def notify(self):
pool = NSAutoreleasePool.alloc().init()
notifyEvent = NSEvent.otherEventWithType_location_modifierFlags_timestamp_windowNumber_context_subtype_data1_data2_(
NSApplicationDefined, # type
NSPoint(0.0, 0.0), # location
0, # modifierFlags
0, # timestamp
0, # windowNumber
None, # graphicsContext
0, # subtype
0, # data1
0, # data2
)
NSApp().postEvent_atStart_(notifyEvent, False)
del pool
| ardekantur/pyglet | pyglet/app/cocoa.py | Python | bsd-3-clause | 7,023 |
from .. utils import TranspileTestCase, BuiltinFunctionTestCase
class InputTests(TranspileTestCase):
pass
# FIXME: This test can't run without a redirection for stdin.
# class BuiltinInputFunctionTests(BuiltinFunctionTestCase, TranspileTestCase):
# functions = ["input"]
# not_implemented = [
# 'test_bool',
# 'test_bytearray',
# 'test_bytes',
# 'test_class',
# 'test_complex',
# 'test_dict',
# 'test_float',
# 'test_frozenset',
# 'test_int',
# 'test_list',
# 'test_None',
# 'test_NotImplemented',
# 'test_set',
# 'test_str',
# 'test_tuple',
# ]
| glasnt/voc | tests/builtins/test_input.py | Python | bsd-3-clause | 687 |
from bokeh.plotting import figure, show
# prepare some data
x = [1, 2, 3, 4, 5]
y = [6, 7, 2, 4, 5]
# create a new plot with a title and axis labels
p = figure(title="Simple line example", x_axis_label="x", y_axis_label="y")
# add a line renderer with legend and line thickness
p.line(x, y, legend_label="Temp.", line_width=2)
# show the results
show(p)
| bokeh/bokeh | sphinx/source/docs/first_steps/examples/first_steps_1_simple_line.py | Python | bsd-3-clause | 358 |
from Chip import OpCodeDefinitions
from Tests.OpCodeTests.OpCodeTestBase import OpCodeTestBase
class TestRtiOpCode(OpCodeTestBase):
def test_execute_rti_implied_command_calls_and_method(self):
self.assert_opcode_execution(OpCodeDefinitions.rti_implied_command, self.target.get_rti_command_executed)
| jeroanan/Nes2 | Tests/OpCodeTests/TestRtiOpCode.py | Python | bsd-3-clause | 313 |
#encoding=utf-8
"""
26. Invalid models
This example exists purely to point out errors in models.
"""
from __future__ import unicode_literals
from django.db import connection, models
class FieldErrors(models.Model):
charfield = models.CharField()
charfield2 = models.CharField(max_length=-1)
charfield3 = models.CharField(max_length="bad")
decimalfield = models.DecimalField()
decimalfield2 = models.DecimalField(max_digits=-1, decimal_places=-1)
decimalfield3 = models.DecimalField(max_digits="bad", decimal_places="bad")
decimalfield4 = models.DecimalField(max_digits=9, decimal_places=10)
decimalfield5 = models.DecimalField(max_digits=10, decimal_places=10)
filefield = models.FileField()
choices = models.CharField(max_length=10, choices='bad')
choices2 = models.CharField(max_length=10, choices=[(1, 2, 3), (1, 2, 3)])
index = models.CharField(max_length=10, db_index='bad')
field_ = models.CharField(max_length=10)
nullbool = models.BooleanField(null=True)
class Target(models.Model):
tgt_safe = models.CharField(max_length=10)
clash1 = models.CharField(max_length=10)
clash2 = models.CharField(max_length=10)
clash1_set = models.CharField(max_length=10)
class Clash1(models.Model):
src_safe = models.CharField(max_length=10)
foreign = models.ForeignKey(Target)
m2m = models.ManyToManyField(Target)
class Clash2(models.Model):
src_safe = models.CharField(max_length=10)
foreign_1 = models.ForeignKey(Target, related_name='id')
foreign_2 = models.ForeignKey(Target, related_name='src_safe')
m2m_1 = models.ManyToManyField(Target, related_name='id')
m2m_2 = models.ManyToManyField(Target, related_name='src_safe')
class Target2(models.Model):
clash3 = models.CharField(max_length=10)
foreign_tgt = models.ForeignKey(Target)
clashforeign_set = models.ForeignKey(Target)
m2m_tgt = models.ManyToManyField(Target)
clashm2m_set = models.ManyToManyField(Target)
class Clash3(models.Model):
src_safe = models.CharField(max_length=10)
foreign_1 = models.ForeignKey(Target2, related_name='foreign_tgt')
foreign_2 = models.ForeignKey(Target2, related_name='m2m_tgt')
m2m_1 = models.ManyToManyField(Target2, related_name='foreign_tgt')
m2m_2 = models.ManyToManyField(Target2, related_name='m2m_tgt')
class ClashForeign(models.Model):
foreign = models.ForeignKey(Target2)
class ClashM2M(models.Model):
m2m = models.ManyToManyField(Target2)
class SelfClashForeign(models.Model):
src_safe = models.CharField(max_length=10)
selfclashforeign = models.CharField(max_length=10)
selfclashforeign_set = models.ForeignKey("SelfClashForeign")
foreign_1 = models.ForeignKey("SelfClashForeign", related_name='id')
foreign_2 = models.ForeignKey("SelfClashForeign", related_name='src_safe')
class ValidM2M(models.Model):
src_safe = models.CharField(max_length=10)
validm2m = models.CharField(max_length=10)
# M2M fields are symmetrical by default. Symmetrical M2M fields
# on self don't require a related accessor, so many potential
# clashes are avoided.
validm2m_set = models.ManyToManyField("self")
m2m_1 = models.ManyToManyField("self", related_name='id')
m2m_2 = models.ManyToManyField("self", related_name='src_safe')
m2m_3 = models.ManyToManyField('self')
m2m_4 = models.ManyToManyField('self')
class SelfClashM2M(models.Model):
src_safe = models.CharField(max_length=10)
selfclashm2m = models.CharField(max_length=10)
# Non-symmetrical M2M fields _do_ have related accessors, so
# there is potential for clashes.
selfclashm2m_set = models.ManyToManyField("self", symmetrical=False)
m2m_1 = models.ManyToManyField("self", related_name='id', symmetrical=False)
m2m_2 = models.ManyToManyField("self", related_name='src_safe', symmetrical=False)
m2m_3 = models.ManyToManyField('self', symmetrical=False)
m2m_4 = models.ManyToManyField('self', symmetrical=False)
class Model(models.Model):
"But it's valid to call a model Model."
year = models.PositiveIntegerField() # 1960
make = models.CharField(max_length=10) # Aston Martin
name = models.CharField(max_length=10) # DB 4 GT
class Car(models.Model):
colour = models.CharField(max_length=5)
model = models.ForeignKey(Model)
class MissingRelations(models.Model):
rel1 = models.ForeignKey("Rel1")
rel2 = models.ManyToManyField("Rel2")
class MissingManualM2MModel(models.Model):
name = models.CharField(max_length=5)
missing_m2m = models.ManyToManyField(Model, through="MissingM2MModel")
class Person(models.Model):
name = models.CharField(max_length=5)
class Group(models.Model):
name = models.CharField(max_length=5)
primary = models.ManyToManyField(Person, through="Membership", related_name="primary")
secondary = models.ManyToManyField(Person, through="Membership", related_name="secondary")
tertiary = models.ManyToManyField(Person, through="RelationshipDoubleFK", related_name="tertiary")
class GroupTwo(models.Model):
name = models.CharField(max_length=5)
primary = models.ManyToManyField(Person, through="Membership")
secondary = models.ManyToManyField(Group, through="MembershipMissingFK")
class Membership(models.Model):
person = models.ForeignKey(Person)
group = models.ForeignKey(Group)
not_default_or_null = models.CharField(max_length=5)
class MembershipMissingFK(models.Model):
person = models.ForeignKey(Person)
class PersonSelfRefM2M(models.Model):
name = models.CharField(max_length=5)
friends = models.ManyToManyField('self', through="Relationship")
too_many_friends = models.ManyToManyField('self', through="RelationshipTripleFK")
class PersonSelfRefM2MExplicit(models.Model):
name = models.CharField(max_length=5)
friends = models.ManyToManyField('self', through="ExplicitRelationship", symmetrical=True)
class Relationship(models.Model):
first = models.ForeignKey(PersonSelfRefM2M, related_name="rel_from_set")
second = models.ForeignKey(PersonSelfRefM2M, related_name="rel_to_set")
date_added = models.DateTimeField()
class ExplicitRelationship(models.Model):
first = models.ForeignKey(PersonSelfRefM2MExplicit, related_name="rel_from_set")
second = models.ForeignKey(PersonSelfRefM2MExplicit, related_name="rel_to_set")
date_added = models.DateTimeField()
class RelationshipTripleFK(models.Model):
first = models.ForeignKey(PersonSelfRefM2M, related_name="rel_from_set_2")
second = models.ForeignKey(PersonSelfRefM2M, related_name="rel_to_set_2")
third = models.ForeignKey(PersonSelfRefM2M, related_name="too_many_by_far")
date_added = models.DateTimeField()
class RelationshipDoubleFK(models.Model):
first = models.ForeignKey(Person, related_name="first_related_name")
second = models.ForeignKey(Person, related_name="second_related_name")
third = models.ForeignKey(Group, related_name="rel_to_set")
date_added = models.DateTimeField()
class AbstractModel(models.Model):
name = models.CharField(max_length=10)
class Meta:
abstract = True
class AbstractRelationModel(models.Model):
fk1 = models.ForeignKey('AbstractModel')
fk2 = models.ManyToManyField('AbstractModel')
class UniqueM2M(models.Model):
""" Model to test for unique ManyToManyFields, which are invalid. """
unique_people = models.ManyToManyField(Person, unique=True)
class NonUniqueFKTarget1(models.Model):
""" Model to test for non-unique FK target in yet-to-be-defined model: expect an error """
tgt = models.ForeignKey('FKTarget', to_field='bad')
class UniqueFKTarget1(models.Model):
""" Model to test for unique FK target in yet-to-be-defined model: expect no error """
tgt = models.ForeignKey('FKTarget', to_field='good')
class FKTarget(models.Model):
bad = models.IntegerField()
good = models.IntegerField(unique=True)
class NonUniqueFKTarget2(models.Model):
""" Model to test for non-unique FK target in previously seen model: expect an error """
tgt = models.ForeignKey(FKTarget, to_field='bad')
class UniqueFKTarget2(models.Model):
""" Model to test for unique FK target in previously seen model: expect no error """
tgt = models.ForeignKey(FKTarget, to_field='good')
class NonExistingOrderingWithSingleUnderscore(models.Model):
class Meta:
ordering = ("does_not_exist",)
class InvalidSetNull(models.Model):
fk = models.ForeignKey('self', on_delete=models.SET_NULL)
class InvalidSetDefault(models.Model):
fk = models.ForeignKey('self', on_delete=models.SET_DEFAULT)
class UnicodeForeignKeys(models.Model):
"""Foreign keys which can translate to ascii should be OK, but fail if
they're not."""
good = models.ForeignKey('FKTarget')
also_good = models.ManyToManyField('FKTarget', related_name='unicode2')
# In Python 3 this should become legal, but currently causes unicode errors
# when adding the errors in core/management/validation.py
#bad = models.ForeignKey('★')
class PrimaryKeyNull(models.Model):
my_pk_field = models.IntegerField(primary_key=True, null=True)
class OrderByPKModel(models.Model):
"""
Model to test that ordering by pk passes validation.
Refs #8291
"""
name = models.CharField(max_length=100, blank=True)
class Meta:
ordering = ('pk',)
class SwappableModel(models.Model):
"""A model that can be, but isn't swapped out.
    References to this model *shouldn't* raise any validation error.
"""
name = models.CharField(max_length=100)
class Meta:
swappable = 'TEST_SWAPPABLE_MODEL'
class SwappedModel(models.Model):
"""A model that is swapped out.
References to this model *should* raise a validation error.
Requires TEST_SWAPPED_MODEL to be defined in the test environment;
this is guaranteed by the test runner using @override_settings.
"""
name = models.CharField(max_length=100)
class Meta:
swappable = 'TEST_SWAPPED_MODEL'
class BadSwappableValue(models.Model):
"""A model that can be swapped out; during testing, the swappable
value is not of the format app.model
"""
name = models.CharField(max_length=100)
class Meta:
swappable = 'TEST_SWAPPED_MODEL_BAD_VALUE'
class BadSwappableModel(models.Model):
"""A model that can be swapped out; during testing, the swappable
value references an unknown model.
"""
name = models.CharField(max_length=100)
class Meta:
swappable = 'TEST_SWAPPED_MODEL_BAD_MODEL'
class HardReferenceModel(models.Model):
fk_1 = models.ForeignKey(SwappableModel, related_name='fk_hardref1')
fk_2 = models.ForeignKey('invalid_models.SwappableModel', related_name='fk_hardref2')
fk_3 = models.ForeignKey(SwappedModel, related_name='fk_hardref3')
fk_4 = models.ForeignKey('invalid_models.SwappedModel', related_name='fk_hardref4')
m2m_1 = models.ManyToManyField(SwappableModel, related_name='m2m_hardref1')
m2m_2 = models.ManyToManyField('invalid_models.SwappableModel', related_name='m2m_hardref2')
m2m_3 = models.ManyToManyField(SwappedModel, related_name='m2m_hardref3')
m2m_4 = models.ManyToManyField('invalid_models.SwappedModel', related_name='m2m_hardref4')
model_errors = """invalid_models.fielderrors: "charfield": CharFields require a "max_length" attribute that is a positive integer.
invalid_models.fielderrors: "charfield2": CharFields require a "max_length" attribute that is a positive integer.
invalid_models.fielderrors: "charfield3": CharFields require a "max_length" attribute that is a positive integer.
invalid_models.fielderrors: "decimalfield": DecimalFields require a "decimal_places" attribute that is a non-negative integer.
invalid_models.fielderrors: "decimalfield": DecimalFields require a "max_digits" attribute that is a positive integer.
invalid_models.fielderrors: "decimalfield2": DecimalFields require a "decimal_places" attribute that is a non-negative integer.
invalid_models.fielderrors: "decimalfield2": DecimalFields require a "max_digits" attribute that is a positive integer.
invalid_models.fielderrors: "decimalfield3": DecimalFields require a "decimal_places" attribute that is a non-negative integer.
invalid_models.fielderrors: "decimalfield3": DecimalFields require a "max_digits" attribute that is a positive integer.
invalid_models.fielderrors: "decimalfield4": DecimalFields require a "max_digits" attribute value that is greater than or equal to the value of the "decimal_places" attribute.
invalid_models.fielderrors: "filefield": FileFields require an "upload_to" attribute.
invalid_models.fielderrors: "choices": "choices" should be iterable (e.g., a tuple or list).
invalid_models.fielderrors: "choices2": "choices" should be a sequence of two-tuples.
invalid_models.fielderrors: "choices2": "choices" should be a sequence of two-tuples.
invalid_models.fielderrors: "index": "db_index" should be either None, True or False.
invalid_models.fielderrors: "field_": Field names cannot end with underscores, because this would lead to ambiguous queryset filters.
invalid_models.fielderrors: "nullbool": BooleanFields do not accept null values. Use a NullBooleanField instead.
invalid_models.clash1: Accessor for field 'foreign' clashes with field 'Target.clash1_set'. Add a related_name argument to the definition for 'foreign'.
invalid_models.clash1: Accessor for field 'foreign' clashes with related m2m field 'Target.clash1_set'. Add a related_name argument to the definition for 'foreign'.
invalid_models.clash1: Reverse query name for field 'foreign' clashes with field 'Target.clash1'. Add a related_name argument to the definition for 'foreign'.
invalid_models.clash1: Accessor for m2m field 'm2m' clashes with field 'Target.clash1_set'. Add a related_name argument to the definition for 'm2m'.
invalid_models.clash1: Accessor for m2m field 'm2m' clashes with related field 'Target.clash1_set'. Add a related_name argument to the definition for 'm2m'.
invalid_models.clash1: Reverse query name for m2m field 'm2m' clashes with field 'Target.clash1'. Add a related_name argument to the definition for 'm2m'.
invalid_models.clash2: Accessor for field 'foreign_1' clashes with field 'Target.id'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash2: Accessor for field 'foreign_1' clashes with related m2m field 'Target.id'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash2: Reverse query name for field 'foreign_1' clashes with field 'Target.id'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash2: Reverse query name for field 'foreign_1' clashes with related m2m field 'Target.id'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash2: Accessor for field 'foreign_2' clashes with related m2m field 'Target.src_safe'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.clash2: Reverse query name for field 'foreign_2' clashes with related m2m field 'Target.src_safe'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.clash2: Accessor for m2m field 'm2m_1' clashes with field 'Target.id'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash2: Accessor for m2m field 'm2m_1' clashes with related field 'Target.id'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash2: Reverse query name for m2m field 'm2m_1' clashes with field 'Target.id'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash2: Reverse query name for m2m field 'm2m_1' clashes with related field 'Target.id'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash2: Accessor for m2m field 'm2m_2' clashes with related field 'Target.src_safe'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.clash2: Reverse query name for m2m field 'm2m_2' clashes with related field 'Target.src_safe'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.clash3: Accessor for field 'foreign_1' clashes with field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash3: Accessor for field 'foreign_1' clashes with related m2m field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash3: Reverse query name for field 'foreign_1' clashes with field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash3: Reverse query name for field 'foreign_1' clashes with related m2m field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash3: Accessor for field 'foreign_2' clashes with m2m field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.clash3: Accessor for field 'foreign_2' clashes with related m2m field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.clash3: Reverse query name for field 'foreign_2' clashes with m2m field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.clash3: Reverse query name for field 'foreign_2' clashes with related m2m field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.clash3: Accessor for m2m field 'm2m_1' clashes with field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash3: Accessor for m2m field 'm2m_1' clashes with related field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash3: Reverse query name for m2m field 'm2m_1' clashes with field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash3: Reverse query name for m2m field 'm2m_1' clashes with related field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash3: Accessor for m2m field 'm2m_2' clashes with m2m field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.clash3: Accessor for m2m field 'm2m_2' clashes with related field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.clash3: Reverse query name for m2m field 'm2m_2' clashes with m2m field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.clash3: Reverse query name for m2m field 'm2m_2' clashes with related field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.clashforeign: Accessor for field 'foreign' clashes with field 'Target2.clashforeign_set'. Add a related_name argument to the definition for 'foreign'.
invalid_models.clashm2m: Accessor for m2m field 'm2m' clashes with m2m field 'Target2.clashm2m_set'. Add a related_name argument to the definition for 'm2m'.
invalid_models.target2: Accessor for field 'foreign_tgt' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'foreign_tgt'.
invalid_models.target2: Accessor for field 'foreign_tgt' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'foreign_tgt'.
invalid_models.target2: Accessor for field 'foreign_tgt' clashes with related field 'Target.target2_set'. Add a related_name argument to the definition for 'foreign_tgt'.
invalid_models.target2: Accessor for field 'clashforeign_set' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'clashforeign_set'.
invalid_models.target2: Accessor for field 'clashforeign_set' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'clashforeign_set'.
invalid_models.target2: Accessor for field 'clashforeign_set' clashes with related field 'Target.target2_set'. Add a related_name argument to the definition for 'clashforeign_set'.
invalid_models.target2: Accessor for m2m field 'm2m_tgt' clashes with related field 'Target.target2_set'. Add a related_name argument to the definition for 'm2m_tgt'.
invalid_models.target2: Accessor for m2m field 'm2m_tgt' clashes with related field 'Target.target2_set'. Add a related_name argument to the definition for 'm2m_tgt'.
invalid_models.target2: Accessor for m2m field 'm2m_tgt' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'm2m_tgt'.
invalid_models.target2: Accessor for m2m field 'm2m_tgt' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'm2m_tgt'.
invalid_models.target2: Accessor for m2m field 'm2m_tgt' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'm2m_tgt'.
invalid_models.target2: Accessor for m2m field 'clashm2m_set' clashes with related field 'Target.target2_set'. Add a related_name argument to the definition for 'clashm2m_set'.
invalid_models.target2: Accessor for m2m field 'clashm2m_set' clashes with related field 'Target.target2_set'. Add a related_name argument to the definition for 'clashm2m_set'.
invalid_models.target2: Accessor for m2m field 'clashm2m_set' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'clashm2m_set'.
invalid_models.target2: Accessor for m2m field 'clashm2m_set' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'clashm2m_set'.
invalid_models.target2: Accessor for m2m field 'clashm2m_set' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'clashm2m_set'.
invalid_models.selfclashforeign: Accessor for field 'selfclashforeign_set' clashes with field 'SelfClashForeign.selfclashforeign_set'. Add a related_name argument to the definition for 'selfclashforeign_set'.
invalid_models.selfclashforeign: Reverse query name for field 'selfclashforeign_set' clashes with field 'SelfClashForeign.selfclashforeign'. Add a related_name argument to the definition for 'selfclashforeign_set'.
invalid_models.selfclashforeign: Accessor for field 'foreign_1' clashes with field 'SelfClashForeign.id'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.selfclashforeign: Reverse query name for field 'foreign_1' clashes with field 'SelfClashForeign.id'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.selfclashforeign: Accessor for field 'foreign_2' clashes with field 'SelfClashForeign.src_safe'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.selfclashforeign: Reverse query name for field 'foreign_2' clashes with field 'SelfClashForeign.src_safe'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.selfclashm2m: Accessor for m2m field 'selfclashm2m_set' clashes with m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'selfclashm2m_set'.
invalid_models.selfclashm2m: Reverse query name for m2m field 'selfclashm2m_set' clashes with field 'SelfClashM2M.selfclashm2m'. Add a related_name argument to the definition for 'selfclashm2m_set'.
invalid_models.selfclashm2m: Accessor for m2m field 'selfclashm2m_set' clashes with related m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'selfclashm2m_set'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_1' clashes with field 'SelfClashM2M.id'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_2' clashes with field 'SelfClashM2M.src_safe'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.selfclashm2m: Reverse query name for m2m field 'm2m_1' clashes with field 'SelfClashM2M.id'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.selfclashm2m: Reverse query name for m2m field 'm2m_2' clashes with field 'SelfClashM2M.src_safe'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_3' clashes with m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'm2m_3'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_3' clashes with related m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'm2m_3'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_3' clashes with related m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'm2m_3'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_4' clashes with m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'm2m_4'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_4' clashes with related m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'm2m_4'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_4' clashes with related m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'm2m_4'.
invalid_models.selfclashm2m: Reverse query name for m2m field 'm2m_3' clashes with field 'SelfClashM2M.selfclashm2m'. Add a related_name argument to the definition for 'm2m_3'.
invalid_models.selfclashm2m: Reverse query name for m2m field 'm2m_4' clashes with field 'SelfClashM2M.selfclashm2m'. Add a related_name argument to the definition for 'm2m_4'.
invalid_models.missingrelations: 'rel1' has a relation with model Rel1, which has either not been installed or is abstract.
invalid_models.missingrelations: 'rel2' has an m2m relation with model Rel2, which has either not been installed or is abstract.
invalid_models.grouptwo: 'primary' is a manually-defined m2m relation through model Membership, which does not have foreign keys to Person and GroupTwo
invalid_models.grouptwo: 'secondary' is a manually-defined m2m relation through model MembershipMissingFK, which does not have foreign keys to Group and GroupTwo
invalid_models.missingmanualm2mmodel: 'missing_m2m' specifies an m2m relation through model MissingM2MModel, which has not been installed
invalid_models.group: The model Group has two manually-defined m2m relations through the model Membership, which is not permitted. Please consider using an extra field on your intermediary model instead.
invalid_models.group: Intermediary model RelationshipDoubleFK has more than one foreign key to Person, which is ambiguous and is not permitted.
invalid_models.personselfrefm2m: Many-to-many fields with intermediate tables cannot be symmetrical.
invalid_models.personselfrefm2m: Intermediary model RelationshipTripleFK has more than two foreign keys to PersonSelfRefM2M, which is ambiguous and is not permitted.
invalid_models.personselfrefm2mexplicit: Many-to-many fields with intermediate tables cannot be symmetrical.
invalid_models.abstractrelationmodel: 'fk1' has a relation with model AbstractModel, which has either not been installed or is abstract.
invalid_models.abstractrelationmodel: 'fk2' has an m2m relation with model AbstractModel, which has either not been installed or is abstract.
invalid_models.uniquem2m: ManyToManyFields cannot be unique. Remove the unique argument on 'unique_people'.
invalid_models.nonuniquefktarget1: Field 'bad' under model 'FKTarget' must have a unique=True constraint.
invalid_models.nonuniquefktarget2: Field 'bad' under model 'FKTarget' must have a unique=True constraint.
invalid_models.nonexistingorderingwithsingleunderscore: "ordering" refers to "does_not_exist", a field that doesn't exist.
invalid_models.invalidsetnull: 'fk' specifies on_delete=SET_NULL, but cannot be null.
invalid_models.invalidsetdefault: 'fk' specifies on_delete=SET_DEFAULT, but has no default value.
invalid_models.hardreferencemodel: 'fk_3' defines a relation with the model 'invalid_models.SwappedModel', which has been swapped out. Update the relation to point at settings.TEST_SWAPPED_MODEL.
invalid_models.hardreferencemodel: 'fk_4' defines a relation with the model 'invalid_models.SwappedModel', which has been swapped out. Update the relation to point at settings.TEST_SWAPPED_MODEL.
invalid_models.hardreferencemodel: 'm2m_3' defines a relation with the model 'invalid_models.SwappedModel', which has been swapped out. Update the relation to point at settings.TEST_SWAPPED_MODEL.
invalid_models.hardreferencemodel: 'm2m_4' defines a relation with the model 'invalid_models.SwappedModel', which has been swapped out. Update the relation to point at settings.TEST_SWAPPED_MODEL.
invalid_models.badswappablevalue: TEST_SWAPPED_MODEL_BAD_VALUE is not of the form 'app_label.app_name'.
invalid_models.badswappablemodel: Model has been swapped out for 'not_an_app.Target' which has not been installed or is abstract.
"""
if not connection.features.interprets_empty_strings_as_nulls:
model_errors += """invalid_models.primarykeynull: "my_pk_field": Primary key fields cannot have null=True.
"""
| RaoUmer/django | tests/modeltests/invalid_models/invalid_models/models.py | Python | bsd-3-clause | 29,360 |
import sys
import os
import commands
import nipype.pipeline.engine as pe
import nipype.algorithms.rapidart as ra
import nipype.interfaces.fsl as fsl
import nipype.interfaces.io as nio
import nipype.interfaces.utility as util
from utils import *
from CPAC.vmhc import *
from nipype.interfaces.afni import preprocess
from CPAC.registration import create_wf_calculate_ants_warp, \
create_wf_c3d_fsl_to_itk, \
create_wf_collect_transforms, \
create_wf_apply_ants_warp
def create_vmhc(use_ants):
"""
Compute the map of brain functional homotopy, the high degree of synchrony in spontaneous activity between geometrically corresponding interhemispheric (i.e., homotopic) regions.
Parameters
----------
None
Returns
-------
vmhc_workflow : workflow
Voxel Mirrored Homotopic Connectivity Analysis Workflow
Notes
-----
`Source <https://github.com/FCP-INDI/C-PAC/blob/master/CPAC/vmhc/vmhc.py>`_
Workflow Inputs::
inputspec.brain : string (existing nifti file)
Anatomical image (without skull)
inputspec.brain_symmetric : string (existing nifti file)
MNI152_T1_2mm_brain_symmetric.nii.gz
inputspec.rest_res_filt : string (existing nifti file)
Band-passed image with nuisance signals regressed out (and optionally scrubbed). Recommended bandpass filter: (0.001, 0.1)
inputspec.reorient : string (existing nifti file)
RPI oriented anatomical data
inputspec.example_func2highres_mat : string (existing affine transformation .mat file)
Specifies an affine transform that should be applied to the example_func before non linear warping
inputspec.standard : string (existing nifti file)
MNI152_T1_standard_resolution_brain.nii.gz
inputspec.symm_standard : string (existing nifti file)
MNI152_T1_2mm_symmetric.nii.gz
inputspec.twomm_brain_mask_dil : string (existing nifti file)
MNI152_T1_2mm_brain_mask_symmetric_dil.nii.gz
inputspec.config_file_twomm_symmetric : string (existing .cnf file)
T1_2_MNI152_2mm_symmetric.cnf
inputspec.rest_mask : string (existing nifti file)
A mask of the functional volume (derived by dilation from the motion-corrected functional volume)
fwhm_input.fwhm : list (float)
For spatial smoothing the Z-transformed correlations in MNI space.
Generally the value of this parameter is 1.5 or 2 times the voxel size of the input Image.
inputspec.mean_functional : string (existing nifti file)
The mean functional image for use in the func-to-anat registration matrix conversion
to ITK (ANTS) format, if the user selects to use ANTS.
Workflow Outputs::
outputspec.highres2symmstandard : string (nifti file)
Linear registration of T1 image to symmetric standard image
outputspec.highres2symmstandard_mat : string (affine transformation .mat file)
An affine transformation .mat file from linear registration and used in non linear registration
outputspec.highres2symmstandard_warp : string (nifti file)
warp file from Non Linear registration of T1 to symmetrical standard brain
outputspec.fnirt_highres2symmstandard : string (nifti file)
Non Linear registration of T1 to symmetrical standard brain
outputspec.highres2symmstandard_jac : string (nifti file)
jacobian determinant image from Non Linear registration of T1 to symmetrical standard brain
outputspec.rest_res_2symmstandard : string (nifti file)
nonlinear registration (func to standard) image
outputspec.VMHC_FWHM_img : string (nifti file)
pearson correlation between res2standard and flipped res2standard
outputspec.VMHC_Z_FWHM_img : string (nifti file)
Fisher Z transform map
outputspec.VMHC_Z_stat_FWHM_img : string (nifti file)
Z statistic map
Order of commands:
- Perform linear registration of Anatomical brain in T1 space to symmetric standard space. For details see `flirt <http://www.fmrib.ox.ac.uk/fsl/flirt/index.html>`_::
flirt
-ref MNI152_T1_2mm_brain_symmetric.nii.gz
-in mprage_brain.nii.gz
-out highres2symmstandard.nii.gz
-omat highres2symmstandard.mat
-cost corratio
-searchcost corratio
-dof 12
-interp trilinear
- Perform nonlinear registration (highres to standard) to symmetric standard brain. For details see `fnirt <http://fsl.fmrib.ox.ac.uk/fsl/fnirt/>`_::
fnirt
--in=head.nii.gz
--aff=highres2symmstandard.mat
--cout=highres2symmstandard_warp.nii.gz
--iout=fnirt_highres2symmstandard.nii.gz
--jout=highres2symmstandard_jac.nii.gz
--config=T1_2_MNI152_2mm_symmetric.cnf
--ref=MNI152_T1_2mm_symmetric.nii.gz
--refmask=MNI152_T1_2mm_brain_mask_symmetric_dil.nii.gz
--warpres=10,10,10
- Perform spatial smoothing on the input functional image (inputspec.rest_res_filt). For details see `PrinciplesSmoothing <http://imaging.mrc-cbu.cam.ac.uk/imaging/PrinciplesSmoothing>`_ `fslmaths <http://www.fmrib.ox.ac.uk/fslcourse/lectures/practicals/intro/index.htm>`_::
fslmaths rest_res_filt.nii.gz
-kernel gauss FWHM / sqrt(8 * ln(2))
-fmean -mas rest_mask.nii.gz
rest_res_filt_FWHM.nii.gz
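Note that fslmaths expects the Gaussian kernel as a sigma (in mm) rather than a FWHM;
the conversion is sigma = FWHM / sqrt(8 * ln(2)), roughly FWHM / 2.3548, so e.g. a
6 mm FWHM corresponds to a sigma of about 2.55 mm.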
- Apply nonlinear registration (func to standard). For details see `applywarp <http://www.fmrib.ox.ac.uk/fsl/fnirt/warp_utils.html#applywarp>`_::
applywarp
--ref=MNI152_T1_2mm_symmetric.nii.gz
--in=rest_res_filt_FWHM.nii.gz
--out=rest_res_2symmstandard.nii.gz
--warp=highres2symmstandard_warp.nii.gz
--premat=example_func2highres.mat
- Copy and L/R swap the output of applywarp command (rest_res_2symmstandard.nii.gz). For details see `fslswapdim <http://fsl.fmrib.ox.ac.uk/fsl/fsl4.0/avwutils/index.html>`_::
fslswapdim
rest_res_2symmstandard.nii.gz
-x y z
tmp_LRflipped.nii.gz
- Calculate the Pearson correlation between rest_res_2symmstandard.nii.gz and the flipped rest_res_2symmstandard.nii.gz (tmp_LRflipped.nii.gz). For details see `3dTcorrelate <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dTcorrelate.html>`_::
3dTcorrelate
-pearson
-polort -1
-prefix VMHC_FWHM.nii.gz
rest_res_2symmstandard.nii.gz
tmp_LRflipped.nii.gz
- Fisher Z Transform the correlation. For details see `3dcalc <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dcalc.html>`_::
3dcalc
-a VMHC_FWHM.nii.gz
-expr 'log((a+1)/(1-a))/2'
-prefix VMHC_FWHM_Z.nii.gz
- Calculate the number of volumes (nvols) in the flipped rest_res_2symmstandard.nii.gz (tmp_LRflipped.nii.gz) ::
-Use Nibabel to do this
- Compute the Z statistic map ::
3dcalc
-a VMHC_FWHM_Z.nii.gz
-expr 'a*sqrt('${nvols}'-3)'
-prefix VMHC_FWHM_Z_stat.nii.gz
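The 'nvols - 3' term comes from the standard error of the Fisher Z statistic,
1 / sqrt(N - 3); multiplying the Z-transformed correlation by sqrt(nvols - 3)
therefore yields an approximate Z score per voxel.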
Workflow:
.. image:: ../images/vmhc_graph.dot.png
:width: 500
Workflow Detailed:
.. image:: ../images/vmhc_detailed_graph.dot.png
:width: 500
References
----------
.. [1] Zuo, X.-N., Kelly, C., Di Martino, A., Mennes, M., Margulies, D. S., Bangaru, S., Grzadzinski, R., et al. (2010). Growing together and growing apart: regional and sex differences in the lifespan developmental trajectories of functional homotopy. The Journal of neuroscience : the official journal of the Society for Neuroscience, 30(45), 15034-43. doi:10.1523/JNEUROSCI.2612-10.2010
Examples
--------
>>> vmhc_w = create_vmhc()
>>> vmhc_w.inputs.inputspec.brain_symmetric = 'MNI152_T1_2mm_brain_symmetric.nii.gz'
>>> vmhc_w.inputs.inputspec.symm_standard = 'MNI152_T1_2mm_symmetric.nii.gz'
>>> vmhc_w.inputs.inputspec.twomm_brain_mask_dil = 'MNI152_T1_2mm_brain_mask_symmetric_dil.nii.gz'
>>> vmhc_w.inputs.inputspec.config_file_twomm = 'T1_2_MNI152_2mm_symmetric.cnf'
>>> vmhc_w.inputs.inputspec.standard = 'MNI152_T1_2mm.nii.gz'
>>> vmhc_w.inputs.fwhm_input.fwhm = [4.5, 6]
>>> vmhc_w.get_node('fwhm_input').iterables = ('fwhm', [4.5, 6])
>>> vmhc_w.inputs.inputspec.rest_res = os.path.abspath('/home/data/Projects/Pipelines_testing/Dickstein/subjects/s1001/func/original/rest_res_filt.nii.gz')
>>> vmhc_w.inputs.inputspec.reorient = os.path.abspath('/home/data/Projects/Pipelines_testing/Dickstein/subjects/s1001/anat/mprage_RPI.nii.gz')
>>> vmhc_w.inputs.inputspec.brain = os.path.abspath('/home/data/Projects/Pipelines_testing/Dickstein/subjects/s1001/anat/mprage_brain.nii.gz')
>>> vmhc_w.inputs.inputspec.example_func2highres_mat = os.path.abspath('/home/data/Projects/Pipelines_testing/Dickstein/subjects/s1001/func/original/reg/example_func2highres.mat')
>>> vmhc_w.inputs.inputspec.rest_mask = os.path.abspath('/home/data/Projects/Pipelines_testing/Dickstein/subjects/s1001/func/original/rest_mask.nii.gz')
>>> vmhc_w.run() # doctest: +SKIP
"""
vmhc = pe.Workflow(name='vmhc_workflow')
inputNode = pe.Node(util.IdentityInterface(fields=['brain',
'brain_symmetric',
'rest_res',
'reorient',
'example_func2highres_mat',
'symm_standard',
'twomm_brain_mask_dil',
'config_file_twomm',
'rest_mask',
'standard',
'mean_functional']),
name='inputspec')
outputNode = pe.Node(util.IdentityInterface(fields=['highres2symmstandard',
'highres2symmstandard_mat',
'highres2symmstandard_warp',
'fnirt_highres2symmstandard',
'highres2symmstandard_jac',
'rest_res_2symmstandard',
'VMHC_FWHM_img',
'VMHC_Z_FWHM_img',
'VMHC_Z_stat_FWHM_img'
]),
name='outputspec')
inputnode_fwhm = pe.Node(util.IdentityInterface(fields=['fwhm']),
name='fwhm_input')
if use_ants == False:
## Linear registration of T1 --> symmetric standard
linear_T1_to_symmetric_standard = pe.Node(interface=fsl.FLIRT(),
name='linear_T1_to_symmetric_standard')
linear_T1_to_symmetric_standard.inputs.cost = 'corratio'
linear_T1_to_symmetric_standard.inputs.cost_func = 'corratio'
linear_T1_to_symmetric_standard.inputs.dof = 12
linear_T1_to_symmetric_standard.inputs.interp = 'trilinear'
## Perform nonlinear registration
## (highres to standard) to symmetric standard brain
nonlinear_highres_to_symmetric_standard = pe.Node(interface=fsl.FNIRT(),
name='nonlinear_highres_to_symmetric_standard')
nonlinear_highres_to_symmetric_standard.inputs.fieldcoeff_file = True
nonlinear_highres_to_symmetric_standard.inputs.jacobian_file = True
nonlinear_highres_to_symmetric_standard.inputs.warp_resolution = (10, 10, 10)
# needs new inputs. needs input from resources for the field coeff of the template->symmetric.
# and needs the field coeff of the anatomical-to-template registration
## Apply nonlinear registration (func to standard)
nonlinear_func_to_standard = pe.Node(interface=fsl.ApplyWarp(),
name='nonlinear_func_to_standard')
elif use_ants == True:
# ANTS warp image etc.
calculate_ants_xfm_vmhc = create_wf_calculate_ants_warp(name='calculate_ants_xfm_vmhc')
fsl_to_itk_vmhc = create_wf_c3d_fsl_to_itk(0, name='fsl_to_itk_vmhc')
collect_transforms_vmhc = create_wf_collect_transforms(0, name='collect_transforms_vmhc')
apply_ants_xfm_vmhc = create_wf_apply_ants_warp(0,name='apply_ants_xfm_vmhc')
calculate_ants_xfm_vmhc.inputs.inputspec.dimension = 3
calculate_ants_xfm_vmhc.inputs.inputspec. \
use_histogram_matching = True
calculate_ants_xfm_vmhc.inputs.inputspec. \
winsorize_lower_quantile = 0.01
calculate_ants_xfm_vmhc.inputs.inputspec. \
winsorize_upper_quantile = 0.99
calculate_ants_xfm_vmhc.inputs.inputspec. \
metric = ['MI','MI','CC']
calculate_ants_xfm_vmhc.inputs.inputspec.metric_weight = [1,1,1]
calculate_ants_xfm_vmhc.inputs.inputspec. \
radius_or_number_of_bins = [32,32,4]
calculate_ants_xfm_vmhc.inputs.inputspec. \
sampling_strategy = ['Regular','Regular',None]
calculate_ants_xfm_vmhc.inputs.inputspec. \
sampling_percentage = [0.25,0.25,None]
calculate_ants_xfm_vmhc.inputs.inputspec. \
number_of_iterations = [[1000,500,250,100], \
[1000,500,250,100], [100,100,70,20]]
calculate_ants_xfm_vmhc.inputs.inputspec. \
convergence_threshold = [1e-8,1e-8,1e-9]
calculate_ants_xfm_vmhc.inputs.inputspec. \
convergence_window_size = [10,10,15]
calculate_ants_xfm_vmhc.inputs.inputspec. \
transforms = ['Rigid','Affine','SyN']
calculate_ants_xfm_vmhc.inputs.inputspec. \
transform_parameters = [[0.1],[0.1],[0.1,3,0]]
calculate_ants_xfm_vmhc.inputs.inputspec. \
shrink_factors = [[8,4,2,1],[8,4,2,1],[6,4,2,1]]
calculate_ants_xfm_vmhc.inputs.inputspec. \
smoothing_sigmas = [[3,2,1,0],[3,2,1,0],[3,2,1,0]]
apply_ants_xfm_vmhc.inputs.inputspec.interpolation = 'Gaussian'
apply_ants_xfm_vmhc.inputs.inputspec.input_image_type = 3
## copy and L/R swap file
copy_and_L_R_swap = pe.Node(interface=fsl.SwapDimensions(),
name='copy_and_L_R_swap')
copy_and_L_R_swap.inputs.new_dims = ('-x', 'y', 'z')
## calculate vmhc
pearson_correlation = pe.Node(interface=preprocess.TCorrelate(),
name='pearson_correlation')
pearson_correlation.inputs.pearson = True
pearson_correlation.inputs.polort = -1
pearson_correlation.inputs.outputtype = 'NIFTI_GZ'
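## Fisher r-to-z transform of the homotopic correlations
## (the expr below computes z = 0.5 * ln((1 + a) / (1 - a)))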
z_trans = pe.Node(interface=preprocess.Calc(),
name='z_trans')
z_trans.inputs.expr = 'log((1+a)/(1-a))/2'
z_trans.inputs.outputtype = 'NIFTI_GZ'
z_stat = pe.Node(interface=preprocess.Calc(),
name='z_stat')
z_stat.inputs.outputtype = 'NIFTI_GZ'
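## count the functional volumes and build the 3dcalc expression 'a*sqrt(nvols-3)'
## used to turn the Fisher z map into a Z statistic map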
NVOLS = pe.Node(util.Function(input_names=['in_files'],
output_names=['nvols'],
function=get_img_nvols),
name='NVOLS')
generateEXP = pe.Node(util.Function(input_names=['nvols'],
output_names=['expr'],
function=get_operand_expression),
name='generateEXP')
smooth = pe.Node(interface=fsl.MultiImageMaths(),
name='smooth')
if use_ants == False:
vmhc.connect(inputNode, 'brain',
linear_T1_to_symmetric_standard, 'in_file')
vmhc.connect(inputNode, 'brain_symmetric',
linear_T1_to_symmetric_standard, 'reference')
vmhc.connect(inputNode, 'reorient',
nonlinear_highres_to_symmetric_standard, 'in_file')
vmhc.connect(linear_T1_to_symmetric_standard, 'out_matrix_file',
nonlinear_highres_to_symmetric_standard, 'affine_file')
vmhc.connect(inputNode, 'symm_standard',
nonlinear_highres_to_symmetric_standard, 'ref_file')
vmhc.connect(inputNode, 'twomm_brain_mask_dil',
nonlinear_highres_to_symmetric_standard, 'refmask_file')
vmhc.connect(inputNode, 'config_file_twomm',
nonlinear_highres_to_symmetric_standard, 'config_file')
vmhc.connect(inputNode, 'rest_res',
smooth, 'in_file')
vmhc.connect(inputnode_fwhm, ('fwhm', set_gauss),
smooth, 'op_string')
vmhc.connect(inputNode, 'rest_mask',
smooth, 'operand_files')
vmhc.connect(smooth, 'out_file',
nonlinear_func_to_standard, 'in_file')
vmhc.connect(inputNode, 'standard',
nonlinear_func_to_standard, 'ref_file')
vmhc.connect(nonlinear_highres_to_symmetric_standard, 'fieldcoeff_file',
nonlinear_func_to_standard, 'field_file')
## func->anat matrix (bbreg)
vmhc.connect(inputNode, 'example_func2highres_mat',
nonlinear_func_to_standard, 'premat')
vmhc.connect(nonlinear_func_to_standard, 'out_file',
copy_and_L_R_swap, 'in_file')
vmhc.connect(nonlinear_func_to_standard, 'out_file',
pearson_correlation, 'xset')
elif use_ants == True:
# connections for ANTS stuff
# registration calculation stuff -- might go out the window
vmhc.connect(inputNode, 'brain',
calculate_ants_xfm_vmhc, 'inputspec.anatomical_brain')
vmhc.connect(inputNode, 'brain_symmetric',
calculate_ants_xfm_vmhc, 'inputspec.reference_brain')
# functional apply warp stuff
vmhc.connect(inputNode, 'rest_res',
smooth, 'in_file')
vmhc.connect(inputnode_fwhm, ('fwhm', set_gauss),
smooth, 'op_string')
vmhc.connect(inputNode, 'rest_mask',
smooth, 'operand_files')
vmhc.connect(smooth, 'out_file',
apply_ants_xfm_vmhc, 'inputspec.input_image')
vmhc.connect(calculate_ants_xfm_vmhc, 'outputspec.ants_rigid_xfm',
collect_transforms_vmhc, 'inputspec.linear_rigid')
vmhc.connect(calculate_ants_xfm_vmhc, 'outputspec.ants_affine_xfm',
collect_transforms_vmhc, 'inputspec.linear_affine')
vmhc.connect(calculate_ants_xfm_vmhc, 'outputspec.warp_field',
collect_transforms_vmhc, 'inputspec.warp_file')
## func->anat matrix (bbreg)
vmhc.connect(inputNode, 'example_func2highres_mat',
fsl_to_itk_vmhc, 'inputspec.affine_file')
vmhc.connect(inputNode, 'brain', fsl_to_itk_vmhc,
'inputspec.reference_file')
vmhc.connect(inputNode, 'mean_functional', fsl_to_itk_vmhc,
'inputspec.source_file')
vmhc.connect(fsl_to_itk_vmhc, 'outputspec.itk_transform',
collect_transforms_vmhc, 'inputspec.fsl_to_itk_affine')
'''
vmhc.connect(inputNode, 'brain',
apply_ants_xfm_vmhc, 'inputspec.conversion_reference')
vmhc.connect(inputNode, 'mean_functional',
apply_ants_xfm_vmhc, 'inputspec.conversion_source')
'''
vmhc.connect(inputNode, 'brain_symmetric',
apply_ants_xfm_vmhc, 'inputspec.reference_image')
vmhc.connect(collect_transforms_vmhc, \
'outputspec.transformation_series', \
apply_ants_xfm_vmhc, 'inputspec.transforms')
vmhc.connect(apply_ants_xfm_vmhc, 'outputspec.output_image',
copy_and_L_R_swap, 'in_file')
vmhc.connect(apply_ants_xfm_vmhc, 'outputspec.output_image',
pearson_correlation, 'xset')
vmhc.connect(copy_and_L_R_swap, 'out_file',
pearson_correlation, 'yset')
vmhc.connect(pearson_correlation, 'out_file',
z_trans, 'in_file_a')
vmhc.connect(copy_and_L_R_swap, 'out_file',
NVOLS, 'in_files')
vmhc.connect(NVOLS, 'nvols',
generateEXP, 'nvols')
vmhc.connect(z_trans, 'out_file',
z_stat, 'in_file_a')
vmhc.connect(generateEXP, 'expr',
z_stat, 'expr')
if use_ants == False:
vmhc.connect(linear_T1_to_symmetric_standard, 'out_file',
outputNode, 'highres2symmstandard')
vmhc.connect(linear_T1_to_symmetric_standard, 'out_matrix_file',
outputNode, 'highres2symmstandard_mat')
vmhc.connect(nonlinear_highres_to_symmetric_standard, 'jacobian_file',
outputNode, 'highres2symmstandard_jac')
vmhc.connect(nonlinear_highres_to_symmetric_standard, 'fieldcoeff_file',
outputNode, 'highres2symmstandard_warp')
vmhc.connect(nonlinear_highres_to_symmetric_standard, 'warped_file',
outputNode, 'fnirt_highres2symmstandard')
vmhc.connect(nonlinear_func_to_standard, 'out_file',
outputNode, 'rest_res_2symmstandard')
elif use_ants == True:
# ANTS warp outputs to outputnode
vmhc.connect(calculate_ants_xfm_vmhc, 'outputspec.ants_affine_xfm',
outputNode, 'highres2symmstandard_mat')
vmhc.connect(calculate_ants_xfm_vmhc, 'outputspec.warp_field',
outputNode, 'highres2symmstandard_warp')
vmhc.connect(calculate_ants_xfm_vmhc, 'outputspec.normalized_output_brain',
outputNode, 'fnirt_highres2symmstandard')
vmhc.connect(apply_ants_xfm_vmhc, 'outputspec.output_image',
outputNode, 'rest_res_2symmstandard')
vmhc.connect(pearson_correlation, 'out_file',
outputNode, 'VMHC_FWHM_img')
vmhc.connect(z_trans, 'out_file',
outputNode, 'VMHC_Z_FWHM_img')
vmhc.connect(z_stat, 'out_file',
outputNode, 'VMHC_Z_stat_FWHM_img')
return vmhc
| sgiavasis/C-PAC | CPAC/vmhc/vmhc.py | Python | bsd-3-clause | 22,733 |
# -*- coding: utf-8 -*-
import mock
from rest_framework import serializers
from waffle.testutils import override_switch
from olympia.amo.tests import (
BaseTestCase, addon_factory, collection_factory, TestCase, user_factory)
from olympia.bandwagon.models import CollectionAddon
from olympia.bandwagon.serializers import (
CollectionAddonSerializer, CollectionAkismetSpamValidator,
CollectionSerializer, CollectionWithAddonsSerializer)
from olympia.lib.akismet.models import AkismetReport
class TestCollectionAkismetSpamValidator(TestCase):
def setUp(self):
self.validator = CollectionAkismetSpamValidator(
('name', 'description'))
serializer = mock.Mock()
serializer.instance = collection_factory(
name='name', description='Big Cheese')
request = mock.Mock()
request.user = user_factory()
request.META = {}
serializer.context = {'request': request}
self.validator.set_context(serializer)
self.data = {
'name': {'en-US': 'Collection', 'fr': u'Collection'},
'description': {'en-US': 'Big Cheese', 'fr': u'une gránd fromagé'},
'random_data': {'en-US': 'to ignore'},
'slug': 'cheese'}
@override_switch('akismet-spam-check', active=False)
@mock.patch('olympia.lib.akismet.models.AkismetReport.comment_check')
def test_waffle_off(self, comment_check_mock):
self.validator(self.data)
# No Akismet checks
assert AkismetReport.objects.count() == 0
comment_check_mock.assert_not_called()
@override_switch('akismet-spam-check', active=True)
@mock.patch('olympia.lib.akismet.models.AkismetReport.comment_check')
def test_ham(self, comment_check_mock):
comment_check_mock.return_value = AkismetReport.HAM
self.validator(self.data)
# Akismet check is there
assert AkismetReport.objects.count() == 2
name_report = AkismetReport.objects.first()
# name will only be there once because it's duplicated.
assert name_report.comment_type == 'collection-name'
assert name_report.comment == self.data['name']['en-US']
summary_report = AkismetReport.objects.last()
# en-US description won't be there because it's an existing description
assert summary_report.comment_type == 'collection-description'
assert summary_report.comment == self.data['description']['fr']
assert comment_check_mock.call_count == 2
@override_switch('akismet-spam-check', active=True)
@mock.patch('olympia.lib.akismet.models.AkismetReport.comment_check')
def test_spam(self, comment_check_mock):
comment_check_mock.return_value = AkismetReport.MAYBE_SPAM
with self.assertRaises(serializers.ValidationError):
self.validator(self.data)
# Akismet check is there
assert AkismetReport.objects.count() == 2
name_report = AkismetReport.objects.first()
# name will only be there once because it's duplicated.
assert name_report.comment_type == 'collection-name'
assert name_report.comment == self.data['name']['en-US']
summary_report = AkismetReport.objects.last()
# en-US description won't be there because it's an existing description
assert summary_report.comment_type == 'collection-description'
assert summary_report.comment == self.data['description']['fr']
# After the first comment_check comes back as spam, additional ones are skipped.
assert comment_check_mock.call_count == 1
class TestCollectionSerializer(BaseTestCase):
serializer = CollectionSerializer
def setUp(self):
super(TestCollectionSerializer, self).setUp()
self.user = user_factory()
self.collection = collection_factory()
self.collection.update(author=self.user)
def serialize(self):
return self.serializer(self.collection).data
def test_basic(self):
data = self.serialize()
assert data['id'] == self.collection.id
assert data['uuid'] == self.collection.uuid
assert data['name'] == {'en-US': self.collection.name}
assert data['description'] == {'en-US': self.collection.description}
assert data['url'] == self.collection.get_abs_url()
assert data['addon_count'] == self.collection.addon_count
assert data['modified'] == (
self.collection.modified.replace(microsecond=0).isoformat() + 'Z')
assert data['author']['id'] == self.user.id
assert data['slug'] == self.collection.slug
assert data['public'] == self.collection.listed
assert data['default_locale'] == self.collection.default_locale
class TestCollectionAddonSerializer(BaseTestCase):
def setUp(self):
self.collection = collection_factory()
self.addon = addon_factory()
self.collection.add_addon(self.addon)
self.item = CollectionAddon.objects.get(addon=self.addon,
collection=self.collection)
self.item.comments = u'Dis is nice'
self.item.save()
def serialize(self):
return CollectionAddonSerializer(self.item).data
def test_basic(self):
data = self.serialize()
assert data['addon']['id'] == self.collection.addons.all()[0].id
assert data['notes'] == {'en-US': self.item.comments}
class TestCollectionWithAddonsSerializer(TestCollectionSerializer):
serializer = CollectionWithAddonsSerializer
def setUp(self):
super(TestCollectionWithAddonsSerializer, self).setUp()
self.addon = addon_factory()
self.collection.add_addon(self.addon)
def serialize(self):
mock_viewset = mock.MagicMock()
collection_addons = CollectionAddon.objects.filter(
addon=self.addon, collection=self.collection)
mock_viewset.get_addons_queryset.return_value = collection_addons
return self.serializer(
self.collection, context={'view': mock_viewset}).data
def test_basic(self):
super(TestCollectionWithAddonsSerializer, self).test_basic()
collection_addon = CollectionAddon.objects.get(
addon=self.addon, collection=self.collection)
data = self.serialize()
assert data['addons'] == [
CollectionAddonSerializer(collection_addon).data
]
assert data['addons'][0]['addon']['id'] == self.addon.id
| atiqueahmedziad/addons-server | src/olympia/bandwagon/tests/test_serializers.py | Python | bsd-3-clause | 6,482 |
from django.utils.safestring import mark_safe
from corehq.apps.data_interfaces.dispatcher import EditDataInterfaceDispatcher
from corehq.apps.groups.models import Group
from django.core.urlresolvers import reverse
from corehq.apps.reports import util
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn, DTSortType
from corehq.apps.reports.generic import GenericReportView
from corehq.apps.reports.models import HQUserType
from corehq.apps.reports.standard.cases.basic import CaseListMixin
from corehq.apps.reports.standard.cases.data_sources import CaseDisplay
from dimagi.utils.decorators.memoized import memoized
from django.utils.translation import ugettext_noop
from django.utils.translation import ugettext as _
class DataInterface(GenericReportView):
# overriding properties from GenericReportView
section_name = ugettext_noop("Data")
base_template = "reports/standard/base_template.html"
asynchronous = True
dispatcher = EditDataInterfaceDispatcher
exportable = False
@property
def default_report_url(self):
return reverse('data_interfaces_default', args=[self.request.project])
class CaseReassignmentInterface(CaseListMixin, DataInterface):
name = ugettext_noop("Reassign Cases")
slug = "reassign_cases"
report_template_path = 'data_interfaces/interfaces/case_management.html'
asynchronous = False
ajax_pagination = True
@property
@memoized
def all_case_sharing_groups(self):
return Group.get_case_sharing_groups(self.domain)
@property
def headers(self):
headers = DataTablesHeader(
DataTablesColumn(mark_safe('Select <a href="#" class="select-all btn btn-mini btn-inverse">all</a> <a href="#" class="select-none btn btn-mini btn-warning">none</a>'), sortable=False, span=2),
DataTablesColumn(_("Case Name"), span=3, prop_name="name.exact"),
DataTablesColumn(_("Case Type"), span=2, prop_name="type.exact"),
DataTablesColumn(_("Owner"), span=2, prop_name="owner_display", sortable=False),
DataTablesColumn(_("Last Modified"), span=3, prop_name="modified_on"),
)
return headers
@property
def rows(self):
checkbox = mark_safe('<input type="checkbox" class="selected-commcare-case" data-bind="event: {change: updateCaseSelection}" data-caseid="%(case_id)s" data-owner="%(owner)s" data-ownertype="%(owner_type)s" />')
for row in self.es_results['hits'].get('hits', []):
case = self.get_case(row)
display = CaseDisplay(self, case)
yield [
checkbox % dict(case_id=case['_id'], owner=display.owner_id, owner_type=display.owner_type),
display.case_link,
display.case_type,
display.owner_display,
util.format_relative_date(display.parse_date(display.case['modified_on']))['html'],
]
@property
def report_context(self):
context = super(CaseReassignmentInterface, self).report_context
active_users = self.get_all_users_by_domain(user_filter=tuple(HQUserType.use_defaults()), simplified=True)
context.update(
users=[dict(ownerid=user.get('user_id'), name=user.get('username_in_report'), type="user")
for user in active_users],
groups=[dict(ownerid=group.get_id, name=group.name, type="group")
for group in self.all_case_sharing_groups],
user_ids=self.user_ids,
)
return context
| gmimano/commcaretest | corehq/apps/data_interfaces/interfaces.py | Python | bsd-3-clause | 3,559 |
import numpy as np
import pandas as pd
import pytest
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.impute import SimpleImputer
from sklearn.model_selection import GridSearchCV, cross_val_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import FunctionTransformer, StandardScaler
from featuretools.demo.mock_customer import load_mock_customer
from featuretools.wrappers import DFSTransformer
def select_numeric(df):
return df.select_dtypes(exclude=['object'])
@pytest.fixture(scope='module')
def es():
es = load_mock_customer(n_customers=15,
n_products=15,
n_sessions=75,
n_transactions=1000,
random_seed=0,
return_entityset=True)
return es
@pytest.fixture(scope='module')
def df(es):
df = es['customers'].df
df['target'] = np.random.randint(1, 3, df.shape[0]) # 1 or 2 values
return df
@pytest.fixture(scope='module')
def pipeline(es):
pipeline = Pipeline(steps=[
('ft', DFSTransformer(entityset=es,
target_entity="customers",
max_features=20)),
("numeric", FunctionTransformer(select_numeric, validate=False)),
('imp', SimpleImputer()),
('et', ExtraTreesClassifier(n_estimators=10))
])
return pipeline
def test_sklearn_transformer(es, df):
# Using with transformers
pipeline = Pipeline(steps=[
('ft', DFSTransformer(entityset=es,
target_entity="customers")),
("numeric", FunctionTransformer(select_numeric, validate=False)),
('sc', StandardScaler()),
])
X_train = pipeline.fit(df['customer_id']).transform(df['customer_id'])
assert X_train.shape[0] == 15
def test_sklearn_estimator(df, pipeline):
# Using with estimator
pipeline.fit(df['customer_id'].values, y=df.target.values) \
.predict(df['customer_id'].values)
result = pipeline.score(df['customer_id'].values, df.target.values)
assert isinstance(result, (float))
# Pickling / Unpickling Pipeline
# TODO fix this
# s = pickle.dumps(pipeline)
# pipe_pickled = pickle.loads(s)
# result = pipe_pickled.score(df['customer_id'].values, df.target.values)
# assert isinstance(result, (float))
def test_sklearn_cross_val_score(df, pipeline):
# Using with cross_val_score
results = cross_val_score(pipeline,
X=df['customer_id'].values,
y=df.target.values,
cv=2,
scoring="accuracy")
assert isinstance(results[0], (float))
assert isinstance(results[1], (float))
def test_sklearn_gridsearchcv(df, pipeline):
# Using with GridSearchCV
params = {
'et__max_depth': [5, 10]
}
grid = GridSearchCV(estimator=pipeline,
param_grid=params,
cv=3)
grid.fit(df['customer_id'].values, df.target.values)
assert len(grid.predict(df['customer_id'].values)) == 15
def test_sklearn_cuttoff(pipeline):
# Using cutoff_time to filter data
ct = pd.DataFrame()
ct['customer_id'] = [1, 2, 3]
ct['time'] = pd.to_datetime(['2014-1-1 04:00',
'2014-1-1 04:00',
'2014-1-1 04:00'])
ct['label'] = [True, True, False]
results = pipeline.fit(ct, y=ct.label).predict(ct)
assert len(results) == 3
| Featuretools/featuretools | featuretools/tests/wrappers/test_sklearn_wrapper.py | Python | bsd-3-clause | 3,577 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-09-24 19:38
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('controlled_vocabularies', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='property',
name='label',
field=models.TextField(help_text=b'The value for the added property.'),
),
migrations.AlterField(
model_name='property',
name='property_name',
field=models.CharField(choices=[(b'definition', b'Definition'), (b'description', b'Description'), (b'note', b'Note'), (b'system', b'System')], help_text=b"The name of the added property; e.g., 'Description'.", max_length=50, verbose_name=b'Property Type'),
),
migrations.AlterField(
model_name='term',
name='label',
field=models.CharField(help_text=b'The human-readable name of the term.', max_length=255),
),
migrations.AlterField(
model_name='term',
name='name',
field=models.CharField(help_text=b'The name or key that uniquely identifies the term within the vocabulary.', max_length=50),
),
migrations.AlterField(
model_name='term',
name='order',
field=models.IntegerField(blank=True, help_text=b'The preferred order for viewing the term in the vocabulary.', null=True),
),
migrations.AlterField(
model_name='term',
name='vocab_list',
field=models.ForeignKey(help_text=b'The vocabulary that the term needs to be added to.', on_delete=django.db.models.deletion.CASCADE, to='controlled_vocabularies.Vocabulary', verbose_name=b'Vocabulary'),
),
migrations.AlterField(
model_name='vocabulary',
name='definition',
field=models.TextField(blank=True, help_text=b'A brief statement of the meaning of the vocabulary.'),
),
migrations.AlterField(
model_name='vocabulary',
name='label',
field=models.CharField(help_text=b'The human-readable name of the vocabulary.', max_length=255),
),
migrations.AlterField(
model_name='vocabulary',
name='maintainer',
field=models.CharField(help_text=b'The person responsible for creating and updating the vocabulary.', max_length=50),
),
migrations.AlterField(
model_name='vocabulary',
name='maintainerEmail',
field=models.CharField(help_text=b'E-mail address of maintainer.', max_length=50, verbose_name=b'Maintainer E-mail'),
),
migrations.AlterField(
model_name='vocabulary',
name='name',
field=models.CharField(help_text=b'The name or key that uniquely identifies the vocabulary.', max_length=50, unique=True),
),
migrations.AlterField(
model_name='vocabulary',
name='order',
field=models.CharField(choices=[(b'name', b'name'), (b'label', b'label'), (b'order', b'order')], help_text=b'The preferred order for viewing the UNTL list of controlled vocabularies.', max_length=10),
),
]
| unt-libraries/django-controlled-vocabularies | controlled_vocabularies/migrations/0002_auto_20180924_1938.py | Python | bsd-3-clause | 3,376 |
{% block meta %}
name: Base
description: SMACH base template.
language: Python
framework: SMACH
type: Base
tags: [core]
includes: []
extends: []
variables:
- - manifest:
description: ROS manifest name.
type: str
- - node_name:
description: ROS node name for the state machine.
type: str
- outcomes:
description: A list of possible outcomes of the state machine.
type: list
- - userdata:
description: Definitions for userdata needed by child states.
type: dict
- - function_name:
description: A name for the main executable state machine function.
type: str
input_keys: []
output_keys: []
{% endblock meta %}
{% from "Utils.tpl.py" import import_module, render_outcomes, render_userdata %}
{% set defined_headers = [] %}
{% set local_vars = [] %}
{% block base_header %}
#!/usr/bin/env python
{{ base_header }}
{% endblock base_header %}
{% block imports %}
{{ import_module(defined_headers, 'smach') }}
{{ imports }}
{% endblock imports %}
{% block defs %}
{{ defs }}
{% endblock defs %}
{% block class_defs %}
{{ class_defs }}
{% endblock class_defs %}
{% block cb_defs %}
{{ cb_defs }}
{% endblock cb_defs %}
{% if name is defined %}{% set sm_name = name | lower() %}{% else %}{% set sm_name = 'sm' %}{% endif %}
{% block main_def %}
def {% if function_name is defined %}{{ function_name | lower() }}{% else %}main{% endif %}():
{{ main_def | indent(4) }}
{% endblock main_def %}
{% block body %}
{{ sm_name }} = smach.StateMachine({{ render_outcomes(outcomes) }})
{# Container header insertion variable indexed by container state name #}
{% if name in header %}{{ header[name] | indent(4, true) }}{% endif %}
{# Render container userdata #}
{% if userdata is defined %}{{ render_userdata(name | lower(), userdata) | indent(4) }}{% endif %}
{# Render state userdata #}
{% if name in header_userdata %}{{ header_userdata[name] | indent(4, true) }}{% endif %}
with {{ sm_name }}:
{# Container body insertion variable #}
{{ body | indent(8) }}
{% endblock body %}
{% block footer %}
{{ footer | indent(8) }}
{% endblock footer %}
{% block execute %}
{{ execute | indent(4) }}
outcome = {{ sm_name }}.execute()
{% endblock execute %}
{% block base_footer %}
{{ base_footer | indent(4) }}
{% endblock base_footer %}
{% block main %}
if __name__ == '__main__':
{{ '' | indent(4, true) }}{% if function_name is defined %}{{ function_name | lower() }}{% else %}main{% endif %}()
{% endblock main %}
| ReconCell/smacha | smacha/src/smacha/templates/Base.tpl.py | Python | bsd-3-clause | 2,545 |
# Copyright (C) 2014 - The MITRE Corporation
# For license information, see the LICENSE.txt file
#: Namespace map of namespaces libtaxii knows about
NS_MAP = {
'taxii': 'http://taxii.mitre.org/messages/taxii_xml_binding-1',
'taxii_11': 'http://taxii.mitre.org/messages/taxii_xml_binding-1.1',
'tdq': 'http://taxii.mitre.org/query/taxii_default_query-1',
}
#: alias for NS_MAP for backward compatibility
ns_map = NS_MAP
#: Constant identifying a Status Message
MSG_STATUS_MESSAGE = 'Status_Message'
#: Constant identifying a Discovery Request Message
MSG_DISCOVERY_REQUEST = 'Discovery_Request'
#: Constant identifying a Discovery Response Message
MSG_DISCOVERY_RESPONSE = 'Discovery_Response'
#: Constant identifying a Feed Information Request Message
MSG_FEED_INFORMATION_REQUEST = 'Feed_Information_Request'
#: Constant identifying a Feed Information Response Message
MSG_FEED_INFORMATION_RESPONSE = 'Feed_Information_Response'
#: Constant identifying a Subscription Management Request Message
MSG_MANAGE_FEED_SUBSCRIPTION_REQUEST = 'Subscription_Management_Request'
#: Constant identifying a Subscription Management Response Message
MSG_MANAGE_FEED_SUBSCRIPTION_RESPONSE = 'Subscription_Management_Response'
#: Constant identifying a Poll Request Message
MSG_POLL_REQUEST = 'Poll_Request'
#: Constant identifying a Poll Response Message
MSG_POLL_RESPONSE = 'Poll_Response'
#: Constant identifying a Inbox Message
MSG_INBOX_MESSAGE = 'Inbox_Message'
#: TAXII 1.0 Message Types
MSG_TYPES_10 = (MSG_STATUS_MESSAGE, MSG_DISCOVERY_REQUEST, MSG_DISCOVERY_RESPONSE,
MSG_FEED_INFORMATION_REQUEST, MSG_FEED_INFORMATION_RESPONSE,
MSG_MANAGE_FEED_SUBSCRIPTION_REQUEST,
MSG_MANAGE_FEED_SUBSCRIPTION_RESPONSE, MSG_POLL_REQUEST,
MSG_POLL_RESPONSE, MSG_INBOX_MESSAGE)
# New Message Types in TAXII 1.1
#: Constant identifying a Poll Fulfillment Request
MSG_POLL_FULFILLMENT_REQUEST = 'Poll_Fulfillment'
#: Constant identifying a Collection Information Request
MSG_COLLECTION_INFORMATION_REQUEST = 'Collection_Information_Request'
#: Constant identifying a Collection Information Response
MSG_COLLECTION_INFORMATION_RESPONSE = 'Collection_Information_Response'
#: Constant identifying a Subscription Request
MSG_MANAGE_COLLECTION_SUBSCRIPTION_REQUEST = 'Subscription_Management_Request'
#: Constant identifying a Subscription Response
MSG_MANAGE_COLLECTION_SUBSCRIPTION_RESPONSE = 'Subscription_Management_Response'
#: Tuple of all TAXII 1.1 Message Types
MSG_TYPES_11 = (MSG_STATUS_MESSAGE, MSG_DISCOVERY_REQUEST, MSG_DISCOVERY_RESPONSE,
MSG_COLLECTION_INFORMATION_REQUEST, MSG_COLLECTION_INFORMATION_RESPONSE,
MSG_MANAGE_COLLECTION_SUBSCRIPTION_REQUEST,
MSG_MANAGE_COLLECTION_SUBSCRIPTION_RESPONSE, MSG_POLL_REQUEST,
MSG_POLL_RESPONSE, MSG_INBOX_MESSAGE, MSG_POLL_FULFILLMENT_REQUEST)
# TAXII 1.0 Status Types
#: Constant identifying a Status Type of Bad Message
ST_BAD_MESSAGE = 'BAD_MESSAGE'
#: Constant identifying a Status Type of Denied
ST_DENIED = 'DENIED'
#: Constant identifying a Status Type of Failure
ST_FAILURE = 'FAILURE'
#: Constant identifying a Status Type of Not Found
ST_NOT_FOUND = 'NOT_FOUND'
#: Constant identifying a Status Type of Polling Unsupported
ST_POLLING_UNSUPPORTED = 'POLLING_UNSUPPORTED'
#: Constant identifying a Status Type of Retry
ST_RETRY = 'RETRY'
#: Constant identifying a Status Type of Success
ST_SUCCESS = 'SUCCESS'
#: Constant identifying a Status Type of Unauthorized
ST_UNAUTHORIZED = 'UNAUTHORIZED'
#: Constant identifying a Status Type of Unsupported Message Binding
ST_UNSUPPORTED_MESSAGE_BINDING = 'UNSUPPORTED_MESSAGE'
#: Constant identifying a Status Type of Unsupported Content Binding
ST_UNSUPPORTED_CONTENT_BINDING = 'UNSUPPORTED_CONTENT'
#: Constant identifying a Status Type of Unsupported Protocol Binding
ST_UNSUPPORTED_PROTOCOL = 'UNSUPPORTED_PROTOCOL'
#: Tuple of all TAXII 1.0 Status Types
ST_TYPES_10 = (ST_BAD_MESSAGE, ST_DENIED, ST_FAILURE, ST_NOT_FOUND,
ST_POLLING_UNSUPPORTED, ST_RETRY, ST_SUCCESS, ST_UNAUTHORIZED,
ST_UNSUPPORTED_MESSAGE_BINDING, ST_UNSUPPORTED_CONTENT_BINDING,
ST_UNSUPPORTED_PROTOCOL)
# New Status Types in TAXII 1.1
#: Constant identifying a Status Type of Asynchronous Poll Error
ST_ASYNCHRONOUS_POLL_ERROR = 'ASYNCHRONOUS_POLL_ERROR'
#: Constant identifying a Status Type of Destination Collection Error
ST_DESTINATION_COLLECTION_ERROR = 'DESTINATION_COLLECTION_ERROR'
#: Constant identifying a Status Type of Invalid Response Part
ST_INVALID_RESPONSE_PART = 'INVALID_RESPONSE_PART'
#: Constant identifying a Status Type of Network Error
ST_NETWORK_ERROR = 'NETWORK_ERROR'
#: Constant identifying a Status Type of Pending
ST_PENDING = 'PENDING'
#: Constant identifying a Status Type of Unsupported Query Format
ST_UNSUPPORTED_QUERY = 'UNSUPPORTED_QUERY'
#: Tuple of all TAXII 1.1 Status types
ST_TYPES_11 = (ST_ASYNCHRONOUS_POLL_ERROR, ST_BAD_MESSAGE, ST_DENIED,
ST_DESTINATION_COLLECTION_ERROR, ST_FAILURE, ST_INVALID_RESPONSE_PART,
ST_NETWORK_ERROR, ST_NOT_FOUND, ST_PENDING, ST_POLLING_UNSUPPORTED,
ST_RETRY, ST_SUCCESS, ST_UNAUTHORIZED, ST_UNSUPPORTED_MESSAGE_BINDING,
ST_UNSUPPORTED_CONTENT_BINDING, ST_UNSUPPORTED_PROTOCOL,
ST_UNSUPPORTED_QUERY)
# TAXII 1.0 Action Types
#: Constant identifying an Action of Subscribe
ACT_SUBSCRIBE = 'SUBSCRIBE'
#: Constant identifying an Action of Unsubscribe
ACT_UNSUBSCRIBE = 'UNSUBSCRIBE'
#: Constant identifying an Action of Status
ACT_STATUS = 'STATUS'
#: Tuple of all TAXII 1.0 Action Types
ACT_TYPES_10 = (ACT_SUBSCRIBE, ACT_UNSUBSCRIBE, ACT_STATUS)
#: Constant identifying an Action of Pause
ACT_PAUSE = 'PAUSE'
#: Constant identifying an Action of Resume
ACT_RESUME = 'RESUME'
#: Tuple of all TAXII 1.1 Action types
ACT_TYPES_11 = (ACT_SUBSCRIBE, ACT_PAUSE, ACT_RESUME, ACT_UNSUBSCRIBE, ACT_STATUS)
# TAXII 1.0 Service Types
#: Constant identifying a Service Type of Inbox
SVC_INBOX = 'INBOX'
#: Constant identifying a Service Type of Poll
SVC_POLL = 'POLL'
#: Constant identifying a Service Type of Feed Management
SVC_FEED_MANAGEMENT = 'FEED_MANAGEMENT'
#: Constant identifying a Service Type of Discovery
SVC_DISCOVERY = 'DISCOVERY'
#: Tuple of all TAXII 1.0 Service Types
SVC_TYPES_10 = (SVC_INBOX, SVC_POLL, SVC_FEED_MANAGEMENT, SVC_DISCOVERY)
# Renamed Service Types in TAXII 1.1
#: Constant identifying a Service Type of Collection Management.
#: "Feed Management" was renamed to "Collection Management" in TAXII 1.1.
SVC_COLLECTION_MANAGEMENT = 'COLLECTION_MANAGEMENT'
#: Tuple of all TAXII 1.1 Service Types
SVC_TYPES_11 = (SVC_INBOX, SVC_POLL, SVC_COLLECTION_MANAGEMENT, SVC_DISCOVERY)
# TAXII 1.1 Subscription Statuses
#: Subscription Status of Active
SS_ACTIVE = 'ACTIVE'
#: Subscription Status of Paused
SS_PAUSED = 'PAUSED'
#: Subscription Status of Unsubscribed
SS_UNSUBSCRIBED = 'UNSUBSCRIBED'
#: Tuple of all TAXII 1.1 Subscription Statues
SS_TYPES_11 = (SS_ACTIVE, SS_PAUSED, SS_UNSUBSCRIBED)
# TAXII 1.1 Response Types
#: Constant identifying a response type of Full
RT_FULL = 'FULL'
#: Constant identifying a response type of Count only
RT_COUNT_ONLY = 'COUNT_ONLY'
#: Tuple of all TAXII 1.1 Response Types
RT_TYPES_11 = (RT_FULL, RT_COUNT_ONLY)
# TAXII 1.1 Collection Types
#: Constant identifying a collection type of Data Feed
CT_DATA_FEED = 'DATA_FEED'
#: Constant identifying a collection type of Data Set
CT_DATA_SET = 'DATA_SET'
#: Tuple of all TAXII 1.1 Collection Types
CT_TYPES_11 = (CT_DATA_FEED, CT_DATA_SET)
# TAXII 1.1 Status Detail Keys
#: Constant Identifying the Acceptable Destination Status Detail
SD_ACCEPTABLE_DESTINATION = 'ACCEPTABLE_DESTINATION'
#: Constant Identifying the Max Part Number Status Detail
SD_MAX_PART_NUMBER = 'MAX_PART_NUMBER'
#: Constant Identifying the Item Status Detail
SD_ITEM = 'ITEM'
#: Constant Identifying the Estimated Wait Status Detail
SD_ESTIMATED_WAIT = 'ESTIMATED_WAIT'
#: Constant Identifying the Result ID Status Detail
SD_RESULT_ID = 'RESULT_ID'
#: Constant Identifying the Will Push Status Detail
SD_WILL_PUSH = 'WILL_PUSH'
#: Constant Identifying the Supported Binding Status Detail
SD_SUPPORTED_BINDING = 'SUPPORTED_BINDING'
#: Constant Identifying the Supported Content Status Detail
SD_SUPPORTED_CONTENT = 'SUPPORTED_CONTENT'
#: Constant Identifying the Supported Protocol Status Detail
SD_SUPPORTED_PROTOCOL = 'SUPPORTED_PROTOCOL'
#: Constant Identifying the Supported Query Status Detail
SD_SUPPORTED_QUERY = 'SUPPORTED_QUERY'
#: Tuple of all TAXII 1.1 Status Detail Keys
SD_TYPES_11 = (SD_ACCEPTABLE_DESTINATION, SD_MAX_PART_NUMBER, SD_ITEM,
SD_ESTIMATED_WAIT, SD_RESULT_ID, SD_WILL_PUSH,
SD_SUPPORTED_BINDING, SD_SUPPORTED_CONTENT, SD_SUPPORTED_PROTOCOL,
SD_SUPPORTED_QUERY)
#: (For TAXII Default Query) Constant identifying supported Capability Modules
SD_CAPABILITY_MODULE = 'CAPABILITY_MODULE'
#: (For TAXII Default Query) Constant identifying Preferred Scopes
SD_PREFERRED_SCOPE = 'PREFERRED_SCOPE'
#: (For TAXII Default Query) Constant identifying Allowed Scopes
SD_ALLOWED_SCOPE = 'ALLOWED_SCOPE'
#: (For TAXII Default Query) Constant identifying supported Targeting Expression IDs
SD_TARGETING_EXPRESSION_ID = 'TARGETING_EXPRESSION_ID'
#: Format ID for this version of TAXII Default Query
FID_TAXII_DEFAULT_QUERY_10 = 'urn:taxii.mitre.org:query:default:1.0'
# Capability Module IDs
#: Capability Module ID for Core
CM_CORE = 'urn:taxii.mitre.org:query:capability:core-1'
#: Capability Module ID for Regex
CM_REGEX = 'urn:taxii.mitre.org:query:capability:regex-1'
#: Capability Module ID for Timestamp
CM_TIMESTAMP = 'urn:taxii.mitre.org:query:capability:timestamp-1'
#: Tuple of all capability modules defined in TAXII Default Query 1.0
CM_IDS = (CM_CORE, CM_REGEX, CM_TIMESTAMP)
# Operators
#: Operator OR
OP_OR = 'OR'
#: Operator AND
OP_AND = 'AND'
#: Tuple of all operators
OP_TYPES = (OP_OR, OP_AND)
#: Status Type indicating an unsupported capability module
ST_UNSUPPORTED_CAPABILITY_MODULE = 'UNSUPPORTED_CAPABILITY_MODULE'
#: Status Type indicating an unsupported targeting expression
ST_UNSUPPORTED_TARGETING_EXPRESSION = 'UNSUPPORTED_TARGETING_EXPRESSION'
#: Status Type indicating an unsupported targeting expression id
ST_UNSUPPORTED_TARGETING_EXPRESSION_ID = 'UNSUPPORTED_TARGETING_EXPRESSION_ID'
#: Parameter name: value
P_VALUE = 'value'
#: Parameter name: match_type
P_MATCH_TYPE = 'match_type'
#: Parameter name: case_sensitive
P_CASE_SENSITIVE = 'case_sensitive'
#: Tuple of all parameter names
P_NAMES = (P_VALUE, P_MATCH_TYPE, P_CASE_SENSITIVE)
#: Relationship name: equals
R_EQUALS = 'equals'
#: Relationship name: not_equals
R_NOT_EQUALS = 'not_equals'
#: Relationship name: greater_than
R_GREATER_THAN = 'greater_than'
#: Relationship name: greater_than_or_equal
R_GREATER_THAN_OR_EQUAL = 'greater_than_or_equal'
#: Relationship name: less_than
R_LESS_THAN = 'less_than'
#: Relationship name: less_than_or_equal
R_LESS_THAN_OR_EQUAL = 'less_than_or_equal'
#: Relationship name: does_not_exist
R_DOES_NOT_EXIST = 'does_not_exist'
#: Relationship name: exists
R_EXISTS = 'exists'
#: Relationship name: begins_with
R_BEGINS_WITH = 'begins_with'
#: Relationship name: ends_with
R_ENDS_WITH = 'ends_with'
#: Relationship name: contains
R_CONTAINS = 'contains'
#: Relationship name: matches
R_MATCHES = 'matches'
#: Tuple of all relationship names
R_NAMES = (R_EQUALS, R_NOT_EQUALS, R_GREATER_THAN,
R_GREATER_THAN_OR_EQUAL, R_LESS_THAN,
R_LESS_THAN_OR_EQUAL, R_DOES_NOT_EXIST,
R_EXISTS, R_BEGINS_WITH, R_ENDS_WITH,
R_CONTAINS, R_MATCHES)
# TAXII Version IDs #
#: Version ID for the TAXII Services Specification 1.0
VID_TAXII_SERVICES_10 = 'urn:taxii.mitre.org:services:1.0'
#: Version ID for the TAXII Services Specification 1.1
VID_TAXII_SERVICES_11 = 'urn:taxii.mitre.org:services:1.1'
#: Version ID for the TAXII XML Message Binding Specification 1.0
VID_TAXII_XML_10 = 'urn:taxii.mitre.org:message:xml:1.0'
#: Version ID for the TAXII XML Message Binding Specification 1.1
VID_TAXII_XML_11 = 'urn:taxii.mitre.org:message:xml:1.1'
#: Version ID for the TAXII HTTP Protocol Binding Specification 1.0
VID_TAXII_HTTP_10 = 'urn:taxii.mitre.org:protocol:http:1.0'
#: Version ID for the TAXII HTTPS Protocol Binding Specification 1.0
VID_TAXII_HTTPS_10 = 'urn:taxii.mitre.org:protocol:https:1.0'
# Third Party Version IDs
#: Version ID for the CERT EU JSON Message Binding
VID_CERT_EU_JSON_10 = 'urn:cert.europa.eu:message:json:1.0'
# TAXII Content Bindings #
#: Content Binding ID for STIX XML 1.0
CB_STIX_XML_10 = 'urn:stix.mitre.org:xml:1.0'
#: Content Binding ID for STIX XML 1.0.1
CB_STIX_XML_101 = 'urn:stix.mitre.org:xml:1.0.1'
#: Content Binding ID for STIX XML 1.1
CB_STIX_XML_11 = 'urn:stix.mitre.org:xml:1.1'
#: Content Binding ID for STIX XML 1.1.1
CB_STIX_XML_111 = 'urn:stix.mitre.org:xml:1.1.1'
#: Content Binding ID for CAP 1.1
CB_CAP_11 = 'urn:oasis:names:tc:emergency:cap:1.1'
#: Content Binding ID for XML Encryption
CB_XENC_122002 = 'http://www.w3.org/2001/04/xmlenc#'
#: Content Binding ID for SMIME
CB_SMIME = 'application/x-pkcs7-mime'
STD_INDENT = ' ' # A "Standard Indent" to use for to_text() methods
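# A minimal usage sketch (illustrative only, not part of the public API): the
# *_TYPES_* tuples above lend themselves to membership checks, e.g. validating
# a Status Type received from a peer before acting on it.
if __name__ == '__main__':
    candidate = ST_PENDING  # hypothetical value parsed from a Status Message
    assert candidate in ST_TYPES_11, 'unknown TAXII 1.1 Status Type'
    assert SVC_COLLECTION_MANAGEMENT in SVC_TYPES_11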
| Intelworks/libtaxii | libtaxii/constants.py | Python | bsd-3-clause | 13,355 |
#!/usr/bin/env python
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Linux specific tests."""
import contextlib
import errno
import io
import os
import pprint
import re
import shutil
import socket
import struct
import tempfile
import textwrap
import time
import warnings
try:
from unittest import mock # py3
except ImportError:
import mock # requires "pip install mock"
import psutil
from psutil import LINUX
from psutil._compat import PY3
from psutil._compat import u
from psutil.tests import call_until
from psutil.tests import get_kernel_version
from psutil.tests import importlib
from psutil.tests import MEMORY_TOLERANCE
from psutil.tests import pyrun
from psutil.tests import reap_children
from psutil.tests import retry_before_failing
from psutil.tests import run_test_module_by_name
from psutil.tests import sh
from psutil.tests import skip_on_not_implemented
from psutil.tests import TESTFN
from psutil.tests import TRAVIS
from psutil.tests import unittest
from psutil.tests import which
HERE = os.path.abspath(os.path.dirname(__file__))
SIOCGIFADDR = 0x8915
SIOCGIFCONF = 0x8912
SIOCGIFHWADDR = 0x8927
# =====================================================================
# utils
# =====================================================================
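# The two helpers below read addresses straight from the kernel via SIOCGIF*
# ioctls: the kernel fills in a packed 'struct ifreq' buffer and the helpers
# slice the interesting bytes back out of it (offsets 20:24 hold the IPv4
# address, 18:24 the hardware/MAC address).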
def get_ipv4_address(ifname):
import fcntl
ifname = ifname[:15]
if PY3:
ifname = bytes(ifname, 'ascii')
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
with contextlib.closing(s):
return socket.inet_ntoa(
fcntl.ioctl(s.fileno(),
SIOCGIFADDR,
struct.pack('256s', ifname))[20:24])
def get_mac_address(ifname):
import fcntl
ifname = ifname[:15]
if PY3:
ifname = bytes(ifname, 'ascii')
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
with contextlib.closing(s):
info = fcntl.ioctl(
s.fileno(), SIOCGIFHWADDR, struct.pack('256s', ifname))
if PY3:
def ord(x):
return x
else:
import __builtin__
ord = __builtin__.ord
return ''.join(['%02x:' % ord(char) for char in info[18:24]])[:-1]
def free_swap():
"""Parse 'free' cmd and return swap memory's s total, used and free
values.
"""
lines = sh('free').split('\n')
for line in lines:
if line.startswith('Swap'):
_, total, used, free = line.split()
return (int(total) * 1024, int(used) * 1024, int(free) * 1024)
def free_physmem():
"""Parse 'free' cmd and return physical memory's total, used
and free values.
"""
lines = sh('free').split('\n')
for line in lines:
if line.startswith('Mem'):
total, used, free, shared, buffers, cached = \
[int(x) * 1024 for x in line.split()[1:]]
return (total, used, free, shared, buffers, cached)
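# For reference, free_swap()/free_physmem() assume the classic procps 'free'
# layout (no --si/--human flags), roughly:
#              total       used       free     shared    buffers     cached
# Mem:       8048780    7275660     773120     370364     281144    3251480
# Swap:      8265724     483996    7781728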
# =====================================================================
# system memory
# =====================================================================
@unittest.skipUnless(LINUX, "not a Linux system")
class TestSystemMemory(unittest.TestCase):
def test_vmem_total(self):
total, used, free, shared, buffers, cached = free_physmem()
self.assertEqual(total, psutil.virtual_memory().total)
@retry_before_failing()
def test_vmem_used(self):
total, used, free, shared, buffers, cached = free_physmem()
self.assertAlmostEqual(used, psutil.virtual_memory().used,
delta=MEMORY_TOLERANCE)
@retry_before_failing()
def test_vmem_free(self):
total, used, free, shared, buffers, cached = free_physmem()
self.assertAlmostEqual(free, psutil.virtual_memory().free,
delta=MEMORY_TOLERANCE)
@retry_before_failing()
def test_vmem_buffers(self):
buffers = int(sh('vmstat').split('\n')[2].split()[4]) * 1024
self.assertAlmostEqual(buffers, psutil.virtual_memory().buffers,
delta=MEMORY_TOLERANCE)
@retry_before_failing()
def test_vmem_cached(self):
cached = int(sh('vmstat').split('\n')[2].split()[5]) * 1024
self.assertAlmostEqual(cached, psutil.virtual_memory().cached,
delta=MEMORY_TOLERANCE)
def test_swapmem_total(self):
total, used, free = free_swap()
return self.assertAlmostEqual(total, psutil.swap_memory().total,
delta=MEMORY_TOLERANCE)
@retry_before_failing()
def test_swapmem_used(self):
total, used, free = free_swap()
return self.assertAlmostEqual(used, psutil.swap_memory().used,
delta=MEMORY_TOLERANCE)
@retry_before_failing()
def test_swapmem_free(self):
total, used, free = free_swap()
return self.assertAlmostEqual(free, psutil.swap_memory().free,
delta=MEMORY_TOLERANCE)
# --- mocked tests
def test_virtual_memory_mocked_warnings(self):
with mock.patch('psutil._pslinux.open', create=True) as m:
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always")
ret = psutil._pslinux.virtual_memory()
assert m.called
self.assertEqual(len(ws), 1)
w = ws[0]
self.assertTrue(w.filename.endswith('psutil/_pslinux.py'))
self.assertIn(
"'cached', 'active' and 'inactive' memory stats couldn't "
"be determined", str(w.message))
self.assertEqual(ret.cached, 0)
self.assertEqual(ret.active, 0)
self.assertEqual(ret.inactive, 0)
def test_swap_memory_mocked_warnings(self):
with mock.patch('psutil._pslinux.open', create=True) as m:
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always")
ret = psutil._pslinux.swap_memory()
assert m.called
self.assertEqual(len(ws), 1)
w = ws[0]
self.assertTrue(w.filename.endswith('psutil/_pslinux.py'))
self.assertIn(
"'sin' and 'sout' swap memory stats couldn't "
"be determined", str(w.message))
self.assertEqual(ret.sin, 0)
self.assertEqual(ret.sout, 0)
def test_swap_memory_mocked_no_vmstat(self):
# see https://github.com/giampaolo/psutil/issues/722
with mock.patch('psutil._pslinux.open', create=True,
side_effect=IOError) as m:
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always")
ret = psutil.swap_memory()
assert m.called
self.assertEqual(len(ws), 1)
w = ws[0]
self.assertTrue(w.filename.endswith('psutil/_pslinux.py'))
self.assertIn(
"'sin' and 'sout' swap memory stats couldn't "
"be determined and were set to 0",
str(w.message))
self.assertEqual(ret.sin, 0)
self.assertEqual(ret.sout, 0)
# =====================================================================
# system CPU
# =====================================================================
@unittest.skipUnless(LINUX, "not a Linux system")
class TestSystemCPU(unittest.TestCase):
@unittest.skipIf(TRAVIS, "unknown failure on travis")
def test_cpu_times(self):
fields = psutil.cpu_times()._fields
        kernel_ver = re.findall(r'\d+\.\d+\.\d+', os.uname()[2])[0]
kernel_ver_info = tuple(map(int, kernel_ver.split('.')))
if kernel_ver_info >= (2, 6, 11):
self.assertIn('steal', fields)
else:
self.assertNotIn('steal', fields)
if kernel_ver_info >= (2, 6, 24):
self.assertIn('guest', fields)
else:
self.assertNotIn('guest', fields)
if kernel_ver_info >= (3, 2, 0):
self.assertIn('guest_nice', fields)
else:
self.assertNotIn('guest_nice', fields)
@unittest.skipUnless(which("nproc"), "nproc utility not available")
def test_cpu_count_logical_w_nproc(self):
num = int(sh("nproc --all"))
self.assertEqual(psutil.cpu_count(logical=True), num)
@unittest.skipUnless(which("lscpu"), "lscpu utility not available")
def test_cpu_count_logical_w_lscpu(self):
out = sh("lscpu -p")
num = len([x for x in out.split('\n') if not x.startswith('#')])
self.assertEqual(psutil.cpu_count(logical=True), num)
def test_cpu_count_logical_mocked(self):
import psutil._pslinux
original = psutil._pslinux.cpu_count_logical()
# Here we want to mock os.sysconf("SC_NPROCESSORS_ONLN") in
# order to cause the parsing of /proc/cpuinfo and /proc/stat.
with mock.patch(
'psutil._pslinux.os.sysconf', side_effect=ValueError) as m:
self.assertEqual(psutil._pslinux.cpu_count_logical(), original)
assert m.called
        # Let's have open() return empty data and make sure None is
        # returned ('cause we mimic os.cpu_count()).
with mock.patch('psutil._pslinux.open', create=True) as m:
self.assertIsNone(psutil._pslinux.cpu_count_logical())
self.assertEqual(m.call_count, 2)
# /proc/stat should be the last one
self.assertEqual(m.call_args[0][0], '/proc/stat')
# Let's push this a bit further and make sure /proc/cpuinfo
# parsing works as expected.
with open('/proc/cpuinfo', 'rb') as f:
cpuinfo_data = f.read()
fake_file = io.BytesIO(cpuinfo_data)
with mock.patch('psutil._pslinux.open',
return_value=fake_file, create=True) as m:
self.assertEqual(psutil._pslinux.cpu_count_logical(), original)
def test_cpu_count_physical_mocked(self):
        # Have open() return empty data and make sure None is returned
        # ('cause we want to mimic os.cpu_count())
with mock.patch('psutil._pslinux.open', create=True) as m:
self.assertIsNone(psutil._pslinux.cpu_count_physical())
assert m.called
# =====================================================================
# system network
# =====================================================================
@unittest.skipUnless(LINUX, "not a Linux system")
class TestSystemNetwork(unittest.TestCase):
def test_net_if_addrs_ips(self):
for name, addrs in psutil.net_if_addrs().items():
for addr in addrs:
if addr.family == psutil.AF_LINK:
self.assertEqual(addr.address, get_mac_address(name))
elif addr.family == socket.AF_INET:
self.assertEqual(addr.address, get_ipv4_address(name))
# TODO: test for AF_INET6 family
@unittest.skipUnless(which('ip'), "'ip' utility not available")
@unittest.skipIf(TRAVIS, "skipped on Travis")
def test_net_if_names(self):
out = sh("ip addr").strip()
nics = [x for x in psutil.net_if_addrs().keys() if ':' not in x]
found = 0
for line in out.split('\n'):
line = line.strip()
if re.search("^\d+:", line):
found += 1
name = line.split(':')[1].strip()
self.assertIn(name, nics)
self.assertEqual(len(nics), found, msg="%s\n---\n%s" % (
pprint.pformat(nics), out))
@mock.patch('psutil._pslinux.socket.inet_ntop', side_effect=ValueError)
@mock.patch('psutil._pslinux.supports_ipv6', return_value=False)
def test_net_connections_ipv6_unsupported(self, supports_ipv6, inet_ntop):
# see: https://github.com/giampaolo/psutil/issues/623
try:
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
self.addCleanup(s.close)
s.bind(("::1", 0))
except socket.error:
pass
psutil.net_connections(kind='inet6')
# =====================================================================
# system disk
# =====================================================================
@unittest.skipUnless(LINUX, "not a Linux system")
class TestSystemDisks(unittest.TestCase):
@unittest.skipUnless(
hasattr(os, 'statvfs'), "os.statvfs() function not available")
@skip_on_not_implemented()
def test_disk_partitions_and_usage(self):
# test psutil.disk_usage() and psutil.disk_partitions()
# against "df -a"
def df(path):
out = sh('df -P -B 1 "%s"' % path).strip()
lines = out.split('\n')
lines.pop(0)
line = lines.pop(0)
dev, total, used, free = line.split()[:4]
if dev == 'none':
dev = ''
total, used, free = int(total), int(used), int(free)
return dev, total, used, free
for part in psutil.disk_partitions(all=False):
usage = psutil.disk_usage(part.mountpoint)
dev, total, used, free = df(part.mountpoint)
self.assertEqual(usage.total, total)
            # 10 MB tolerance
if abs(usage.free - free) > 10 * 1024 * 1024:
self.fail("psutil=%s, df=%s" % (usage.free, free))
if abs(usage.used - used) > 10 * 1024 * 1024:
self.fail("psutil=%s, df=%s" % (usage.used, used))
def test_disk_partitions_mocked(self):
# Test that ZFS partitions are returned.
with open("/proc/filesystems", "r") as f:
data = f.read()
if 'zfs' in data:
for part in psutil.disk_partitions():
if part.fstype == 'zfs':
break
else:
self.fail("couldn't find any ZFS partition")
else:
# No ZFS partitions on this system. Let's fake one.
fake_file = io.StringIO(u("nodev\tzfs\n"))
with mock.patch('psutil._pslinux.open',
return_value=fake_file, create=True) as m1:
with mock.patch(
'psutil._pslinux.cext.disk_partitions',
return_value=[('/dev/sdb3', '/', 'zfs', 'rw')]) as m2:
ret = psutil.disk_partitions()
assert m1.called
assert m2.called
assert ret
self.assertEqual(ret[0].fstype, 'zfs')
# =====================================================================
# misc
# =====================================================================
@unittest.skipUnless(LINUX, "not a Linux system")
class TestMisc(unittest.TestCase):
@mock.patch('psutil.traceback.print_exc')
def test_no_procfs_on_import(self, tb):
my_procfs = tempfile.mkdtemp()
with open(os.path.join(my_procfs, 'stat'), 'w') as f:
f.write('cpu 0 0 0 0 0 0 0 0 0 0\n')
f.write('cpu0 0 0 0 0 0 0 0 0 0 0\n')
f.write('cpu1 0 0 0 0 0 0 0 0 0 0\n')
try:
orig_open = open
def open_mock(name, *args):
if name.startswith('/proc'):
raise IOError(errno.ENOENT, 'rejecting access for test')
return orig_open(name, *args)
patch_point = 'builtins.open' if PY3 else '__builtin__.open'
with mock.patch(patch_point, side_effect=open_mock):
importlib.reload(psutil)
assert tb.called
self.assertRaises(IOError, psutil.cpu_times)
self.assertRaises(IOError, psutil.cpu_times, percpu=True)
self.assertRaises(IOError, psutil.cpu_percent)
self.assertRaises(IOError, psutil.cpu_percent, percpu=True)
self.assertRaises(IOError, psutil.cpu_times_percent)
self.assertRaises(
IOError, psutil.cpu_times_percent, percpu=True)
psutil.PROCFS_PATH = my_procfs
self.assertEqual(psutil.cpu_percent(), 0)
self.assertEqual(sum(psutil.cpu_times_percent()), 0)
# since we don't know the number of CPUs at import time,
# we awkwardly say there are none until the second call
per_cpu_percent = psutil.cpu_percent(percpu=True)
self.assertEqual(sum(per_cpu_percent), 0)
# ditto awkward length
per_cpu_times_percent = psutil.cpu_times_percent(percpu=True)
self.assertEqual(sum(map(sum, per_cpu_times_percent)), 0)
# much user, very busy
with open(os.path.join(my_procfs, 'stat'), 'w') as f:
f.write('cpu 1 0 0 0 0 0 0 0 0 0\n')
f.write('cpu0 1 0 0 0 0 0 0 0 0 0\n')
f.write('cpu1 1 0 0 0 0 0 0 0 0 0\n')
self.assertNotEqual(psutil.cpu_percent(), 0)
self.assertNotEqual(
sum(psutil.cpu_percent(percpu=True)), 0)
self.assertNotEqual(sum(psutil.cpu_times_percent()), 0)
self.assertNotEqual(
sum(map(sum, psutil.cpu_times_percent(percpu=True))), 0)
finally:
shutil.rmtree(my_procfs)
importlib.reload(psutil)
self.assertEqual(psutil.PROCFS_PATH, '/proc')
@unittest.skipUnless(
get_kernel_version() >= (2, 6, 36),
"prlimit() not available on this Linux kernel version")
def test_prlimit_availability(self):
# prlimit() should be available starting from kernel 2.6.36
p = psutil.Process(os.getpid())
p.rlimit(psutil.RLIMIT_NOFILE)
# if prlimit() is supported *at least* these constants should
# be available
self.assertTrue(hasattr(psutil, "RLIM_INFINITY"))
self.assertTrue(hasattr(psutil, "RLIMIT_AS"))
self.assertTrue(hasattr(psutil, "RLIMIT_CORE"))
self.assertTrue(hasattr(psutil, "RLIMIT_CPU"))
self.assertTrue(hasattr(psutil, "RLIMIT_DATA"))
self.assertTrue(hasattr(psutil, "RLIMIT_FSIZE"))
self.assertTrue(hasattr(psutil, "RLIMIT_LOCKS"))
self.assertTrue(hasattr(psutil, "RLIMIT_MEMLOCK"))
self.assertTrue(hasattr(psutil, "RLIMIT_NOFILE"))
self.assertTrue(hasattr(psutil, "RLIMIT_NPROC"))
self.assertTrue(hasattr(psutil, "RLIMIT_RSS"))
self.assertTrue(hasattr(psutil, "RLIMIT_STACK"))
@unittest.skipUnless(
get_kernel_version() >= (3, 0),
"prlimit constants not available on this Linux kernel version")
def test_resource_consts_kernel_v(self):
# more recent constants
self.assertTrue(hasattr(psutil, "RLIMIT_MSGQUEUE"))
self.assertTrue(hasattr(psutil, "RLIMIT_NICE"))
self.assertTrue(hasattr(psutil, "RLIMIT_RTPRIO"))
self.assertTrue(hasattr(psutil, "RLIMIT_RTTIME"))
self.assertTrue(hasattr(psutil, "RLIMIT_SIGPENDING"))
def test_boot_time_mocked(self):
with mock.patch('psutil._pslinux.open', create=True) as m:
self.assertRaises(
RuntimeError,
psutil._pslinux.boot_time)
assert m.called
def test_users_mocked(self):
# Make sure ':0' and ':0.0' (returned by C ext) are converted
# to 'localhost'.
with mock.patch('psutil._pslinux.cext.users',
return_value=[('giampaolo', 'pts/2', ':0',
1436573184.0, True)]) as m:
self.assertEqual(psutil.users()[0].host, 'localhost')
assert m.called
with mock.patch('psutil._pslinux.cext.users',
return_value=[('giampaolo', 'pts/2', ':0.0',
1436573184.0, True)]) as m:
self.assertEqual(psutil.users()[0].host, 'localhost')
assert m.called
# ...otherwise it should be returned as-is
with mock.patch('psutil._pslinux.cext.users',
return_value=[('giampaolo', 'pts/2', 'foo',
1436573184.0, True)]) as m:
self.assertEqual(psutil.users()[0].host, 'foo')
assert m.called
def test_procfs_path(self):
tdir = tempfile.mkdtemp()
try:
psutil.PROCFS_PATH = tdir
self.assertRaises(IOError, psutil.virtual_memory)
self.assertRaises(IOError, psutil.cpu_times)
self.assertRaises(IOError, psutil.cpu_times, percpu=True)
self.assertRaises(IOError, psutil.boot_time)
# self.assertRaises(IOError, psutil.pids)
self.assertRaises(IOError, psutil.net_connections)
self.assertRaises(IOError, psutil.net_io_counters)
self.assertRaises(IOError, psutil.net_if_stats)
self.assertRaises(IOError, psutil.disk_io_counters)
self.assertRaises(IOError, psutil.disk_partitions)
self.assertRaises(psutil.NoSuchProcess, psutil.Process)
finally:
psutil.PROCFS_PATH = "/proc"
os.rmdir(tdir)
# =====================================================================
# test process
# =====================================================================
@unittest.skipUnless(LINUX, "not a Linux system")
class TestProcess(unittest.TestCase):
def test_memory_maps(self):
src = textwrap.dedent("""
import time
with open("%s", "w") as f:
time.sleep(10)
""" % TESTFN)
sproc = pyrun(src)
self.addCleanup(reap_children)
call_until(lambda: os.listdir('.'), "'%s' not in ret" % TESTFN)
p = psutil.Process(sproc.pid)
time.sleep(.1)
maps = p.memory_maps(grouped=False)
pmap = sh('pmap -x %s' % p.pid).split('\n')
# get rid of header
del pmap[0]
del pmap[0]
while maps and pmap:
this = maps.pop(0)
other = pmap.pop(0)
addr, _, rss, dirty, mode, path = other.split(None, 5)
if not path.startswith('[') and not path.endswith(']'):
self.assertEqual(path, os.path.basename(this.path))
self.assertEqual(int(rss) * 1024, this.rss)
# test only rwx chars, ignore 's' and 'p'
self.assertEqual(mode[:3], this.perms[:3])
def test_memory_addrspace_info(self):
src = textwrap.dedent("""
import time
with open("%s", "w") as f:
time.sleep(10)
""" % TESTFN)
sproc = pyrun(src)
self.addCleanup(reap_children)
call_until(lambda: os.listdir('.'), "'%s' not in ret" % TESTFN)
p = psutil.Process(sproc.pid)
time.sleep(.1)
mem = p.memory_addrspace_info()
maps = p.memory_maps(grouped=False)
self.assertEqual(
mem.uss, sum([x.private_dirty + x.private_clean for x in maps]))
self.assertEqual(
mem.pss, sum([x.pss for x in maps]))
self.assertEqual(
mem.swap, sum([x.swap for x in maps]))
def test_open_files_file_gone(self):
# simulates a file which gets deleted during open_files()
# execution
p = psutil.Process()
files = p.open_files()
with tempfile.NamedTemporaryFile():
# give the kernel some time to see the new file
call_until(p.open_files, "len(ret) != %i" % len(files))
with mock.patch('psutil._pslinux.os.readlink',
side_effect=OSError(errno.ENOENT, "")) as m:
files = p.open_files()
assert not files
assert m.called
# also simulate the case where os.readlink() returns EINVAL
# in which case psutil is supposed to 'continue'
with mock.patch('psutil._pslinux.os.readlink',
side_effect=OSError(errno.EINVAL, "")) as m:
self.assertEqual(p.open_files(), [])
assert m.called
# --- mocked tests
def test_terminal_mocked(self):
with mock.patch('psutil._pslinux._psposix._get_terminal_map',
return_value={}) as m:
self.assertIsNone(psutil._pslinux.Process(os.getpid()).terminal())
assert m.called
def test_num_ctx_switches_mocked(self):
with mock.patch('psutil._pslinux.open', create=True) as m:
self.assertRaises(
NotImplementedError,
psutil._pslinux.Process(os.getpid()).num_ctx_switches)
assert m.called
def test_num_threads_mocked(self):
with mock.patch('psutil._pslinux.open', create=True) as m:
self.assertRaises(
NotImplementedError,
psutil._pslinux.Process(os.getpid()).num_threads)
assert m.called
def test_ppid_mocked(self):
with mock.patch('psutil._pslinux.open', create=True) as m:
self.assertRaises(
NotImplementedError,
psutil._pslinux.Process(os.getpid()).ppid)
assert m.called
def test_uids_mocked(self):
with mock.patch('psutil._pslinux.open', create=True) as m:
self.assertRaises(
NotImplementedError,
psutil._pslinux.Process(os.getpid()).uids)
assert m.called
def test_gids_mocked(self):
with mock.patch('psutil._pslinux.open', create=True) as m:
self.assertRaises(
NotImplementedError,
psutil._pslinux.Process(os.getpid()).gids)
assert m.called
def test_cmdline_mocked(self):
# see: https://github.com/giampaolo/psutil/issues/639
p = psutil.Process()
fake_file = io.StringIO(u('foo\x00bar\x00'))
with mock.patch('psutil._pslinux.open',
return_value=fake_file, create=True) as m:
            self.assertEqual(p.cmdline(), ['foo', 'bar'])
assert m.called
fake_file = io.StringIO(u('foo\x00bar\x00\x00'))
with mock.patch('psutil._pslinux.open',
return_value=fake_file, create=True) as m:
            self.assertEqual(p.cmdline(), ['foo', 'bar', ''])
assert m.called
def test_io_counters_mocked(self):
with mock.patch('psutil._pslinux.open', create=True) as m:
self.assertRaises(
NotImplementedError,
psutil._pslinux.Process(os.getpid()).io_counters)
assert m.called
def test_readlink_path_deleted_mocked(self):
with mock.patch('psutil._pslinux.os.readlink',
return_value='/home/foo (deleted)'):
self.assertEqual(psutil.Process().exe(), "/home/foo")
self.assertEqual(psutil.Process().cwd(), "/home/foo")
if __name__ == '__main__':
run_test_module_by_name(__file__)
| 0-wiz-0/psutil | psutil/tests/test_linux.py | Python | bsd-3-clause | 27,328 |
# -*- coding: utf-8 -*-
from django.db import migrations, models
import django.contrib.postgres.fields.hstore
class Migration(migrations.Migration):
dependencies = [
('hs_core', '0035_remove_deprecated_fields'),
]
operations = [
migrations.AddField(
model_name='contributor',
name='identifiers',
field=django.contrib.postgres.fields.hstore.HStoreField(default={}),
),
migrations.AddField(
model_name='creator',
name='identifiers',
field=django.contrib.postgres.fields.hstore.HStoreField(default={}),
),
]
| hydroshare/hydroshare | hs_core/migrations/0036_auto_20171117_0422.py | Python | bsd-3-clause | 639 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
import sys
import time
import pytest
try:
import yajl
except ImportError:
yajl = None
try:
import simplejson
except ImportError:
simplejson = None
try:
import json
except ImportError:
json = None
try:
import rapidjson
except ImportError:
rapidjson = None
try:
import ujson
except ImportError:
ujson = None
default_data = {
'words': """
Lorem ipsum dolor sit amet, consectetur adipiscing
elit. Mauris adipiscing adipiscing placerat.
Vestibulum augue augue,
pellentesque quis sollicitudin id, adipiscing.
""",
'list': list(range(200)),
'dict': dict((str(i), 'a') for i in list(range(200))),
'int': 100100100,
'float': 100999.123456
}
user = {
"userId": 3381293,
"age": 213,
"username": "johndoe",
"fullname": u"John Doe the Second",
"isAuthorized": True,
"liked": 31231.31231202,
"approval": 31.1471,
"jobs": [1, 2],
"currJob": None
}
friends = [user, user, user, user, user, user, user, user]
def time_func(func, data, iterations):
start = time.time()
while iterations:
iterations -= 1
func(data)
return time.time() - start
def run_client_test(
name, serialize, deserialize, iterations=100 * 1000, data=default_data
):
squashed_data = serialize(data)
serialize_profile = time_func(serialize, data, iterations)
deserialize_profile = time_func(deserialize, squashed_data, iterations)
return serialize_profile, deserialize_profile
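# Each entry in `contenders` below is (name, dumps-like callable, loads-like
# callable); run_client_test() times `iterations` serialize passes and then
# `iterations` deserialize passes and returns the two wall-clock totals.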
contenders = []
if yajl:
contenders.append(('yajl', yajl.Encoder().encode, yajl.Decoder().decode))
if simplejson:
contenders.append(('simplejson', simplejson.dumps, simplejson.loads))
if json:
contenders.append(('stdlib json', json.dumps, json.loads))
if rapidjson:
contenders.append(
('rapidjson', rapidjson.dumps, rapidjson.loads)
)
if ujson:
contenders.append(
('ujson', ujson.dumps, ujson.loads)
)
doubles = []
unicode_strings = []
strings = []
booleans = []
list_dicts = []
dict_lists = {}
medium_complex = [
[user, friends], [user, friends], [user, friends],
[user, friends], [user, friends], [user, friends]
]
for x in range(256):
doubles.append(sys.maxsize * random.random())
unicode_strings.append(
"نظام الحكم سلطاني وراثي في الذكور من ذرية السيد تركي بن سعيد بن سلطان ويشترط فيمن يختار لولاية الحكم من بينهم ان يكون مسلما رشيدا عاقلا ًوابنا شرعيا لابوين عمانيين ")
strings.append("A pretty long string which is in a list")
booleans.append(True)
for y in range(100):
arrays = []
list_dicts.append({str(random.random() * 20): int(random.random() * 1000000)})
for x in range(100):
arrays.append({str(random.random() * 20): int(random.random() * 1000000)})
dict_lists[str(random.random() * 20)] = arrays
@pytest.mark.parametrize('name,serialize,deserialize', contenders)
def test_json_serialization(name, serialize, deserialize, benchmark):
ser_data, des_data = benchmark(run_client_test, name, serialize, deserialize)
msg = "\n%-11s serialize: %0.3f deserialize: %0.3f total: %0.3f" % (
name, ser_data, des_data, ser_data + des_data
)
print(msg)
@pytest.mark.parametrize('name,serialize,deserialize', contenders)
def test_json_unicode_strings(name, serialize, deserialize, benchmark):
print("\nArray with 256 unicode strings:")
ser_data, des_data = benchmark(run_client_test,
name, serialize, deserialize,
data=unicode_strings,
iterations=5000,
)
msg = "%-11s serialize: %0.3f deserialize: %0.3f total: %0.3f" % (
name, ser_data, des_data, ser_data + des_data
)
print(msg)
@pytest.mark.parametrize('name,serialize,deserialize', contenders)
def test_json_ascii_strings(name, serialize, deserialize, benchmark):
print("\nArray with 256 ascii strings:")
ser_data, des_data = benchmark(run_client_test,
name, serialize, deserialize,
data=strings,
)
msg = "%-11s serialize: %0.3f deserialize: %0.3f total: %0.3f" % (
name, ser_data, des_data, ser_data + des_data
)
print(msg)
@pytest.mark.parametrize('name,serialize,deserialize', contenders)
def test_json_booleans(name, serialize, deserialize, benchmark):
print("\nArray with 256 True's:")
ser_data, des_data = benchmark(run_client_test,
name, serialize, deserialize,
data=booleans,
)
msg = "%-11s serialize: %0.3f deserialize: %0.3f total: %0.3f" % (
name, ser_data, des_data, ser_data + des_data
)
print(msg)
@pytest.mark.parametrize('name,serialize,deserialize', contenders)
def test_json_list_of_dictionaries(name, serialize, deserialize, benchmark):
print("\nArray of 100 dictionaries:")
ser_data, des_data = benchmark(run_client_test,
name, serialize, deserialize,
data=list_dicts,
iterations=5,
)
msg = "%-11s serialize: %0.3f deserialize: %0.3f total: %0.3f" % (
name, ser_data, des_data, ser_data + des_data
)
print(msg)
@pytest.mark.parametrize('name,serialize,deserialize', contenders)
def test_json_dictionary_of_lists(name, serialize, deserialize, benchmark):
print("\nDictionary of 100 Arrays:")
ser_data, des_data = benchmark(run_client_test,
name, serialize, deserialize,
data=dict_lists,
iterations=5,
)
msg = "%-11s serialize: %0.3f deserialize: %0.3f total: %0.3f" % (
name, ser_data, des_data, ser_data + des_data
)
print(msg)
@pytest.mark.parametrize('name,serialize,deserialize', contenders)
def test_json_medium_complex_objects(name, serialize, deserialize, benchmark):
print("\n256 Medium Complex objects:")
ser_data, des_data = benchmark(run_client_test,
name, serialize, deserialize,
data=medium_complex,
iterations=50000,
)
msg = "%-11s serialize: %0.3f deserialize: %0.3f total: %0.3f" % (
name, ser_data, des_data, ser_data + des_data
)
print(msg)
def test_double_performance_float_precision(benchmark):
print("\nArray with 256 doubles:")
name = 'rapidjson (precise)'
serialize = rapidjson.dumps
deserialize = rapidjson.loads
ser_data, des_data = benchmark(run_client_test,
name, serialize, deserialize,
data=doubles,
iterations=50000,
)
msg = "%-11s serialize: %0.3f deserialize: %0.3f total: %0.3f" % (
name, ser_data, des_data, ser_data + des_data
)
print(msg)
| thedrow/cyrapidjson | tests/test_benchmarks.py | Python | bsd-3-clause | 7,481 |
from __future__ import absolute_import
########################################################################
# File based on https://github.com/Blosc/bcolz
########################################################################
#
# License: BSD
# Created: October 5, 2015
# Author: Carst Vaartjes - [email protected]
#
########################################################################
import codecs
import os
from setuptools import setup, Extension, find_packages
from os.path import abspath
from sys import version_info as v
from setuptools.command.build_ext import build_ext as _build_ext
# Check this Python version is supported
if any([v < (2, 7), (3,) < v < (3, 5)]):
raise Exception("Unsupported Python version %d.%d. Requires Python >= 2.7 "
"or >= 3.5." % v[:2])
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
# Prevent numpy from thinking it is still in its setup process:
__builtins__.__NUMPY_SETUP__ = False
import numpy
self.include_dirs.append(numpy.get_include())
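# The build_ext override above is the usual recipe for deferring the numpy
# import until build time, so numpy does not have to be importable when
# setup.py is first evaluated.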
HERE = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
"""
    Build an absolute path from *parts* and return the contents of the
resulting file. Assume UTF-8 encoding.
"""
with codecs.open(os.path.join(HERE, *parts), "rb", "utf-8") as f:
return f.read()
def get_version():
version = {}
with open("bqueryd/version.py") as fp:
        exec(fp.read(), version)
return version
# Sources & libraries
inc_dirs = [abspath('bqueryd')]
try:
import numpy as np
inc_dirs.append(np.get_include())
except ImportError as e:
pass
lib_dirs = []
libs = []
def_macros = []
sources = []
cmdclass = {'build_ext': build_ext}
optional_libs = ['numexpr>=2.6.9']
install_requires = [
'bquery>=0.2.10',
'pyzmq>=17.1.2',
'redis>=3.0.1',
'boto3>=1.9.82',
'smart_open>=1.9.0',
'netifaces>=0.10.9',
'configobj>=5.0.6',
'psutil>=5.0.0',
'azure-storage-blob==12.0.0',
]
setup_requires = []
tests_requires = [
'pandas>=0.23.1',
'pytest>=4.0.0',
'pytest-cov>=2.6.0',
'codacy-coverage>=1.3.7',
]
extras_requires = []
ext_modules = []
package_data = {}
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Operating System :: Microsoft :: Windows',
'Operating System :: Unix',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
]
setup(
name="bqueryd",
version=get_version()['__version__'],
description='A distribution framework for Bquery',
long_description=read("README.md"),
long_description_content_type='text/markdown',
classifiers=classifiers,
author='Carst Vaartjes',
author_email='[email protected]',
maintainer='Carst Vaartjes',
maintainer_email='[email protected]',
url='https://github.com/visualfabriq/bqueryd',
license='GPL2',
platforms=['any'],
ext_modules=ext_modules,
cmdclass=cmdclass,
install_requires=install_requires,
setup_requires=setup_requires,
tests_require=tests_requires,
extras_require=dict(
optional=extras_requires,
test=tests_requires
),
packages=find_packages(),
package_data=package_data,
include_package_data=True,
zip_safe=True,
entry_points={
'console_scripts': [
'bqueryd = bqueryd.node:main'
]
}
)
| visualfabriq/bqueryd | setup.py | Python | bsd-3-clause | 4,002 |
from copy import deepcopy
from operator import mul
import joblib
import numpy as np
from scipy import sparse
import pandas as pd
import pytest
import anndata as ad
from anndata._core.index import _normalize_index
from anndata._core.views import ArrayView, SparseCSRView, SparseCSCView
from anndata.utils import asarray
from anndata.tests.helpers import (
gen_adata,
subset_func,
slice_subset,
single_subset,
assert_equal,
)
# ------------------------------------------------------------------------------
# Some test data
# ------------------------------------------------------------------------------
# data matrix of shape n_obs x n_vars
X_list = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
# annotation of observations / rows
obs_dict = dict(
row_names=["name1", "name2", "name3"], # row annotation
oanno1=["cat1", "cat2", "cat2"], # categorical annotation
oanno2=["o1", "o2", "o3"], # string annotation
oanno3=[2.1, 2.2, 2.3], # float annotation
)
# annotation of variables / columns
var_dict = dict(vanno1=[3.1, 3.2, 3.3])
# unstructured annotation
uns_dict = dict(oanno1_colors=["#000000", "#FFFFFF"], uns2=["some annotation"])
subset_func2 = subset_func
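# The alias above gives tests a second, independently parametrized copy of the
# subset_func fixture, so one test can request e.g. an obs subset and a var
# subset that vary separately.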
class NDArraySubclass(np.ndarray):
def view(self, dtype=None, typ=None):
return self
@pytest.fixture
def adata():
adata = ad.AnnData(np.zeros((100, 100)))
adata.obsm["o"] = np.zeros((100, 50))
adata.varm["o"] = np.zeros((100, 50))
return adata
@pytest.fixture(params=[asarray, sparse.csr_matrix, sparse.csc_matrix])
def adata_parameterized(request):
return gen_adata(shape=(200, 300), X_type=request.param)
@pytest.fixture(
params=[np.array, sparse.csr_matrix, sparse.csc_matrix],
ids=["np_array", "scipy_csr", "scipy_csc"],
)
def matrix_type(request):
return request.param
@pytest.fixture(params=["layers", "obsm", "varm"])
def mapping_name(request):
return request.param
# ------------------------------------------------------------------------------
# The test functions
# ------------------------------------------------------------------------------
def test_views():
X = np.array(X_list)
adata = ad.AnnData(X, obs=obs_dict, var=var_dict, uns=uns_dict, dtype="int32")
assert adata[:, 0].is_view
assert adata[:, 0].X.tolist() == np.reshape([1, 4, 7], (3, 1)).tolist()
adata[:2, 0].X = [0, 0]
assert adata[:, 0].X.tolist() == np.reshape([0, 0, 7], (3, 1)).tolist()
adata_subset = adata[:2, [0, 1]]
assert adata_subset.is_view
# now transition to actual object
adata_subset.obs["foo"] = range(2)
assert not adata_subset.is_view
assert adata_subset.obs["foo"].tolist() == list(range(2))
def test_modify_view_component(matrix_type, mapping_name):
adata = ad.AnnData(
np.zeros((10, 10)),
**{mapping_name: dict(m=matrix_type(asarray(sparse.random(10, 10))))},
)
init_hash = joblib.hash(adata)
subset = adata[:5, :][:, :5]
assert subset.is_view
m = getattr(subset, mapping_name)["m"]
m[0, 0] = 100
assert not subset.is_view
assert getattr(subset, mapping_name)["m"][0, 0] == 100
assert init_hash == joblib.hash(adata)
# TODO: These tests could probably be condensed into a fixture
# based test for obsm and varm
def test_set_obsm_key(adata):
init_hash = joblib.hash(adata)
orig_obsm_val = adata.obsm["o"].copy()
subset_obsm = adata[:50]
assert subset_obsm.is_view
subset_obsm.obsm["o"] = np.ones((50, 20))
assert not subset_obsm.is_view
assert np.all(adata.obsm["o"] == orig_obsm_val)
assert init_hash == joblib.hash(adata)
def test_set_varm_key(adata):
init_hash = joblib.hash(adata)
orig_varm_val = adata.varm["o"].copy()
subset_varm = adata[:, :50]
assert subset_varm.is_view
subset_varm.varm["o"] = np.ones((50, 20))
assert not subset_varm.is_view
assert np.all(adata.varm["o"] == orig_varm_val)
assert init_hash == joblib.hash(adata)
def test_set_obs(adata, subset_func):
init_hash = joblib.hash(adata)
subset = adata[subset_func(adata.obs_names), :]
new_obs = pd.DataFrame(
dict(a=np.ones(subset.n_obs), b=np.ones(subset.n_obs)),
index=subset.obs_names,
)
assert subset.is_view
subset.obs = new_obs
assert not subset.is_view
assert np.all(subset.obs == new_obs)
assert joblib.hash(adata) == init_hash
def test_set_var(adata, subset_func):
init_hash = joblib.hash(adata)
subset = adata[:, subset_func(adata.var_names)]
new_var = pd.DataFrame(
dict(a=np.ones(subset.n_vars), b=np.ones(subset.n_vars)),
index=subset.var_names,
)
assert subset.is_view
subset.var = new_var
assert not subset.is_view
assert np.all(subset.var == new_var)
assert joblib.hash(adata) == init_hash
def test_drop_obs_column():
adata = ad.AnnData(np.array(X_list), obs=obs_dict, dtype="int32")
subset = adata[:2]
assert subset.is_view
# returns a copy of obs
assert subset.obs.drop(columns=["oanno1"]).columns.tolist() == ["oanno2", "oanno3"]
assert subset.is_view
# would modify obs, so it should actualize subset and not modify adata
subset.obs.drop(columns=["oanno1"], inplace=True)
assert not subset.is_view
assert subset.obs.columns.tolist() == ["oanno2", "oanno3"]
assert adata.obs.columns.tolist() == ["oanno1", "oanno2", "oanno3"]
def test_set_obsm(adata):
init_hash = joblib.hash(adata)
dim0_size = np.random.randint(2, adata.shape[0] - 1)
dim1_size = np.random.randint(1, 99)
orig_obsm_val = adata.obsm["o"].copy()
subset_idx = np.random.choice(adata.obs_names, dim0_size, replace=False)
subset = adata[subset_idx, :]
assert subset.is_view
subset.obsm = dict(o=np.ones((dim0_size, dim1_size)))
assert not subset.is_view
assert np.all(orig_obsm_val == adata.obsm["o"]) # Checking for mutation
assert np.all(subset.obsm["o"] == np.ones((dim0_size, dim1_size)))
subset = adata[subset_idx, :]
subset_hash = joblib.hash(subset)
with pytest.raises(ValueError):
subset.obsm = dict(o=np.ones((dim0_size + 1, dim1_size)))
with pytest.raises(ValueError):
subset.varm = dict(o=np.ones((dim0_size - 1, dim1_size)))
assert subset_hash == joblib.hash(subset)
    # Only modifications have been made to a view
assert init_hash == joblib.hash(adata)
def test_set_varm(adata):
init_hash = joblib.hash(adata)
dim0_size = np.random.randint(2, adata.shape[1] - 1)
dim1_size = np.random.randint(1, 99)
orig_varm_val = adata.varm["o"].copy()
subset_idx = np.random.choice(adata.var_names, dim0_size, replace=False)
subset = adata[:, subset_idx]
assert subset.is_view
subset.varm = dict(o=np.ones((dim0_size, dim1_size)))
assert not subset.is_view
assert np.all(orig_varm_val == adata.varm["o"]) # Checking for mutation
assert np.all(subset.varm["o"] == np.ones((dim0_size, dim1_size)))
subset = adata[:, subset_idx]
subset_hash = joblib.hash(subset)
with pytest.raises(ValueError):
subset.varm = dict(o=np.ones((dim0_size + 1, dim1_size)))
with pytest.raises(ValueError):
subset.varm = dict(o=np.ones((dim0_size - 1, dim1_size)))
# subset should not be changed by failed setting
assert subset_hash == joblib.hash(subset)
assert init_hash == joblib.hash(adata)
# TODO: Determine if this is the intended behavior,
# or just the behaviour we’ve had for a while
def test_not_set_subset_X(matrix_type, subset_func):
adata = ad.AnnData(matrix_type(asarray(sparse.random(20, 20))))
init_hash = joblib.hash(adata)
orig_X_val = adata.X.copy()
while True:
subset_idx = slice_subset(adata.obs_names)
if len(adata[subset_idx, :]) > 2:
break
subset = adata[subset_idx, :]
subset = adata[:, subset_idx]
internal_idx = _normalize_index(
subset_func(np.arange(subset.X.shape[1])), subset.var_names
)
assert subset.is_view
subset.X[:, internal_idx] = 1
assert not subset.is_view
assert not np.any(asarray(adata.X != orig_X_val))
assert init_hash == joblib.hash(adata)
def test_set_scalar_subset_X(matrix_type, subset_func):
adata = ad.AnnData(matrix_type(np.zeros((10, 10))))
orig_X_val = adata.X.copy()
subset_idx = slice_subset(adata.obs_names)
adata_subset = adata[subset_idx, :]
adata_subset.X = 1
assert adata_subset.is_view
assert np.all(asarray(adata[subset_idx, :].X) == 1)
assert asarray((orig_X_val != adata.X)).sum() == mul(*adata_subset.shape)
# TODO: Use different kind of subsetting for adata and view
def test_set_subset_obsm(adata, subset_func):
init_hash = joblib.hash(adata)
orig_obsm_val = adata.obsm["o"].copy()
while True:
subset_idx = slice_subset(adata.obs_names)
if len(adata[subset_idx, :]) > 2:
break
subset = adata[subset_idx, :]
internal_idx = _normalize_index(
subset_func(np.arange(subset.obsm["o"].shape[0])), subset.obs_names
)
assert subset.is_view
subset.obsm["o"][internal_idx] = 1
assert not subset.is_view
assert np.all(adata.obsm["o"] == orig_obsm_val)
assert init_hash == joblib.hash(adata)
def test_set_subset_varm(adata, subset_func):
init_hash = joblib.hash(adata)
orig_varm_val = adata.varm["o"].copy()
while True:
subset_idx = slice_subset(adata.var_names)
if (adata[:, subset_idx]).shape[1] > 2:
break
subset = adata[:, subset_idx]
internal_idx = _normalize_index(
subset_func(np.arange(subset.varm["o"].shape[0])), subset.var_names
)
assert subset.is_view
subset.varm["o"][internal_idx] = 1
assert not subset.is_view
assert np.all(adata.varm["o"] == orig_varm_val)
assert init_hash == joblib.hash(adata)
@pytest.mark.parametrize("attr", ["obsm", "varm", "obsp", "varp", "layers"])
def test_view_failed_delitem(attr):
adata = gen_adata((10, 10))
view = adata[5:7, :][:, :5]
adata_hash = joblib.hash(adata)
view_hash = joblib.hash(view)
with pytest.raises(KeyError):
getattr(view, attr).__delitem__("not a key")
assert view.is_view
assert adata_hash == joblib.hash(adata)
assert view_hash == joblib.hash(view)
@pytest.mark.parametrize("attr", ["obsm", "varm", "obsp", "varp", "layers"])
def test_view_delitem(attr):
adata = gen_adata((10, 10))
getattr(adata, attr)["to_delete"] = np.ones((10, 10))
# Shouldn’t be a subclass, should be an ndarray
assert type(getattr(adata, attr)["to_delete"]) is np.ndarray
view = adata[5:7, :][:, :5]
adata_hash = joblib.hash(adata)
view_hash = joblib.hash(view)
getattr(view, attr).__delitem__("to_delete")
assert not view.is_view
assert "to_delete" not in getattr(view, attr)
assert "to_delete" in getattr(adata, attr)
assert adata_hash == joblib.hash(adata)
assert view_hash != joblib.hash(view)
@pytest.mark.parametrize(
"attr", ["X", "obs", "var", "obsm", "varm", "obsp", "varp", "layers", "uns"]
)
def test_view_delattr(attr, subset_func):
base = gen_adata((10, 10))
orig_hash = joblib.hash(base)
subset = base[subset_func(base.obs_names), subset_func(base.var_names)]
empty = ad.AnnData(obs=subset.obs[[]], var=subset.var[[]])
delattr(subset, attr)
assert not subset.is_view
# Should now have same value as default
assert_equal(getattr(subset, attr), getattr(empty, attr))
assert orig_hash == joblib.hash(base) # Original should not be modified
@pytest.mark.parametrize(
"attr", ["obs", "var", "obsm", "varm", "obsp", "varp", "layers", "uns"]
)
def test_view_setattr_machinery(attr, subset_func, subset_func2):
    # Tests that setting attributes on a view doesn't mess anything up too badly
adata = gen_adata((10, 10))
view = adata[subset_func(adata.obs_names), subset_func2(adata.var_names)]
actual = view.copy()
setattr(view, attr, getattr(actual, attr))
assert_equal(actual, view, exact=True)
def test_layers_view():
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
L = np.array([[10, 11, 12], [13, 14, 15], [16, 17, 18]])
real_adata = ad.AnnData(X)
real_adata.layers["L"] = L
view_adata = real_adata[1:, 1:]
real_hash = joblib.hash(real_adata)
view_hash = joblib.hash(view_adata)
assert view_adata.is_view
with pytest.raises(ValueError):
view_adata.layers["L2"] = L + 2
assert view_adata.is_view # Failing to set layer item makes adata not view
assert real_hash == joblib.hash(real_adata)
assert view_hash == joblib.hash(view_adata)
view_adata.layers["L2"] = L[1:, 1:] + 2
assert not view_adata.is_view
assert real_hash == joblib.hash(real_adata)
assert view_hash != joblib.hash(view_adata)
# TODO: This can be flaky. Make that stop
def test_view_of_view(matrix_type, subset_func, subset_func2):
adata = gen_adata((30, 15), X_type=matrix_type)
adata.raw = adata
if subset_func is single_subset:
pytest.xfail("Other subset generating functions have trouble with this")
var_s1 = subset_func(adata.var_names, min_size=4)
var_view1 = adata[:, var_s1]
var_s2 = subset_func2(var_view1.var_names)
var_view2 = var_view1[:, var_s2]
assert var_view2._adata_ref is adata
obs_s1 = subset_func(adata.obs_names, min_size=4)
obs_view1 = adata[obs_s1, :]
obs_s2 = subset_func2(obs_view1.obs_names)
assert adata[obs_s1, :][:, var_s1][obs_s2, :]._adata_ref is adata
view_of_actual_copy = adata[:, var_s1].copy()[obs_s1, :].copy()[:, var_s2].copy()
view_of_view_copy = adata[:, var_s1][obs_s1, :][:, var_s2].copy()
assert_equal(view_of_actual_copy, view_of_view_copy, exact=True)
def test_view_of_view_modification():
adata = ad.AnnData(np.zeros((10, 10)))
adata[0, :][:, 5:].X = np.ones(5)
assert np.all(adata.X[0, 5:] == np.ones(5))
adata[[1, 2], :][:, [1, 2]].X = np.ones((2, 2))
assert np.all(adata.X[1:3, 1:3] == np.ones((2, 2)))
adata.X = sparse.csr_matrix(adata.X)
adata[0, :][:, 5:].X = np.ones(5) * 2
assert np.all(asarray(adata.X)[0, 5:] == np.ones(5) * 2)
adata[[1, 2], :][:, [1, 2]].X = np.ones((2, 2)) * 2
assert np.all(asarray(adata.X)[1:3, 1:3] == np.ones((2, 2)) * 2)
def test_double_index(subset_func, subset_func2):
adata = gen_adata((10, 10))
obs_subset = subset_func(adata.obs_names)
var_subset = subset_func2(adata.var_names)
v1 = adata[obs_subset, var_subset]
v2 = adata[obs_subset, :][:, var_subset]
assert np.all(asarray(v1.X) == asarray(v2.X))
assert np.all(v1.obs == v2.obs)
assert np.all(v1.var == v2.var)
def test_view_retains_ndarray_subclass():
adata = ad.AnnData(np.zeros((10, 10)))
adata.obsm["foo"] = np.zeros((10, 5)).view(NDArraySubclass)
view = adata[:5, :]
assert isinstance(view.obsm["foo"], NDArraySubclass)
assert view.obsm["foo"].shape == (5, 5)
def test_modify_uns_in_copy():
# https://github.com/theislab/anndata/issues/571
adata = ad.AnnData(np.ones((5, 5)), uns={"parent": {"key": "value"}})
adata_copy = adata[:3].copy()
adata_copy.uns["parent"]["key"] = "new_value"
assert adata.uns["parent"]["key"] != adata_copy.uns["parent"]["key"]
@pytest.mark.parametrize("index", [-101, 100, (slice(None), -101), (slice(None), 100)])
def test_invalid_scalar_index(adata, index):
# https://github.com/theislab/anndata/issues/619
with pytest.raises(IndexError, match=r".*index.* out of range\."):
_ = adata[index]
@pytest.mark.parametrize("obs", [False, True])
@pytest.mark.parametrize("index", [-100, -50, -1])
def test_negative_scalar_index(adata, index: int, obs: bool):
pos_index = index + (adata.n_obs if obs else adata.n_vars)
if obs:
adata_pos_subset = adata[pos_index]
adata_neg_subset = adata[index]
else:
adata_pos_subset = adata[:, pos_index]
adata_neg_subset = adata[:, index]
np.testing.assert_array_equal(
adata_pos_subset.obs_names, adata_neg_subset.obs_names
)
np.testing.assert_array_equal(
adata_pos_subset.var_names, adata_neg_subset.var_names
)
@pytest.mark.parametrize("spmat", [sparse.csr_matrix, sparse.csc_matrix])
def test_deepcopy_subset(adata, spmat: type):
adata.obsp["arr"] = np.zeros((adata.n_obs, adata.n_obs))
adata.obsp["spmat"] = spmat((adata.n_obs, adata.n_obs))
adata = deepcopy(adata[:10].copy())
assert isinstance(adata.obsp["arr"], np.ndarray)
assert not isinstance(adata.obsp["arr"], ArrayView)
np.testing.assert_array_equal(adata.obsp["arr"].shape, (10, 10))
assert isinstance(adata.obsp["spmat"], spmat)
assert not isinstance(
adata.obsp["spmat"],
SparseCSRView if spmat is sparse.csr_matrix else SparseCSCView,
)
np.testing.assert_array_equal(adata.obsp["spmat"].shape, (10, 10))
# https://github.com/theislab/anndata/issues/680
@pytest.mark.parametrize("array_type", [asarray, sparse.csr_matrix, sparse.csc_matrix])
@pytest.mark.parametrize("attr", ["X", "layers", "obsm", "varm", "obsp", "varp"])
def test_view_mixin_copies_data(adata, array_type: type, attr):
N = 100
adata = ad.AnnData(
obs=pd.DataFrame(index=np.arange(N)), var=pd.DataFrame(index=np.arange(N))
)
X = array_type(sparse.eye(N, N).multiply(np.arange(1, N + 1)))
if attr == "X":
adata.X = X
else:
getattr(adata, attr)["arr"] = X
view = adata[:50]
if attr == "X":
arr_view = view.X
else:
arr_view = getattr(view, attr)["arr"]
arr_view_copy = arr_view.copy()
if sparse.issparse(X):
assert not np.shares_memory(arr_view.indices, arr_view_copy.indices)
assert not np.shares_memory(arr_view.indptr, arr_view_copy.indptr)
assert not np.shares_memory(arr_view.data, arr_view_copy.data)
arr_view_copy.data[0] = -5
assert not np.array_equal(arr_view_copy.data, arr_view.data)
else:
assert not np.shares_memory(arr_view, arr_view_copy)
arr_view_copy[0, 0] = -5
assert not np.array_equal(arr_view_copy, arr_view)
def test_copy_X_dtype():
adata = ad.AnnData(sparse.eye(50, dtype=np.float64, format="csr"))
adata_c = adata[::2].copy()
assert adata_c.X.dtype == adata.X.dtype
| theislab/anndata | anndata/tests/test_views.py | Python | bsd-3-clause | 18,418 |
#!/usr/bin/env python
'''use marquise_telemetry to build throughput info as visible from the client
e.g.:
$ marquise_telemetry broker | marquise_throughput.py
'''
import sys
from time import *
import os
import fcntl
class TimeAware(object):
'''simple timing aware mixin
The default implementation of on_tick_change() is to call every function
passed to the constructor in tick_handlers
'''
def __init__(self, ticklen=1, tick_handlers=[]):
self.last_tick = self.start_time = time()
self.ticklen = ticklen
self.tick_handlers = tick_handlers
self.n_ticks = 0
self.totalticktime = 0
def check_for_tick_changed(self):
'''run on_tick_change once for every ticklen seconds that has passed since last_tick
'''
tnow = time()
while tnow - self.last_tick >= self.ticklen:
self.n_ticks += 1
self.totalticktime += self.ticklen
self.last_tick += self.ticklen
self.on_tick_change()
def on_tick_change(self):
'''handler for a tick change
the timestamp marking the 'tick' being handled is in self.last_tick
The current time may however be significantly after self.last_tick if
check_for_tick_changed is not called more often than self.ticklen
'''
for f in self.tick_handlers: f()
def run_forever(self,sleep_time=None):
'''run in a loop regularly calling on_tick_change
'''
        if sleep_time is None: sleep_time = self.ticklen/10.0
while True:
self.check_for_tick_changed()
sleep(sleep_time)
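# Minimal usage sketch for TimeAware (illustrative only; heartbeat() is a
# hypothetical handler): the instance watches the wall clock and calls every
# registered handler once per elapsed ticklen.
#
#   def heartbeat():
#       print 'tick at %f' % time()
#   TimeAware(ticklen=1, tick_handlers=[heartbeat]).run_forever()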
class TimeHistogram(TimeAware):
'''implements a rolling histogram'''
def __init__(self, nbins, seconds_per_bin=1):
TimeAware.__init__(self, seconds_per_bin)
self.nbins = nbins
self._bins = [0 for n in range(nbins)]
self.current_bin = 0
def on_tick_change(self):
self.current_bin = (self.current_bin + 1) % self.nbins
self._bins[self.current_bin] = 0
def add(self, n=1):
'''add 'n' to the current histogram bin
'''
self.check_for_tick_changed()
self._bins[self.current_bin] += n
def sum(self, k=60):
        '''return the total number of entries over the last k seconds
'''
bins_to_check = k/self.ticklen
return sum(self.bins[-bins_to_check:])
def mean(self, k=60):
'''return the mean entries per second over the last k seconds
'''
if self.totalticktime < k:
k = self.totalticktime # Only average over the time we've been running
bins_to_check = k/self.ticklen
return self.sum(k) / float(bins_to_check) if bins_to_check else 0
@property
def bins(self):
'''get bins in time order, oldest to newest'''
self.check_for_tick_changed()
return self._bins[self.current_bin+1:]+self._bins[:self.current_bin+1]
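# Sketch of the rolling-histogram behaviour (illustrative only): add() drops
# counts into the bin for the current tick, and mean(k) averages the last k
# seconds of bins, so a burst of 300 points in one second reads as roughly
# 300/sec over 1s but 5/sec over 60s once a minute of history exists.
#
#   hist = TimeHistogram(nbins=600, seconds_per_bin=1)
#   hist.add(300)
#   hist.mean(1)   # ~300.0
#   hist.mean(60)  # ~5.0 after 60 seconds of history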
class ThroughputCounter(object):
def __init__(self, input_stream=sys.stdin):
self.input_stream=input_stream
self.point_hist = TimeHistogram(600)
self.burst_hist = TimeHistogram(600)
self.acked_burst_hist = TimeHistogram(600)
self.latency_hist = TimeHistogram(600)
self.ack_hist = TimeHistogram(600)
self.defer_write_points_hist = TimeHistogram(600)
self.defer_read_points_hist = TimeHistogram(600)
self.timed_out_points_hist = TimeHistogram(600)
self.outstanding_points = 0
self.outstanding_bursts = {} # burstid -> start timestamp,points
self._reader_state = {}
self.using_marquised = set() # Hosts that relay through marquised
def get_outstanding(self,last_n_seconds=[600,60,1]):
total_burst_counts = map(self.point_hist.sum, last_n_seconds)
total_ack_counts = map(self.ack_hist.sum, last_n_seconds)
return [nbursts-nacks for nbursts,nacks in zip(total_burst_counts,total_ack_counts)]
def get_total_outstanding_points(self):
return sum(points for timestamp,points in self.outstanding_bursts.itervalues())
def get_points_per_seconds(self,over_seconds=[600,60,1]):
return map(self.point_hist.mean, over_seconds)
def get_total_bursts(self,over_seconds=[600,60,1]):
return map(self.burst_hist.mean, over_seconds)
def get_acks_per_second(self,over_seconds=[600,60,1]):
return map(self.ack_hist.mean, over_seconds)
def get_deferred_points_written_per_second(self,over_seconds=[600,60,1]):
return map(self.defer_write_points_hist.mean, over_seconds)
def get_timed_out_points_per_second(self,over_seconds=[600,60,1]):
return map(self.timed_out_points_hist.mean, over_seconds)
def get_deferred_points_read_per_second(self,over_seconds=[600,60,1]):
return map(self.defer_read_points_hist.mean, over_seconds)
def get_average_latencies(self,over_seconds=[600,60,1]):
burst_counts = map(self.acked_burst_hist.sum, over_seconds)
latency_sums = map(self.latency_hist.sum, over_seconds)
return [latencysum/float(nbursts) if nbursts > 0 else 0 for latencysum,nbursts in zip(latency_sums,burst_counts)]
def process_burst(self, data):
if not all(k in data for k in ('identity','message id','points')):
print >> sys.stderr, 'malformed databurst info. ignoring'
return
msgtag = data['identity']+data['message id']
points = int(data['points'])
timestamp = time()
self.outstanding_bursts[msgtag] = timestamp,points
self.outstanding_points += points
self.burst_hist.add(1)
self.point_hist.add(points)
def _msg_tag_from_data(self, data):
return (data['identity'].replace('marquised:',''))+data['message id']
def process_deferred_write(self, data):
msgtag = self._msg_tag_from_data(data)
burst_timestamp,points = self.outstanding_bursts.get(msgtag,(None,None))
if burst_timestamp is not None:
self.defer_write_points_hist.add(points)
def process_deferred_read(self, data):
msgtag = self._msg_tag_from_data(data)
burst_timestamp,points = self.outstanding_bursts.get(msgtag,(None,None))
if burst_timestamp is not None:
self.defer_read_points_hist.add(points)
def process_send_timeout(self, data):
msgtag = self._msg_tag_from_data(data)
burst_timestamp,points = self.outstanding_bursts.get(msgtag,(None,None))
if burst_timestamp is not None:
self.timed_out_points_hist.add(points)
def process_ack(self, data):
if not all(k in data for k in ('identity','message id')):
print >> sys.stderr, 'malformed ack info. ignoring'
return
if data['identity'][:10] == 'marquised:':
# ACK is coming back to marquised from the broker
host = data['identity'][10:]
self.using_marquised.add(host)
else:
host = data['identity']
if host in self.using_marquised:
# If a client is using marquised, that client will
                # receive an ack back from marquised immediately.
#
# We ignore this ack here, and wait for the one
# received by marquised
return
msgtag = host+data['message id']
burst_timestamp,points = self.outstanding_bursts.pop(msgtag,(None,None))
if burst_timestamp == None:
# Got an ACK we didn't see the burst for. Ignoring it.
return
latency = time() - burst_timestamp
self.ack_hist.add(points)
self.acked_burst_hist.add(1)
self.latency_hist.add(latency)
self.outstanding_points -= points
def process_line(self, line):
'''process a line of marquise telemetry
At the moment, only look at bursts being created by the collate_thread
and acked by the marquise poller_thread
sample:
fishhook.engineroom.anchor.net.au 1395212041732118000 8c087c0b collator_thread created_databurst frames = 1618 compressed_bytes = 16921
....
marquised:astrolabe.syd1.anchor.net.au 1395375377705126042 c87ba112 poller_thread rx_msg_from collate_thread
....
fishhook.engineroom.anchor.net.au 1395212082492520000 8c087c0b poller_thread rx_ack_from broker msg_id = 5553
CAVEAT: In the above, the marquised 'collate_thread' is actually the
collate thread in a different process, received by marquised. We can
use the knowledge that this is happening to note that astrolabe is
passing stuff through marquised, and to ignore the ACK that marquised
sends back to the original client on astrolabe when tracking end-to-end
latency
'''
fields = line.strip().split()
if len(fields) < 4: return
# Keep track of hosts using marquised. This is a bit bruteforce, but we need to catch this
# sort of thing early to not accidentally double-track ACKs
#
if fields[0][:10] == 'marquised:':
self.using_marquised.add(fields[0][10:])
key = ' '.join(fields[3:6])
if key == 'collator_thread created_databurst frames':
identity,message_id,points = fields[0],fields[2],int(fields[7])
self.process_burst({ 'identity': identity, 'message id': message_id, 'points': points })
# Anything past here is only in the poller thread. Skips a lot of stuff
if fields[3] != 'poller_thread': return
if key == 'poller_thread rx_ack_from broker':
identity,message_id = fields[0],fields[2]
self.process_ack({ 'identity': identity, 'message id': message_id })
elif fields[4] == 'defer_to_disk':
identity,message_id = fields[0],fields[2]
            data = { 'identity': identity, 'message id': message_id }
            self.process_deferred_write(data)
            if fields[5] == 'timeout_waiting_for_ack':
                self.process_send_timeout(data)
elif fields[4] == 'read_from_disk':
identity,message_id = fields[0],fields[2]
self.process_deferred_read({ 'identity': identity, 'message id': message_id })
def process_lines_from_stream(self):
'''process any lines from our streams that are available to read'''
while True:
try:
l = self.input_stream.readline()
self.process_line(l)
except IOError:
# Nothing left to read at the moment
return
class ThroughputPrinter(object):
def __init__(self, counter, outstream=sys.stdout, avgtimes=(600,60,1)):
self.counter = counter
self.outstream = outstream
self.avgtimes = avgtimes
self.lines_printed = 0
def print_header(self):
colbreak = " " * 3
header = '#'
header += "mean points per second".center(29) + colbreak
header += "mean acks per second".center(30) + colbreak
header += "mean latency per point".center(30) + colbreak
header += "deferred points written/s".center(30) + colbreak
header += "deferred points read/s".center(30) + colbreak
header += "points timed out sending/s".center(30) + colbreak
header += "unacked".rjust(10) + '\n'
header += "#"
header += "".join(("(%dsec)" % secs).rjust(10) for secs in self.avgtimes)[1:]
header += colbreak
header += "".join(("(%dsec)" % secs).rjust(10) for secs in self.avgtimes)
header += colbreak
header += "".join(("(%dsec)" % secs).rjust(10) for secs in self.avgtimes)
header += colbreak
header += "".join(("(%dsec)" % secs).rjust(10) for secs in self.avgtimes)
header += colbreak
header += "".join(("(%dsec)" % secs).rjust(10) for secs in self.avgtimes)
header += colbreak
header += "".join(("(%dsec)" % secs).rjust(10) for secs in self.avgtimes)
header += colbreak + "points".rjust(10) + '\n'
header += '# ' + '-'*28 + colbreak + '-'*30 + colbreak + '-'*30
header += colbreak + '-'*30 + colbreak + '-'*30 + colbreak + '-'*30
header += colbreak + '-'*10 + '\n'
self.outstream.write(header)
self.outstream.flush()
def print_throughput(self):
bursts_per_second = self.counter.get_points_per_seconds(self.avgtimes)
acks_per_second = self.counter.get_acks_per_second(self.avgtimes)
mean_latencies = self.counter.get_average_latencies(self.avgtimes)
outstanding_points = self.counter.get_total_outstanding_points()
points_deferred_to_disk = self.counter.get_deferred_points_written_per_second(self.avgtimes)
points_read_from_disk = self.counter.get_deferred_points_read_per_second(self.avgtimes)
points_timed_out_sending = self.counter.get_timed_out_points_per_second(self.avgtimes)
# RENDER ALL THE THINGS!
out = ""
colbreak = " " * 3
out += "".join((" %9.2f" % b for b in bursts_per_second))
out += colbreak
out += "".join((" %9.2f" % b for b in acks_per_second))
out += colbreak
out += "".join((" %9.2f" % b for b in mean_latencies))
out += colbreak
out += "".join((" %9.2f" % b for b in points_deferred_to_disk))
out += colbreak
out += "".join((" %9.2f" % b for b in points_read_from_disk))
out += colbreak
out += "".join((" %9.2f" % b for b in points_timed_out_sending))
out += colbreak
out += "%10d" % outstanding_points + '\n'
if self.lines_printed % 20 == 0:
self.print_header()
self.outstream.write(out)
self.outstream.flush()
self.lines_printed += 1
if __name__ == '__main__':
# Make stdin non-blocking
fd = sys.stdin.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
reader = ThroughputCounter(sys.stdin)
writer = ThroughputPrinter(reader, sys.stdout)
# Run an event loop to process outstanding input every second
# and then output the processed data
event_loop = TimeAware(1, [ reader.process_lines_from_stream,
writer.print_throughput ])
event_loop.run_forever()
# vim: set tabstop=4 expandtab:
| anchor/vaultaire-tools | telemetry/marquise_throughput.py | Python | bsd-3-clause | 14,550 |
DEBUG = False
BASEDIR = ''
SUBDIR = ''
PREFIX = ''
QUALITY = 85
CONVERT = '/usr/bin/convert'
WVPS = '/usr/bin/wvPS'
PROCESSORS = (
'populous.thumbnail.processors.colorspace',
'populous.thumbnail.processors.autocrop',
'populous.thumbnail.processors.scale_and_crop',
'populous.thumbnail.processors.filters',
)
| caiges/populous | populous/thumbnail/defaults.py | Python | bsd-3-clause | 324 |
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant l'action detruire_sortie."""
from primaires.scripting.action import Action
from primaires.scripting.instruction import ErreurExecution
class ClasseAction(Action):
"""Détruit une sortie d'une salle."""
@classmethod
def init_types(cls):
cls.ajouter_types(cls.detruire_sortie, "Salle", "str")
@staticmethod
def detruire_sortie(salle, direction):
"""Détruit une sortie de salle et sa réciproque de la destination.
La direction est à choisir parmi est, ouest, nord, sud, nord-est,
nord-ouest, sud-est, sud-ouest, haut et bas.
"""
try:
direction = salle.sorties.get_nom_long(direction)
except KeyError:
raise ErreurExecution("direction {} inconnue".format(direction))
if not salle.sorties.sortie_existe(direction):
raise ErreurExecution("sortie {} non définie".format(direction))
d_salle = salle.sorties[direction].salle_dest
dir_opposee = salle.sorties.get_nom_oppose(direction)
d_salle.sorties.supprimer_sortie(dir_opposee)
salle.sorties.supprimer_sortie(direction)
| vlegoff/tsunami | src/primaires/scripting/actions/detruire_sortie.py | Python | bsd-3-clause | 2,726 |
from flask import Flask, render_template, flash, session, redirect, url_for
from wtforms import TextAreaField
from wtforms.validators import DataRequired
from flask.ext.wtf import Form
from flask.ext.wtf.recaptcha import RecaptchaField
DEBUG = True
SECRET_KEY = 'secret'
# keys for localhost. Change as appropriate.
RECAPTCHA_PUBLIC_KEY = '6LeYIbsSAAAAACRPIllxA7wvXjIE411PfdB2gt2J'
RECAPTCHA_PRIVATE_KEY = '6LeYIbsSAAAAAJezaIq3Ft_hSTo0YtyeFG-JgRtu'
app = Flask(__name__)
app.config.from_object(__name__)
class CommentForm(Form):
comment = TextAreaField("Comment", validators=[DataRequired()])
recaptcha = RecaptchaField()
@app.route("/")
def index(form=None):
if form is None:
form = CommentForm()
comments = session.get("comments", [])
return render_template("index.html",
comments=comments,
form=form)
@app.route("/add/", methods=("POST",))
def add_comment():
form = CommentForm()
if form.validate_on_submit():
comments = session.pop('comments', [])
comments.append(form.comment.data)
session['comments'] = comments
flash("You have added a new comment")
return redirect(url_for("index"))
return index(form)
if __name__ == "__main__":
app.run()
| Maxence1/flask-wtf | examples/recaptcha/app.py | Python | bsd-3-clause | 1,299 |
from django.template import TemplateDoesNotExist, TemplateSyntaxError
from django.test import SimpleTestCase
from ..utils import setup
from .test_extends import inheritance_templates
class ExceptionsTests(SimpleTestCase):
@setup({'exception01': "{% extends 'nonexistent' %}"})
def test_exception01(self):
"""
Raise exception for invalid template name
"""
with self.assertRaises(TemplateDoesNotExist):
self.engine.render_to_string('exception01')
@setup({'exception02': '{% extends nonexistent %}'})
def test_exception02(self):
"""
Raise exception for invalid variable template name
"""
if self.engine.string_if_invalid:
with self.assertRaises(TemplateDoesNotExist):
self.engine.render_to_string('exception02')
else:
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('exception02')
@setup(
{'exception03': "{% extends 'inheritance01' %}"
"{% block first %}2{% endblock %}{% extends 'inheritance16' %}"},
inheritance_templates,
)
def test_exception03(self):
"""
Raise exception for extra {% extends %} tags
"""
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('exception03')
@setup(
{'exception04': "{% extends 'inheritance17' %}{% block first %}{% echo 400 %}5678{% endblock %}"},
inheritance_templates,
)
def test_exception04(self):
"""
Raise exception for custom tags used in child with {% load %} tag in parent, not in child
"""
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('exception04')
@setup({'exception05': '{% block first %}{{ block.super }}{% endblock %}'})
def test_exception05(self):
"""
Raise exception for block.super used in base template
"""
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('exception05')
| yephper/django | tests/template_tests/syntax_tests/test_exceptions.py | Python | bsd-3-clause | 2,158 |
import collections
from .settings import preferences_settings
from .exceptions import CachedValueNotFound, DoesNotExist
class PreferencesManager(collections.Mapping):
"""Handle retrieving / caching of preferences"""
def __init__(self, model, registry, **kwargs):
self.model = model
self.registry = registry
self.queryset = self.model.objects.all()
self.instance = kwargs.get('instance')
if self.instance:
self.queryset = self.queryset.filter(instance=self.instance)
@property
def cache(self):
from django.core.cache import caches
return caches['default']
def __getitem__(self, key):
return self.get(key)
def __setitem__(self, key, value):
section, name = self.parse_lookup(key)
self.update_db_pref(section=section, name=name, value=value)
def __repr__(self):
return repr(self.all())
def __iter__(self):
return self.all().__iter__()
def __len__(self):
return len(self.all())
def get_cache_key(self, section, name):
"""Return the cache key corresponding to a given preference"""
if not self.instance:
return 'dynamic_preferences_{0}_{1}_{2}'.format(self.model.__name__, section, name)
return 'dynamic_preferences_{0}_{1}_{2}_{3}'.format(self.model.__name__, section, name, self.instance.pk)
def from_cache(self, section, name):
"""Return a preference raw_value from cache"""
cached_value = self.cache.get(
self.get_cache_key(section, name), CachedValueNotFound)
if cached_value is CachedValueNotFound:
raise CachedValueNotFound
return self.registry.get(section=section, name=name).serializer.deserialize(cached_value)
def to_cache(self, pref):
"""Update/create the cache value for the given preference model instance"""
self.cache.set(
self.get_cache_key(pref.section, pref.name), pref.raw_value, None)
def pref_obj(self, section, name):
return self.registry.get(section=section, name=name)
def parse_lookup(self, lookup):
try:
section, name = lookup.split(
preferences_settings.SECTION_KEY_SEPARATOR)
except ValueError:
name = lookup
section = None
return section, name
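    # Illustrative note (added commentary; the dot separator below is an
    # assumption, the real value comes from SECTION_KEY_SEPARATOR):
    #   parse_lookup('general.site_title') -> ('general', 'site_title')
    #   parse_lookup('site_title') -> (None, 'site_title')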
def get(self, key, model=False):
"""Return the value of a single preference using a dotted path key"""
section, name = self.parse_lookup(key)
if model:
            return self.get_db_pref(section=section, name=name)
try:
return self.from_cache(section, name)
except CachedValueNotFound:
pass
db_pref = self.get_db_pref(section=section, name=name)
self.to_cache(db_pref)
return db_pref.value
def get_db_pref(self, section, name):
try:
pref = self.queryset.get(section=section, name=name)
except self.model.DoesNotExist:
pref_obj = self.pref_obj(section=section, name=name)
pref = self.create_db_pref(
section=section, name=name, value=pref_obj.default)
return pref
def update_db_pref(self, section, name, value):
try:
db_pref = self.queryset.get(section=section, name=name)
db_pref.value = value
db_pref.save()
except self.model.DoesNotExist:
return self.create_db_pref(section, name, value)
return db_pref
def create_db_pref(self, section, name, value):
if self.instance:
db_pref = self.model(
section=section, name=name, instance=self.instance)
else:
db_pref = self.model(section=section, name=name)
db_pref.value = value
db_pref.save()
return db_pref
def all(self):
"""Return a dictionnary containing all preferences by section
Loaded from cache or from db in case of cold cache
"""
a = {}
try:
for preference in self.registry.preferences():
a[preference.identifier()] = self.from_cache(
preference.section, preference.name)
except CachedValueNotFound:
return self.load_from_db()
return a
def load_from_db(self):
"""Return a dictionnary of preferences by section directly from DB"""
a = {}
db_prefs = {p.preference.identifier(): p for p in self.queryset}
for preference in self.registry.preferences():
try:
db_pref = db_prefs[preference.identifier()]
except KeyError:
db_pref = self.create_db_pref(
section=preference.section, name=preference.name, value=preference.default)
self.to_cache(db_pref)
a[preference.identifier()] = self.from_cache(
preference.section, preference.name)
return a
| willseward/django-dynamic-preferences | dynamic_preferences/managers.py | Python | bsd-3-clause | 4,981 |
import unittest
import helper.config
import mock
from vetoes import config
class FeatureFlagMixinTests(unittest.TestCase):
def test_that_flags_are_processed_during_initialize(self):
settings = helper.config.Data({
'features': {'on': 'on', 'off': 'false'}
})
consumer = config.FeatureFlagMixin(settings, mock.Mock())
self.assertTrue(consumer.feature_flags['on'])
self.assertFalse(consumer.feature_flags['off'])
def test_that_invalid_flags_arg_ignored(self):
settings = helper.config.Data({
'features': {'one': 'not valid', 'two': None}
})
consumer = config.FeatureFlagMixin(settings, mock.Mock())
self.assertEqual(consumer.feature_flags, {})
| dave-shawley/vetoes | tests/feature_flag_tests.py | Python | bsd-3-clause | 751 |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 7, transform = "Logit", sigma = 0.0, exog_count = 20, ar_order = 12); | antoinecarme/pyaf | tests/artificial/transf_Logit/trend_PolyTrend/cycle_7/ar_12/test_artificial_1024_Logit_PolyTrend_7_12_20.py | Python | bsd-3-clause | 262 |
"""
Module containing functions to differentiate functions using tensorflow.
"""
try:
import tensorflow as tf
from tensorflow.python.ops.gradients import _hessian_vector_product
except ImportError:
tf = None
from ._backend import Backend, assert_backend_available
class TensorflowBackend(Backend):
def __init__(self):
if tf is not None:
self._session = tf.Session()
def __str__(self):
return "tensorflow"
def is_available(self):
return tf is not None
@assert_backend_available
def is_compatible(self, objective, argument):
if isinstance(objective, tf.Tensor):
if (argument is None or not
isinstance(argument, tf.Variable) and not
all([isinstance(arg, tf.Variable)
for arg in argument])):
raise ValueError(
"Tensorflow backend requires an argument (or sequence of "
"arguments) with respect to which compilation is to be "
"carried out")
return True
return False
@assert_backend_available
def compile_function(self, objective, argument):
if not isinstance(argument, list):
def func(x):
feed_dict = {argument: x}
return self._session.run(objective, feed_dict)
else:
def func(x):
feed_dict = {i: d for i, d in zip(argument, x)}
return self._session.run(objective, feed_dict)
return func
@assert_backend_available
def compute_gradient(self, objective, argument):
"""
Compute the gradient of 'objective' and return as a function.
"""
tfgrad = tf.gradients(objective, argument)
if not isinstance(argument, list):
def grad(x):
feed_dict = {argument: x}
return self._session.run(tfgrad[0], feed_dict)
else:
def grad(x):
feed_dict = {i: d for i, d in zip(argument, x)}
return self._session.run(tfgrad, feed_dict)
return grad
@assert_backend_available
def compute_hessian(self, objective, argument):
if not isinstance(argument, list):
argA = tf.Variable(tf.zeros(tf.shape(argument)))
tfhess = _hessian_vector_product(objective, [argument], [argA])
def hess(x, a):
feed_dict = {argument: x, argA: a}
return self._session.run(tfhess[0], feed_dict)
else:
argA = [tf.Variable(tf.zeros(tf.shape(arg)))
for arg in argument]
tfhess = _hessian_vector_product(objective, argument, argA)
def hess(x, a):
feed_dict = {i: d for i, d in zip(argument+argA, x+a)}
return self._session.run(tfhess, feed_dict)
return hess
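# Illustrative usage sketch (added commentary with assumed variable names, not
# part of the original module): compile a TensorFlow cost into plain callables
# that accept numpy values, e.g.
#
#   x = tf.Variable(tf.zeros([3]))
#   cost = tf.reduce_sum(tf.square(x))
#   backend = TensorflowBackend()
#   f = backend.compile_function(cost, x)       # f(point) -> float
#   grad = backend.compute_gradient(cost, x)    # grad(point) -> array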
| j-towns/pymanopt | pymanopt/tools/autodiff/_tensorflow.py | Python | bsd-3-clause | 2,913 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
import photolib.models
import taggit.managers
class Migration(migrations.Migration):
dependencies = [
('taggit', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Photo',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
('uuid', models.CharField(max_length=32, blank=True)),
('filename', models.CharField(help_text='A descriptive file name', max_length=128)),
('alt', models.CharField(help_text='alt attribute text for accessibility', max_length=255, blank=True)),
('caption', models.TextField(help_text='Recommended text to be used as photo caption.', blank=True)),
('notes', models.TextField(help_text='Any other notable information about this photo.', blank=True)),
('credits', models.TextField(help_text='Credits and copyright/left.', blank=True)),
('source', models.CharField(choices=[('Flickr', 'Flickr'), ('iStockphoto', 'iStockphoto')], max_length=32, blank=True)),
('source_url', models.URLField(help_text='Important when citation requires link to source.', blank=True)),
('image', models.ImageField(upload_to=photolib.models.upload_path)),
('uploaded', models.DateTimeField(default=datetime.datetime.utcnow)),
('last_updated', models.DateTimeField(default=datetime.datetime.utcnow, blank=True)),
('photo_tags', taggit.managers.TaggableManager(verbose_name='Tags', to='taggit.Tag', help_text='A comma-separated list of tags.', through='taggit.TaggedItem', blank=True)),
],
options={
'ordering': ('-uploaded',),
},
bases=(models.Model,),
),
]
| sunlightlabs/horseradish | photolib/migrations/0001_initial.py | Python | bsd-3-clause | 1,986 |
"""empty message
Revision ID: 2357b6b3d76
Revises: fecca96b9d
Create Date: 2015-10-27 10:26:52.074526
"""
# revision identifiers, used by Alembic.
revision = '2357b6b3d76'
down_revision = 'fecca96b9d'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('citizen_complaints', sa.Column('service_type', sa.String(length=255), nullable=True))
op.add_column('citizen_complaints', sa.Column('source', sa.String(length=255), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('citizen_complaints', 'source')
op.drop_column('citizen_complaints', 'service_type')
### end Alembic commands ###
| codeforamerica/comport | migrations/versions/2357b6b3d76_.py | Python | bsd-3-clause | 794 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Person.education'
db.add_column('person_person', 'education',
self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True),
keep_default=False)
# Adding field 'Person.birthday'
db.add_column('person_person', 'birthday',
self.gf('django.db.models.fields.DateTimeField')(null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Person.education'
db.delete_column('person_person', 'education')
# Deleting field 'Person.birthday'
db.delete_column('person_person', 'birthday')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'person.person': {
'Meta': {'object_name': 'Person'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'birthday': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'education': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'fullname': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'default': "'unknown'", 'max_length': '7'}),
'nickname': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'occupation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'normal'", 'max_length': '20'}),
'subscribing': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
}
}
complete_apps = ['person'] | elin-moco/ffclub | ffclub/person/migrations/0005_auto__add_field_person_education__add_field_person_birthday.py | Python | bsd-3-clause | 5,302 |
"""
Given a positive integer n and you can do operations as follow:
If n is even, replace n with n/2.
If n is odd, you can replace n with either n + 1 or n - 1.
What is the minimum number of replacements needed for n to become 1?
Example 1:
Input:
8
Output:
3
Explanation:
8 -> 4 -> 2 -> 1
Example 2:
Input:
7
Output:
4
Explanation:
7 -> 8 -> 4 -> 2 -> 1
or
7 -> 6 -> 3 -> 2 -> 1
"""
class Solution(object):
def integerReplacement(self, n):
"""
:type n: int
:rtype: int
"""
count = 0
while n > 1:
count += 1
            if n % 2 == 0:
                # Even: halving is always the best move
                n /= 2
            elif (n+1) % 4 == 0 and (n-1) > 2:
                # Odd with n+1 divisible by 4: stepping up exposes more trailing
                # zero bits (skipped for n == 3, where 3 -> 2 -> 1 is shorter)
                n += 1
            else:
                n -= 1
return count
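# Illustrative check (added commentary, not part of the original file):
#   Solution().integerReplacement(8) -> 3   (8 -> 4 -> 2 -> 1)
#   Solution().integerReplacement(7) -> 4   (7 -> 8 -> 4 -> 2 -> 1)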
| shub0/algorithm-data-structure | python/integer_replacement.py | Python | bsd-3-clause | 763 |
# -*- coding: utf-8 -*-
#
# django-intercom documentation build configuration file, created by
# sphinx-quickstart on Mon Jan 23 13:50:08 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
#import settings
#from django.core.management import setup_environ
#setup_environ(settings)
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-intercom'
copyright = u'2012, Ken Cochrane'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
sys.path.insert(0, os.pardir)
m = __import__("intercom")
version = m.__version__
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-intercomdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-intercom.tex', u'django-intercom Documentation',
u'Ken Cochrane', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-intercom', u'django-intercom Documentation',
[u'Ken Cochrane'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-intercom', u'django-intercom Documentation',
u'Ken Cochrane', 'django-intercom', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'django-intercom'
epub_author = u'Ken Cochrane'
epub_publisher = u'Ken Cochrane'
epub_copyright = u'2012, Ken Cochrane'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
| Kagiso-Future-Media/django-intercom | docs/conf.py | Python | bsd-3-clause | 9,137 |
from log4mongo.handlers import MongoHandler
from pymongo.errors import PyMongoError
from StringIO import StringIO
import unittest
import logging
import sys
class TestMongoHandler(unittest.TestCase):
host_name = 'localhost'
database_name = 'log4mongo_test'
collection_name = 'logs_test'
def setUp(self):
self.handler = MongoHandler(host=self.host_name, database_name=self.database_name, collection=self.collection_name)
self.log = logging.getLogger('testLogger')
self.log.setLevel(logging.DEBUG)
self.log.addHandler(self.handler)
        self.old_stderr = sys.stderr
sys.stderr = StringIO()
def tearDown(self):
self.handler.connection.drop_database(self.database_name)
self.handler.close()
self.log.removeHandler(self.handler)
self.log = None
self.handler = None
sys.stderr.close()
sys.stderr = self.old_stderr
def test_connect(self):
handler = MongoHandler(host='localhost', database_name=self.database_name, collection=self.collection_name)
self.assertTrue(isinstance(handler, MongoHandler))
self.handler.connection.drop_database(self.database_name)
handler.close()
def test_connect_failed(self):
with self.assertRaises(PyMongoError):
MongoHandler(host='unknow_host', database_name=self.database_name, collection=self.collection_name)
def test_connect_failed_silent(self):
handler = MongoHandler(host='unknow_host', database_name=self.database_name, collection=self.collection_name, fail_silently=True)
self.assertTrue(isinstance(handler, MongoHandler))
self.handler.connection.drop_database(self.database_name)
handler.close()
def test_emit(self):
self.log.warning('test message')
document = self.handler.collection.find_one({'message': 'test message', 'level': 'WARNING'})
self.assertEqual(document['message'], 'test message')
self.assertEqual(document['level'], 'WARNING')
def test_emit_exception(self):
try:
raise Exception('exc1')
except:
self.log.exception('test message')
document = self.handler.collection.find_one({'message': 'test message', 'level': 'ERROR'})
self.assertEqual(document['message'], 'test message')
self.assertEqual(document['level'], 'ERROR')
self.assertEqual(document['exception']['message'], 'exc1')
def test_emit_fail(self):
self.handler.collection = ''
self.log.warn('test warning')
self.assertRegexpMatches(sys.stderr.getvalue(), r"AttributeError: 'str' object has no attribute 'save'")
    def test_emit_fail_silent(self):
        self.handler.fail_silently = True
        self.handler.collection = ''
        self.log.warn('test warning')
self.assertEqual(sys.stderr.getvalue(), '')
def test_contextual_info(self):
self.log.info('test message with contextual info', extra={'ip': '127.0.0.1', 'host': 'localhost'})
document = self.handler.collection.find_one({'message': 'test message with contextual info', 'level': 'INFO'})
self.assertEqual(document['message'], 'test message with contextual info')
self.assertEqual(document['level'], 'INFO')
self.assertEqual(document['ip'], '127.0.0.1')
self.assertEqual(document['host'], 'localhost')
def test_contextual_info_adapter(self):
adapter = logging.LoggerAdapter(self.log, {'ip': '127.0.0.1', 'host': 'localhost'})
adapter.info('test message with contextual info')
document = self.handler.collection.find_one({'message': 'test message with contextual info', 'level': 'INFO'})
self.assertEqual(document['message'], 'test message with contextual info')
self.assertEqual(document['level'], 'INFO')
self.assertEqual(document['ip'], '127.0.0.1')
self.assertEqual(document['host'], 'localhost')
class TestCappedMongoHandler(TestMongoHandler):
capped_max = 10
def setUp(self):
self.handler = MongoHandler(host=self.host_name, database_name=self.database_name,
collection=self.collection_name, capped=True, capped_max=self.capped_max)
self.log = logging.getLogger('testLogger')
self.log.setLevel(logging.DEBUG)
self.log.addHandler(self.handler)
        self.old_stderr = sys.stderr
sys.stderr = StringIO()
def test_capped(self):
options = self.handler.db.command('collstats', self.collection_name)
self.assertEqual(options['max'], 10)
self.assertEqual(options['capped'], 1)
def test_capped_max(self):
for i in range(self.capped_max * 2):
self.log.info('test capped info')
documents = self.handler.collection.find()
self.assertEqual(documents.count(), 10)
def test_override_no_capped_collection(self):
# Creating no capped handler
self.handler_no_capped = MongoHandler(host=self.host_name, database_name=self.database_name, collection=self.collection_name)
self.log.removeHandler(self.handler)
self.log.addHandler(self.handler_no_capped)
self.log.info('test info')
# Creating capped handler
self.handler_capped = MongoHandler(host=self.host_name, database_name=self.database_name,
collection=self.collection_name, capped=True, capped_max=self.capped_max)
self.log.addHandler(self.handler)
self.log.info('test info')
def test_override_capped_collection(self):
# Creating capped handler
self.handler_capped = MongoHandler(host=self.host_name, database_name=self.database_name,
collection=self.collection_name, capped=True, capped_max=self.capped_max)
self.log.removeHandler(self.handler)
self.log.addHandler(self.handler)
self.log.info('test info')
# Creating no capped handler
self.handler_no_capped = MongoHandler(host=self.host_name, database_name=self.database_name, collection=self.collection_name)
self.log.addHandler(self.handler_no_capped)
self.log.info('test info')
| EzyInsights/log4mongo-python | log4mongo/test/test_handlers.py | Python | bsd-3-clause | 6,198 |
import logging
import os
import os.path
import shutil
import sys
import tempfile
import unittest
import pytest
import fiona
from fiona.collection import supported_drivers
from fiona.errors import FionaValueError, DriverError, SchemaError, CRSError
from fiona.ogrext import calc_gdal_version_num, get_gdal_version_num
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
class ReadingTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
@pytest.mark.skipif(not os.path.exists('tests/data/coutwildrnp.gpkg'),
reason="Requires geopackage fixture")
def test_gpkg(self):
if get_gdal_version_num() < calc_gdal_version_num(1, 11, 0):
self.assertRaises(DriverError, fiona.open, 'tests/data/coutwildrnp.gpkg', 'r', driver="GPKG")
else:
with fiona.open('tests/data/coutwildrnp.gpkg', 'r', driver="GPKG") as c:
self.assertEquals(len(c), 48)
class WritingTest(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tempdir)
@pytest.mark.skipif(not os.path.exists('tests/data/coutwildrnp.gpkg'),
reason="Requires geopackage fixture")
def test_gpkg(self):
schema = {'geometry': 'Point',
'properties': [('title', 'str')]}
crs = {
'a': 6370997,
'lon_0': -100,
'y_0': 0,
'no_defs': True,
'proj': 'laea',
'x_0': 0,
'units': 'm',
'b': 6370997,
'lat_0': 45}
path = os.path.join(self.tempdir, 'foo.gpkg')
if get_gdal_version_num() < calc_gdal_version_num(1, 11, 0):
self.assertRaises(DriverError,
fiona.open,
path,
'w',
driver='GPKG',
schema=schema,
crs=crs)
else:
with fiona.open(path, 'w',
driver='GPKG',
schema=schema,
crs=crs) as c:
c.writerecords([{
'geometry': {'type': 'Point', 'coordinates': [0.0, 0.0]},
'properties': {'title': 'One'}}])
c.writerecords([{
'geometry': {'type': 'Point', 'coordinates': [2.0, 3.0]},
'properties': {'title': 'Two'}}])
with fiona.open(path) as c:
self.assertEquals(c.schema['geometry'], 'Point')
self.assertEquals(len(c), 2)
| perrygeo/Fiona | tests/test_geopackage.py | Python | bsd-3-clause | 2,690 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2011 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
"""Basic infrastructure for extracting localizable messages from source files.
This module defines an extensible system for collecting localizable message
strings from a variety of sources. A native extractor for Python source files
is builtin, extractors for other sources can be added using very simple plugins.
The main entry points into the extraction functionality are the functions
`extract_from_dir` and `extract_from_file`.
"""
import os
import sys
from tokenize import generate_tokens, COMMENT, NAME, OP, STRING
from babel.util import parse_encoding, pathmatch, relpath
from textwrap import dedent
__all__ = ['extract', 'extract_from_dir', 'extract_from_file']
__docformat__ = 'restructuredtext en'
GROUP_NAME = 'babel.extractors'
DEFAULT_KEYWORDS = {
'_': None,
'gettext': None,
'ngettext': (1, 2),
'ugettext': None,
'ungettext': (1, 2),
'dgettext': (2,),
'dngettext': (2, 3),
'N_': None
}
DEFAULT_MAPPING = [('**.py', 'python')]
empty_msgid_warning = (
'%s: warning: Empty msgid. It is reserved by GNU gettext: gettext("") '
'returns the header entry with meta information, not the empty string.')
def _strip_comment_tags(comments, tags):
"""Helper function for `extract` that strips comment tags from strings
in a list of comment lines. This functions operates in-place.
"""
def _strip(line):
for tag in tags:
if line.startswith(tag):
return line[len(tag):].strip()
return line
comments[:] = map(_strip, comments)
def extract_from_dir(dirname=os.getcwd(), method_map=DEFAULT_MAPPING,
options_map=None, keywords=DEFAULT_KEYWORDS,
comment_tags=(), callback=None, strip_comment_tags=False):
"""Extract messages from any source files found in the given directory.
This function generates tuples of the form:
``(filename, lineno, message, comments)``
Which extraction method is used per file is determined by the `method_map`
parameter, which maps extended glob patterns to extraction method names.
For example, the following is the default mapping:
>>> method_map = [
... ('**.py', 'python')
... ]
This basically says that files with the filename extension ".py" at any
level inside the directory should be processed by the "python" extraction
method. Files that don't match any of the mapping patterns are ignored. See
the documentation of the `pathmatch` function for details on the pattern
syntax.
The following extended mapping would also use the "genshi" extraction
method on any file in "templates" subdirectory:
>>> method_map = [
... ('**/templates/**.*', 'genshi'),
... ('**.py', 'python')
... ]
The dictionary provided by the optional `options_map` parameter augments
these mappings. It uses extended glob patterns as keys, and the values are
dictionaries mapping options names to option values (both strings).
The glob patterns of the `options_map` do not necessarily need to be the
same as those used in the method mapping. For example, while all files in
the ``templates`` folders in an application may be Genshi applications, the
options for those files may differ based on extension:
>>> options_map = {
... '**/templates/**.txt': {
... 'template_class': 'genshi.template:TextTemplate',
... 'encoding': 'latin-1'
... },
... '**/templates/**.html': {
... 'include_attrs': ''
... }
... }
:param dirname: the path to the directory to extract messages from
    :param method_map: a list of ``(pattern, method)`` tuples that maps
                       extended glob patterns to extraction method names
:param options_map: a dictionary of additional options (optional)
:param keywords: a dictionary mapping keywords (i.e. names of functions
that should be recognized as translation functions) to
tuples that specify which of their arguments contain
localizable strings
:param comment_tags: a list of tags of translator comments to search for
and include in the results
    :param callback: a function that is called for every file that messages are
                     extracted from, just before the extraction itself is
                     performed; the function is passed the filename, the name
                     of the extraction method and the options dictionary as
positional arguments, in that order
:param strip_comment_tags: a flag that if set to `True` causes all comment
tags to be removed from the collected comments.
    :return: an iterator over ``(filename, lineno, message, comments)`` tuples
:rtype: ``iterator``
:see: `pathmatch`
"""
if options_map is None:
options_map = {}
absname = os.path.abspath(dirname)
for root, dirnames, filenames in os.walk(absname):
for subdir in dirnames:
if subdir.startswith('.') or subdir.startswith('_'):
dirnames.remove(subdir)
dirnames.sort()
filenames.sort()
for filename in filenames:
filename = relpath(
os.path.join(root, filename).replace(os.sep, '/'),
dirname
)
for pattern, method in method_map:
if pathmatch(pattern, filename):
filepath = os.path.join(absname, filename)
options = {}
for opattern, odict in options_map.items():
if pathmatch(opattern, filename):
options = odict
if callback:
callback(filename, method, options)
for lineno, message, comments in \
extract_from_file(method, filepath,
keywords=keywords,
comment_tags=comment_tags,
options=options,
strip_comment_tags=
strip_comment_tags):
yield filename, lineno, message, comments
break
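# Illustrative usage sketch (added commentary, not part of the original module):
# walk a source tree with the default Python-only mapping and print each match:
#
#   for filename, lineno, message, comments in extract_from_dir('myproject'):
#       print filename, lineno, message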
def extract_from_file(method, filename, keywords=DEFAULT_KEYWORDS,
comment_tags=(), options=None, strip_comment_tags=False):
"""Extract messages from a specific file.
This function returns a list of tuples of the form:
    ``(lineno, message, comments)``
:param filename: the path to the file to extract messages from
:param method: a string specifying the extraction method (.e.g. "python")
:param keywords: a dictionary mapping keywords (i.e. names of functions
that should be recognized as translation functions) to
tuples that specify which of their arguments contain
localizable strings
:param comment_tags: a list of translator tags to search for and include
in the results
:param strip_comment_tags: a flag that if set to `True` causes all comment
tags to be removed from the collected comments.
:param options: a dictionary of additional options (optional)
:return: the list of extracted messages
:rtype: `list`
"""
fileobj = open(filename, 'U')
try:
return list(extract(method, fileobj, keywords, comment_tags, options,
strip_comment_tags))
finally:
fileobj.close()
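# Illustrative call (added commentary, not part of the original module):
#   messages = extract_from_file('python', 'myproject/module.py')
#   # -> [(lineno, u'Translatable string', [translator comments]), ...]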
def extract(method, fileobj, keywords=DEFAULT_KEYWORDS, comment_tags=(),
options=None, strip_comment_tags=False):
"""Extract messages from the given file-like object using the specified
extraction method.
This function returns a list of tuples of the form:
``(lineno, message, comments)``
The implementation dispatches the actual extraction to plugins, based on the
value of the ``method`` parameter.
>>> source = '''# foo module
... def run(argv):
... print _('Hello, world!')
... '''
>>> from StringIO import StringIO
>>> for message in extract('python', StringIO(source)):
... print message
(3, u'Hello, world!', [])
:param method: a string specifying the extraction method (.e.g. "python");
if this is a simple name, the extraction function will be
looked up by entry point; if it is an explicit reference
to a function (of the form ``package.module:funcname`` or
``package.module.funcname``), the corresponding function
will be imported and used
:param fileobj: the file-like object the messages should be extracted from
:param keywords: a dictionary mapping keywords (i.e. names of functions
that should be recognized as translation functions) to
tuples that specify which of their arguments contain
localizable strings
:param comment_tags: a list of translator tags to search for and include
in the results
:param options: a dictionary of additional options (optional)
:param strip_comment_tags: a flag that if set to `True` causes all comment
tags to be removed from the collected comments.
:return: the list of extracted messages
:rtype: `list`
:raise ValueError: if the extraction method is not registered
"""
func = None
if ':' in method or '.' in method:
if ':' not in method:
lastdot = method.rfind('.')
module, attrname = method[:lastdot], method[lastdot + 1:]
else:
module, attrname = method.split(':', 1)
func = getattr(__import__(module, {}, {}, [attrname]), attrname)
else:
try:
from pkg_resources import working_set
except ImportError:
# pkg_resources is not available, so we resort to looking up the
# builtin extractors directly
builtin = {'ignore': extract_nothing, 'python': extract_python}
func = builtin.get(method)
else:
for entry_point in working_set.iter_entry_points(GROUP_NAME,
method):
func = entry_point.load(require=True)
break
if func is None:
raise ValueError('Unknown extraction method %r' % method)
results = func(fileobj, keywords.keys(), comment_tags,
options=options or {})
for lineno, funcname, messages, comments in results:
if funcname:
spec = keywords[funcname] or (1,)
else:
spec = (1,)
if not isinstance(messages, (list, tuple)):
messages = [messages]
if not messages:
continue
# Validate the messages against the keyword's specification
msgs = []
invalid = False
# last_index is 1 based like the keyword spec
last_index = len(messages)
for index in spec:
if last_index < index:
# Not enough arguments
invalid = True
break
message = messages[index - 1]
if message is None:
invalid = True
break
msgs.append(message)
if invalid:
continue
first_msg_index = spec[0] - 1
if not messages[first_msg_index]:
# An empty string msgid isn't valid, emit a warning
where = '%s:%i' % (hasattr(fileobj, 'name') and \
fileobj.name or '(unknown)', lineno)
print >> sys.stderr, empty_msgid_warning % where
continue
messages = tuple(msgs)
if len(messages) == 1:
messages = messages[0]
if strip_comment_tags:
_strip_comment_tags(comments, comment_tags)
yield lineno, messages, comments
def extract_nothing(fileobj, keywords, comment_tags, options):
"""Pseudo extractor that does not actually extract anything, but simply
returns an empty list.
"""
return []
def extract_python(fileobj, keywords, comment_tags, options):
"""Extract messages from Python source code.
:param fileobj: the seekable, file-like object the messages should be
extracted from
:param keywords: a list of keywords (i.e. function names) that should be
recognized as translation functions
:param comment_tags: a list of translator tags to search for and include
in the results
:param options: a dictionary of additional options (optional)
:return: an iterator over ``(lineno, funcname, message, comments)`` tuples
:rtype: ``iterator``
"""
funcname = lineno = message_lineno = None
call_stack = -1
buf = []
messages = []
translator_comments = []
in_def = in_translator_comments = False
comment_tag = None
encoding = parse_encoding(fileobj) or options.get('encoding', 'iso-8859-1')
tokens = generate_tokens(fileobj.readline)
for tok, value, (lineno, _), _, _ in tokens:
if call_stack == -1 and tok == NAME and value in ('def', 'class'):
in_def = True
elif tok == OP and value == '(':
if in_def:
# Avoid false positives for declarations such as:
# def gettext(arg='message'):
in_def = False
continue
if funcname:
message_lineno = lineno
call_stack += 1
elif in_def and tok == OP and value == ':':
# End of a class definition without parens
in_def = False
continue
elif call_stack == -1 and tok == COMMENT:
# Strip the comment token from the line
value = value.decode(encoding)[1:].strip()
if in_translator_comments and \
translator_comments[-1][0] == lineno - 1:
# We're already inside a translator comment, continue appending
translator_comments.append((lineno, value))
continue
# If execution reaches this point, let's see if comment line
# starts with one of the comment tags
for comment_tag in comment_tags:
if value.startswith(comment_tag):
in_translator_comments = True
translator_comments.append((lineno, value))
break
elif funcname and call_stack == 0:
if tok == OP and value == ')':
if buf:
messages.append(''.join(buf))
del buf[:]
else:
messages.append(None)
if len(messages) > 1:
messages = tuple(messages)
else:
messages = messages[0]
                # Comments don't apply unless they immediately precede the
                # message
if translator_comments and \
translator_comments[-1][0] < message_lineno - 1:
translator_comments = []
yield (message_lineno, funcname, messages,
[comment[1] for comment in translator_comments])
funcname = lineno = message_lineno = None
call_stack = -1
messages = []
translator_comments = []
in_translator_comments = False
elif tok == STRING:
# Unwrap quotes in a safe manner, maintaining the string's
# encoding
# https://sourceforge.net/tracker/?func=detail&atid=355470&
# aid=617979&group_id=5470
value = eval('# coding=%s\n%s' % (encoding, value),
{'__builtins__':{}}, {})
if isinstance(value, str):
value = value.decode(encoding)
buf.append(value)
elif tok == OP and value == ',':
if buf:
messages.append(''.join(buf))
del buf[:]
else:
messages.append(None)
if translator_comments:
                    # We have translator comments, and since we're on a
                    # comma (,) the user is allowed to break onto a new line.
                    # Bump the last comment's lineno so that it still counts
                    # as immediately preceding the message.
old_lineno, old_comment = translator_comments.pop()
translator_comments.append((old_lineno+1, old_comment))
elif call_stack > 0 and tok == OP and value == ')':
call_stack -= 1
elif funcname and call_stack == -1:
funcname = None
elif tok == NAME and value in keywords:
funcname = value
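# --- Illustrative addition (not part of the original module) ---
# A minimal sketch of calling extract_python() directly; the source snippet,
# the keyword list and the expected output noted below are assumptions made
# for illustration only.
def _example_extract_python():
    from StringIO import StringIO
    source = "_('hello')\nngettext('one item', 'many items', n)\n"
    results = list(extract_python(StringIO(source), ['_', 'ngettext'], [], {}))
    # Expected to look roughly like:
    #   [(1, '_', u'hello', []),
    #    (2, 'ngettext', (u'one item', u'many items', None), [])]
    return results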
def extract_javascript(fileobj, keywords, comment_tags, options):
"""Extract messages from JavaScript source code.
:param fileobj: the seekable, file-like object the messages should be
extracted from
:param keywords: a list of keywords (i.e. function names) that should be
recognized as translation functions
:param comment_tags: a list of translator tags to search for and include
in the results
:param options: a dictionary of additional options (optional)
:return: an iterator over ``(lineno, funcname, message, comments)`` tuples
:rtype: ``iterator``
"""
from babel.messages.jslexer import tokenize, unquote_string
funcname = message_lineno = None
messages = []
last_argument = None
translator_comments = []
concatenate_next = False
encoding = options.get('encoding', 'utf-8')
last_token = None
call_stack = -1
for token in tokenize(fileobj.read().decode(encoding)):
if token.type == 'operator' and token.value == '(':
if funcname:
message_lineno = token.lineno
call_stack += 1
elif call_stack == -1 and token.type == 'linecomment':
value = token.value[2:].strip()
if translator_comments and \
translator_comments[-1][0] == token.lineno - 1:
translator_comments.append((token.lineno, value))
continue
for comment_tag in comment_tags:
if value.startswith(comment_tag):
translator_comments.append((token.lineno, value.strip()))
break
elif token.type == 'multilinecomment':
            # only one multi-line comment may precede a translation
translator_comments = []
value = token.value[2:-2].strip()
for comment_tag in comment_tags:
if value.startswith(comment_tag):
lines = value.splitlines()
if lines:
lines[0] = lines[0].strip()
lines[1:] = dedent('\n'.join(lines[1:])).splitlines()
for offset, line in enumerate(lines):
translator_comments.append((token.lineno + offset,
line))
break
elif funcname and call_stack == 0:
if token.type == 'operator' and token.value == ')':
if last_argument is not None:
messages.append(last_argument)
if len(messages) > 1:
messages = tuple(messages)
elif messages:
messages = messages[0]
else:
messages = None
# Comments don't apply unless they immediately precede the
# message
if translator_comments and \
translator_comments[-1][0] < message_lineno - 1:
translator_comments = []
if messages is not None:
yield (message_lineno, funcname, messages,
[comment[1] for comment in translator_comments])
funcname = message_lineno = last_argument = None
concatenate_next = False
translator_comments = []
messages = []
call_stack = -1
elif token.type == 'string':
new_value = unquote_string(token.value)
if concatenate_next:
last_argument = (last_argument or '') + new_value
concatenate_next = False
else:
last_argument = new_value
elif token.type == 'operator':
if token.value == ',':
if last_argument is not None:
messages.append(last_argument)
last_argument = None
else:
messages.append(None)
concatenate_next = False
elif token.value == '+':
concatenate_next = True
elif call_stack > 0 and token.type == 'operator' \
and token.value == ')':
call_stack -= 1
elif funcname and call_stack == -1:
funcname = None
elif call_stack == -1 and token.type == 'name' and \
token.value in keywords and \
(last_token is None or last_token.type != 'name' or
last_token.value != 'function'):
funcname = token.value
last_token = token
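# --- Illustrative addition (not part of the original module) ---
# A corresponding sketch for extract_javascript(); the JavaScript snippet and
# the "NOTE:" translator tag are made up for illustration only.
def _example_extract_javascript():
    from StringIO import StringIO
    source = "// NOTE: shown on the landing page\ngettext('Hello, world!');\n"
    results = list(extract_javascript(StringIO(source), ['gettext'], ['NOTE:'], {}))
    # Expected to look roughly like:
    #   [(2, 'gettext', u'Hello, world!', [u'NOTE: shown on the landing page'])]
    return results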
| mbr/Babel-CLDR | babel/messages/extract.py | Python | bsd-3-clause | 22,451 |
from django.utils import translation
from nose.tools import eq_
from olympia import amo
from olympia.amo.tests import TestCase, ESTestCase
from olympia.addons.models import Addon
from olympia.reviews import tasks
from olympia.reviews.models import (
check_spam, GroupedRating, Review, ReviewFlag, Spam)
from olympia.users.models import UserProfile
class TestReviewModel(TestCase):
fixtures = ['reviews/test_models']
def test_translations(self):
translation.activate('en-US')
# There's en-US and de translations. We should get en-US.
r1 = Review.objects.get(id=1)
self.trans_eq(r1.title, 'r1 title en', 'en-US')
# There's only a de translation, so we get that.
r2 = Review.objects.get(id=2)
self.trans_eq(r2.title, 'r2 title de', 'de')
translation.activate('de')
# en and de exist, we get de.
r1 = Review.objects.get(id=1)
self.trans_eq(r1.title, 'r1 title de', 'de')
# There's only a de translation, so we get that.
r2 = Review.objects.get(id=2)
self.trans_eq(r2.title, 'r2 title de', 'de')
def test_soft_delete(self):
eq_(Review.objects.count(), 2)
eq_(Review.unfiltered.count(), 2)
Review.objects.get(id=1).delete()
eq_(Review.objects.count(), 1)
eq_(Review.unfiltered.count(), 2)
Review.objects.filter(id=2).delete()
eq_(Review.objects.count(), 0)
eq_(Review.unfiltered.count(), 2)
def test_filter_for_many_to_many(self):
# Check https://bugzilla.mozilla.org/show_bug.cgi?id=1142035.
review = Review.objects.get(id=1)
addon = review.addon
assert review in addon._reviews.all()
# Delete the review: it shouldn't be listed anymore.
review.update(deleted=True)
addon = Addon.objects.get(pk=addon.pk)
assert review not in addon._reviews.all()
def test_no_filter_for_relations(self):
# Check https://bugzilla.mozilla.org/show_bug.cgi?id=1142035.
review = Review.objects.get(id=1)
flag = ReviewFlag.objects.create(review=review,
flag='review_flag_reason_spam')
assert flag.review == review
# Delete the review: reviewflag.review should still work.
review.update(deleted=True)
flag = ReviewFlag.objects.get(pk=flag.pk)
assert flag.review == review
class TestGroupedRating(TestCase):
fixtures = ['reviews/dev-reply']
grouped_ratings = [(1, 0), (2, 0), (3, 0), (4, 1), (5, 0)]
def test_get_none(self):
eq_(GroupedRating.get(3, update_none=False), None)
def test_set(self):
eq_(GroupedRating.get(1865, update_none=False), None)
GroupedRating.set(1865)
eq_(GroupedRating.get(1865, update_none=False), self.grouped_ratings)
def test_cron(self):
eq_(GroupedRating.get(1865, update_none=False), None)
tasks.addon_grouped_rating(1865)
eq_(GroupedRating.get(1865, update_none=False), self.grouped_ratings)
def test_update_none(self):
eq_(GroupedRating.get(1865, update_none=False), None)
eq_(GroupedRating.get(1865, update_none=True), self.grouped_ratings)
class TestSpamTest(TestCase):
fixtures = ['reviews/test_models']
def test_create_not_there(self):
Review.objects.all().delete()
eq_(Review.objects.count(), 0)
check_spam(1)
def test_add(self):
assert Spam().add(Review.objects.all()[0], 'numbers')
class TestRefreshTest(ESTestCase):
fixtures = ['base/users']
def setUp(self):
super(TestRefreshTest, self).setUp()
self.addon = Addon.objects.create(type=amo.ADDON_EXTENSION)
self.user = UserProfile.objects.all()[0]
self.refresh()
eq_(self.get_bayesian_rating(), 0.0)
def get_bayesian_rating(self):
q = Addon.search().filter(id=self.addon.id)
return list(q.values_dict('bayesian_rating'))[0]['bayesian_rating'][0]
def test_created(self):
eq_(self.get_bayesian_rating(), 0.0)
Review.objects.create(addon=self.addon, user=self.user, rating=4)
self.refresh()
eq_(self.get_bayesian_rating(), 4.0)
def test_edited(self):
self.test_created()
r = self.addon.reviews.all()[0]
r.rating = 1
r.save()
self.refresh()
eq_(self.get_bayesian_rating(), 2.5)
def test_deleted(self):
self.test_created()
r = self.addon.reviews.all()[0]
r.delete()
self.refresh()
eq_(self.get_bayesian_rating(), 0.0)
| jpetto/olympia | src/olympia/reviews/tests/test_models.py | Python | bsd-3-clause | 4,623 |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from parlai.core.agents import Agent
from torch.autograd import Variable
from torch import optim
import torch.nn as nn
import torch
import copy
import random
class Seq2seqAgent(Agent):
    """Simple agent which uses a GRU to process incoming text observations."""
@staticmethod
def add_cmdline_args(argparser):
argparser.add_arg('-hs', '--hiddensize', type=int, default=64,
help='size of the hidden layers and embeddings')
argparser.add_arg('-nl', '--numlayers', type=int, default=2,
help='number of hidden layers')
argparser.add_arg('-lr', '--learningrate', type=float, default=0.5,
help='learning rate')
argparser.add_arg('-dr', '--dropout', type=float, default=0.1,
help='dropout rate')
argparser.add_arg('--no-cuda', action='store_true', default=False,
help='disable GPUs even if available')
argparser.add_arg('--gpu', type=int, default=-1,
help='which GPU device to use')
def __init__(self, opt, shared=None):
super().__init__(opt, shared)
if shared and 'dictionary' in shared:
# only set up everything for the main instance
self.dict = shared['dictionary']
self.EOS = self.dict.eos_token
self.EOS_TENSOR = torch.LongTensor(self.dict.parse(self.EOS))
self.id = 'Seq2Seq'
hsz = opt['hiddensize']
self.hidden_size = hsz
self.num_layers = opt['numlayers']
self.learning_rate = opt['learningrate']
self.use_cuda = opt.get('cuda', False)
self.longest_label = 2 # TODO: 1
            self.babi_mode = 'babi' in opt['task']
            if self.babi_mode:
                self.dirs = set(['n', 's', 'e', 'w'])
self.criterion = nn.NLLLoss()
self.lt = nn.Embedding(len(self.dict), hsz, padding_idx=0,
scale_grad_by_freq=True)
self.encoder = nn.GRU(hsz, hsz, opt['numlayers'])
self.decoder = nn.GRU(hsz, hsz, opt['numlayers'])
self.d2o = nn.Linear(hsz, len(self.dict))
self.dropout = nn.Dropout(opt['dropout'])
self.softmax = nn.LogSoftmax()
lr = opt['learningrate']
self.optims = {
'lt': optim.SGD(self.lt.parameters(), lr=lr),
'encoder': optim.SGD(self.encoder.parameters(), lr=lr),
'decoder': optim.SGD(self.decoder.parameters(), lr=lr),
'd2o': optim.SGD(self.d2o.parameters(), lr=lr),
}
if self.use_cuda:
self.cuda()
self.episode_done = True
def parse(self, text):
return torch.LongTensor(self.dict.txt2vec(text))
def v2t(self, vec):
return self.dict.vec2txt(vec)
def cuda(self):
self.criterion.cuda()
self.lt.cuda()
self.encoder.cuda()
self.decoder.cuda()
self.d2o.cuda()
self.dropout.cuda()
self.softmax.cuda()
def hidden_to_idx(self, hidden, drop=False):
if hidden.size(0) > 1:
raise RuntimeError('bad dimensions of tensor:', hidden)
hidden = hidden.squeeze(0)
scores = self.d2o(hidden)
if drop:
scores = self.dropout(scores)
scores = self.softmax(scores)
_max_score, idx = scores.max(1)
return idx, scores
def zero_grad(self):
for optimizer in self.optims.values():
optimizer.zero_grad()
def update_params(self):
for optimizer in self.optims.values():
optimizer.step()
def init_zeros(self, bsz=1):
t = torch.zeros(self.num_layers, bsz, self.hidden_size)
if self.use_cuda:
t = t.cuda(async=True)
return Variable(t)
def init_rand(self, bsz=1):
t = torch.FloatTensor(self.num_layers, bsz, self.hidden_size)
t.uniform_(0.05)
if self.use_cuda:
t = t.cuda(async=True)
return Variable(t)
def observe(self, observation):
observation = copy.deepcopy(observation)
if not self.episode_done:
# if the last example wasn't the end of an episode, then we need to
# recall what was said in that example
prev_dialogue = self.observation['text']
observation['text'] = prev_dialogue + '\n' + observation['text']
self.observation = observation
self.episode_done = observation['episode_done']
return observation
def update(self, xs, ys):
batchsize = len(xs)
# first encode context
xes = self.lt(xs).t()
h0 = self.init_zeros(batchsize)
_output, hn = self.encoder(xes, h0)
# start with EOS tensor for all
x = self.EOS_TENSOR
if self.use_cuda:
x = x.cuda(async=True)
x = Variable(x)
xe = self.lt(x).unsqueeze(1)
xes = xe.expand(xe.size(0), batchsize, xe.size(2))
output_lines = [[] for _ in range(batchsize)]
self.zero_grad()
# update model
loss = 0
self.longest_label = max(self.longest_label, ys.size(1))
for i in range(ys.size(1)):
output, hn = self.decoder(xes, hn)
preds, scores = self.hidden_to_idx(output, drop=True)
y = ys.select(1, i)
loss += self.criterion(scores, y)
# use the true token as the next input
xes = self.lt(y).unsqueeze(0)
# hn = self.dropout(hn)
for j in range(preds.size(0)):
token = self.v2t([preds.data[j][0]])
output_lines[j].append(token)
loss.backward()
self.update_params()
if random.random() < 0.1:
true = self.v2t(ys.data[0])
print('loss:', round(loss.data[0], 2), ' '.join(output_lines[0]), '(true: {})'.format(true))
return output_lines
def predict(self, xs):
batchsize = len(xs)
# first encode context
xes = self.lt(xs).t()
h0 = self.init_zeros(batchsize)
_output, hn = self.encoder(xes, h0)
# start with EOS tensor for all
x = self.EOS_TENSOR
if self.use_cuda:
x = x.cuda(async=True)
x = Variable(x)
xe = self.lt(x).unsqueeze(1)
xes = xe.expand(xe.size(0), batchsize, xe.size(2))
done = [False for _ in range(batchsize)]
total_done = 0
max_len = 0
output_lines = [[] for _ in range(batchsize)]
while(total_done < batchsize) and max_len < self.longest_label:
output, hn = self.decoder(xes, hn)
preds, scores = self.hidden_to_idx(output, drop=False)
xes = self.lt(preds.t())
max_len += 1
for i in range(preds.size(0)):
if not done[i]:
token = self.v2t(preds.data[i])
if token == self.EOS:
done[i] = True
total_done += 1
else:
output_lines[i].append(token)
if self.babi_mode and token not in self.dirs:
# for babi, only output one token except when
# giving directions
done[i] = True
total_done += 1
if random.random() < 0.1:
print('prediction:', ' '.join(output_lines[0]))
return output_lines
def batchify(self, obs):
exs = [ex for ex in obs if 'text' in ex]
valid_inds = [i for i, ex in enumerate(obs) if 'text' in ex]
batchsize = len(exs)
parsed = [self.parse(ex['text']) for ex in exs]
max_x_len = max([len(x) for x in parsed])
xs = torch.LongTensor(batchsize, max_x_len).fill_(0)
for i, x in enumerate(parsed):
offset = max_x_len - len(x)
for j, idx in enumerate(x):
xs[i][j + offset] = idx
if self.use_cuda:
xs = xs.cuda(async=True)
xs = Variable(xs)
ys = None
if 'labels' in exs[0]:
labels = [random.choice(ex['labels']) + ' ' + self.EOS for ex in exs]
parsed = [self.parse(y) for y in labels]
max_y_len = max(len(y) for y in parsed)
ys = torch.LongTensor(batchsize, max_y_len).fill_(0)
for i, y in enumerate(parsed):
for j, idx in enumerate(y):
ys[i][j] = idx
if self.use_cuda:
ys = ys.cuda(async=True)
ys = Variable(ys)
return xs, ys, valid_inds
def batch_act(self, observations):
batchsize = len(observations)
batch_reply = [{'id': self.getID()} for _ in range(batchsize)]
xs, ys, valid_inds = self.batchify(observations)
if len(xs) == 0:
return batch_reply
# Either train or predict
if ys is not None:
predictions = self.update(xs, ys)
else:
predictions = self.predict(xs)
for i in range(len(predictions)):
batch_reply[valid_inds[i]]['text'] = ' '.join(
c for c in predictions[i] if c != self.EOS)
return batch_reply
def act(self):
return self.batch_act([self.observation])[0]
def save(self, path):
model = {}
model['lt'] = self.lt.state_dict()
model['encoder'] = self.encoder.state_dict()
model['decoder'] = self.decoder.state_dict()
model['d2o'] = self.d2o.state_dict()
model['longest_label'] = self.longest_label
with open(path, 'wb') as write:
torch.save(model, write)
def load(self, path):
with open(path, 'rb') as read:
model = torch.load(read)
self.lt.load_state_dict(model['lt'])
self.encoder.load_state_dict(model['encoder'])
self.decoder.load_state_dict(model['decoder'])
self.d2o.load_state_dict(model['d2o'])
self.longest_label = model['longest_label']
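# --- Illustrative addition (not part of the original file) ---
# A small, self-contained sketch of the left-padding scheme used in
# Seq2seqAgent.batchify() above: shorter token sequences are padded on the
# left with index 0 so that the final tokens of every example line up.
# The token ids below are arbitrary.
def _example_left_padding():
    import torch
    parsed = [[4, 5, 6], [7, 8]]
    max_x_len = max(len(x) for x in parsed)
    xs = torch.LongTensor(len(parsed), max_x_len).fill_(0)
    for i, x in enumerate(parsed):
        offset = max_x_len - len(x)
        for j, idx in enumerate(x):
            xs[i][j + offset] = idx
    return xs  # rows: [4, 5, 6] and [0, 7, 8]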
| calee88/ParlAI | parlai/agents/rnn_baselines/seq2seq.py | Python | bsd-3-clause | 10,385 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This software is under a BSD license. See LICENSE.txt for details.
from datatank_py.DTStructuredGrid2D import DTStructuredGrid2D, _squeeze2d
import numpy as np
class DTStructuredMesh2D(object):
"""2D structured mesh object.
This class corresponds to DataTank's DTStructuredMesh2D.
"""
dt_type = ("2D Structured Mesh",)
"""Type strings allowed by DataTank"""
def __init__(self, values, grid=None):
"""
:param values: 2D array of values
:param grid: DTStructuredGrid2D object (defaults to unit grid) or the name of a previously saved grid
Note that the values array must be ordered as (y, x) for compatibility
with the grid and DataTank.
"""
super(DTStructuredMesh2D, self).__init__()
values = _squeeze2d(values)
shape = np.shape(values)
assert len(shape) == 2, "values array must be 2D"
        if not isinstance(grid, basestring):
            if grid is None:
grid = DTStructuredGrid2D(range(shape[1]), range(shape[0]))
assert shape == grid.shape(), "grid shape %s != value shape %s" % (grid.shape(), shape)
self._grid = grid
self._values = values
def grid(self):
""":returns: a :class:`datatank_py.DTStructuredGrid2D.DTStructuredGrid2D` instance"""
return self._grid
def values(self):
""":returns: a 2D numpy array of values at each grid node"""
return self._values
def __dt_type__(self):
return "2D Structured Mesh"
def __str__(self):
return self.__dt_type__() + ":\n " + str(self._grid) + "\n" + " Values:\n " + str(self._values)
def __dt_write__(self, datafile, name):
datafile.write_anonymous(self._grid, name)
datafile.write_anonymous(self._values, name + "_V")
def write_with_shared_grid(self, datafile, name, grid_name, time, time_index):
"""Allows saving a single grid and sharing it amongst different time
values of a variable.
:param datafile: a :class:`datatank_py.DTDataFile.DTDataFile` open for writing
:param name: the mesh variable's name
:param grid_name: the grid name to be shared (will not be visible in DataTank)
:param time: the time value for this step (DataTank's ``t`` variable)
:param time_index: the corresponding integer index of this time step
This is an advanced technique, but it can give a significant space savings in
a data file. It's not widely implemented, since it's not clear yet if this
is the best API.
"""
if grid_name not in datafile:
datafile.write_anonymous(self._grid, grid_name)
datafile.write_anonymous(self.__dt_type__(), "Seq_" + name)
varname = "%s_%d" % (name, time_index)
datafile.write_anonymous(grid_name, varname)
datafile.write_anonymous(self._values, varname + "_V")
datafile.write_anonymous(np.array((time,)), varname + "_time")
@classmethod
def from_data_file(self, datafile, name):
grid = DTStructuredGrid2D.from_data_file(datafile, name)
values = datafile[name + "_V"]
return DTStructuredMesh2D(values, grid=grid)
if __name__ == '__main__':
from DTDataFile import DTDataFile
with DTDataFile("test/structured_mesh2D.dtbin", truncate=True) as df:
xvals = np.exp(np.array(range(18), dtype=np.float) / 5)
yvals = np.exp(np.array(range(20), dtype=np.float) / 5)
grid = DTStructuredGrid2D(xvals, yvals)
values = np.zeros(len(xvals) * len(yvals))
for i in xrange(len(values)):
values[i] = i
# DataTank indexes differently from numpy; the grid is z,y,x ordered
values = values.reshape(grid.shape())
mesh = DTStructuredMesh2D(values, grid=grid)
df["2D mesh"] = mesh
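        # Illustrative addition (not in the original demo): save the same mesh
        # again via the shared-grid path documented in write_with_shared_grid();
        # the variable name, grid name, time value and index here are arbitrary.
        mesh.write_with_shared_grid(df, "2D shared mesh", "example_shared_grid", 0.0, 0)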
| amaxwell/datatank_py | datatank_py/DTStructuredMesh2D.py | Python | bsd-3-clause | 4,176 |
#: one
x = 1
print(x)
import time
time.sleep(10)
#: two
#x = 2
print(x)
| lemon24/intercessor | examples/book.py | Python | bsd-3-clause | 78 |
import errno
import os
import shutil
def mkdir(path, mode=0o777, exist_ok=False):
try:
os.mkdir(path, mode)
except OSError as e:
if not exist_ok or e.errno != errno.EEXIST or not os.path.isdir(path):
raise
def makedirs(path, mode=0o777, exist_ok=False):
try:
os.makedirs(path, mode)
except OSError as e:
if not exist_ok or e.errno != errno.EEXIST or not os.path.isdir(path):
raise
def parent_dir(path):
return os.path.normpath(os.path.join(path, os.pardir))
def existing_parent(path):
while not os.path.exists(path):
path = parent_dir(path)
return path
def remove(path, nonexist_ok=False):
try:
os.remove(path)
except OSError as e:
if not nonexist_ok or e.errno != errno.ENOENT:
raise
def copy(src, dst, recursive=False, symlink='relative', mode=None):
if symlink != 'never' and os.path.islink(src):
link = os.readlink(src)
if symlink == 'always' or not os.path.isabs(link):
remove(dst, nonexist_ok=True)
os.symlink(link, dst)
return
if os.path.isdir(src):
mkdir(dst, exist_ok=True)
if recursive:
for name in os.listdir(src):
copy(os.path.join(src, name), os.path.join(dst, name))
else:
shutil.copyfile(src, dst)
if mode is not None:
os.chmod(dst, mode)
else:
shutil.copymode(src, dst)
| jimporter/doppel | doppel/__init__.py | Python | bsd-3-clause | 1,483 |
# -*- coding: utf-8 -*-
from django.contrib import admin
from django.core.urlresolvers import reverse
from vkontakte_api.admin import VkontakteModelAdmin
from .models import Album, Video
class VideoInline(admin.TabularInline):
def image(self, instance):
return '<img src="%s" />' % (instance.photo_130,)
image.short_description = 'video'
image.allow_tags = True
model = Video
fields = ('title', 'image', 'owner', 'comments_count', 'views_count')
readonly_fields = fields
extra = False
can_delete = False
class AlbumAdmin(VkontakteModelAdmin):
def image_preview(self, obj):
return u'<a href="%s"><img src="%s" height="30" /></a>' % (obj.photo_160, obj.photo_160)
image_preview.short_description = u'Картинка'
image_preview.allow_tags = True
list_display = ('image_preview', 'remote_id', 'title', 'owner', 'videos_count')
list_display_links = ('title', 'remote_id',)
search_fields = ('title', 'description')
inlines = [VideoInline]
class VideoAdmin(VkontakteModelAdmin):
def image_preview(self, obj):
return u'<a href="%s"><img src="%s" height="30" /></a>' % (obj.photo_130, obj.photo_130)
image_preview.short_description = u'Картинка'
image_preview.allow_tags = True
list_display = ('image_preview', 'remote_id', 'owner', 'album', 'title', 'comments_count', 'views_count', 'date')
list_display_links = ('remote_id', 'title')
list_filter = ('album',)
admin.site.register(Album, AlbumAdmin)
admin.site.register(Video, VideoAdmin)
| ramusus/django-vkontakte-video | vkontakte_video/admin.py | Python | bsd-3-clause | 1,566 |
"""
This scripts compares the autocorrelation in statsmodels with
the one that you can build using only correlate.
"""
import numpy as np
import matplotlib.pyplot as plt
import scipy.signal as signal
import statsmodels.api as sm
from signals.time_series_class import MixAr, AR
from signals.aux_functions import sidekick
plot = False
plot2 = True
# Time parameters
dt = 0.1
Tmax = 100
# Let's get the auxiliary class
amplitude = 1
w1 = 1
w2 = 5
beta = sidekick(w1, w2, dt, Tmax, amplitude)
# First we need the phi vector
phi0 = 0.0
phi1 = -0.8
phi2 = 0.3
phi = np.array((phi0, phi1, phi2))
# Now we need the initial conditions
x0 = 1
x1 = 1
x2 = 0
initial_conditions = np.array((x0, x1, x2))
# First we construct the series without the sidekick
B = AR(phi, dt=dt, Tmax=Tmax)
B.initial_conditions(initial_conditions)
normal_series = B.construct_series()
# Second we construct the series with the mix
A = MixAr(phi, dt=dt, Tmax=Tmax, beta=beta)
A.initial_conditions(initial_conditions)
mix_series = A.construct_series()
time = A.time
if plot:
plt.subplot(3, 1, 1)
plt.plot(time, beta)
plt.subplot(3, 1, 2)
plt.plot(time, normal_series)
plt.subplot(3, 1, 3)
plt.plot(time, mix_series)
plt.show()
# Let's calculate the auto correlation
nlags = 40
normal_series -= normal_series.mean()
var = np.var(normal_series)
n = len(normal_series)
nlags1 = nlags
normalizing = np.arange(n, n - nlags1, -1)
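# At lag k only n - k products overlap, hence the decreasing divisors used below.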
auto_correlation1 = np.correlate(normal_series, normal_series, mode='full')
aux = auto_correlation1.size/2
auto_correlation1 = auto_correlation1[aux:aux + nlags1] / (normalizing * var)
auto_correlation2 = sm.tsa.stattools.acf(normal_series, nlags=nlags)
print 'result', np.sum(auto_correlation1 - auto_correlation2)
if plot2:
plt.subplot(2, 1, 1)
plt.plot(auto_correlation1)
plt.subplot(2, 1, 2)
plt.plot(auto_correlation2)
plt.show()
| h-mayorquin/time_series_basic | examples/auto_correlations_compare.py | Python | bsd-3-clause | 1,897 |
import os.path
from flask import url_for
from npactflask import app
# TODO: I think this is more simply a template_global:
# http://flask.pocoo.org/docs/0.10/api/#flask.Flask.template_global
@app.context_processor
def vSTATIC():
def STATICV(filename):
if app.config['DEBUG']:
vnum = os.path.getmtime(os.path.join(app.static_folder, filename))
else:
vnum = app.config['VERSION']
return (url_for('static', filename=filename, vnum=vnum))
return dict(vSTATIC=STATICV)
| victor-lin/npact | npactflask/npactflask/helpers.py | Python | bsd-3-clause | 525 |
from toyz.web import app
from toyz.web import tasks | fred3m/toyz | toyz/web/__init__.py | Python | bsd-3-clause | 51 |
from StringIO import StringIO
from django.test import TestCase
from django.test.client import Client
from corehq.apps.app_manager.models import Application, APP_V1, Module
from corehq.apps.app_manager.success_message import SuccessMessage
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.users.models import CommCareUser
from datetime import datetime, timedelta
from dimagi.utils.parsing import json_format_datetime
from receiver.xml import get_simple_response_xml, ResponseNature
submission_template = """<?xml version='1.0' ?>
<data xmlns="%(xmlns)s">
<meta>
<username>%(username)s</username>
<userID>%(userID)s</userID>
</meta>
</data>
"""
class SuccessMessageTest(TestCase):
message = "Thanks $first_name ($name)! You have submitted $today forms today and $week forms since Monday."
domain = "test"
username = "danny"
first_name = "Danny"
last_name = "Roberts"
password = "123"
xmlns = "http://dimagi.com/does_not_matter"
tz = timedelta(hours=0)
def setUp(self):
create_domain(self.domain)
couch_user = CommCareUser.create(self.domain, self.username, self.password)
userID = couch_user.user_id
couch_user.first_name = self.first_name
couch_user.last_name = self.last_name
couch_user.save()
self.sm = SuccessMessage(self.message, userID, tz=self.tz)
c = Client()
app = Application.new_app(self.domain, "Test App", application_version=APP_V1)
app.add_module(Module.new_module("Test Module", "en"))
form = app.new_form(0, "Test Form", "en")
form.xmlns = self.xmlns
app.success_message = {"en": self.message}
app.save()
def fake_form_submission(userID=userID, username=self.username, xmlns=self.xmlns, time=None):
submission = submission_template % {
"userID": userID,
"username": username,
"xmlns": xmlns
}
f = StringIO(submission.encode('utf-8'))
f.name = "tempfile.xml"
kwargs = dict(HTTP_X_SUBMIT_TIME=json_format_datetime(time)) if time else {}
response = c.post("/a/{self.domain}/receiver/".format(self=self), {
'xml_submission_file': f,
}, **kwargs)
return response
self.num_forms_today = 0
self.num_forms_this_week = 0
now = datetime.utcnow()
tznow = now + self.tz
week_start = tznow - timedelta(days=tznow.weekday())
week_start = datetime(week_start.year, week_start.month, week_start.day) - self.tz
day_start = datetime(tznow.year, tznow.month, tznow.day) - self.tz
spacing = 6
for h in xrange((24/spacing)*8):
time = now-timedelta(hours=spacing*h)
response = fake_form_submission(time=time)
if time > week_start:
self.num_forms_this_week += 1
if time > day_start:
self.num_forms_today += 1
self.assertEqual(
response.content,
get_simple_response_xml(("Thanks {self.first_name} ({self.first_name} {self.last_name})! "
"You have submitted {self.num_forms_today} forms today "
"and {self.num_forms_this_week} forms since Monday.").format(self=self),
nature=ResponseNature.SUBMIT_SUCCESS)
)
def testRender(self):
self.assertEqual(
self.sm.render(),
("Thanks {self.first_name} ({self.first_name} {self.last_name})! "
"You have submitted {self.num_forms_today} forms today "
"and {self.num_forms_this_week} forms since Monday.").format(self=self)
)
| gmimano/commcaretest | corehq/apps/app_manager/tests/test_success_message.py | Python | bsd-3-clause | 3,754 |
from django import forms
from oldcontrib.media.document.models import Document
class DocumentUpload(forms.ModelForm):
class Meta:
model = Document
fields = ('document',) | servee/django-servee-oldcontrib | oldcontrib/media/document/forms.py | Python | bsd-3-clause | 190 |
#!/usr/bin/env python
from setuptools import setup, Extension
setup(
name = "python-libmemcached",
version = "0.17.0",
description="python memcached client wrapped on libmemcached",
maintainer="subdragon",
maintainer_email="[email protected]",
requires = ['pyrex'],
# This assumes that libmemcache is installed with base /usr/local
ext_modules=[Extension('cmemcached', ['cmemcached.pyx'],
libraries=['memcached'],
)],
test_suite="cmemcached_test",
)
| k0001/python-libmemcached | setup.py | Python | bsd-3-clause | 517 |
#
# init_lib.py
#
# functions for initialization
#
from aws_lib import SpinupError
import base64
from boto import vpc, ec2
from os import environ
from pprint import pprint
import re
import sys
import time
from yaml_lib import yaml_attr
def read_user_data( fn ):
"""
Given a filename, returns the file's contents in a string.
"""
r = ''
with open( fn ) as fh:
r = fh.read()
fh.close()
return r
def get_tags( ec, r_id ):
"""
Takes EC2Connection object and resource ID. Returns tags associated
with that resource.
"""
return ec.get_all_tags(filters={ "resource-id": r_id })
def get_tag( ec, obj, tag ):
"""
Get the value of a tag associated with the given resource object.
Returns None if the tag is not set. Warning: EC2 tags are case-sensitive.
"""
tags = get_tags( ec, obj.id )
found = 0
for t in tags:
if t.name == tag:
found = 1
break
if found:
return t
else:
return None
def update_tag( obj, tag, val ):
"""
Given an EC2 resource object, a tag and a value, updates the given tag
to val.
"""
for x in range(0, 5):
error = False
try:
obj.add_tag( tag, val )
except:
error = True
e = sys.exc_info()[0]
print "Huh, trying again ({})".format(e)
time.sleep(5)
if not error:
print "Object {} successfully tagged.".format(obj)
break
return None
def init_region( r ):
"""
Takes a region string. Connects to that region. Returns EC2Connection
and VPCConnection objects in a tuple.
"""
# connect to region
c = vpc.connect_to_region( r )
ec = ec2.connect_to_region( r )
return ( c, ec )
def init_vpc( c, cidr ):
"""
Takes VPCConnection object (which is actually a connection to a
particular region) and a CIDR block string. Looks for our VPC in that
region. Returns the boto.vpc.vpc.VPC object corresponding to our VPC.
See:
http://boto.readthedocs.org/en/latest/ref/vpc.html#boto.vpc.vpc.VPC
"""
# look for our VPC
all_vpcs = c.get_all_vpcs()
found = 0
our_vpc = None
for v in all_vpcs:
if v.cidr_block == cidr:
our_vpc = v
found = 1
break
if not found:
raise SpinupError( "VPC {} not found".format(cidr) )
return our_vpc
def init_subnet( c, vpc_id, cidr ):
"""
Takes VPCConnection object, which is actually a connection to a
region, and a CIDR block string. Looks for our subnet in that region.
If subnet does not exist, creates it. Returns the subnet resource
object on success, raises exception on failure.
"""
# look for our VPC
all_subnets = c.get_all_subnets()
found = False
our_subnet = None
for s in all_subnets:
if s.cidr_block == cidr:
#print "Found subnet {}".format(cidr)
our_subnet = s
found = True
break
if not found:
our_subnet = c.create_subnet( vpc_id, cidr )
return our_subnet
def set_subnet_map_public_ip( ec, subnet_id ):
"""
    Takes EC2Connection object and SubnetId string. Attempts to set the
MapPublicIpOnLaunch attribute to True.
FIXME: give credit to source
"""
orig_api_version = ec.APIVersion
ec.APIVersion = '2014-06-15'
ec.get_status(
'ModifySubnetAttribute',
{'SubnetId': subnet_id, 'MapPublicIpOnLaunch.Value': 'true'},
verb='POST'
)
ec.APIVersion = orig_api_version
return None
def derive_ip_address( cidr_block, delegate, final8 ):
"""
Given a CIDR block string, a delegate number, and an integer
representing the final 8 bits of the IP address, construct and return
the IP address derived from this values. For example, if cidr_block is
10.0.0.0/16, the delegate number is 10, and the final8 is 8, the
derived IP address will be 10.0.10.8.
"""
result = ''
match = re.match( r'\d+\.\d+', cidr_block )
if match:
result = '{}.{}.{}'.format( match.group(0), delegate, final8 )
else:
raise SpinupError( "{} passed to derive_ip_address() is not a CIDR block!".format(cidr_block) )
return result
def get_master_instance( ec2_conn, subnet_id ):
"""
Given EC2Connection object and Master Subnet id, check that there is
just one instance running in that subnet - this is the Master. Raise
exception if the number of instances is != 0.
Return the Master instance object.
"""
instances = ec2_conn.get_only_instances( filters={ "subnet-id": subnet_id } )
if 1 > len(instances):
raise SpinupError( "There are no instances in the master subnet" )
if 1 < len(instances):
raise SpinupError( "There are too many instances in the master subnet" )
return instances[0]
def template_token_subst( buf, key, val ):
"""
Given a string (buf), a key (e.g. '@@MASTER_IP@@') and val, replace all
occurrences of key in buf with val. Return the new string.
"""
targetre = re.compile( re.escape( key ) )
return re.sub( targetre, str(val), buf )
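# --- Illustrative addition (not part of the original module) ---
# A minimal sketch of the @@TOKEN@@ substitution performed by
# process_user_data() and make_reservation() below; the template text and
# values used here are made up for illustration only.
def _example_user_data_substitution():
    u = "master: @@MASTER_IP@@\nrole: @@ROLE@@\n"
    u = template_token_subst( u, '@@MASTER_IP@@', '10.0.0.10' )
    u = template_token_subst( u, '@@ROLE@@', 'mon' )
    return u  # "master: 10.0.0.10\nrole: mon\n"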
def process_user_data( fn, vars = [] ):
"""
Given filename of user-data file and a list of environment
variable names, replaces @@...@@ tokens with the values of the
environment variables. Returns the user-data string on success
    and raises an exception on failure.
"""
# Get user_data string.
buf = read_user_data( fn )
for e in vars:
if not e in environ:
raise SpinupError( "Missing environment variable {}!".format( e ) )
buf = template_token_subst( buf, '@@'+e+'@@', environ[e] )
return buf
def count_instances_in_subnet( ec, subnet_id ):
"""
Given EC2Connection object and subnet ID, count number of instances
in that subnet and return it.
"""
instance_list = ec.get_only_instances(
filters={ "subnet-id": subnet_id }
)
return len(instance_list)
def make_reservation( ec, ami_id, **kwargs ):
"""
Given EC2Connection object, delegate number, AMI ID, as well as
all the kwargs referred to below, make a reservation for an instance
and return the registration object.
"""
# extract arguments to be passed to ec.run_instances()
our_kwargs = {
"key_name": kwargs['key_name'],
"subnet_id": kwargs['subnet_id'],
"instance_type": kwargs['instance_type'],
"private_ip_address": kwargs['private_ip_address']
}
# Master or minion?
if kwargs['master']:
our_kwargs['user_data'] = kwargs['user_data']
else:
# perform token substitution in user-data string
u = kwargs['user_data']
u = template_token_subst( u, '@@MASTER_IP@@', kwargs['master_ip'] )
u = template_token_subst( u, '@@DELEGATE@@', kwargs['delegate_no'] )
u = template_token_subst( u, '@@ROLE@@', kwargs['role'] )
u = template_token_subst( u, '@@NODE_NO@@', kwargs['node_no'] )
our_kwargs['user_data'] = u
# Make the reservation.
reservation = ec.run_instances( ami_id, **our_kwargs )
# Return the reservation object.
return reservation
def wait_for_running( ec2_conn, instance_id ):
"""
Given an instance id, wait for its state to change to "running".
"""
print "Waiting for {} running state".format( instance_id )
while True:
instances = ec2_conn.get_only_instances( instance_ids=[ instance_id ] )
print "Current state is {}".format( instances[0].state )
if instances[0].state != 'running':
print "Sleeping for 5 seconds"
time.sleep(5)
else:
print "Waiting another 5 seconds for good measure"
time.sleep(5)
break
def wait_for_available( ec2_conn, volume_id ):
"""
Given a volume id, wait for its state to change to "available".
"""
print "Waiting for {} available state".format( volume_id )
while True:
volumes = ec2_conn.get_all_volumes( volume_ids=[ volume_id ] )
print "Current status is {}".format( volumes[0].status )
if volumes[0].status != 'available':
print "Sleeping for 5 seconds"
time.sleep(5)
else:
break
def wait_for_detachment( ec2_conn, v_id, i_id ):
"""
Given a volume ID and an instance ID, wait for volume to
become detached.
"""
    print "Waiting for volume {} to be detached from instance {}".format(v_id, i_id)
while True:
attached_vol = ec2_conn.get_all_volumes(
filters={
"volume-id": v_id,
"attachment.instance-id": i_id,
"attachment.device": "/dev/sdb"
}
)
print "attached_vol == {}".format(attached_vol)
if attached_vol is None or len(attached_vol) == 0:
print "Detached!"
break
else:
time.sleep(5)
print "Still attached."
| smithfarm/ceph-auto-aws | susecon2015/init_lib.py | Python | bsd-3-clause | 9,277 |
#!/usr/bin/env python
import sys
import os
import re
def setup_python3():
# Taken from "distribute" setup.py
from distutils.filelist import FileList
from distutils import dir_util, file_util, util, log
from os.path import join, exists
tmp_src = join("build", "src")
# Not covered by "setup.py clean --all", so explicit deletion required.
if exists(tmp_src):
dir_util.remove_tree(tmp_src)
log.set_verbosity(1)
fl = FileList()
for line in open("MANIFEST.in"):
if not line.strip():
continue
fl.process_template_line(line)
dir_util.create_tree(tmp_src, fl.files)
outfiles_2to3 = []
for f in fl.files:
outf, copied = file_util.copy_file(f, join(tmp_src, f), update=1)
if copied and outf.endswith(".py"):
outfiles_2to3.append(outf)
util.run_2to3(outfiles_2to3)
# arrange setup to use the copy
sys.path.insert(0, tmp_src)
return tmp_src
kwargs = {}
if sys.version_info[0] >= 3:
from setuptools import setup
kwargs['use_2to3'] = True
kwargs['install_requires'] = ['isodate', 'pyparsing']
kwargs['tests_require'] = ['html5lib']
kwargs['requires'] = [
'isodate', 'pyparsing',
'SPARQLWrapper']
kwargs['src_root'] = setup_python3()
assert setup
else:
try:
from setuptools import setup
assert setup
kwargs['test_suite'] = "nose.collector"
kwargs['install_requires'] = [
'isodate',
'pyparsing', 'SPARQLWrapper']
if sys.version_info[1]<7: # Python 2.6
kwargs['install_requires'].append('ordereddict')
if sys.version_info[1]<6: # Python 2.5
kwargs['install_requires'].append('pyparsing<=1.5.7')
kwargs['install_requires'].append('simplejson')
kwargs['install_requires'].append('html5lib==0.95')
else:
kwargs['install_requires'].append('html5lib')
except ImportError:
from distutils.core import setup
# Find version. We have to do this because we can't import it in Python 3 until
# it's been automatically converted in the setup process.
def find_version(filename):
_version_re = re.compile(r'__version__ = "(.*)"')
for line in open(filename):
version_match = _version_re.match(line)
if version_match:
return version_match.group(1)
version = find_version('rdflib/__init__.py')
packages = ['rdflib',
'rdflib/extras',
'rdflib/plugins',
'rdflib/plugins/parsers',
'rdflib/plugins/parsers/pyRdfa',
'rdflib/plugins/parsers/pyRdfa/transform',
'rdflib/plugins/parsers/pyRdfa/extras',
'rdflib/plugins/parsers/pyRdfa/host',
'rdflib/plugins/parsers/pyRdfa/rdfs',
'rdflib/plugins/parsers/pyMicrodata',
'rdflib/plugins/serializers',
'rdflib/plugins/sparql',
'rdflib/plugins/sparql/results',
'rdflib/plugins/stores',
'rdflib/tools'
]
if os.environ.get('READTHEDOCS', None):
# if building docs for RTD
# install examples, to get docstrings
packages.append("examples")
setup(
name='rdflib',
version=version,
description="RDFLib is a Python library for working with RDF, a " + \
"simple yet powerful language for representing information.",
author="Daniel 'eikeon' Krech",
author_email="[email protected]",
maintainer="RDFLib Team",
maintainer_email="[email protected]",
url="https://github.com/RDFLib/rdflib",
license="https://raw.github.com/RDFLib/rdflib/master/LICENSE",
platforms=["any"],
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 2.5",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"License :: OSI Approved :: BSD License",
"Topic :: Software Development :: Libraries :: Python Modules",
"Operating System :: OS Independent",
"Natural Language :: English",
],
long_description="""\
RDFLib is a Python library for working with
RDF, a simple yet powerful language for representing information.
The library contains parsers and serializers for RDF/XML, N3,
NTriples, Turtle, TriX, RDFa and Microdata. The library presents
a Graph interface which can be backed by any one of a number of
Store implementations. The core rdflib includes store
implementations for in memory storage, persistent storage on top
of the Berkeley DB, and a wrapper for remote SPARQL endpoints.
A SPARQL 1.1 engine is also included.
If you have recently reported a bug marked as fixed, or have a craving for
the very latest, you may want the development version instead:
easy_install https://github.com/RDFLib/rdflib/tarball/master
Read the docs at:
http://rdflib.readthedocs.org
""",
packages = packages,
entry_points = {
'console_scripts': [
'rdfpipe = rdflib.tools.rdfpipe:main',
'csv2rdf = rdflib.tools.csv2rdf:main',
'rdf2dot = rdflib.tools.rdf2dot:main',
'rdfs2dot = rdflib.tools.rdfs2dot:main',
'rdfgraphisomorphism = rdflib.tools.graphisomorphism:main',
],
},
**kwargs
)
| lrowe/rdflib | setup.py | Python | bsd-3-clause | 5,590 |
try:
from primitives import Mem
except ImportError:
from mem import Mem
import sys
if sys.version >= '3':
xrange = range
class MMU():
def __init__(self, mem, size=0):
""" Initialize MMU
"""
self._enabled = False
self._mem = mem
self._wordsize = 4
self._table = []
def isEnabled(self):
return self._enabled
def enable(self):
""" Enables MMU
"""
self._enabled = True
def disable(self):
""" Disables MMU
"""
self._enabled = False
def getEntries(self, entries, startpos=None):
""" Get page entries and parse them, handle recursively
>>> from primitives import Mem
>>> m = Mem(1024*1024)
>>> m.setData(0, 0x00000100, 4)
>>> m.setData(4, 0x00001100, 4)
>>> m.setData(8, 0x00002100, 4)
>>> m.setData(12, 0x00003100, 4)
>>> u = MMU(m)
>>> entries = [(0, MMU.Flags(solved={'execute': False, 'ok': True, 'size1': False, 'size2': False, 'write': False, 'subtable': True, 'userspace': False, 'size': 4*1024}), 0),
... (32768, MMU.Flags(solved={'execute': False, 'ok': True, 'size1': True, 'size2': False, 'write': False, 'subtable': False, 'userspace': False, 'size': 64}), 65536),
... (0, MMU.Flags(solved={'execute': False, 'ok': True, 'size1': False, 'size2': False, 'write': False, 'subtable': False, 'userspace': False, 'size': 4}), 131072)]
>>> u.getEntries(entries)
[(0, execute=False,ok=True,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False, 0), (4096, execute=False,ok=True,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False, 4096), (8192, execute=False,ok=True,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False, 8192), (12288, execute=False,ok=True,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False, 12288), (32768, execute=False,ok=True,size=64,size1=True,size2=False,subtable=False,userspace=False,write=False, 65536), (0, execute=False,ok=True,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False, 131072)]
"""
if startpos is None:
startpos = 0
subs = []
for (addr, flags, pos) in entries:
if flags['subtable']:
size = flags['size'] * 1024 / 4
if flags['ok']:
tmp = self.readTable(addr, size, pos)
entries = self.getEntries(tmp, startpos)
subs += entries
startpos += flags['size'] * 1024
else:
if flags['ok']:
subs.append((addr, flags, pos))
return subs
    def initialize(self, tablepos, tablesize):
        """ Initializes MMU with an initial page
Does recursive parsing
>>> from primitives import Mem
>>> m = Mem(1024*1024*10)
>>> u = MMU(m)
>>> # Subtable, starts at phys 4k
>>> m.setData(22, 0x00001111, 4)
>>> # Page, virtual start at 32k, size 64k
>>> m.setData(14, 0x00008110, 4)
>>> # Page, virtual start at 98k, size 4k
>>> m.setData(18, 0x00018100, 4)
>>> for i in xrange(1023):
... m.setData(0x1000 + i, 0)
>>> # Page at 8k, size 4k
>>> m.setData(0x1000, 0x00002100, 4)
>>> # Page at 12k, size 1M
>>> m.setData(0x1004, 0x00003120, 4)
>>> u.initialize(14, 3)
[(32768, execute=False,ok=True,size=64,size1=True,size2=False,subtable=False,userspace=False,write=False, 0), (98304, execute=False,ok=True,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False, 65536), (8192, execute=False,ok=True,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False, 69632), (12288, execute=False,ok=True,size=1024,size1=False,size2=True,subtable=False,userspace=False,write=False, 73728)]
"""
entries = self.readTable(tablepos, tablesize)
self._table = self.getEntries(entries)
return self._table
def diffTime(self, a, b):
d = a - b
print (d.seconds*1000*1000 + d.microseconds)
def readTable(self, tablepos, tablesize, pos=None):
""" Reads table from memory
>>> from primitives import Mem
>>> m = Mem(1024*100)
>>> u = MMU(m)
>>> # Subtable, starts at phys 4k
>>> m.setData(10, 0x00001111, 4)
>>> # Page, starts at 32k, size 64k
>>> m.setData(14, 0x00008110, 4)
>>> for i in xrange(1023):
... m.setData(0x1000 + i, 0)
>>> tmp = u.readTable(10, 3)
>>> tmp[0][0]
4096
>>> tmp[1][0]
32768
>>> tmp[0][1]
execute=False,ok=True,size=65536,size1=True,size2=False,subtable=True,userspace=False,write=False
>>> tmp[1][1]
execute=False,ok=True,size=64,size1=True,size2=False,subtable=False,userspace=False,write=False
>>> tmp[0]
(4096, execute=False,ok=True,size=65536,size1=True,size2=False,subtable=True,userspace=False,write=False, 0)
>>> tmp[1]
(32768, execute=False,ok=True,size=64,size1=True,size2=False,subtable=False,userspace=False,write=False, 67108864)
"""
import datetime
datas = []
if pos is None:
pos = 0
virtpos = tablepos
cnt = 0
        # Optimized reading in blocks instead of one byte at a time
block = self._mem.getBlock(tablepos, tablesize * 4)
oldtmp = 0
items = 0
preindex = 0
for (bpos, data) in block:
if data is None:
continue
if preindex > 0:
# Do we have old data from previous block?
if preindex == 1:
oldtmp += (data[0] << 24)
if preindex == 2:
oldtmp += (data[0] << 16) + (data[1] << 24)
if preindex == 3:
oldtmp += (data[0] << 8) + (data[1] << 16) + (data[2] << 24)
(ok, pos, res) = self.readEntry(oldtmp, pos)
if ok:
datas.append(res)
tablepos = preindex
datalen = len(data)
l = int(datalen / 4 - 1)
index = tablepos % 0x1000
for item in xrange(l):
tmp = data[index] + (data[index+1] << 8) + (data[index+2] << 16) + (data[index+3] << 24)
(ok, pos, res) = self.readEntry(tmp, pos)
if ok:
datas.append(res)
index += 4
items += 4
if index > datalen - 4:
miss = datalen - index
preindex = 0
# Check if we didn't read all the data...
if miss > 0:
oldtmp = data[index]
if miss > 1:
oldtmp += (data[index+1] << 8)
if miss > 2:
oldtmp += (data[index+2] << 16)
preindex = 4 - miss
break
if items > (tablesize + tablepos):
break
return datas
"""
for index in xrange(tablesize):
tmp = self._mem.getData(virtpos, self._wordsize)
virtpos += self._wordsize
if tmp > 0:
print tmp
cnt += 1
(ok, pos, res) = self.readEntry(tmp, pos)
if ok:
datas.append(res)
return datas
"""
def readEntry(self, data, pos=0):
""" Read entry from one page table item data
>>> m = Mem()
>>> u = MMU(m)
>>> u.readEntry(0x00000000)
(False, 0, (0, execute=False,ok=False,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False, 0))
>>> u.readEntry(0x00001000)
(True, 4096, (4096, execute=False,ok=False,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False, 0))
>>> u.readEntry(0x00001111)
(True, 67108864, (4096, execute=False,ok=True,size=65536,size1=True,size2=False,subtable=True,userspace=False,write=False, 0))
>>> u.readEntry(0x00001022)
(True, 1048576, (4096, execute=True,ok=False,size=1024,size1=False,size2=True,subtable=False,userspace=False,write=False, 0))
>>> u.readEntry(0x00002FFF)
(True, 68719476736, (8192, execute=True,ok=True,size=67108864,size1=True,size2=True,subtable=True,userspace=True,write=True, 0))
>>> u.readEntry(0xFFFFFFFF)
(True, 68719476736, (4294963200, execute=True,ok=True,size=67108864,size1=True,size2=True,subtable=True,userspace=True,write=True, 0))
>>> u.readEntry(0)
(False, 0, (0, execute=False,ok=False,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False, 0))
"""
if data > 0:
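            # Upper 20 bits of the entry: page-aligned address; low 12 bits:
            # the flag field decoded by MMU.Flags below.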
flags = MMU.Flags(data & 0xFFF)
vaddr = data & 0xFFFFF000
ok = True
else:
vaddr = 0
flags = MMU.Flags(data & 0xFFF)
ok = False
return (ok, pos, (vaddr, flags, pos))
return (ok, pos + flags['size'] * 1024, (vaddr, flags, pos))
def getRange(self, item):
addr = item[0]
flags = item[1]
pos = item[2]
endaddr = addr + (flags['size'] * 1024)
return (addr, endaddr, pos)
def virtToPhys(self, pos):
""" Converts virtual memory location to physical
>>> from primitives import Mem
>>> m = Mem(1024*1024*10)
>>> u = MMU(m)
>>> # Page, virtual start at 24, size 4k (0x1000)
>>> m.setData(10, 0x00006100, 4)
>>> # Page, virtual start at 96k, size 4k (0x1000)
>>> m.setData(14, 0x00018100, 4)
>>> # Page, virtual start at 32k, size 64k (0x10000)
>>> m.setData(18, 0x00008110, 4)
>>> # Subtable, starts at phys 4k, size 4M
>>> m.setData(22, 0x00001101, 4)
>>> # Page at virtual at 8k, size 4k (0x1000)
>>> m.setData(0x1000, 0x00002100, 4)
>>> # Page at virtual at 1126k, size 1M
>>> m.setData(0x1004, 0x00113120, 4)
>>> tmp = u.initialize(10, 5)
>>> u.virtToPhys(0x8000) == (0x2000)
True
>>> u.virtToPhys(0x8000)
8192
>>> u.virtToPhys(0x8001)
8193
>>> u.virtToPhys(0x2000)
73728
>>> u.virtToPhys(0x2000) == (0x2000 + 0x10000)
True
>>> u.virtToPhys(0x2010) == (0x2000 + 0x10000 + 0x10)
True
>>> u.virtToPhys(0x2FFF) == (0x2000 + 0x10000 + 0xFFF)
True
>>> u.virtToPhys(0x18000) == 0x1000
True
>>> u.virtToPhys(0x19000) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
IndexError: No page mapped at virtual: 00019000
>>> u.virtToPhys(0x19001) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
IndexError: No page mapped at virtual: 00019001
>>> u.virtToPhys(0x1A000) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
IndexError: No page mapped at virtual: 0001A000
>>> u.virtToPhys(0) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
IndexError: No page mapped at virtual: 00000000
"""
for item in self._table:
(a, b, c) = self.getRange(item)
if a <= pos and pos < b:
index = (pos - a)
phys = c + index
return phys
raise IndexError('No page mapped at virtual: %.8X' % (pos))
def getPageFlags(self, pos):
""" Get flags at position
>>> from primitives import Mem
>>> m = Mem(1024*1024*10)
>>> u = MMU(m)
>>> # Page, virtual start at 24, size 4k (0x1000)
>>> m.setData(10, 0x00006100, 4)
>>> # Subtable, starts at phys 4k (0x1000)
>>> m.setData(14, 0x00001101, 4)
>>> # Page, virtual start at 32k, size 64k (0x10000)
>>> m.setData(18, 0x00008110, 4)
>>> # Page, virtual start at 96, size 4k (0x1000)
>>> m.setData(22, 0x00018100, 4)
>>> # Page at virtual 8k, size 4k (0x1000)
>>> m.setData(0x1000, 0x00002100, 4)
>>> # Page at virtual 1126k, size 1M
>>> m.setData(0x1004, 0x00113120, 4)
>>> tmp = u.initialize(10, 4)
>>> u.enable()
>>> u.getPageFlags(0x8000)
execute=False,ok=True,size=64,size1=True,size2=False,subtable=False,userspace=False,write=False
>>> u.getPageFlags(0x8001)
execute=False,ok=True,size=64,size1=True,size2=False,subtable=False,userspace=False,write=False
>>> u.getPageFlags(0x18000)
execute=False,ok=True,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False
>>> u.getPageFlags(0x18010)
execute=False,ok=True,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False
>>> u.getPageFlags(0x19000)
Traceback (most recent call last):
...
IndexError: No page mapped at virtual: 00019000
>>> u.getPageFlags(0x19001) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
IndexError: No page mapped at virtual: 00019001
>>> u.getPageFlags(0x1A000) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
IndexError: No page mapped at virtual: 0001A000
>>> u.getPageFlags(0x18fff)
execute=False,ok=True,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False
>>> u.getPageFlags(0) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
IndexError: No page mapped at virtual: 00000000
"""
if not self._enabled:
return None
for item in self._table:
(a, b, c) = self.getRange(item)
if a <= pos and pos < b:
return item[1]
raise IndexError('No page mapped at virtual: %.8X' % (pos))
def setData(self, pos, data, size=4):
""" Set data, if MMU enabled, solve physical locations first
>>> from primitives import Mem
>>> m = Mem(1024*1024*5)
>>> u = MMU(m)
>>> # Page, virtual start at 24, size 4k (0x1000)
>>> m.setData(10, 0x00006100, 4)
>>> # Page, virtual start at 96, size 4k (0x1000)
>>> m.setData(14, 0x00018100, 4)
>>> # Page, virtual start at 32k, size 64k (0x10000)
>>> m.setData(18, 0x00008110, 4)
>>> # Subtable, starts at phys 4k, size 4M (0x1000)
>>> m.setData(22, 0x00001101, 4)
>>> # Page at virtual 8k, size 4k (0x1000)
>>> m.setData(0x1000, 0x00002100, 4)
>>> # Page at virtual 1126k, size 1M
>>> m.setData(0x1004, 0x00113120, 4)
>>> tmp = u.initialize(10, 4)
>>> # Paging is disabled, set data to phys 0x8000
>>> u.setData(0x8000, 56, 1)
>>> # Enable paging
>>> u.enable()
>>> # Paging is enabled so set data to virt 0x8000, which is 0x2000 in phys
>>> u.setData(0x8000, 42, 1)
>>> # Get memory contents at 0x8000 phys
>>> m.getData(0x8000, 1)
56
>>> # Get memory contents at 0x2000 phys
>>> m.getData(0x2000, 1)
42
"""
if self._enabled:
self._mem.setData(self.virtToPhys(pos), data, size)
else:
self._mem.setData(pos, data, size)
def getData(self, pos, size=1):
""" Get data, if MMU enabled, solve physical location first
>>> from primitives import Mem
>>> m = Mem(1024*1024*10)
>>> u = MMU(m)
>>> # Page, virtual start at 24, size 4k (0x1000)
>>> m.setData(10, 0x00006100, 4)
>>> # Subtable, starts at phys 4k (0x1000)
>>> m.setData(14, 0x00001101, 4)
>>> # Page, virtual start at 32k, size 64k (0x10000)
>>> m.setData(18, 0x00008110, 4)
>>> # Page, virtual start at 96k, size 4k (0x1000)
>>> m.setData(22, 0x00018100, 4)
>>> # Page at virtual 8k, size 4k (0x1000)
>>> m.setData(0x1000, 0x00002100, 4)
>>> # Page at virtual 1126k, size 1M
>>> m.setData(0x1004, 0x00113120, 4)
>>> u.initialize(10, 4)
[(24576, execute=False,ok=True,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False, 0), (8192, execute=False,ok=True,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False, 4096), (1126400, execute=False,ok=True,size=1024,size1=False,size2=True,subtable=False,userspace=False,write=False, 8192), (32768, execute=False,ok=True,size=64,size1=True,size2=False,subtable=False,userspace=False,write=False, 4198400), (98304, execute=False,ok=True,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False, 4263936)]
>>> # Paging is disabled, set data to phys 0x8000
>>> u.setData(0x8000, 56, 1)
>>> # Paging is disabled, set data to phys 0x100
>>> u.setData(0x100, 12345, 4)
>>> # Enable paging
>>> u.enable()
>>> # Paging is enabled so set data to virt 0x8000, which is 0x2000 in phys
>>> u.setData(0x8000, 42, 1)
>>> # Get memory contents at 0x8000 virt
>>> u.getData(0x8000, 1)
42
>>> # Get memory contents at 0x100 phys, 0x6000+0x100 virt
>>> u.getData(0x6000 + 0x100, 4)
12345
"""
if self._enabled:
return self._mem.getData(self.virtToPhys(pos), size)
else:
return self._mem.getData(pos, size)
def setRaw(self, pos, data):
""" Set one byte, if MMU enabled, solve physical location first
>>> from primitives import Mem
>>> m = Mem(1024*100)
>>> u = MMU(m)
        >>> # Page, virtual start at 24k, size 4k (0x1000)
>>> m.setData(10, 0x00006100, 4)
>>> tmp = u.initialize(10, 1)
>>> u.setRaw(0x100, 255)
>>> u.enable()
>>> u.setRaw(0x6001, 123)
>>> m.getRaw(0x100)
255
>>> m.getRaw(0x1)
123
"""
if self._enabled:
self._mem.setRaw(self.virtToPhys(pos), data)
else:
self._mem.setRaw(pos, data)
def getRaw(self, pos):
""" Get one byte, if MMU enabled, solve physical location first
>>> from primitives import Mem
>>> m = Mem(1024*100)
>>> u = MMU(m)
        >>> # Page, virtual start at 24k, size 4k (0x1000)
>>> m.setData(10, 0x00006100, 4)
>>> tmp = u.initialize(10, 1)
>>> u.setRaw(0x100, 255)
>>> u.enable()
>>> u.setRaw(0x6001, 123)
>>> m.getRaw(0x100)
255
>>> m.getRaw(0x1)
123
>>> u.getRaw(0x6001)
123
>>> u.getRaw(0x6000)
0
"""
if self._enabled:
return self._mem.getRaw(self.virtToPhys(pos))
else:
return self._mem.getRaw(pos)
class Flags:
def __init__(self, flags=0, solved=None):
""" Initialize flags
"""
self._flags = flags
if solved is None:
self._data = self.solveFlags(flags)
else:
self._data = solved
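        # Flag-word layout decoded by solveFlags below: bit 0 = subtable,
        # bit 1 = execute, bit 2 = write, bit 3 = userspace, bits 4-5 = page
        # size (4k, 64k, 1M or 64M), bit 8 = entry valid ("ok"). For subtable
        # entries the decoded size is additionally multiplied by 1024.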
def solveFlags(self, flags):
""" Solve flags from given number data
>>> f = MMU.Flags()
>>> r = f.solveFlags(0x1)
>>> f
execute=False,ok=False,size=4096,size1=False,size2=False,subtable=True,userspace=False,write=False
>>> r = f.solveFlags(0x2)
>>> f
execute=True,ok=False,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False
>>> r = f.solveFlags(0x4)
>>> f
execute=False,ok=False,size=4,size1=False,size2=False,subtable=False,userspace=False,write=True
>>> r = f.solveFlags(0x8)
>>> f
execute=False,ok=False,size=4,size1=False,size2=False,subtable=False,userspace=True,write=False
>>> r = f.solveFlags(0x10)
>>> f
execute=False,ok=False,size=64,size1=True,size2=False,subtable=False,userspace=False,write=False
>>> r = f.solveFlags(0x20)
>>> f
execute=False,ok=False,size=1024,size1=False,size2=True,subtable=False,userspace=False,write=False
>>> r = f.solveFlags(0x30)
>>> f
execute=False,ok=False,size=65536,size1=True,size2=True,subtable=False,userspace=False,write=False
>>> r = f.solveFlags(0x40)
>>> f
execute=False,ok=False,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False
>>> r = f.solveFlags(0xFF)
>>> f
execute=True,ok=False,size=67108864,size1=True,size2=True,subtable=True,userspace=True,write=True
>>> r = f.solveFlags(0x1FF)
>>> f
execute=True,ok=True,size=67108864,size1=True,size2=True,subtable=True,userspace=True,write=True
"""
data = {
'subtable': False,
'execute': False,
'write': False,
'userspace': False,
'size': 0,
'size1': False,
'size2': False,
'ok': False,
}
#'size3': False,
if flags & 0x1 == 0x1:
data['subtable'] = True
if flags & 0x2 == 0x2:
data['execute'] = True
if flags & 0x4 == 0x4:
data['write'] = True
if flags & 0x8 == 0x8:
data['userspace'] = True
if flags & 0x10 == 0x10:
data['size1'] = True
if flags & 0x20 == 0x20:
data['size2'] = True
if flags & 0x100 == 0x100:
data['ok'] = True
# Determine page size in kilobytes
if not data['size1'] and not data['size2']:
data['size'] = 4
elif data['size1'] and not data['size2']:
data['size'] = 64
elif not data['size1'] and data['size2']:
data['size'] = 1024
elif data['size1'] and data['size2']:
data['size'] = 1024 * 64
# For subtables multiply by 1024
if data['subtable']:
data['size'] *= 1024
self._data = data
return data
def isSet(self, name):
""" Checks whether element is set, or get value
>>> f = MMU.Flags(0x1F)
>>> f.isSet('size')
65536
>>> f.isSet('size1')
True
>>> f.isSet('size2')
False
>>> f.isSet('subtable')
True
"""
if not name in self._data:
return False
return self._data[name]
def __getitem__(self, name):
if not name in self._data:
return None
return self._data[name]
def dump(self):
""" Dumps the flag status
"""
return self._data
def __repr__(self):
""" Get string representation of the flags
"""
#return "%s" % self.dump()
a = self._data.keys()
res = ''
for k in sorted(a):
if res:
res += ','
res += '%s=%s' % (k, self._data[k])
return res
"""
MMU
Initial table
if __name__ == "__main__":
import doctest
doctest.run_docstring_examples(MMU.initialize, globals())
"""
| jroivas/cpus | primitives/mmu.py | Python | bsd-3-clause | 23,832 |
"""
pyfire.contact
    ~~~~~~~~~~~~~~
Handles Contact ("roster item") interpretation as per RFC-6121
:copyright: 2011 by the pyfire Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import xml.etree.ElementTree as ET
from sqlalchemy import Table, Column, Boolean, Integer, String, Enum, ForeignKey
from sqlalchemy.orm import relationship, backref
from pyfire.jid import JID
from pyfire.storage import Base, JIDString
contacts_groups = Table('contacts_groups', Base.metadata,
Column('contact_id', Integer, ForeignKey('contacts.id')),
Column('group_id', Integer, ForeignKey('groups.id'))
)
class Roster(Base):
"""List of contacts for a given jid"""
__tablename__ = 'rosters'
id = Column(Integer, primary_key=True)
jid = Column(JIDString, nullable=False)
def __init__(self, jid):
self.jid = JID(jid)
class Group(Base):
"""Simple group, only providing a name for now"""
__tablename__ = 'groups'
id = Column(Integer, primary_key=True)
name = Column(String(255))
class Contact(Base):
"""Jabber Contact, aka roster item. It has some really strict attribute
setting mechanism as it leads to all kinds of fantastic crashes with
clients which should be avoided in any case.
"""
__tablename__ = 'contacts'
id = Column(Integer, primary_key=True)
approved = Column(Boolean)
ask = Column(Enum('subscribe'))
jid = Column(JIDString, nullable=False)
name = Column(String(255))
subscription = Column(Enum("none", "from", "to", "remove", "both"))
groups = relationship(Group, secondary=contacts_groups)
roster = relationship(Roster, backref=backref('contacts'))
roster_id = Column(Integer, ForeignKey('rosters.id'), nullable=False)
def __init__(self, jid, **kwds):
super(Contact, self).__init__()
# required
if isinstance(jid, basestring):
self.jid = JID(jid)
elif isinstance(jid, JID):
self.jid = jid
self.jid.validate(raise_error=True)
else:
raise AttributeError("Needs valid jid either as string or JID instance")
# optional
self.approved = False
self.ask = None
self.name = None
self.subscription = "none"
self.groups = []
for k, v in kwds.iteritems():
setattr(self, k, v)
def to_element(self):
"""Formats contact as `class`:ET.Element object"""
element = ET.Element("item")
if self.approved is not None:
element.set("approved", 'true' if self.approved else 'false')
if self.ask is not None:
element.set("ask", self.ask)
element.set("jid", str(self.jid))
if self.name is not None:
element.set("name", self.name)
if self.subscription is not None:
element.set("subscription", self.subscription)
for group in self.groups:
group_element = ET.SubElement(element, "group")
group_element.text = group
return element
@staticmethod
def from_element(element):
"""Creates contact instance from `class`:ET.Element"""
if element.tag != "item":
raise ValueError("Invalid element with tag %s" % element.tag)
cont = Contact(element.get('jid'))
cont.ask = element.get('ask')
cont.subscription = element.get('subscription')
approved = element.get('approved')
if approved == 'true':
cont.approved = True
elif approved == 'false':
cont.approved = False
else:
cont.approved = approved
for group in list(element):
if group.tag == "group":
cont.groups.append(group.text)
return cont
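# Illustrative round-trip sketch (not part of the original module; the JID and
# group name are invented):
#
#     item = Contact("[email protected]", name="Juliet", groups=["friends"]).to_element()
#     clone = Contact.from_element(item)
#
# `clone` carries the same jid, subscription state and group list; note that
# from_element() does not read the "name" attribute back.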
| IgnitedAndExploded/pyfire | pyfire/contact.py | Python | bsd-3-clause | 3,814 |
# -*- coding: utf-8 -*-
#
# example_project documentation build configuration file, created by
# sphinx-quickstart on Wed Aug 19 10:27:46 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import datetime
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'example_project'
copyright = u'%d, Lincoln Loop' % datetime.date.today().year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'example_projectdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'example_project.tex', u'example_project Documentation',
u'Lincoln Loop', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| bkonkle/chef-cookbooks | example_project/docs/conf.py | Python | bsd-3-clause | 6,394 |
# -*- coding: utf-8 -*-
# Copyright 2013 splinter authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import os
import unittest
import sys
from splinter import Browser
from .base import BaseBrowserTests
from .fake_webapp import EXAMPLE_APP
from .is_element_present_nojs import IsElementPresentNoJSTest
@unittest.skipIf(
sys.version_info[0] > 2,
"zope.testbrowser is not currently compatible with Python 3",
)
class ZopeTestBrowserDriverTest(
BaseBrowserTests, IsElementPresentNoJSTest, unittest.TestCase
):
@classmethod
def setUpClass(cls):
cls.browser = Browser("zope.testbrowser", wait_time=0.1)
def setUp(self):
self.browser.visit(EXAMPLE_APP)
    @classmethod
    def tearDownClass(cls):
        cls.browser.quit()
def test_should_support_with_statement(self):
with Browser("zope.testbrowser"):
pass
def test_attach_file(self):
"should provide a way to change file field value"
file_path = os.path.join(
os.path.abspath(os.path.dirname(__file__)), "mockfile.txt"
)
self.browser.attach_file("file", file_path)
self.browser.find_by_name("upload").click()
html = self.browser.html
self.assertIn("text/plain", html)
self.assertIn(open(file_path).read().encode("utf-8"), html)
def test_forward_to_none_page(self):
"should not fail when trying to forward to none"
browser = Browser("zope.testbrowser")
browser.visit(EXAMPLE_APP)
browser.forward()
self.assertEqual(EXAMPLE_APP, browser.url)
browser.quit()
def test_cant_switch_to_frame(self):
"zope.testbrowser should not be able to switch to frames"
with self.assertRaises(NotImplementedError) as cm:
self.browser.get_iframe("frame_123")
self.fail()
e = cm.exception
self.assertEqual("zope.testbrowser doesn't support frames.", e.args[0])
def test_simple_type(self):
"""
zope.testbrowser won't support type method
because it doesn't interact with JavaScript
"""
with self.assertRaises(NotImplementedError):
self.browser.type("query", "with type method")
def test_simple_type_on_element(self):
"""
zope.testbrowser won't support type method
because it doesn't interact with JavaScript
"""
with self.assertRaises(NotImplementedError):
self.browser.find_by_name("query").type("with type method")
def test_can_clear_password_field_content(self):
"zope.testbrowser should not be able to clear"
with self.assertRaises(NotImplementedError):
self.browser.find_by_name("password").first.clear()
def test_can_clear_tel_field_content(self):
"zope.testbrowser should not be able to clear"
with self.assertRaises(NotImplementedError):
self.browser.find_by_name("telephone").first.clear()
def test_can_clear_text_field_content(self):
"zope.testbrowser should not be able to clear"
with self.assertRaises(NotImplementedError):
self.browser.find_by_name("query").first.clear()
def test_slowly_typing(self):
"""
zope.testbrowser won't support type method
because it doesn't interact with JavaScript
"""
with self.assertRaises(NotImplementedError):
self.browser.type("query", "with type method", slowly=True)
def test_slowly_typing_on_element(self):
"""
zope.testbrowser won't support type method
        on element because it doesn't interact with JavaScript
"""
with self.assertRaises(NotImplementedError):
query = self.browser.find_by_name("query")
query.type("with type method", slowly=True)
def test_cant_mouseover(self):
"zope.testbrowser should not be able to put the mouse over the element"
with self.assertRaises(NotImplementedError):
self.browser.find_by_css("#visible").mouse_over()
def test_cant_mouseout(self):
"zope.testbrowser should not be able to mouse out of an element"
with self.assertRaises(NotImplementedError):
self.browser.find_by_css("#visible").mouse_out()
def test_links_with_nested_tags_xpath(self):
links = self.browser.find_by_xpath('//a/span[text()="first bar"]/..')
self.assertEqual(
len(links),
1,
            'Expected exactly one link with a span with text "first bar", got: %s'
% (map(lambda item: item.outer_html, links)),
)
def test_finding_all_links_by_non_ascii_text(self):
"should find links by non ascii text"
non_ascii_encodings = {
"pangram_pl": u"Jeżu klątw, spłódź Finom część gry hańb!",
"pangram_ja": u"天 地 星 空",
"pangram_ru": u"В чащах юга жил бы цитрус? Да, но фальшивый экземпляр!",
"pangram_eo": u"Laŭ Ludoviko Zamenhof bongustas freŝa ĉeĥa manĝaĵo kun spicoj.",
}
for key, text in non_ascii_encodings.iteritems():
link = self.browser.find_link_by_text(text)
self.assertEqual(key, link["id"])
| bmcculley/splinter | tests/test_zopetestbrowser.py | Python | bsd-3-clause | 5,355 |
#-*- coding: utf-8 -*-
from decimal import Decimal
from shop.cart.cart_modifiers_base import BaseCartModifier
class TextOptionsOptionsCartModifier(BaseCartModifier):
'''
This modifier adds an extra field to the cart to let the lineitem "know"
about product options and their respective price.
'''
def process_cart_item(self, cart_item, state):
'''
This adds a list of price modifiers depending on the product options
the client selected for the current cart_item (if any)
'''
# process text_options as passed through the variation object
        if 'text_options' in cart_item.variation:
for value in cart_item.variation['text_options'].itervalues():
label = value['name'] + ': ' + value['text']
price = Decimal(value['price']) * len(value['text']) * cart_item.quantity
# Don't forget to update the running total!
cart_item.current_total += price
cart_item.extra_price_fields.append((label, price))
return cart_item
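# Worked example with hypothetical numbers: a text option priced at 0.10 per
# character, the text "HELLO" (5 characters) and a cart quantity of 2 add
# Decimal('0.10') * 5 * 2 == Decimal('1.00') to the line item, with the label
# "<option name>: HELLO".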
| jrief/django-shop-productvariations | shop_textoptions/cart_modifier.py | Python | bsd-3-clause | 1,084 |
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('My Name', '[email protected]'),
)
MANAGERS = ADMINS
import tempfile, os
from django import contrib
tempdata = tempfile.mkdtemp()
approot = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
adminroot = os.path.join(contrib.__path__[0], 'admin')
DATABASES = {
'default': {
'NAME': os.path.join(tempdata, 'signalqueue-test.db'),
'TEST_NAME': os.path.join(tempdata, 'signalqueue-test.db'),
'ENGINE': 'django.db.backends.sqlite3',
'USER': '',
'PASSWORD': '',
}
}
TIME_ZONE = 'America/New_York'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = False
MEDIA_ROOT = os.path.join(approot, 'static')
MEDIA_URL = '/face/'
STATIC_ROOT = os.path.join(adminroot, 'static', 'admin')
STATIC_URL = '/staticfiles/'
ADMIN_MEDIA_PREFIX = '/admin-media/'
ROOT_URLCONF = 'signalqueue.settings.urlconf'
TEMPLATE_DIRS = (
os.path.join(approot, 'templates'),
os.path.join(adminroot, 'templates'),
os.path.join(adminroot, 'templates', 'admin'),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.gzip.GZipMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.request",
"django.core.context_processors.debug",
#"django.core.context_processors.i18n", this is AMERICA
"django.core.context_processors.media",
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.staticfiles',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'django_nose',
'djcelery',
'delegate',
'signalqueue',
)
LOGGING = dict(
version=1,
disable_existing_loggers=False,
formatters={ 'standard': { 'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s' }, },
handlers={
'default': { 'level':'DEBUG', 'class':'logging.StreamHandler', 'formatter':'standard', },
'nil': { 'level':'DEBUG', 'class':'django.utils.log.NullHandler', },
},
loggers={
'signalqueue': { 'handlers': ['default'], 'level': 'INFO', 'propagate': False },
},
root={ 'handlers': ['default'], 'level': 'INFO', 'propagate': False },
)
SQ_QUEUES = {
'default': { # you need at least one dict named 'default' in SQ_QUEUES
'ENGINE': 'signalqueue.worker.backends.RedisSetQueue', # required - full path to a QueueBase subclass
'INTERVAL': 30, # 1/3 sec
'OPTIONS': dict(port=8356),
},
'listqueue': {
'ENGINE': 'signalqueue.worker.backends.RedisQueue',
'INTERVAL': 30, # 1/3 sec
'OPTIONS': dict(port=8356),
},
'db': {
'ENGINE': 'signalqueue.worker.backends.DatabaseQueueProxy',
'INTERVAL': 30, # 1/3 sec
'OPTIONS': dict(app_label='signalqueue',
modl_name='EnqueuedSignal'),
},
'celery': {
'ENGINE': 'signalqueue.worker.celeryqueue.CeleryQueue',
'INTERVAL': 30, # 1/3 sec
'OPTIONS': dict(celery_queue_name='inactive',
transport='redis', port=8356),
},
}
SQ_ADDITIONAL_SIGNALS=['signalqueue.tests']
SQ_WORKER_PORT = 11201
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
try:
from kombu import Queue
except ImportError:
pass
else:
CELERY_DEFAULT_QUEUE = 'default'
CELERY_DEFAULT_ROUTING_KEY = 'default'
CELERY_DEFAULT_EXCHANGE_TYPE = 'direct'
CELERY_QUEUES = (
Queue('default', routing_key='default.#'),
Queue('yodogg', routing_key='yodogg.#'),
)
CELERY_ALWAYS_EAGER = True
BROKER_URL = 'redis://localhost:8356/0'
BROKER_HOST = "localhost"
BROKER_BACKEND = "redis"
REDIS_PORT = 8356
REDIS_HOST = "localhost"
BROKER_USER = ""
BROKER_PASSWORD = ""
BROKER_VHOST = "0"
REDIS_DB = 0
REDIS_CONNECT_RETRY = True
CELERY_SEND_EVENTS = True
CELERY_RESULT_BACKEND = "redis://localhost:8356/0"
CELERY_TASK_RESULT_EXPIRES = 10
CELERYBEAT_SCHEDULER = "djcelery.schedulers.DatabaseScheduler"
try:
import djcelery
except ImportError:
pass
else:
djcelery.setup_loader()
# package path-extension snippet.
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
| fish2000/django-signalqueue | signalqueue/settings/__init__.py | Python | bsd-3-clause | 4,884 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
setup(
name='pfile-tools',
version='0.5.0',
author='Nathan Vack',
author_email='[email protected]',
license='BSD License',
url='https://github.com/njvack/pfile-tools',
packages=['pfile_tools'],
entry_points={
'console_scripts': [
'dump_pfile_header = pfile_tools.scripts.dump_pfile_header:main',
'anonymize_pfile = pfile_tools.scripts.anonymize_pfile:main'
]}
)
# setup(
# name='pfile-tools',
# version=pfile_tools.VERSION,
# packages=setuptools.find_packages(),
# data_files=[('', ['distribute_setup.py'])],
# license='BSD License',
# long_description=read('README'),
# url="https://github.com/njvack/pfile-tools",
# author="Nathan Vack",
# author_email="[email protected]",
# entry_points = {
# 'console_scripts': [
# 'dump_pfile_header = pfile_tools.scripts.dump_pfile_header:main',
# 'anonymize_pfile = pfile_tools.scripts.anonymize_pfile:main'
# ]
# }
# ) | njvack/pfile-tools | setup.py | Python | bsd-3-clause | 1,098 |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
from bokeh.colors import named
from bokeh.palettes import __palettes__
# Module under test
import bokeh.core.enums as bce
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
def test_Enumeration_default():
e = bce.Enumeration()
assert e.__slots__ == ()
class Test_enumeration(object):
def test_basic(self):
e = bce.enumeration("foo", "bar", "baz")
assert isinstance(e, bce.Enumeration)
assert str(e) == "Enumeration(foo, bar, baz)"
assert [x for x in e] == ["foo", "bar", "baz"]
for x in ["foo", "bar", "baz"]:
assert x in e
assert "junk" not in e
def test_case(self):
e = bce.enumeration("foo", "bar", "baz", case_sensitive=False)
assert isinstance(e, bce.Enumeration)
assert str(e) == "Enumeration(foo, bar, baz)"
assert [x for x in e] == ["foo", "bar", "baz"]
for x in ["foo", "FOO", "bar", "bAr", "baz", "BAZ"]:
assert x in e
assert "junk" not in e
def test_default(self):
# this is private but used by properties
e = bce.enumeration("foo", "bar", "baz")
assert e._default == "foo"
def test_len(self):
e = bce.enumeration("foo", "bar", "baz")
assert len(e) == 3
class Test_bce(object):
def test_Anchor(self):
assert tuple(bce.Anchor) == (
"top_left", "top_center", "top_right",
"center_left", "center", "center_right",
"bottom_left", "bottom_center", "bottom_right"
)
def test_AngleUnits(self):
assert tuple(bce.AngleUnits) == ('deg', 'rad')
def test_ButtonType(self):
assert tuple(bce.ButtonType) == ("default", "primary", "success", "warning", "danger")
def test_DashPattern(self):
        assert tuple(bce.DashPattern) == ("solid", "dashed", "dotted", "dotdash", "dashdot")
def test_DateFormat(self):
assert tuple(bce.DateFormat) == ("ATOM", "W3C", "RFC-3339", "ISO-8601", "COOKIE", "RFC-822",
"RFC-850", "RFC-1036", "RFC-1123", "RFC-2822", "RSS", "TIMESTAMP")
def test_DatetimeUnits(self):
assert tuple(bce.DatetimeUnits) == ("microseconds", "milliseconds", "seconds", "minsec",
"minutes", "hourmin", "hours", "days", "months", "years")
def test_Dimension(self):
assert tuple(bce.Dimension) == ("width", "height")
def test_Dimensions(self):
assert tuple(bce.Dimensions) == ("width", "height", "both")
def test_Direction(self):
assert tuple(bce.Direction) == ("clock", "anticlock")
def test_FontStyle(self):
assert tuple(bce.FontStyle) == ('normal', 'italic', 'bold', 'bold italic')
def test_HoldPolicy(self):
assert tuple(bce.HoldPolicy) == ("combine", "collect")
def test_HorizontalLocation(self):
assert tuple(bce.HorizontalLocation) == ("left", "right")
def test_JitterRandomDistribution(self):
assert tuple(bce.JitterRandomDistribution) == ("uniform", "normal")
def test_LatLon(self):
assert tuple(bce.LatLon) == ("lat", "lon")
def test_LegendClickPolicy(self):
assert tuple(bce.LegendClickPolicy) == ("none", "hide", "mute")
def test_LegendLocation(self):
assert tuple(bce.LegendLocation) == (
"top_left", "top_center", "top_right",
"center_left", "center", "center_right",
"bottom_left", "bottom_center", "bottom_right"
)
def test_LineCap(self):
assert tuple(bce.LineCap) == ("butt", "round", "square")
def test_LineDash(self):
assert tuple(bce.LineDash) == ("solid", "dashed", "dotted", "dotdash", "dashdot")
def test_LineJoin(self):
assert tuple(bce.LineJoin) == ("miter", "round", "bevel")
def test_Location(self):
assert tuple(bce.Location) == ("above", "below", "left", "right")
def test_MapType(self):
assert tuple(bce.MapType) == ("satellite", "roadmap", "terrain", "hybrid")
def test_MarkerType(self):
assert tuple(bce.MarkerType) == ("asterisk", "circle", "circle_cross", "circle_x", "cross",
"dash", "diamond", "diamond_cross", "hex", "inverted_triangle",
"square", "square_cross", "square_x", "triangle", "x")
def test_NamedColor(self):
assert len(tuple(bce.NamedColor)) == 147
assert tuple(bce.NamedColor) == tuple(named.__all__)
def test_NumeralLanguage(self):
assert tuple(bce.NumeralLanguage) == ("be-nl", "chs", "cs", "da-dk", "de-ch", "de", "en",
"en-gb", "es-ES", "es", "et", "fi", "fr-CA", "fr-ch",
"fr", "hu", "it", "ja", "nl-nl", "pl", "pt-br",
"pt-pt", "ru", "ru-UA", "sk", "th", "tr", "uk-UA")
def test_Orientation(self):
assert tuple(bce.Orientation) == ("horizontal", "vertical")
def test_OutputBackend(self):
assert tuple(bce.OutputBackend) == ("canvas", "svg", "webgl")
def test_PaddingUnits(self):
assert tuple(bce.PaddingUnits) == ("percent", "absolute")
def test_Palette(self):
assert tuple(bce.Palette) == tuple(__palettes__)
def test_RenderLevel(self):
assert tuple(bce.RenderLevel) == ("image", "underlay", "glyph", "annotation", "overlay")
def test_RenderMode(self):
assert tuple(bce.RenderMode) == ("canvas", "css")
def test_RoundingFunction(self):
assert tuple(bce.RoundingFunction) == ("round", "nearest", "floor", "rounddown", "ceil", "roundup")
def test_SizingMode(self):
assert tuple(bce.SizingMode) == ("stretch_width", "stretch_height", "stretch_both", "scale_width", "scale_height", "scale_both", "fixed")
def test_SliderCallbackPolicy(self):
assert tuple(bce.SliderCallbackPolicy) == ("continuous", "throttle", "mouseup")
def test_SortDirection(self):
assert tuple(bce.SortDirection) == ("ascending", "descending")
def test_SpatialUnits(self):
assert tuple(bce.SpatialUnits) == ("screen", "data")
def test_StartEnd(self):
assert tuple(bce.StartEnd) == ("start", "end")
def test_StepMode(self):
assert tuple(bce.StepMode) == ("before", "after", "center")
def test_TextAlign(self):
assert tuple(bce.TextAlign) == ("left", "right", "center")
def test_TextBaseline(self):
assert tuple(bce.TextBaseline) == ("top", "middle", "bottom", "alphabetic", "hanging", "ideographic")
def test_TickLabelOrientation(self):
assert tuple(bce.TickLabelOrientation) == ("horizontal", "vertical", "parallel", "normal")
def test_TooltipAttachment(self):
assert tuple(bce.TooltipAttachment) == ("horizontal", "vertical", "left", "right", "above", "below")
def test_TooltipFieldFormatter(self):
assert tuple(bce.TooltipFieldFormatter) == ("numeral", "datetime", "printf")
def test_VerticalAlign(self):
assert tuple(bce.VerticalAlign) == ("top", "middle", "bottom")
def test_VerticalLocation(self):
assert tuple(bce.VerticalLocation) == ("above", "below")
# any changes to contents of bce.py easily trackable here
def test_enums_contents():
assert [x for x in dir(bce) if x[0].isupper()] == [
'Align',
'Anchor',
'AngleUnits',
'ButtonType',
'DashPattern',
'DateFormat',
'DatetimeUnits',
'Dimension',
'Dimensions',
'Direction',
'Enumeration',
'FontStyle',
'HoldPolicy',
'HorizontalLocation',
'JitterRandomDistribution',
'LatLon',
'LegendClickPolicy',
'LegendLocation',
'LineCap',
'LineDash',
'LineJoin',
'Location',
'MapType',
'MarkerType',
'NamedColor',
'NumeralLanguage',
'Orientation',
'OutputBackend',
'PaddingUnits',
'Palette',
'RenderLevel',
'RenderMode',
'RoundingFunction',
'SizingMode',
'SizingPolicy',
'SliderCallbackPolicy',
'SortDirection',
'SpatialUnits',
'StartEnd',
'StepMode',
'TextAlign',
'TextBaseline',
'TickLabelOrientation',
'TooltipAttachment',
'TooltipFieldFormatter',
'TrackPolicy',
'VerticalAlign',
'VerticalLocation',
]
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| stonebig/bokeh | bokeh/core/tests/test_enums.py | Python | bsd-3-clause | 10,191 |
"""
Methods for exporting mediawiki pages & images to a dokuwiki data/ directory.
Tested with Dokuwiki 2014-05-05 "Ponder Stibbons".
Copyright (C) 2014 Angus Gratton
Licensed under New BSD License as described in the file LICENSE.
"""
from __future__ import print_function, unicode_literals, absolute_import, division
import os, os.path, gzip, shutil, re, requests, calendar, codecs, sys
from requests.auth import HTTPBasicAuth
import wikicontent
import simplemediawiki
import names
class Exporter(object):
def __init__(self, rootpath):
# verify the dokuwiki rootpath exists
self.root = rootpath
if not os.path.isdir(rootpath):
raise RuntimeError("Dokuwiki root path '%s' does not point to a directory" % rootpath)
# check a 'data' directory exists, establish pathes for each subdirectory
self.data = os.path.join(rootpath, "data")
if not os.path.isdir(self.data):
raise RuntimeError("Dokuwiki root path '%s' does not contain a data directory" % rootpath)
# create meta, attic, pages subdirs if they don't exist (OK to have deleted them before the import)
self.meta = os.path.join(self.data, "meta")
self.attic = os.path.join(self.data, "attic")
self.pages = os.path.join(self.data, "pages")
for subdir in [ self.meta, self.attic, self.pages]:
ensure_directory_exists(subdir)
def write_pages(self, pages):
"""
Given 'pages' as a list of mediawiki pages with revisions attached, export them to dokuwiki pages
"""
for page in pages:
self._convert_page(page)
self._aggregate_changes(self.meta, "_dokuwiki.changes")
def write_images(self, images, file_namespace, http_user=None, http_pass=None):
"""
Given 'images' as a list of mediawiki image metadata API entries,
download and write out dokuwiki images. Does not bring over revisions.
Images are all written to the file_namespace specified (file: by default), to match mediawiki.
"""
auth=None if http_user is None else HTTPBasicAuth(http_user, http_pass)
file_namespace = file_namespace.lower()
filedir = os.path.join(self.data, "media", file_namespace)
ensure_directory_exists(filedir)
filemeta = os.path.join(self.data, "media_meta", file_namespace)
ensure_directory_exists(filemeta)
for image in images:
# download the image from the Mediawiki server
print("Downloading %s..." % image['name'])
r = requests.get(image['url'], auth=auth)
# write the actual image out to the data/file directory
name = make_dokuwiki_pagename(image['name'])
imagepath = os.path.join(filedir, name)
with open(imagepath, "wb") as f:
f.write(r.content)
# set modification time appropriately
timestamp = get_timestamp(image)
os.utime(imagepath, (timestamp,timestamp))
# write a .changes file out to the media_meta/file directory
changepath = os.path.join(filemeta, "%s.changes" % name)
with codecs.open(changepath, "w", "utf-8") as f:
fields = (str(timestamp), "::1", "C", u"%s:%s"%(file_namespace,name), "", "created")
f.write(u"\t".join(fields) + "\r\n")
# aggregate all the new changes to the media_meta/_media.changes file
self._aggregate_changes(os.path.join(self.data, "media_meta"), "_media.changes")
def _convert_page(self, page):
""" Convert the supplied mediawiki page to a Dokuwiki page """
print("Converting %d revisions of page '%s'..." %
(len(page["revisions"]), page['title']))
# Sanitise the mediawiki pagename to something matching the dokuwiki pagename convention
full_title = make_dokuwiki_pagename(page['title'])
# Mediawiki pagenames can contain namespace :s, convert these to dokuwiki / paths on the filesystem (becoming : namespaces in dokuwiki)
subdir, pagename = os.path.split(full_title.replace(':','/'))
pagedir = os.path.join(self.pages, subdir)
metadir = os.path.join(self.meta, subdir)
atticdir = os.path.join(self.attic, subdir)
for d in pagedir, metadir, atticdir:
ensure_directory_exists(d)
# Walk through the list of revisions
revisions = list(reversed(page["revisions"])) # order as oldest first
for revision in revisions:
is_current = (revision == revisions[-1])
is_first = (revision == revisions[0])
content = wikicontent.convert_pagecontent(full_title, revision["*"])
timestamp = get_timestamp(revision)
comment = revision.get("comment", "").replace("\t", " ").split("\n")[0]
# path to the .changes metafile
changespath = os.path.join(metadir, "%s.changes"%pagename)
# for current revision, create 'pages' .txt
if is_current:
txtpath = os.path.join(pagedir, "%s.txt"%pagename)
with codecs.open(txtpath, "w", "utf-8") as f:
f.write(content)
os.utime(txtpath, (timestamp,timestamp))
# create gzipped attic revision
atticname = "%s.%s.txt.gz" % (pagename, timestamp)
atticpath = os.path.join(atticdir, atticname).encode("utf-8")
with gzip.open(atticpath, "wb") as f:
f.write(content.encode("utf-8"))
os.utime(atticpath, (timestamp,timestamp))
# append entry to page's 'changes' metadata index
with codecs.open(changespath, "w" if is_first else "a", "utf-8") as f:
changes_title = full_title.replace("/", ":")
fields = (str(timestamp), "::1", "C" if is_first else "E", changes_title, names.clean_user(revision["user"]), comment)
print(u"\t".join(fields), file=f)
def _aggregate_changes(self, metadir, aggregate):
"""
        Rebuild the wiki-wide changelog from meta/ to meta/_dokuwiki.changes or
from media_meta to media_meta/_media.changes
This is a Pythonified version of https://www.dokuwiki.org/tips:Recreate_Wiki_Change_Log
"""
lines = []
for root, dirs, files in os.walk(metadir):
for changesfile in files:
if changesfile == aggregate or not changesfile.endswith(".changes"):
continue
with codecs.open(os.path.join(root,changesfile), "r", "utf-8") as f:
lines += f.readlines()
lines = sorted(lines, key=lambda r: int(r.split("\t")[0]))
with codecs.open(os.path.join(metadir, aggregate), "w", "utf-8") as f:
f.writelines(lines)
def fixup_permissions(self):
""" Fix permissions under the data directory
This means applying the data directory's permissions and ownership to all underlying parts.
If this fails due to insufficient privileges then it just prints a warning and continues on.
"""
stat = os.stat(self.data)
try:
for root, dirs, files in os.walk(self.data):
for name in files:
path = os.path.join(root, name)
os.chmod(path, stat.st_mode & 0o666)
os.chown(path, stat.st_uid, stat.st_gid)
for name in dirs:
path = os.path.join(root, name)
os.chmod(path, stat.st_mode)
os.chown(path, stat.st_uid, stat.st_gid)
except OSError:
print("WARNING: Failed to set permissions under the data directory (not owned by process?) May need to be manually fixed.")
def invalidate_cache(self):
""" Invalidate cached pages by updating modification date of a config file
If this fails due to insufficient privileges then it just prints a warning and continues on.
"""
confpath = os.path.join(self.root, "conf", "local.php")
try:
            os.utime(confpath, None)
except OSError:
print(CACHE_WARNING_MSG % confpath)
CACHE_WARNING_MSG = """WARNING: Failed to invalidate page cache by updating config file timestamp.
If pre-existing pages exist in Dokuwiki, run the following command (with sufficient privileges):
touch "%s"
"""
def get_timestamp(node):
"""
Return a dokuwiki-Compatible Unix int timestamp for a mediawiki API page/image/revision
"""
dt = simplemediawiki.MediaWiki.parse_date(node['timestamp'])
return int(calendar.timegm(dt.utctimetuple()))
def ensure_directory_exists(path):
if not os.path.isdir(path):
os.makedirs(path)
def make_dokuwiki_pagename(mediawiki_name):
"""
Convert a canonical mediawiki pagename to a dokuwiki pagename
Any namespacing that is in the form of a / is replaced with a :
"""
result = mediawiki_name.replace(" ","_")
result = names.clean_id(camel_to_underscore(result)).replace("/",":")
result = codecs.encode(result, sys.getfilesystemencoding(), "replace")
return result
def make_dokuwiki_heading_id(mw_heading_name):
"""
Convert a Mediawiki internal anchor heading link to the Dokuwiki anchor heading link id
Equivalent function in dokuwiki is _headerToLink in inc/parser/xhtml.php
which calls sectionID in inc/pageutils.php
"""
result = names.clean_id(mw_heading_name, True)
result = re.sub(r'[:.]', '', result)
nums_stripped = result.lstrip("0123456789_-")
if len(nums_stripped):
return nums_stripped
else:
return "section"+re.sub(r"[^0-9]+", "", result)
def camel_to_underscore(camelcase):
"""
Convert a camelcased string to underscore_delimited (tweaked from this StackOverflow answer)
http://stackoverflow.com/questions/1175208/elegant-python-function-to-convert-camelcase-to-camel-case
"""
s1 = re.sub('(^/_)([A-Z][a-z]+)', r'\1_\2', camelcase)
s2 = re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
return s2
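# Illustrative conversions (assuming names.clean_id leaves an already-clean id
# unchanged): camel_to_underscore("MyWikiPage") gives "my_wiki_page", so
# make_dokuwiki_pagename("Main Page/SubPage") would yield "main_page:sub_page",
# the mediawiki "/" sub-page separator becoming a dokuwiki ":" namespace.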
| lordofbikes/yamdwe | dokuwiki.py | Python | bsd-3-clause | 10,121 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import logging
log = logging.getLogger(__name__)
from .trajectories import Trajectories
try: # pragma: no cover
from . import draw
__all__ = ['Trajectories', 'draw']
except ImportError: # pragma: no cover
    log.warning('''Matplotlib can't be imported, '''
'''drawing module won't be available ''')
__all__ = ['Trajectories']
| bnoi/scikit-tracker | sktracker/trajectories/__init__.py | Python | bsd-3-clause | 533 |
from setuptools import setup
import os
execfile(os.path.join('sheetsync','version.py'))
with open('README.rst') as fh:
long_description = fh.read()
with open('requirements.txt') as fh:
requirements = [line.strip() for line in fh.readlines()]
setup(
name='sheetsync',
version=__version__,
description="Synchronize rows of data with a google spreadsheet",
long_description=long_description,
author='Mark Brenig-Jones',
author_email='[email protected]',
url='https://github.com/mbrenig/SheetSync/',
packages=['sheetsync'],
platforms='any',
install_requires=requirements,
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2.7",
],
)
| mbrenig/SheetSync | setup.py | Python | mit | 800 |
from django.db.models import manager
from .query import QuerySet
__all__ = 'Manager',
class Manager(manager.Manager.from_queryset(QuerySet)):
use_for_related_fields = True
use_in_migrations = True
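# Usage note (illustrative; "Article" is hypothetical): assigning this manager
# to a model exposes the custom QuerySet's extra methods on the default
# manager, because Manager.from_queryset() copies them onto the manager class:
#
#     class Article(models.Model):
#         objects = Manager()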
| jdzero/foundation | foundation/models/manager.py | Python | mit | 209 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
from django.conf import settings
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('auth', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ImagrUser',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', unique=True, max_length=30, verbose_name='username', validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username.', 'invalid')])),
('first_name', models.CharField(max_length=30, verbose_name='first name', blank=True)),
('last_name', models.CharField(max_length=30, verbose_name='last name', blank=True)),
('email', models.EmailField(max_length=75, verbose_name='email address', blank=True)),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('our_date_joined_field', models.DateField(auto_now_add=True)),
('our_is_active_field', models.BooleanField(default=False)),
('following', models.ManyToManyField(related_name='followers', to=settings.AUTH_USER_MODEL)),
('groups', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of his/her group.', verbose_name='groups')),
('user_permissions', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Permission', blank=True, help_text='Specific permissions for this user.', verbose_name='user permissions')),
],
options={
'abstract': False,
'verbose_name': 'user',
'verbose_name_plural': 'users',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Album',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=20)),
('description', models.CharField(max_length=140)),
('date_uploaded', models.DateField(auto_now_add=True)),
('date_modified', models.DateField(auto_now=True)),
('date_published', models.DateField()),
('published', models.CharField(default=b'private', max_length=7, choices=[(b'private', b'Private Photo'), (b'shared', b'Shared Photo'), (b'public', b'Public Photo')])),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Photo',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=20)),
('description', models.CharField(max_length=140)),
('date_uploaded', models.DateField(auto_now_add=True)),
('date_modified', models.DateField(auto_now=True)),
('date_published', models.DateField()),
('published', models.CharField(default=b'private', max_length=7, choices=[(b'private', b'Private Photo'), (b'shared', b'Shared Photo'), (b'public', b'Public Photo')])),
('image_url', models.CharField(default=b'Photo Not Found', max_length=1024)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='album',
name='cover',
field=models.ForeignKey(related_name='Album_cover', to='imagr_app.Photo'),
preserve_default=True,
),
migrations.AddField(
model_name='album',
name='photos',
field=models.ManyToManyField(related_name='Album_photos', to='imagr_app.Photo'),
preserve_default=True,
),
migrations.AddField(
model_name='album',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
]
| CharlesGust/django-imagr | imagr_site/imagr_app/migrations/0001_initial.py | Python | mit | 5,413 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Building()
result.template = "object/building/poi/shared_endor_ewok_medium4.iff"
result.attribute_template_id = -1
result.stfName("poi_n","base_poi_building")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | obi-two/Rebelion | data/scripts/templates/object/building/poi/shared_endor_ewok_medium4.py | Python | mit | 449 |
# Copyright (c) 2014 Museum Victoria
# This software is released under the MIT license (see license.txt for details)
from Queue import *
import threading
import atexit
remote_action_PowerOn = RemoteAction()
remote_action_PowerOff = RemoteAction()
remote_action_SetInput = RemoteAction()
def local_action_activate(x = None):
'''{ "title": "Turn on", "desc": "Turn on." }'''
queue.put({'function': 'remote_action_PowerOn', 'delay': 120})
queue.put({'function': 'remote_action_SetInput', 'arg':{"source":"DIGITAL", "number":1}, 'delay': 5})
print 'Activated'
def local_action_deactivate(x = None):
'''{ "title": "Turn off", "desc": "Turn off." }'''
queue.put({'function': 'remote_action_PowerOff', 'delay': 120})
print 'Deactivated'
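# Each queued job is a dict: 'function' names a global RemoteAction to invoke,
# the optional 'arg' payload is handed to its call(), and 'delay' is how many
# seconds the worker waits after issuing the call, e.g.
#   queue.put({'function': 'remote_action_SetInput', 'arg': {"source": "DIGITAL", "number": 1}, 'delay': 5})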
class TimerClass(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.event = threading.Event()
def run(self):
while not self.event.isSet():
if queue.empty() != True:
job = queue.get()
try:
print "Calling command " + job['function']
func = globals()[job['function']]
                    arg = job['arg'] if 'arg' in job else ''
func.call(arg)
self.event.wait(job['delay'])
queue.task_done()
except Exception, e:
print e
print "Failed to call command " + job['function']
else:
self.event.wait(1)
def stop(self):
self.event.set()
queue = Queue()
th = TimerClass()
@atexit.register
def cleanup():
print 'shutdown'
th.stop()
def main():
th.start()
print 'Nodel script started.' | museumsvictoria/nodel-recipes | (retired)/pjlinkqueue/script.py | Python | mit | 1,588 |
'''The Example from Huang and Darwiche's Procedural Guide'''
from __future__ import division
from bayesian.factor_graph import *
from bayesian.utils import make_key
def f_a(a):
return 1 / 2
def f_b(a, b):
tt = dict(
tt=0.5,
ft=0.4,
tf=0.5,
ff=0.6)
return tt[make_key(a, b)]
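# The lookup keys come from make_key(a, b): one character per argument ('t' or
# 'f'), so 'ft' above reads as P(b=True | a=False) = 0.4.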
def f_c(a, c):
tt = dict(
tt=0.7,
ft=0.2,
tf=0.3,
ff=0.8)
return tt[make_key(a, c)]
def f_d(b, d):
tt = dict(
tt=0.9,
ft=0.5,
tf=0.1,
ff=0.5)
return tt[make_key(b, d)]
def f_e(c, e):
tt = dict(
tt=0.3,
ft=0.6,
tf=0.7,
ff=0.4)
return tt[make_key(c, e)]
def f_f(d, e, f):
tt = dict(
ttt=0.01,
ttf=0.99,
tft=0.01,
tff=0.99,
ftt=0.01,
ftf=0.99,
fft=0.99,
fff=0.01)
return tt[make_key(d, e, f)]
def f_g(c, g):
tt = dict(
tt=0.8, tf=0.2,
ft=0.1, ff=0.9)
return tt[make_key(c, g)]
def f_h(e, g, h):
tt = dict(
ttt=0.05, ttf=0.95,
tft=0.95, tff=0.05,
ftt=0.95, ftf=0.05,
fft=0.95, fff=0.05)
return tt[make_key(e, g, h)]
if __name__ == '__main__':
g = build_graph(
f_a, f_b, f_c, f_d,
f_e, f_f, f_g, f_h)
g.n_samples = 1000
g.q()
| kamijawa/ogc_server | bayesian/examples/factor_graphs/huang_darwiche.py | Python | mit | 1,341 |
def plotLearningCurve(Xtrn, Ytrn, model, param_name, param_range):
    '''
    Plot the bias/variance tradeoff for a given model. This curve is
    the training and cross-validation score of the model as a function
    of model complexity (the swept parameter).
    Wrapper for validation_curve in sklearn.
    ---
    I: training data (Xtrn, Ytrn), an sklearn estimator, and the name and
       range of the complexity parameter to sweep.
    O: Plot of the bias/var tradeoff.
    '''
    import numpy as np
    import matplotlib.pyplot as plt
    from sklearn.learning_curve import validation_curve
    # Capture the per-fold scores so the curves can actually be drawn.
    train_scores, test_scores = validation_curve(
        model, Xtrn, Ytrn, param_name, param_range, cv=5,
        n_jobs=-1, pre_dispatch='all', verbose=1)
    # Average across CV folds and plot both curves against the parameter range.
    plt.plot(param_range, np.mean(train_scores, axis=1), label='training score')
    plt.plot(param_range, np.mean(test_scores, axis=1), label='cross-validation score')
    plt.xlabel(param_name)
    plt.ylabel('score')
    plt.legend(loc='best')
    plt.show()
return
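# Example usage (illustrative; the estimator and parameter grid are hypothetical):
#   from sklearn.svm import SVC
#   plotLearningCurve(Xtrn, Ytrn, SVC(), 'C', [0.01, 0.1, 1.0, 10.0, 100.0])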
| mattdelhey/kaggle-galaxy | plotBiasVarTradeoff.py | Python | mit | 564 |
# This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from sqlalchemy.ext.declarative import declared_attr
from indico.core.db import db
from indico.core.db.sqlalchemy import UTCDateTime
from indico.core.db.sqlalchemy.descriptions import RenderMode, RenderModeMixin
from indico.util.date_time import now_utc
class ReviewCommentMixin(RenderModeMixin):
possible_render_modes = {RenderMode.markdown}
default_render_mode = RenderMode.markdown
user_backref_name = None
user_modified_backref_name = None
TIMELINE_TYPE = 'comment'
@declared_attr
def id(cls):
return db.Column(
db.Integer,
primary_key=True
)
@declared_attr
def user_id(cls):
return db.Column(
db.Integer,
db.ForeignKey('users.users.id'),
index=True,
nullable=False
)
@declared_attr
def _text(cls):
return db.Column(
'text',
db.Text,
nullable=False
)
@declared_attr
def modified_by_id(cls):
return db.Column(
db.Integer,
db.ForeignKey('users.users.id'),
index=True,
nullable=True
)
@declared_attr
def created_dt(cls):
return db.Column(
UTCDateTime,
nullable=False,
default=now_utc
)
@declared_attr
def modified_dt(cls):
return db.Column(
UTCDateTime,
nullable=True
)
@declared_attr
def is_deleted(cls):
return db.Column(
db.Boolean,
nullable=False,
default=False
)
@declared_attr
def user(cls):
return db.relationship(
'User',
lazy=True,
foreign_keys=cls.user_id,
backref=db.backref(
cls.user_backref_name,
primaryjoin='({0}.user_id == User.id) & ~{0}.is_deleted'.format(cls.__name__),
lazy='dynamic'
)
)
@declared_attr
def modified_by(cls):
return db.relationship(
'User',
lazy=True,
foreign_keys=cls.modified_by_id,
backref=db.backref(
cls.user_modified_backref_name,
primaryjoin='({0}.modified_by_id == User.id) & ~{0}.is_deleted'.format(cls.__name__),
lazy='dynamic'
)
)
text = RenderModeMixin.create_hybrid_property('_text')
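# Illustrative sketch (not taken from Indico itself): a concrete comment model
# mixes this in, names the two User backrefs, and adds its own columns, e.g.
#
#     class PaperReviewComment(ReviewCommentMixin, db.Model):
#         __tablename__ = 'review_comments'
#         user_backref_name = 'review_comments'
#         user_modified_backref_name = 'modified_review_comments'
#         # ...plus a foreign key to the reviewed object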
| mvidalgarcia/indico | indico/core/db/sqlalchemy/review_comments.py | Python | mit | 2,710 |
"""blah."""
from pyiem.util import get_dbconn
pgconn = get_dbconn("idep")
cursor = pgconn.cursor()
cursor.execute(
"""
SELECT r.hs_id, r.huc_12, p.fpath, extract(year from valid) as yr,
sum(runoff) as sum_runoff,
sum(loss) as sum_loss, sum(delivery) as sum_delivery from
results r JOIN flowpaths p on (r.hs_id = p.fid)
WHERE r.scenario = 5
GROUP by r.hs_id, r.huc_12, fpath, yr
"""
)
print("CATCHMENT,HUC12,FPATH,YEAR,RUNOFF,LOSS,DELIVERY")
for row in cursor:
fpath = row[0]
if fpath < 100:
catchment = 0
else:
catchment = int(str(fpath)[:-2])
print(str(catchment) + ",%s,%s,%s,%.4f,%.4f,%.4f" % row[1:])
| akrherz/idep | scripts/convergence/dump_results.py | Python | mit | 656 |
import functools
import sys
import traceback
from stacked import Stacked
from .xtraceback import XTraceback
class TracebackCompat(Stacked):
"""
A context manager that patches the stdlib traceback module
Functions in the traceback module that exist as a method of this class are
replaced with equivalents that use XTraceback.
:cvar NOPRINT: Exception types that we don't print for (includes None)
:type NOPRINT: tuple
:ivar defaults: Default options to apply to XTracebacks created by this
instance
:type defaults: dict
"""
NOPRINT = (None, KeyboardInterrupt)
def __init__(self, **defaults):
super(TracebackCompat, self).__init__()
self.defaults = defaults
# register patches for methods that wrap traceback functions
for key in dir(traceback):
if hasattr(self, key):
self._register_patch(traceback, key, getattr(self, key))
#def __exit__(self, etype, evalue, tb):
#if etype not in self.NOPRINT:
#self.print_exception(etype, evalue, tb)
#super(TracebackCompat, self).__exit__(etype, evalue, tb)
def _factory(self, etype, value, tb, limit=None, **options):
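        # Fall back to sys.tracebacklimit when no explicit limit is given,
        # then layer the per-call options over this instance's defaults.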
options["limit"] = \
getattr(sys, "tracebacklimit", None) if limit is None else limit
_options = self.defaults.copy()
_options.update(options)
return XTraceback(etype, value, tb, **_options)
def _print_factory(self, etype, value, tb, limit=None, file=None,
**options):
# late binding here may cause problems where there is no sys i.e. on
# google app engine but it is required for cases where sys.stderr is
# rebound i.e. under nose
if file is None and hasattr(sys, "stderr"):
file = sys.stderr
options["stream"] = file
return self._factory(etype, value, tb, limit, **options)
@functools.wraps(traceback.format_tb)
def format_tb(self, tb, limit=None, **options):
xtb = self._factory(None, None, tb, limit, **options)
return xtb.format_tb()
@functools.wraps(traceback.format_exception_only)
def format_exception_only(self, etype, value, **options):
xtb = self._factory(etype, value, None, **options)
return xtb.format_exception_only()
@functools.wraps(traceback.format_exception)
def format_exception(self, etype, value, tb, limit=None, **options):
xtb = self._factory(etype, value, tb, limit, **options)
return xtb.format_exception()
@functools.wraps(traceback.format_exc)
def format_exc(self, limit=None, **options):
options["limit"] = limit
return "".join(self.format_exception(*sys.exc_info(), **options))
@functools.wraps(traceback.print_tb)
def print_tb(self, tb, limit=None, file=None, **options):
xtb = self._print_factory(None, None, tb, limit, file, **options)
xtb.print_tb()
@functools.wraps(traceback.print_exception)
def print_exception(self, etype, value, tb, limit=None, file=None,
**options):
xtb = self._print_factory(etype, value, tb, limit, file, **options)
xtb.print_exception()
@functools.wraps(traceback.print_exc)
def print_exc(self, limit=None, file=None, **options):
options["limit"] = limit
options["file"] = file
self.print_exception(*sys.exc_info(), **options)
| g2p/xtraceback | xtraceback/tracebackcompat.py | Python | mit | 3,453 |
import urllib2, json, time, sys
from datetime import date, datetime
from dateutil.rrule import rrule, DAILY
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-f", dest="fahrenheit", action="store", default=False, type="string", help="Convert to FAHRENHEIT")
parser.add_option("-e", dest="end", action="store", default=False, type="string", help="START date")
parser.add_option("-s", dest="start", action="store", default=False, type="string", help="END date")
parser.add_option("-t", dest="token", action="store", default=False, type="string", help="Weather Underground TOKEN")
(options, args) = parser.parse_args()
if options.token:
token = options.token
else:
parser.print_help()
sys.exit()
if options.start:
start = options.start
else:
parser.print_help()
sys.exit()
if options.end:
end = options.end
else:
parser.print_help()
sys.exit()
if options.fahrenheit:
fahrenheit = True
else:
fahrenheit = False
start = datetime.strptime(start,'%Y-%m-%d')
end = datetime.strptime(end,'%Y-%m-%d')
url = ""
if end < start:
print "Error: end date " + str(end) + " occurs before start date " + str(start)
sys.exit()
for dt in rrule(DAILY, dtstart=start, until=end):
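    # Fetch the day's observation history from Weather Underground and average
    # the temperature over all observations (tempi is the imperial/Fahrenheit
    # reading, tempm the metric/Celsius one).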
total = 0.0
temp = 0.0
count = 0
wunderground_url ="http://api.wunderground.com/api/" + token + "/history_" + dt.strftime("%Y%m%d") +"/q/NY/New_York_City.json"
try:
url = urllib2.urlopen(wunderground_url)
parsed_json = json.loads(url.read())
except:
print "Error reading URL " + wunderground_url
print "Is your token correct?"
url.close()
sys.exit()
try:
for mean in parsed_json['history']['observations']:
if fahrenheit:
total += float(mean['tempi'])
else:
total += float(mean['tempm'])
count += 1
temp = (total / count)
print dt.strftime("%Y-%m-%d") + "," + str(temp)
except:
print "Error retrieving temperature records for start date " + str(start) + " end date " + str(end)
url.close()
time.sleep(10)
| heatseeknyc/data-science | src/wunderground.py | Python | mit | 2,014 |
# Copyright (c) 2014 Katsuya Noguchi
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import unittest
import slack.http_client
from slack.exception import SlackError, \
InvalidAuthError, \
NotAuthedError, \
AccountInactiveError, \
ChannelNotFoundError, \
ChannelArchivedError, \
NotInChannelError, \
RateLimitedError
class TestRaiseErrorClient(unittest.TestCase):
def test_ok_response(self):
# does not raise error if response is ok
slack.http_client._raise_error_if_not_ok({ 'ok': True })
def test_invalid_auth(self):
self.assertRaises(InvalidAuthError,
slack.http_client._raise_error_if_not_ok,
{ 'ok': False, 'error': 'invalid_auth' })
def test_not_authed(self):
self.assertRaises(NotAuthedError,
slack.http_client._raise_error_if_not_ok,
{ 'ok': False, 'error': 'not_authed' })
def test_account_inactive(self):
self.assertRaises(AccountInactiveError,
slack.http_client._raise_error_if_not_ok,
{ 'ok': False, 'error': 'account_inactive' })
def test_channel_not_found(self):
self.assertRaises(ChannelNotFoundError,
slack.http_client._raise_error_if_not_ok,
{ 'ok': False, 'error': 'channel_not_found' })
def test_is_archived(self):
self.assertRaises(ChannelArchivedError,
slack.http_client._raise_error_if_not_ok,
{ 'ok': False, 'error': 'is_archived' })
def test_not_in_channel(self):
self.assertRaises(NotInChannelError,
slack.http_client._raise_error_if_not_ok,
{ 'ok': False, 'error': 'not_in_channel' })
def test_rate_limited(self):
self.assertRaises(RateLimitedError,
slack.http_client._raise_error_if_not_ok,
{ 'ok': False, 'error': 'rate_limited' })
def test_slack_error(self):
self.assertRaises(SlackError,
slack.http_client._raise_error_if_not_ok,
{ 'ok': False, 'error': 'unknown_error' })
| DavidHHShao/slack | tests/unit/http_client/test_raise_error.py | Python | mit | 3,437 |
#ImportModules
import ShareYourSystem as SYS
#define and get two children
MyParenter=SYS.ParenterClass(
).array(
[
['-Layers'],
['|First','|Second'],
['-Neurons'],
['|E','|I']
]
).command(
'+-.values+|.values',
'#call:parent',
_AfterWalkRigidBool=True
).command(
'+-.values+|.values',
{
'#bound:recruit':lambda _InstanceVariable:_InstanceVariable[
'/Top/NeuronsDict'
].__setitem__(
_InstanceVariable.ManagementTagStr,
_InstanceVariable
)
if _InstanceVariable['/^/ParentKeyStr']=="Neurons"
else None,
'/Top/LayersDict.__setitem__':{
'#value:#map@get':["/~/ManagementTagStr",">>self"],
'#if':[
('/~/^/ParentKeyStr',SYS.operator.eq,"#direct:Layers")
]
}
},
_AfterWalkRigidBool=True
)
#print
print('MyParenter.NeuronsDict.keys() is ')
SYS._print(MyParenter.NeuronsDict.keys())
#print
print('MyParenter.LayersDict.keys() is ')
SYS._print(MyParenter.LayersDict.keys())
| Ledoux/ShareYourSystem | Pythonlogy/build/lib/ShareYourSystem/Standards/Itemizers/Parenter/07_ExampleDoc.py | Python | mit | 959 |
import numpy
import chainer
from chainer import backend
from chainer import configuration
import chainer.functions as F
from chainer import link_hook
import chainer.links as L
from chainer import variable
import chainerx
from chainerx import _fallback_workarounds as fallback
def l2normalize(xp, v, eps):
"""Normalize a vector by its L2 norm.
Args:
xp (numpy or cupy):
v (numpy.ndarray or cupy.ndarray)
eps (float): Epsilon value for numerical stability.
Returns:
:class:`numpy.ndarray` or :class:`cupy.ndarray`
"""
# TODO(crcrpar): Remove this when chainerx.linalg.norm becomes available.
if xp is chainerx:
# NOTE(crcrpar): `chainerx.power` is not available as of 2019/03/27.
# See https://github.com/chainer/chainer/pull/6522
norm = chainerx.sqrt(chainerx.sum(v * v))
else:
norm = xp.linalg.norm(v)
return v / (norm + eps)
def update_approximate_vectors(
weight_matrix, u, n_power_iteration, eps):
"""Update the first left and right singular vectors.
This function updates the first left singular vector `u` and
the first right singular vector `v`.
Args:
weight_matrix (~chainer.Variable): 2D weight.
u (numpy.ndarray, cupy.ndarray, or None):
Vector that approximates the first left singular vector and
has the shape of (out_size,).
n_power_iteration (int): Number of iterations to approximate
the first right and left singular vectors.
Returns:
:class:`numpy.ndarray` or `cupy.ndarray`:
Approximate first left singular vector.
:class:`numpy.ndarray` or `cupy.ndarray`:
Approximate first right singular vector.
"""
weight_matrix = weight_matrix.array
xp = backend.get_array_module(weight_matrix)
for _ in range(n_power_iteration):
v = l2normalize(xp, xp.dot(u, weight_matrix), eps)
u = l2normalize(xp, xp.dot(weight_matrix, v), eps)
return u, v
def calculate_max_singular_value(weight_matrix, u, v):
"""Calculate max singular value by power iteration method.
Args:
weight_matrix (~chainer.Variable)
u (numpy.ndarray or cupy.ndarray)
v (numpy.ndarray or cupy.ndarray)
Returns:
~chainer.Variable: Max singular value via power iteration method.
"""
sigma = F.matmul(F.matmul(u, weight_matrix), v)
return sigma
class SpectralNormalization(link_hook.LinkHook):
"""Spectral Normalization link hook implementation.
This hook normalizes a weight using max singular value and this value
is computed via power iteration method. Currently, this hook is supposed to
be added to :class:`chainer.links.Linear`, :class:`chainer.links.EmbedID`,
:class:`chainer.links.Convolution2D`, :class:`chainer.links.ConvolutionND`,
:class:`chainer.links.Deconvolution2D`,
and :class:`chainer.links.DeconvolutionND`. However, you can use this to
other links like RNNs by specifying ``weight_name``.
It is highly recommended to add this hook before optimizer setup because
this hook add a scaling parameter ``gamma`` if ``use_gamma`` is True.
Otherwise, the registered ``gamma`` will not be updated.
.. math::
\\bar{\\mathbf{W}} &=& \\dfrac{\\mathbf{W}}{\\sigma(\\mathbf{W})} \\\\
\\text{, where} \\ \\sigma(\\mathbf{W}) &:=&
\\max_{\\mathbf{h}: \\mathbf{h} \\ne 0}
\\dfrac{\\|\\mathbf{W} \\mathbf{h}\\|_2}{\\|\\mathbf{h}\\|_2}
= \\max_{\\|\\mathbf{h}\\|_2 \\le 1} \\|\\mathbf{W}\\mathbf{h}\\|_2
See: T. Miyato et. al., `Spectral Normalization for Generative Adversarial
Networks <https://arxiv.org/abs/1802.05957>`_
Args:
n_power_iteration (int): Number of power iteration.
The default value is 1.
eps (float): Numerical stability in norm calculation.
The default value is 1e-6 for the compatibility with
mixed precision training. The value used in the author's
implementation is 1e-12.
use_gamma (bool): If ``True``, weight scaling parameter gamma which is
initialized by initial weight's max singular value is introduced.
factor (float, None): Scaling parameter to divide maximum singular
value. The default value is 1.0.
weight_name (str): Link's weight name to apply this hook. The default
value is ``'W'``.
name (str or None): Name of this hook. The default value is
``'SpectralNormalization'``.
Attributes:
        vector_name (str): Name of the approximate first left singular vector
            registered in the target link.
axis (int): Axis of weight represents the number of output
feature maps or output units (``out_channels`` and
``out_size``, respectively).
.. admonition:: Example
There are almost the same but 2 ways to apply spectral normalization
(SN) hook to links.
1. Initialize link and SN separately. This makes it easy to handle
buffer and parameter of links registered by SN hook.
>>> l = L.Convolution2D(3, 5, 3)
>>> hook = chainer.link_hooks.SpectralNormalization()
>>> _ = l.add_hook(hook)
>>> # Check the shape of the first left singular vector.
>>> getattr(l, hook.vector_name).shape
(5,)
>>> # Delete SN hook from this link.
>>> l.delete_hook(hook.name)
2. Initialize both link and SN hook at one time. This makes it easy to
define your original :class:`~chainer.Chain`.
>>> # SN hook handles lazy initialization!
>>> layer = L.Convolution2D(
... 5, 3, stride=1, pad=1).add_hook(
... chainer.link_hooks.SpectralNormalization())
"""
name = 'SpectralNormalization'
def __init__(self, n_power_iteration=1, eps=1e-6, use_gamma=False,
factor=None, weight_name='W', name=None):
assert n_power_iteration > 0
self.n_power_iteration = n_power_iteration
self.eps = eps
self.use_gamma = use_gamma
self.factor = factor
self.weight_name = weight_name
self.vector_name = weight_name + '_u'
self._initialized = False
self.axis = 0
if name is not None:
self.name = name
def __enter__(self):
raise NotImplementedError(
'This hook is not supposed to be used as context manager.')
def __exit__(self):
raise NotImplementedError
def added(self, link):
# Define axis and register ``u`` if the weight is initialized.
if not hasattr(link, self.weight_name):
raise ValueError(
'Weight \'{}\' does not exist!'.format(self.weight_name))
if isinstance(link, (L.Deconvolution2D, L.DeconvolutionND)):
self.axis = 1
if getattr(link, self.weight_name).array is not None:
self._prepare_parameters(link)
def deleted(self, link):
# Remove approximate vector ``u`` and parameter ``gamma` if exists.
delattr(link, self.vector_name)
if self.use_gamma:
del link.gamma
def forward_preprocess(self, cb_args):
# This method normalizes target link's weight spectrally
# using power iteration method
link = cb_args.link
input_variable = cb_args.args[0]
if not self._initialized:
self._prepare_parameters(link, input_variable)
weight = getattr(link, self.weight_name)
# For link.W or equivalents to be chainer.Parameter
# consistently to users, this hook maintains a reference to
# the unnormalized weight.
self.original_weight = weight
# note: `normalized_weight` is ~chainer.Variable
normalized_weight = self.normalize_weight(link)
setattr(link, self.weight_name, normalized_weight)
def forward_postprocess(self, cb_args):
# Here, the computational graph is already created,
# we can reset link.W or equivalents to be Parameter.
link = cb_args.link
setattr(link, self.weight_name, self.original_weight)
def _prepare_parameters(self, link, input_variable=None):
"""Prepare one buffer and one parameter.
Args:
link (:class:`~chainer.Link`): Link to normalize spectrally.
input_variable (:class:`~chainer.Variable`):
The first minibatch to initialize weight.
"""
if getattr(link, self.weight_name).array is None:
if input_variable is not None:
link._initialize_params(input_variable.shape[1])
initialW = getattr(link, self.weight_name)
if initialW.shape[self.axis] == 0:
raise ValueError(
'Expect {}.shape[{}] > 0'.format(self.weight_name, self.axis)
)
u = link.xp.random.normal(
size=(initialW.shape[self.axis],)).astype(dtype=initialW.dtype)
setattr(link, self.vector_name, u)
link.register_persistent(self.vector_name)
if self.use_gamma:
# Initialize the scaling parameter with the max singular value.
weight_matrix = self.reshape_W(initialW.array)
# TODO(crcrpar): Remove this when chainerx supports SVD.
if link.xp is chainerx:
xp, device, array = fallback._from_chx(weight_matrix)
if xp is numpy:
_, s, _ = numpy.linalg.svd(array)
else:
with chainer.using_device(device):
_, s, _ = xp.linalg.svd(array)
else:
_, s, _ = link.xp.linalg.svd(weight_matrix)
with link.init_scope():
link.gamma = variable.Parameter(s[0], ())
self._initialized = True
def normalize_weight(self, link):
"""Normalize target weight before every single forward computation."""
weight_name, vector_name = self.weight_name, self.vector_name
W = getattr(link, weight_name)
u = getattr(link, vector_name)
weight_matrix = self.reshape_W(W)
if not configuration.config.in_recomputing:
with chainer.using_device(link.device):
u, v = update_approximate_vectors(
weight_matrix, u, self.n_power_iteration, self.eps)
else:
v = self.v
sigma = calculate_max_singular_value(weight_matrix, u, v)
if self.factor is not None:
sigma /= self.factor
if self.use_gamma:
W = link.gamma * W / sigma
else:
W = W / sigma
if not configuration.config.in_recomputing:
self.v = v
with chainer.using_device(link.device):
if configuration.config.train:
if link.xp is chainerx:
# TODO(crcrpar): Remove this when
# chainerx supports `copyto`.
getattr(link, vector_name)[:] = u
else:
backend.copyto(getattr(link, vector_name), u)
return W
def reshape_W(self, W):
"""Reshape & transpose weight into 2D if necessary."""
if self.axis != 0:
axes = [self.axis] + [i for i in range(W.ndim) if i != self.axis]
W = W.transpose(axes)
if W.ndim == 2:
return W
return W.reshape(W.shape[0], -1)
| keisuke-umezawa/chainer | chainer/link_hooks/spectral_normalization.py | Python | mit | 11,583 |
from SimpleTCPClient import SimpleTCPClient
from SimpleTCPClientException import HTTPError, URLError
__all__ = ['SimpleTCPClient', 'HTTPError', 'URLError'] | umairghani/py-jrpc | jrpc/jrpcClient/__init__.py | Python | mit | 150 |
import sys as _sys
import ast as _ast
from ast import boolop, cmpop, excepthandler, expr, expr_context, operator
from ast import slice, stmt, unaryop, mod, AST
def _make_node(Name, Fields, Attributes, Bases):
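    # Dynamically build an AST node class named ``Name``: ``create_node`` acts
    # as its ``__init__`` and accepts the node's fields positionally or by
    # keyword; the class is then registered as a module-level attribute.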
def create_node(self, *args, **kwargs):
nbparam = len(args) + len(kwargs)
assert nbparam in (0, len(Fields)), \
"Bad argument number for {}: {}, expecting {}".\
format(Name, nbparam, len(Fields))
self._fields = Fields
self._attributes = Attributes
for argname, argval in zip(self._fields, args):
setattr(self, argname, argval)
for argname, argval in kwargs.items():
assert argname in Fields, \
"Invalid Keyword argument for {}: {}".format(Name, argname)
setattr(self, argname, argval)
setattr(_sys.modules[__name__],
Name,
type(Name,
Bases,
{'__init__': create_node}))
_nodes = {
# mod
'Module': (('body',), (), (mod,)),
'Interactive': (('body',), (), (mod,)),
'Expression': (('body',), (), (mod,)),
'Suite': (('body',), (), (mod,)),
# stmt
'FunctionDef': (('name', 'args', 'body', 'decorator_list', 'returns',),
('lineno', 'col_offset',),
(stmt,)),
'AsyncFunctionDef': (('name', 'args', 'body',
'decorator_list', 'returns',),
('lineno', 'col_offset',),
(stmt,)),
'ClassDef': (('name', 'bases', 'keywords', 'body', 'decorator_list',),
('lineno', 'col_offset',),
(stmt,)),
'Return': (('value',), ('lineno', 'col_offset',),
(stmt,)),
'Delete': (('targets',), ('lineno', 'col_offset',),
(stmt,)),
'Assign': (('targets', 'value',), ('lineno', 'col_offset',),
(stmt,)),
'AugAssign': (('target', 'op', 'value',), ('lineno', 'col_offset',),
(stmt,)),
'Print': (('dest', 'values', 'nl',), ('lineno', 'col_offset',),
(stmt,)),
'For': (('target', 'iter', 'body', 'orelse',), ('lineno', 'col_offset',),
(stmt,)),
'AsyncFor': (('target', 'iter', 'body', 'orelse',),
('lineno', 'col_offset',),
(stmt,)),
'While': (('test', 'body', 'orelse',), ('lineno', 'col_offset',),
(stmt,)),
'If': (('test', 'body', 'orelse',), ('lineno', 'col_offset',),
(stmt,)),
'With': (('items', 'body',), ('lineno', 'col_offset',),
(stmt,)),
'AsyncWith': (('items', 'body',), ('lineno', 'col_offset',),
(stmt,)),
'Raise': (('exc', 'cause',), ('lineno', 'col_offset',),
(stmt,)),
'Try': (('body', 'handlers', 'orelse', 'finalbody',),
('lineno', 'col_offset',),
(stmt,)),
'Assert': (('test', 'msg',), ('lineno', 'col_offset',),
(stmt,)),
'Import': (('names',), ('lineno', 'col_offset',),
(stmt,)),
'ImportFrom': (('module', 'names', 'level',), ('lineno', 'col_offset',),
(stmt,)),
'Exec': (('body', 'globals', 'locals',), ('lineno', 'col_offset',),
(stmt,)),
'Global': (('names',), ('lineno', 'col_offset',),
(stmt,)),
'Nonlocal': (('names',), ('lineno', 'col_offset',),
(stmt,)),
'Expr': (('value',), ('lineno', 'col_offset',),
(stmt,)),
'Pass': ((), ('lineno', 'col_offset',),
(stmt,)),
'Break': ((), ('lineno', 'col_offset',),
(stmt,)),
'Continue': ((), ('lineno', 'col_offset',),
(stmt,)),
# expr
'BoolOp': (('op', 'values',), ('lineno', 'col_offset',),
(expr,)),
'BinOp': (('left', 'op', 'right',), ('lineno', 'col_offset',),
(expr,)),
'UnaryOp': (('op', 'operand',), ('lineno', 'col_offset',),
(expr,)),
'Lambda': (('args', 'body',), ('lineno', 'col_offset',),
(expr,)),
'IfExp': (('test', 'body', 'orelse',), ('lineno', 'col_offset',),
(expr,)),
'Dict': (('keys', 'values',), ('lineno', 'col_offset',),
(expr,)),
'Set': (('elts',), ('lineno', 'col_offset',),
(expr,)),
'ListComp': (('elt', 'generators',), ('lineno', 'col_offset',),
(expr,)),
'SetComp': (('elt', 'generators',), ('lineno', 'col_offset',),
(expr,)),
'DictComp': (('key', 'value', 'generators',), ('lineno', 'col_offset',),
(expr,)),
'GeneratorExp': (('elt', 'generators',), ('lineno', 'col_offset',),
(expr,)),
'Await': (('value',), ('lineno', 'col_offset',),
(expr,)),
'Yield': (('value',), ('lineno', 'col_offset',),
(expr,)),
'YieldFrom': (('value',), ('lineno', 'col_offset',),
(expr,)),
'Compare': (('left', 'ops', 'comparators',), ('lineno', 'col_offset',),
(expr,)),
'Call': (('func', 'args', 'keywords',), ('lineno', 'col_offset',),
(expr,)),
'Repr': (('value',), ('lineno', 'col_offset',),
(expr,)),
'Num': (('n',), ('lineno', 'col_offset',),
(expr,)),
'Str': (('s',), ('lineno', 'col_offset',),
(expr,)),
'FormattedValue': (('value', 'conversion', 'format_spec',),
('lineno', 'col_offset',), (expr,)),
'JoinedStr': (('values',), ('lineno', 'col_offset',), (expr,)),
'Bytes': (('s',), ('lineno', 'col_offset',),
(expr,)),
'NameConstant': (('value',), ('lineno', 'col_offset',),
(expr,)),
'Ellipsis': ((), ('lineno', 'col_offset',),
(expr,)),
'Attribute': (('value', 'attr', 'ctx',), ('lineno', 'col_offset',),
(expr,)),
'Subscript': (('value', 'slice', 'ctx',), ('lineno', 'col_offset',),
(expr,)),
'Starred': (('value', 'ctx',), ('lineno', 'col_offset',),
(expr,)),
'Name': (('id', 'ctx', 'annotation'), ('lineno', 'col_offset',),
(expr,)),
'List': (('elts', 'ctx',), ('lineno', 'col_offset',),
(expr,)),
'Tuple': (('elts', 'ctx',), ('lineno', 'col_offset',),
(expr,)),
# expr_context
'Load': ((), (), (expr_context,)),
'Store': ((), (), (expr_context,)),
'Del': ((), (), (expr_context,)),
'AugLoad': ((), (), (expr_context,)),
'AugStore': ((), (), (expr_context,)),
'Param': ((), (), (expr_context,)),
# slice
'Slice': (('lower', 'upper', 'step'), (), (slice,)),
'ExtSlice': (('dims',), (), (slice,)),
'Index': (('value',), (), (slice,)),
# boolop
'And': ((), (), (boolop,)),
'Or': ((), (), (boolop,)),
# operator
'Add': ((), (), (operator,)),
'Sub': ((), (), (operator,)),
'Mult': ((), (), (operator,)),
'MatMult': ((), (), (operator,)),
'Div': ((), (), (operator,)),
'Mod': ((), (), (operator,)),
'Pow': ((), (), (operator,)),
'LShift': ((), (), (operator,)),
'RShift': ((), (), (operator,)),
'BitOr': ((), (), (operator,)),
'BitXor': ((), (), (operator,)),
'BitAnd': ((), (), (operator,)),
'FloorDiv': ((), (), (operator,)),
# unaryop
'Invert': ((), (), (unaryop, AST,)),
'Not': ((), (), (unaryop, AST,)),
'UAdd': ((), (), (unaryop, AST,)),
'USub': ((), (), (unaryop, AST,)),
# cmpop
'Eq': ((), (), (cmpop,)),
'NotEq': ((), (), (cmpop,)),
'Lt': ((), (), (cmpop,)),
'LtE': ((), (), (cmpop,)),
'Gt': ((), (), (cmpop,)),
'GtE': ((), (), (cmpop,)),
'Is': ((), (), (cmpop,)),
'IsNot': ((), (), (cmpop,)),
'In': ((), (), (cmpop,)),
'NotIn': ((), (), (cmpop,)),
# comprehension
'comprehension': (('target', 'iter', 'ifs', 'is_async'), (), (AST,)),
# excepthandler
'ExceptHandler': (('type', 'name', 'body'), ('lineno', 'col_offset'),
(excepthandler,)),
# arguments
'arguments': (('args', 'vararg', 'kwonlyargs', 'kw_defaults',
'kwarg', 'defaults'), (), (AST,)),
# keyword
'keyword': (('arg', 'value'), (), (AST,)),
# alias
'alias': (('name', 'asname'), (), (AST,)),
# withitem
'withitem': (('context_expr', 'optional_vars'), (), (AST,)),
}
for name, descr in _nodes.items():
_make_node(name, *descr)
if _sys.version_info.major == 2:
from .ast2 import ast_to_gast, gast_to_ast
if _sys.version_info.major == 3:
from .ast3 import ast_to_gast, gast_to_ast
def parse(*args, **kwargs):
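    # Parse with the stdlib ``ast`` module, then convert the tree into gast's
    # version-independent node classes defined above.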
return ast_to_gast(_ast.parse(*args, **kwargs))
def literal_eval(node_or_string):
if isinstance(node_or_string, AST):
node_or_string = gast_to_ast(node_or_string)
return _ast.literal_eval(node_or_string)
def get_docstring(node, clean=True):
if not isinstance(node, (FunctionDef, ClassDef, Module)):
raise TypeError("%r can't have docstrings" % node.__class__.__name__)
if node.body and isinstance(node.body[0], Expr) and \
isinstance(node.body[0].value, Str):
if clean:
import inspect
return inspect.cleandoc(node.body[0].value.s)
return node.body[0].value.s
| ryfeus/lambda-packs | Tensorflow_Pandas_Numpy/source3.6/gast/gast.py | Python | mit | 9,289 |
# Invert gray image
import cv2
from . import print_image
from . import plot_image
def invert(img, device, debug=None):
"""Inverts grayscale images.
Inputs:
img = image object, grayscale
device = device number. Used to count steps in the pipeline
debug = None, print, or plot. Print = save to file, Plot = print to screen.
Returns:
device = device number
img_inv = inverted image
:param img: numpy array
:param device: int
:param debug: str
:return device: int
:return img_inv: numpy array
"""
device += 1
img_inv = cv2.bitwise_not(img)
if debug == 'print':
print_image(img_inv, (str(device) + '_invert.png'))
elif debug == 'plot':
plot_image(img_inv, cmap='gray')
return device, img_inv
| AntonSax/plantcv | plantcv/invert.py | Python | mit | 794 |
def token_encryption_algorithm():
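    # HS256 is HMAC-SHA256; presumably the symmetric algorithm used elsewhere
    # in nautilus.auth to sign and verify auth tokens (JWTs).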
return 'HS256' | aaivazis/nautilus | nautilus/auth/util/token_encryption_algorithm.py | Python | mit | 52 |
import re
import quantities as pq
from numbers import NumberService
class ConversionService(object):
__exponents__ = {
'square': 2,
'squared': 2,
'cubed': 3
}
def _preprocess(self, input):
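        # Rewrite verbal exponents ("square meters", "meters cubed", "x to the
        # third power", ...) into caret notation that the quantities package
        # understands, and turn " per " into a division operator.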
def handleExponents(input):
m = re.search(r'\bsquare (\w+)', input)
if m and self.isValidUnit(m.group(1)):
input = re.sub(r'\bsquare (\w+)', r'\g<1>^2', input)
m = re.search(r'\bsquared (\w+)', input)
if m and self.isValidUnit(m.group(1)):
input = re.sub(r'\bsquared (\w+)', r'\g<1>^2', input)
m = re.search(r'\b(\w+) squared', input)
if m and self.isValidUnit(m.group(1)):
input = re.sub(r'\b(\w+) squared', r'\g<1>^2', input)
m = re.search(r'\bsq (\w+)', input)
if m and self.isValidUnit(m.group(1)):
input = re.sub(r'\bsq (\w+)', r'\g<1>^2', input)
m = re.search(r'\b(\w+) cubed', input)
if m and self.isValidUnit(m.group(1)):
input = re.sub(r'\b(\w+) cubed', r'\g<1>^3', input)
m = re.search(r'\bcubic (\w+)', input)
if m and self.isValidUnit(m.group(1)):
input = re.sub(r'\bcubic (\w+)', r'\g<1>^3', input)
service = NumberService()
m = re.search(r'\b(\w+) to the (\w+)( power)?', input)
if m and self.isValidUnit(m.group(1)):
if m.group(2) in service.__ordinals__:
exp = service.parseMagnitude(m.group(2))
input = re.sub(r'\b(\w+) to the (\w+)( power)?',
r'\g<1>^' + str(exp), input)
return input
input = re.sub(r'\sper\s', r' / ', input)
input = handleExponents(input)
return input
def parseUnits(self, input):
"""Carries out a conversion (represented as a string) and returns the
result as a human-readable string.
Args:
input (str): Text representing a unit conversion, which should
include a magnitude, a description of the initial units,
and a description of the target units to which the quantity
should be converted.
Returns:
A quantities object representing the converted quantity and its new
units.
"""
quantity = self.convert(input)
units = ' '.join(str(quantity.units).split(' ')[1:])
return NumberService.parseMagnitude(quantity.item()) + " " + units
def isValidUnit(self, w):
"""Checks if a string represents a valid quantities unit.
Args:
w (str): A string to be tested against the set of valid
quantities units.
Returns:
True if the string can be used as a unit in the quantities
module.
"""
bad = set(['point', 'a'])
if w in bad:
return False
try:
pq.Quantity(0.0, w)
return True
except:
return w == '/'
def extractUnits(self, input):
"""Collects all the valid units from an input string. Works by
appending consecutive words from the string and cross-referncing
them with a set of valid units.
Args:
input (str): Some text which hopefully contains descriptions
of different units.
Returns:
A list of strings, each entry in which is a valid quantities
unit.
"""
input = self._preprocess(input)
units = []
description = ""
for w in input.split(' '):
if self.isValidUnit(w) or w == '/':
if description:
description += " "
description += w
else:
if description:
units.append(description)
description = ""
if description:
units.append(description)
return units
def convert(self, input):
"""Converts a string representation of some quantity of units into a
quantities object.
Args:
input (str): A textual representation of some quantity of units,
e.g., "fifty kilograms".
Returns:
A quantities object representing the described quantity and its
units.
"""
input = self._preprocess(input)
n = NumberService().longestNumber(input)
units = self.extractUnits(input)
# Convert to quantity object, attempt conversion
quantity = pq.Quantity(float(n), units[0])
quantity.units = units[1]
return quantity
| jobdash/semantic | semantic/units.py | Python | mit | 4,744 |