repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool)
---|---|---|---|---|---|---|---|---|---|---|
3DGenomes/tadbit | scripts/liftover_tads_genomes.py | 2 | 7055 | """
17 May 2013
Liftover (1) wrapper applied to the comparison of topologically associated
domains.
This script allows one to compare Hi-C experiments (mainly aligning TAD boundaries)
done with different assemblies (e.g.: NCBI36 and GRCh37 for human genome), or
in different species.
INSTALL:
- liftover tool needs to be downloaded from
(http://hgdownload.cse.ucsc.edu/admin/exe/), installed, and appended to the
path.
- depending on the data, a 'chain' file may also need to be downloaded. For example
from: http://hgdownload.cse.ucsc.edu/goldenPath/hg19/liftOver/
(1) Fujita, P. A., Rhead, B., Zweig, A. S., Hinrichs, A. S., Karolchik, D.,
Cline, M. S., Goldman, M., et al. (2011).
The UCSC Genome Browser database: update 2011.
Nucleic Acids Research, 39(Database issue), D876-82. doi:10.1093/nar/gkq963
"""
from os import system, listdir
from os.path import isdir
from pytadbit import load_chromosome
from pytadbit.utils.remap_tads import remap_chr, reorder
from optparse import OptionParser
def check_pik(path):
with open(path, "r") as f:
f.seek (0, 2) # Seek @ EOF
fsize = f.tell() # Get Size
f.seek (max (fsize-2, 0), 0) # Set pos @ last n chars
key = f.read() # Read to end
return key == 's.'
def main():
"""
main function
"""
opts = get_options()
res = opts.res
if opts.genomes:
# load all chromosomes of reference genomes
ref_genome = {}
for crm in listdir(opts.ref_genome):
crm_path = opts.ref_genome + crm + '/'
if not isdir(crm_path):
continue
for crm_fh in listdir(crm_path):
crm_pik = crm_path + crm_fh
if not check_pik(crm_pik):
continue
ref_genome[crm] = load_chromosome(crm_pik)
if not opts.res:
resolutions = []
for crm in ref_genome:
for exp in ref_genome[crm].experiments:
resolutions.append(exp.resolution)
if not all([r == resolutions[0] for r in resolutions]):
raise AssertionError('Not all Experiments have the ' +
'same resolution\n')
res = resolutions[0]
alt_genomes = {}
for i, genome in enumerate(opts.genomes):
alt_genomes[i] = {}
for crm in listdir(genome):
crm_path = genome + crm + '/'
if not isdir(crm_path):
continue
for crm_fh in listdir(crm_path):
crm_pik = crm_path + crm_fh
if not check_pik(crm_pik):
continue
try:
alt_genomes[i][crm] = load_chromosome(crm_pik)
except:
print ('SKIPPING: {} \n not a valid ' +
'chromosome').format(crm_pik)
genome = {}
for crm in alt_genomes[i]:
genome = remap_chr(alt_genomes[i][crm], crm, '/tmp/',
opts.lft_path, opts.chain_path,
genome=genome)
reorder(genome)
for exp in genome:
for crm in genome[exp]:
try:
ref_genome[crm].add_experiment(
exp, res, tad_handler=genome[exp][crm])
except KeyError:
print ('Chromosome {} skipped, not in reference ' +
'genome').format(crm)
system('mkdir -p ' + opts.out_path)
for crm in ref_genome:
system('mkdir -p ' + opts.out_path + '/' + crm)
out_f = opts.out_path + '/' + crm + '/chr' + crm + '.tdb'
ref_genome[crm].save_chromosome(out_f, force=True)
# TODO: the same for 1 chromosome
def get_options():
'''
parse option from call
'''
def vararg_callback(option, _, value, parser):
assert value is None
value = []
rargs = parser.rargs
while rargs:
arg = rargs[0]
if ((arg[:2] == "--" and len(arg) > 2) or
(arg[:1] == "-" and len(arg) > 1 and arg[1] != "-")):
break
else:
value.append(arg)
del rargs[0]
setattr(parser.values, option.dest, value)
#
parser = OptionParser(
usage=("%prog [options] file [options] file [options] " +
"file [options [file ...]]"))
parser.add_option('--genomes', dest='genomes', metavar="PATH",
action='callback', default=None,
callback=vararg_callback,
help='''path(s) to a directory/ies with a list of
chromosomes saved through tadbit (required if not
passing chromosomes)''')
parser.add_option('--ref_genome', dest='ref_genome', metavar="PATH",
help='''path to a directory with a list of chromosomes
saved through tadbit (required with genomes option)''')
parser.add_option('--crm', dest='crm', metavar="PATH",
help='''path to input file, a chromosome saved through
tadbit (required if not passing genomes)''')
parser.add_option('--ref_crm', dest='ref_crm', metavar="PATH",
help='''path to second input file, a reference chromosome
saved through tadbit (required)''')
parser.add_option('--chain', dest='chain_path', action="store", \
help=
'''path to UCSC chain file (required)''')
parser.add_option('-o', dest='out_path', metavar="PATH",
default='./',
help='''path to out file where merged tadbit chromosome
will be stored''')
parser.add_option('--res', dest='res',
default=None,
help='''Wanted resolution for the detection of TADs (i.e.:
100Kb)''')
parser.add_option('--crm_name', dest='crm_name',
default=None,
help='''Chromosome name for crm1 (e.g. 21).''')
parser.add_option('--tmp', dest='tmp_path', metavar="PATH",
default='./',
help='''path to temporary directory to store liftover
outfiles''')
parser.add_option('--liftover',
dest='lft_path', default='/usr/local/bin/',\
help='''[%default] path to liftover binary''')
opts = parser.parse_args()[0]
if not opts.crm or not opts.ref_crm or not opts.chain_path:
if not opts.genomes or not opts.ref_genome or not opts.chain_path:
exit(parser.print_help())
return opts
if __name__ == "__main__":
exit(main())
| gpl-3.0 | -5,894,526,456,116,291,000 | 38.634831 | 80 | 0.505599 | false |
smallyear/linuxLearn | salt/salt/modules/cloud.py | 1 | 8214 | # -*- coding: utf-8 -*-
'''
Salt-specific interface for calling Salt Cloud directly
'''
# Import python libs
from __future__ import absolute_import
import os
import logging
import copy
# Import salt libs
try:
import salt.cloud
HAS_SALTCLOUD = True
except ImportError:
HAS_SALTCLOUD = False
import salt.utils
# Import 3rd-party libs
import salt.ext.six as six
log = logging.getLogger(__name__)
__func_alias__ = {
'profile_': 'profile'
}
def __virtual__():
'''
Only work on POSIX-like systems
'''
if HAS_SALTCLOUD:
return True
return False
def _get_client():
'''
Return a cloud client
'''
client = salt.cloud.CloudClient(
os.path.join(os.path.dirname(__opts__['conf_file']), 'cloud'),
pillars=copy.deepcopy(__pillar__.get('cloud', {}))
)
return client
def list_sizes(provider='all'):
'''
List cloud provider sizes for the given providers
CLI Example:
.. code-block:: bash
salt '*' cloud.list_sizes my-gce-config
'''
client = _get_client()
sizes = client.list_sizes(provider)
return sizes
def list_images(provider='all'):
'''
List cloud provider images for the given providers
CLI Example:
.. code-block:: bash
salt '*' cloud.list_images my-gce-config
'''
client = _get_client()
images = client.list_images(provider)
return images
def list_locations(provider='all'):
'''
List cloud provider locations for the given providers
CLI Example:
.. code-block:: bash
salt '*' cloud.list_locations my-gce-config
'''
client = _get_client()
locations = client.list_locations(provider)
return locations
def query(query_type='list_nodes'):
'''
List cloud provider data for all providers
CLI Examples:
.. code-block:: bash
salt '*' cloud.query
salt '*' cloud.query list_nodes_full
salt '*' cloud.query list_nodes_select
'''
client = _get_client()
info = client.query(query_type)
return info
def full_query(query_type='list_nodes_full'):
'''
List all available cloud provider data
CLI Example:
.. code-block:: bash
salt '*' cloud.full_query
'''
return query(query_type=query_type)
def select_query(query_type='list_nodes_select'):
'''
List selected nodes
CLI Example:
.. code-block:: bash
salt '*' cloud.select_query
'''
return query(query_type=query_type)
def has_instance(name, provider=None):
'''
Return true if the instance is found on a provider
CLI Example:
.. code-block:: bash
salt '*' cloud.has_instance myinstance
'''
data = get_instance(name, provider)
if data is None:
return False
return True
def get_instance(name, provider=None):
'''
Return details on an instance.
Similar to the cloud action show_instance
but returns only the instance details.
CLI Example:
.. code-block:: bash
salt '*' cloud.get_instance myinstance
SLS Example:
.. code-block:: bash
{{ salt['cloud.get_instance']('myinstance')['mac_address'] }}
'''
data = action(fun='show_instance', names=[name], provider=provider)
info = salt.utils.cloud.simple_types_filter(data)
try:
# get the first: [alias][driver][vm_name]
info = next(six.itervalues(next(six.itervalues(next(six.itervalues(info))))))
except AttributeError:
return None
return info
def profile_(profile, names, vm_overrides=None, **kwargs):
'''
Spin up an instance using Salt Cloud
CLI Example:
.. code-block:: bash
salt '*' cloud.profile my-gce-config myinstance
'''
client = _get_client()
info = client.profile(profile, names, vm_overrides=vm_overrides, **kwargs)
return info
def destroy(names):
'''
Destroy the named VM(s)
CLI Example:
.. code-block:: bash
salt '*' cloud.destroy myinstance
'''
client = _get_client()
info = client.destroy(names)
return info
def action(
fun=None,
cloudmap=None,
names=None,
provider=None,
instance=None,
**kwargs):
'''
Execute a single action on the given provider/instance
CLI Example:
.. code-block:: bash
salt '*' cloud.action start instance=myinstance
salt '*' cloud.action stop instance=myinstance
salt '*' cloud.action show_image provider=my-ec2-config image=ami-1624987f
'''
client = _get_client()
info = client.action(fun, cloudmap, names, provider, instance, kwargs)
return info
def create(provider, names, **kwargs):
'''
Create an instance using Salt Cloud
CLI Example:
.. code-block:: bash
salt minionname cloud.create my-ec2-config myinstance image=ami-1624987f size='t1.micro' ssh_username=ec2-user securitygroup=default delvol_on_destroy=True
'''
client = _get_client()
info = client.create(provider, names, **kwargs)
return info
def volume_list(provider):
'''
List block storage volumes
CLI Example:
.. code-block:: bash
salt minionname cloud.volume_list my-nova
'''
client = _get_client()
info = client.extra_action(action='volume_list', provider=provider, names='name')
return info['name']
def volume_delete(provider, names, **kwargs):
'''
Delete volume
CLI Example:
.. code-block:: bash
salt minionname cloud.volume_delete my-nova myblock
'''
client = _get_client()
info = client.extra_action(provider=provider, names=names, action='volume_delete', **kwargs)
return info
def volume_create(provider, names, **kwargs):
'''
Create volume
CLI Example:
.. code-block:: bash
salt minionname cloud.volume_create my-nova myblock size=100 voltype=SSD
'''
client = _get_client()
info = client.extra_action(action='volume_create', names=names, provider=provider, **kwargs)
return info
def volume_attach(provider, names, **kwargs):
'''
Attach volume to a server
CLI Example:
.. code-block:: bash
salt minionname cloud.volume_attach my-nova myblock server_name=myserver device='/dev/xvdf'
'''
client = _get_client()
info = client.extra_action(provider=provider, names=names, action='volume_attach', **kwargs)
return info
def volume_detach(provider, names, **kwargs):
'''
Detach volume from a server
CLI Example:
.. code-block:: bash
salt minionname cloud.volume_detach my-nova myblock server_name=myserver
'''
client = _get_client()
info = client.extra_action(provider=provider, names=names, action='volume_detach', **kwargs)
return info
def network_list(provider):
'''
List private networks
CLI Example:
.. code-block:: bash
salt minionname cloud.network_list my-nova
'''
client = _get_client()
return client.extra_action(action='network_list', provider=provider, names='names')
def network_create(provider, names, **kwargs):
'''
Create private network
CLI Example:
.. code-block:: bash
salt minionname cloud.network_create my-nova names=['salt'] cidr='192.168.100.0/24'
'''
client = _get_client()
return client.extra_action(provider=provider, names=names, action='network_create', **kwargs)
def virtual_interface_list(provider, names, **kwargs):
'''
List virtual interfaces on a server
CLI Example:
.. code-block:: bash
salt minionname cloud.virtual_interface_list my-nova names=['salt-master']
'''
client = _get_client()
return client.extra_action(provider=provider, names=names, action='virtual_interface_list', **kwargs)
def virtual_interface_create(provider, names, **kwargs):
'''
Attach private interfaces to a server
CLI Example:
.. code-block:: bash
salt minionname cloud.virtual_interface_create my-nova names=['salt-master'] net_name='salt'
'''
client = _get_client()
return client.extra_action(provider=provider, names=names, action='virtual_interface_create', **kwargs)
| apache-2.0 | 4,759,294,254,014,473,000 | 20.061538 | 163 | 0.632213 | false |
jasonhamilton/hotwing-core | hotwing_core/panel.py | 1 | 3463 | from __future__ import division
from .rib import Rib
from .profile import Profile
from .coordinate import Coordinate
class Panel():
"""
A Panel is a representation of a wing panel and contains all of the items/objects
necessary to define a wing.
A Panel can be thought of as a wing facing down with rib_1 on the left and rib_2 on the right.
If rib_1 is the root chord and rib_2 is the tip, chord the panel will represent the left part
of a wing.
.. code-block:: bash
| ------ width ---------- |
trailing edge
---------------------------
| |
rib_1 | | rib_2
| |
---------------------------
leading edge
Args:
left_rib (Rib): Rib defining the left of the wing
right_rib (Rib): Rib defining the right of the wing
width (Float): Width of the Panel measured from left_rib to right_rib
:ivar left_rib: Left Rib
:ivar right_rib: Right Rib
:ivar width: Width
"""
def __init__(self, left_rib, right_rib, width):
self.left_rib = left_rib
self.right_rib = right_rib
self.width = width
@classmethod
def copy(cls, panel):
"""
Copy a panel
Args:
panel (Panel): object to copy
Returns:
Panel: New panel
"""
return cls(panel.left_rib, panel.right_rib, panel.width)
@classmethod
def reverse(cls, panel):
"""
Reverse the ribs on the panel. If you have a left side, it will make it a right side. The ribs
will maintain the same direction, but just switch sides.
Args:
panel (Panel): object to flip
Returns:
Panel: New flipped panel
"""
return cls(panel.right_rib, panel.left_rib, panel.width)
@classmethod
def trim(cls, panel, left=None, right=None):
"""
Creates a new Panel by taking an existing Panel and trimming it.
The new panel's ribs will be interpolated to the correct shape.
Args:
panel (Panel): object to trim
left (Float): distance from left rib to make the left side cut
right (Float): distance from left rib to make the right side cut
Returns:
Panel: New trimmed Panel
"""
if left is None or left == 0:
# no need to trim left
r1 = panel.left_rib
left = 0
else:
# need to interp new left
r1 = Rib.interpolate_new_rib(
panel.left_rib, panel.right_rib, panel.width, left)
if right is None or right == panel.width:
# no need to trim right
r2 = panel.right_rib
right = panel.width
else:
r2 = Rib.interpolate_new_rib(
panel.left_rib, panel.right_rib, panel.width, right)
new_width = right - left
p = cls(r1, r2, new_width)
return p
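# Illustrative usage (a sketch; the Rib constructor arguments are assumptions,
# as the Rib class is not shown here):
#
#   root_rib = Rib(...)          # hypothetical rib definitions
#   tip_rib = Rib(...)
#   panel = Panel(root_rib, tip_rib, width=24)
#   inner = Panel.trim(panel, left=6, right=18)   # 12-unit-wide sub-panel
#   inner = panel[6:18]                           # same result via slicing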
def __getitem__(self, key):
"""
Trim Panel using the slice functionality.
Ex: panel_obj[2:5], trims from 2 to 5
"""
if isinstance(key, slice):
return Panel.trim(self,key.start,key.stop)
raise NotImplementedError | gpl-3.0 | 3,650,149,134,089,995,000 | 28.862069 | 103 | 0.520358 | false |
emanuil-tolev/fundfind | fundfind/core.py | 1 | 1312 | import os
from flask import Flask
from flask.ext.login import LoginManager, current_user
from fundfind import default_settings
login_manager = LoginManager()
def create_app():
app = Flask(__name__)
configure_app(app)
configure_jinja(app)
setup_error_email(app)
login_manager.setup_app(app)
return app
def configure_app(app):
app.config.from_object(default_settings)
# parent directory
here = os.path.dirname(os.path.abspath( __file__ ))
config_path = os.path.join(os.path.dirname(here), 'app.cfg')
if os.path.exists(config_path):
app.config.from_pyfile(config_path)
def setup_error_email(app):
ADMINS = app.config.get('ADMINS', '')
if not app.debug and ADMINS:
import logging
from logging.handlers import SMTPHandler
mail_handler = SMTPHandler('127.0.0.1',
'[email protected]',
ADMINS, 'FundFind error')
mail_handler.setLevel(logging.ERROR)
app.logger.addHandler(mail_handler)
def configure_jinja(app):
# expore some more objects to the templates
add_to_globals = {
'isinstance': isinstance,
'list': list,
'dict': dict
}
app.jinja_env.globals.update(**add_to_globals)
app = create_app()
| mit | 3,052,434,311,215,307,300 | 28.155556 | 64 | 0.632622 | false |
nikkomidoy/project_soa | project_soa/users/views.py | 1 | 1647 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.core.urlresolvers import reverse
from django.views.generic import DetailView, ListView, RedirectView, UpdateView
from django.contrib.auth.mixins import LoginRequiredMixin
from .models import User
class UserDetailView(LoginRequiredMixin, DetailView):
model = User
# These next two lines tell the view to index lookups by username
slug_field = "username"
slug_url_kwarg = "username"
class UserRedirectView(LoginRequiredMixin, RedirectView):
permanent = False
def get_redirect_url(self):
return reverse("soamgr:order")
class UserUpdateView(LoginRequiredMixin, UpdateView):
fields = ['name',]
# we already imported User in the view code above, remember?
model = User
def get_context_data(self, **kwargs):
context = super(UserUpdateView, self).get_context_data(**kwargs)
if self.request.user.is_paraplanner:
self.fields += ['account_name','bsb','account_number',]
return context
# send the user back to their own page after a successful update
def get_success_url(self):
return reverse("users:detail",
kwargs={"username": self.request.user.username})
def get_object(self):
# Only get the User record for the user making the request
return User.objects.get(username=self.request.user.username)
class UserListView(LoginRequiredMixin, ListView):
model = User
# These next two lines tell the view to index lookups by username
slug_field = "username"
slug_url_kwarg = "username"
| mit | 1,562,233,376,126,577,700 | 29.5 | 79 | 0.694596 | false |
mozilla-releng/services | lib/please_cli/please_cli/config.py | 1 | 3928 | # -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import glob
import os
import click
CWD_DIR = os.path.abspath(os.getcwd())
NO_ROOT_DIR_ERROR = '''Project root directory couldn't be detected.
`please` file couldn't be found in any of the following folders:
%s
'''
with open(os.path.join(os.path.dirname(__file__), 'VERSION')) as f:
VERSION = f.read().strip()
ROOT_DIR = None
_folders = []
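# Walk from the current working directory up through its parent directories
# (ending at the filesystem root); the first directory that contains a
# `please` file becomes ROOT_DIR.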
for item in reversed(CWD_DIR.split(os.sep)):
item_dir = '/' + CWD_DIR[:CWD_DIR.find(item) + len(item)][1:]
_folders.append(item_dir)
if os.path.isfile(os.path.join(item_dir, 'please')):
ROOT_DIR = item_dir
break
if ROOT_DIR is None:
raise click.ClickException(NO_ROOT_DIR_ERROR % '\n - '.join(_folders))
CACHE_URLS = [
'https://cache.mozilla-releng.net',
]
SRC_DIR = os.path.join(ROOT_DIR, 'src')
TMP_DIR = os.path.join(ROOT_DIR, 'tmp')
CHANNELS = ['master', 'testing', 'staging', 'production']
DEPLOY_CHANNELS = ['testing', 'staging', 'production']
DOCKER_BASE_REGISTRY = 'index.docker.io'
DOCKER_BASE_REPO = 'mozillareleng/services'
DOCKER_BASE_TAG = 'base-' + VERSION
NIX_BIN_DIR = os.environ.get('NIX_BIN_DIR', '') # must end with /
OPENSSL_BIN_DIR = os.environ.get('OPENSSL_BIN_DIR', '') # must end with /
OPENSSL_ETC_DIR = os.environ.get('OPENSSL_ETC_DIR', '') # must end with /
POSTGRESQL_BIN_DIR = os.environ.get('POSTGRESQL_BIN_DIR', '') # must end with /
IN_DOCKER = False
if os.path.exists('/proc/1/cgroup'):
with open('/proc/1/cgroup', 'rt') as ifh:
IN_DOCKER = 'docker' in ifh.read()
TEMPLATES = {
'backend-json-api': {}
}
DEV_PROJECTS = ['postgresql', 'redis']
PROJECTS = list(map(lambda x: x.replace('_', '-')[len(SRC_DIR) + 1:],
filter(lambda x: os.path.exists(os.path.join(SRC_DIR, x, 'default.nix')),
glob.glob(SRC_DIR + '/*') + glob.glob(SRC_DIR + '/*/*'))))
PROJECTS += DEV_PROJECTS
# TODO: below data should be placed in src/<app>/default.nix files alongside
PROJECTS_CONFIG = {
'common/naming': {
'update': False,
},
'postgresql': {
'update': False,
'run': 'POSTGRESQL',
'run_options': {
'port': 9000,
'data_dir': os.path.join(TMP_DIR, 'postgresql'),
},
},
'redis': {
'update': False,
'run': 'REDIS',
'run_options': {
'port': 6379,
'schema': 'redis',
'data_dir': os.path.join(TMP_DIR, 'redis'),
},
},
'docs': {
'update': False,
'run': 'SPHINX',
'run_options': {
'schema': 'http',
'port': 7000,
},
'deploys': [
{
'target': 'S3',
'options': {
'testing': {
'enable': True,
's3_bucket': 'relengstatic-testing-relengdocs-static-website',
'url': 'https://docs.testing.mozilla-releng.net',
'dns': 'd1sw5c8kdn03y.cloudfront.net.',
},
'staging': {
'enable': True,
's3_bucket': 'relengstatic-staging-relengdocs-static-website',
'url': 'https://docs.staging.mozilla-releng.net',
'dns': 'd32jt14rospqzr.cloudfront.net.',
},
'production': {
'enable': True,
's3_bucket': 'relengstatic-prod-relengdocs-static-website',
'url': 'https://docs.mozilla-releng.net',
'dns': 'd1945er7u4liht.cloudfront.net.',
},
},
},
],
},
}
| mpl-2.0 | -2,205,204,115,440,344,000 | 30.677419 | 93 | 0.52113 | false |
andreask/mailchimp-python | mailchimp/objects/mc_interest_category.py | 1 | 3803 | # coding=utf-8
import logging
from mailchimp.exceptions import MCInterestCategoryNotFound, MCListNotFound, ObjectNotFound
from mailchimp import Request
from .base_object import BaseObject
from .mc_link import MCLink
logger = logging.getLogger(__name__)
class MCInterestCategory(BaseObject):
item_url = '/lists/{list_id}/interest-categories'
def __init__(self, json_data={}):
super(MCInterestCategory, self).__init__()
self._update(json_data)
def _update(self, json_data):
self.id = json_data.get("id")
self.list_id = json_data.get("list_id")
self.title = json_data.get("title")
self.display_order = json_data.get("display_order")
self.type = json_data.get("type")
self.links = [MCLink(link) for link in json_data.get('_links')] if json_data.get('_links') else []
@classmethod
def get_list_url(cls, list_id):
"""
Replace the placeholder for the list id with the list id sent to the method - creates a valid url.
:param list_id: the list to get the url for
:return: the url for the list
"""
return cls.item_url.replace("{list_id}", list_id)
@classmethod
def get(cls, list_id, category_id):
"""
Get the category from the mailchimp API. list_id has to be a valid list and category_id should be the
id of the category to retrieve.
:param list_id: the list id to get the category from
:param category_id: the category to get
:return: a MCInterestCategory object containing the category if successful, raises an MCInterestCategoryNotFound
exception otherwise
"""
try:
response = Request.get("%s/%s" % (MCInterestCategory.get_list_url(list_id), category_id))
return MCInterestCategory(response.json())
except ObjectNotFound:
raise MCInterestCategoryNotFound(list_id, category_id)
@classmethod
def list(cls, list_id, params={}):
"""
Get the list of categories for the list corresponding with the id list_id from the mailchimp API.
:param list_id: the id of the list to get members from
:param params: parameters for defining limits for the search - can be used to page result or search for a
specific status.
:return: an array of MCInterestCategory objects if successful, raises a MCListNotFound exception otherwise
"""
try:
response = Request.get("%s" % MCInterestCategory.get_list_url(list_id), params)
return [MCInterestCategory(category) for category in response.json()['categories']]
except ObjectNotFound:
raise MCListNotFound(list_id)
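# Illustrative usage (the ids below are placeholders):
#
#   categories = MCInterestCategory.list('9e67587f52')
#   category = MCInterestCategory.get('9e67587f52', 'a1e9f4b7f6')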
def delete(self):
"""
Deletes the current category from the list
:return: True if successful
"""
if not self.id:
return False
try:
Request.delete("%s/%s" % (MCInterestCategory.get_list_url(self.list_id), self.id))
return True
except Exception as e:
logger.error("Unable to delete member from list")
raise e
def save(self):
"""
Saves the current category to the list
:return: True if successful
"""
if not self.id:
# Interest categories are addressed by a server-assigned id (unlike list
# members, which use an MD5 hash of the email address), so there is
# nothing to update until the category has an id.
return False
try:
response = Request.put("%s/%s" % (MCInterestCategory.get_list_url(self.list_id), self.id),
self.to_dict())
self._update(response.json())
return True
except Exception as e:
logger.error("Unable to save interest category")
raise e
| mit | 8,170,451,207,158,942,000 | 32.359649 | 120 | 0.611097 | false |
sondree/Master-thesis | Python PLOTS/batchRunner.py | 1 | 1407 |
from plotfitness_emitter import main as main_em
from plotfitness_receiver import main as main_recv
from plotfitness_direction import main as main_dir
from time import sleep
production = True
if production:
print "Warning running in production mode. This will take a long time"
sleep(1)
num_steps = [1,1]
else:
num_steps = [5,5]
def run_pltfit_emitter():
for radius in xrange(200,350,50):
main_em(1.5, 40, 3, radius, None, [1000,1000], num_steps, "PltFit Emitter UAV radius %s," % radius)
def run_pltfit_receiver():
for uav_count in xrange(2,6):
for noise_step in xrange(5):
main_recv(0.5 + noise_step, 40, uav_count, 400, None, [1000,1000], num_steps, "PltFit Receiver UAV count %s," % uav_count)
def high_res():
#main_em(1.5, 40, 3, 200, None, [1000,1000], [1,1], "PltFit Emitter UAV count %s," % 3)
for uav_count in xrange(2,6):
for noise_step in xrange(5):
if uav_count < 3 and noise_step < 3:
continue
main_recv(0.5 + noise_step, 40, uav_count, 400, None, [1000,1000], [1,1], "PltFit Receiver UAV count %s," % uav_count)
def run_pltfit_direction():
for uav_count in xrange(3,5):
main_dir(1.0, (670.0,670.0), uav_count, 1000, 3, 50, 1, 2, None, "PltFit Direction")
if __name__=="__main__":
run_pltfit_emitter()
#run_pltfit_receiver()
#run_pltfit_direction()
| gpl-3.0 | -3,059,308,023,333,114,400 | 32.5 | 134 | 0.625444 | false |
zstackio/zstack-woodpecker | integrationtest/vm/e2e_mini/vm/test_delete_resume_vm.py | 1 | 1268 | # -*- coding:utf-8 -*-
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
vm = test_lib.lib_get_specific_stub('e2e_mini/vm', 'vm')
vm_ops = None
vm_name = 'vm-' + vm.get_time_postfix()
def test():
global vm_ops
vm_ops = vm.VM()
vm_ops.create_vm(name=vm_name)
for view in ['card', 'list']:
# Delete button
vm_ops.delete_vm(vm_name, view=view)
# Resume button
vm_ops.resume(vm_name, 'vm', view=view)
# Delete via more operation
vm_ops.delete_vm(vm_name, view=view, corner_btn=False)
# Resume by more ops
vm_ops.resume(vm_name, 'vm', view=view, details_page=True)
# Delete via more operation in details page
vm_ops.delete_vm(vm_name, view=view, corner_btn=False, details_page=True)
# Resume button
vm_ops.resume(vm_name, 'vm', view=view)
vm_ops.check_browser_console_log()
test_util.test_pass('Delete Resume VM Test Successful')
def env_recover():
global vm_ops
vm_ops.expunge_vm(vm_name)
vm_ops.close()
#Will be called only if exception happens in test().
def error_cleanup():
global vm_ops
try:
vm_ops.expunge_vm(vm_name)
vm_ops.close()
except:
pass
| apache-2.0 | 5,973,048,780,601,433,000 | 25.978723 | 81 | 0.621451 | false |
edx/edx-enterprise | tests/test_enterprise/management/test_migrate_enterprise_catalogs.py | 1 | 5050 | # -*- coding: utf-8 -*-
"""
Tests for the djagno management command `create_enterprise_course_enrollments`.
"""
import mock
from pytest import mark, raises
from django.core.management import call_command
from django.core.management.base import CommandError
from django.test import TestCase
from test_utils.factories import EnterpriseCustomerCatalogFactory, EnterpriseCustomerFactory, UserFactory
@mark.django_db
class MigrateEnterpriseCatalogsCommandTests(TestCase):
"""
Test command `migrate_enterprise_catalogs`.
"""
command = 'migrate_enterprise_catalogs'
def setUp(self):
self.user = UserFactory.create(is_staff=True, is_active=True)
self.enterprise_customer = EnterpriseCustomerFactory(
name='Starfleet Academy',
)
self.enterprise_catalog = EnterpriseCustomerCatalogFactory(
enterprise_customer=self.enterprise_customer
)
super().setUp()
@mock.patch('enterprise.management.commands.migrate_enterprise_catalogs.LOGGER')
@mock.patch('enterprise.management.commands.migrate_enterprise_catalogs.EnterpriseCatalogApiClient')
def test_enterprise_catalogs_created_success(self, api_client_mock, logger_mock):
"""
Test that the command calls the enterprise catalog api to create each catalog successfully.
"""
api_client_mock.return_value = mock.MagicMock()
api_client_mock.return_value.get_enterprise_catalog.return_value = False
api_client_mock.return_value.create_enterprise_catalog = mock.MagicMock()
call_command(self.command, '--api_user', self.user.username)
api_client_mock.return_value.create_enterprise_catalog.assert_called()
logger_mock.info.assert_called_with(
'Successfully migrated Enterprise Catalog {}'.format(self.enterprise_catalog.uuid)
)
@mock.patch('enterprise.management.commands.migrate_enterprise_catalogs.LOGGER')
@mock.patch('enterprise.management.commands.migrate_enterprise_catalogs.EnterpriseCatalogApiClient')
def test_enterprise_catalogs_created_failure(self, api_client_mock, logger_mock):
"""
Test that the command catches errors that may occur while creating catalogs with the enterprise catalog api.
"""
api_client_mock.return_value = mock.MagicMock()
api_client_mock.return_value.get_enterprise_catalog.return_value = False
api_client_mock.return_value.create_enterprise_catalog = mock.MagicMock(side_effect=Exception)
call_command(self.command, '--api_user', self.user.username)
api_client_mock.return_value.create_enterprise_catalog.assert_called()
logger_mock.exception.assert_called_with(
'Failed to migrate enterprise catalog {}'.format(self.enterprise_catalog.uuid)
)
@mock.patch('enterprise.management.commands.migrate_enterprise_catalogs.LOGGER')
@mock.patch('enterprise.management.commands.migrate_enterprise_catalogs.EnterpriseCatalogApiClient')
def test_enterprise_catalogs_updated_success(self, api_client_mock, logger_mock):
"""
Test that the command calls the enterprise catalog api to update each catalog successfully.
"""
api_client_mock.return_value = mock.MagicMock()
api_client_mock.return_value.get_enterprise_catalog.return_value = True
api_client_mock.return_value.update_enterprise_catalog = mock.MagicMock()
call_command(self.command, '--api_user', self.user.username)
api_client_mock.return_value.update_enterprise_catalog.assert_called()
logger_mock.info.assert_called_with(
'Successfully migrated Enterprise Catalog {}'.format(self.enterprise_catalog.uuid)
)
@mock.patch('enterprise.management.commands.migrate_enterprise_catalogs.LOGGER')
@mock.patch('enterprise.management.commands.migrate_enterprise_catalogs.EnterpriseCatalogApiClient')
def test_enterprise_catalogs_updated_failure(self, api_client_mock, logger_mock):
"""
Test that the command catches errors that may occur while updating catalogs with the enterprise catalog api.
"""
api_client_mock.return_value = mock.MagicMock()
api_client_mock.return_value.get_enterprise_catalog.return_value = True
api_client_mock.return_value.update_enterprise_catalog = mock.MagicMock(side_effect=Exception)
call_command(self.command, '--api_user', self.user.username)
api_client_mock.return_value.update_enterprise_catalog.assert_called()
logger_mock.exception.assert_called_with(
'Failed to migrate enterprise catalog {}'.format(self.enterprise_catalog.uuid)
)
def test_api_user_doesnt_exist(self):
"""
Test that the command fails when the provided user is invalid.
"""
error = 'A user with the username invalid was not found.'
with raises(CommandError) as excinfo:
call_command(self.command, '--api_user', 'invalid')
assert str(excinfo.value) == error
| agpl-3.0 | 6,437,641,796,574,643,000 | 47.557692 | 116 | 0.712079 | false |
Bitcoin-ABC/bitcoin-abc | test/functional/abc_p2p_avalanche_voting.py | 1 | 9419 | #!/usr/bin/env python3
# Copyright (c) 2020-2021 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the resolution of forks via avalanche."""
import random
from test_framework.avatools import (
get_ava_p2p_interface,
create_coinbase_stakes,
)
from test_framework.key import (
ECKey,
ECPubKey,
)
from test_framework.messages import AvalancheVote
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
wait_until,
)
BLOCK_ACCEPTED = 0
BLOCK_INVALID = 1
BLOCK_PARKED = 2
BLOCK_FORK = 3
BLOCK_UNKNOWN = -1
BLOCK_MISSING = -2
BLOCK_PENDING = -3
QUORUM_NODE_COUNT = 16
class AvalancheTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [
['-enableavalanche=1', '-avacooldown=0'],
['-enableavalanche=1', '-avacooldown=0', '-noparkdeepreorg', '-maxreorgdepth=-1']]
self.supports_cli = False
self.rpc_timeout = 120
def run_test(self):
node = self.nodes[0]
# Build a fake quorum of nodes.
def get_quorum():
return [get_ava_p2p_interface(node)
for _ in range(0, QUORUM_NODE_COUNT)]
# Pick on node from the quorum for polling.
quorum = get_quorum()
poll_node = quorum[0]
# Generate many block and poll for them.
addrkey0 = node.get_deterministic_priv_key()
blockhashes = node.generatetoaddress(100, addrkey0.address)
# Use the first coinbase to create a stake
stakes = create_coinbase_stakes(node, [blockhashes[0]], addrkey0.key)
fork_node = self.nodes[1]
# Make sure the fork node has synced the blocks
self.sync_blocks([node, fork_node])
# Get the key so we can verify signatures.
avakey = ECPubKey()
avakey.set(bytes.fromhex(node.getavalanchekey()))
self.log.info("Poll for the chain tip...")
best_block_hash = int(node.getbestblockhash(), 16)
poll_node.send_poll([best_block_hash])
def assert_response(expected):
response = poll_node.wait_for_avaresponse()
r = response.response
assert_equal(r.cooldown, 0)
# Verify signature.
assert avakey.verify_schnorr(response.sig, r.get_hash())
votes = r.votes
assert_equal(len(votes), len(expected))
for i in range(0, len(votes)):
assert_equal(repr(votes[i]), repr(expected[i]))
assert_response([AvalancheVote(BLOCK_ACCEPTED, best_block_hash)])
self.log.info("Poll for a selection of blocks...")
various_block_hashes = [
int(node.getblockhash(0), 16),
int(node.getblockhash(1), 16),
int(node.getblockhash(10), 16),
int(node.getblockhash(25), 16),
int(node.getblockhash(42), 16),
int(node.getblockhash(96), 16),
int(node.getblockhash(99), 16),
int(node.getblockhash(100), 16),
]
poll_node.send_poll(various_block_hashes)
assert_response([AvalancheVote(BLOCK_ACCEPTED, h)
for h in various_block_hashes])
self.log.info(
"Poll for a selection of blocks, but some are now invalid...")
invalidated_block = node.getblockhash(76)
node.invalidateblock(invalidated_block)
# We need to send the coin to a new address in order to make sure we do
# not regenerate the same block.
node.generatetoaddress(
26, 'ecregtest:pqv2r67sgz3qumufap3h2uuj0zfmnzuv8v38gtrh5v')
node.reconsiderblock(invalidated_block)
poll_node.send_poll(various_block_hashes)
assert_response([AvalancheVote(BLOCK_ACCEPTED, h) for h in various_block_hashes[:5]] +
[AvalancheVote(BLOCK_FORK, h) for h in various_block_hashes[-3:]])
self.log.info("Poll for unknown blocks...")
various_block_hashes = [
int(node.getblockhash(0), 16),
int(node.getblockhash(25), 16),
int(node.getblockhash(42), 16),
various_block_hashes[5],
various_block_hashes[6],
various_block_hashes[7],
random.randrange(1 << 255, (1 << 256) - 1),
random.randrange(1 << 255, (1 << 256) - 1),
random.randrange(1 << 255, (1 << 256) - 1),
]
poll_node.send_poll(various_block_hashes)
assert_response([AvalancheVote(BLOCK_ACCEPTED, h) for h in various_block_hashes[:3]] +
[AvalancheVote(BLOCK_FORK, h) for h in various_block_hashes[3:6]] +
[AvalancheVote(BLOCK_UNKNOWN, h) for h in various_block_hashes[-3:]])
self.log.info("Trigger polling from the node...")
# duplicate the deterministic sig test from src/test/key_tests.cpp
privkey = ECKey()
privkey.set(bytes.fromhex(
"12b004fff7f4b69ef8650e767f18f11ede158148b425660723b9f9a66e61f747"), True)
pubkey = privkey.get_pubkey()
proof_sequence = 11
proof_expiration = 12
proof = node.buildavalancheproof(
proof_sequence, proof_expiration, pubkey.get_bytes().hex(),
stakes)
# Activate the quorum.
for n in quorum:
success = node.addavalanchenode(
n.nodeid, pubkey.get_bytes().hex(), proof)
assert success is True
self.log.info("Testing getavalanchepeerinfo...")
avapeerinfo = node.getavalanchepeerinfo()
# There is a single peer because all nodes share the same proof.
assert_equal(len(avapeerinfo), 1)
assert_equal(avapeerinfo[0]["peerid"], 0)
assert_equal(avapeerinfo[0]["nodecount"], len(quorum))
# The first avalanche node index is 1, because 0 is self.nodes[1].
assert_equal(sorted(avapeerinfo[0]["nodes"]),
list(range(1, QUORUM_NODE_COUNT + 1)))
assert_equal(avapeerinfo[0]["proof"], proof)
def can_find_block_in_poll(hash, resp=BLOCK_ACCEPTED):
found_hash = False
for n in quorum:
poll = n.get_avapoll_if_available()
# That node has not received a poll
if poll is None:
continue
# We got a poll, check for the hash and respond
votes = []
for inv in poll.invs:
# Vote yes to everything
r = BLOCK_ACCEPTED
# Look for what we expect
if inv.hash == hash:
r = resp
found_hash = True
votes.append(AvalancheVote(r, inv.hash))
n.send_avaresponse(poll.round, votes, privkey)
return found_hash
# Now that we have a peer, we should start polling for the tip.
hash_tip = int(node.getbestblockhash(), 16)
wait_until(lambda: can_find_block_in_poll(hash_tip), timeout=5)
# Make sure the fork node has synced the blocks
self.sync_blocks([node, fork_node])
# Create a fork 2 blocks deep. This should trigger polling.
fork_node.invalidateblock(fork_node.getblockhash(100))
fork_address = fork_node.get_deterministic_priv_key().address
fork_node.generatetoaddress(2, fork_address)
# Because the new tip is a deep reorg, the node will not accept it
# right away, but poll for it.
def parked_block(blockhash):
for tip in node.getchaintips():
if tip["hash"] == blockhash:
assert tip["status"] != "active"
return tip["status"] == "parked"
return False
fork_tip = fork_node.getbestblockhash()
wait_until(lambda: parked_block(fork_tip))
self.log.info("Answer all polls to finalize...")
hash_to_find = int(fork_tip, 16)
def has_accepted_new_tip():
can_find_block_in_poll(hash_to_find)
return node.getbestblockhash() == fork_tip
# Because everybody answers yes, the node will accept that block.
wait_until(has_accepted_new_tip, timeout=15)
assert_equal(node.getbestblockhash(), fork_tip)
self.log.info("Answer all polls to park...")
node.generate(1)
tip_to_park = node.getbestblockhash()
hash_to_find = int(tip_to_park, 16)
assert(tip_to_park != fork_tip)
def has_parked_new_tip():
can_find_block_in_poll(hash_to_find, BLOCK_PARKED)
return node.getbestblockhash() == fork_tip
# Because everybody answers no, the node will park that block.
wait_until(has_parked_new_tip, timeout=15)
assert_equal(node.getbestblockhash(), fork_tip)
self.log.info(
"Check the node is discouraging unexpected avaresponses.")
with node.assert_debug_log(
['Misbehaving', 'peer=1 (0 -> 2): unexpected-ava-response']):
# unknown voting round
poll_node.send_avaresponse(
round=2**32 - 1, votes=[], privkey=privkey)
if __name__ == '__main__':
AvalancheTest().main()
| mit | -3,139,993,200,430,624,300 | 36.229249 | 94 | 0.591783 | false |
code-sauce/tensorflow | tensorflow/contrib/distributions/python/ops/kullback_leibler.py | 1 | 5019 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Registration and usage mechanisms for KL-divergences."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
_DIVERGENCES = {}
def _registered_kl(type_a, type_b):
"""Get the KL function registered for classes a and b."""
hierarchy_a = inspect.getmro(type_a)
hierarchy_b = inspect.getmro(type_b)
dist_to_children = None
kl_fn = None
for mro_to_a, parent_a in enumerate(hierarchy_a):
for mro_to_b, parent_b in enumerate(hierarchy_b):
candidate_dist = mro_to_a + mro_to_b
candidate_kl_fn = _DIVERGENCES.get((parent_a, parent_b), None)
if not kl_fn or (candidate_kl_fn and candidate_dist < dist_to_children):
dist_to_children = candidate_dist
kl_fn = candidate_kl_fn
return kl_fn
def kl(dist_a, dist_b, allow_nan=False, name=None):
"""Get the KL-divergence KL(dist_a || dist_b).
If there is no KL method registered specifically for `type(dist_a)` and
`type(dist_b)`, then the class hierarchies of these types are searched.
If one KL method is registered between any pairs of classes in these two
parent hierarchies, it is used.
If more than one such registered method exists, the method whose registered
classes have the shortest sum MRO paths to the input types is used.
If more than one such shortest path exists, the first method
identified in the search is used (favoring a shorter MRO distance to
`type(dist_a)`).
Args:
dist_a: The first distribution.
dist_b: The second distribution.
allow_nan: If `False` (default), a runtime error is raised
if the KL returns NaN values for any batch entry of the given
distributions. If `True`, the KL may return a NaN for the given entry.
name: (optional) Name scope to use for created operations.
Returns:
A Tensor with the batchwise KL-divergence between dist_a and dist_b.
Raises:
NotImplementedError: If no KL method is defined for distribution types
of dist_a and dist_b.
"""
kl_fn = _registered_kl(type(dist_a), type(dist_b))
if kl_fn is None:
raise NotImplementedError(
"No KL(dist_a || dist_b) registered for dist_a type %s and dist_b "
"type %s" % (type(dist_a).__name__, type(dist_b).__name__))
with ops.name_scope("KullbackLeibler"):
kl_t = kl_fn(dist_a, dist_b, name=name)
if allow_nan:
return kl_t
# Check KL for NaNs
kl_t = array_ops.identity(kl_t, name="kl")
with ops.control_dependencies([
control_flow_ops.Assert(
math_ops.logical_not(
math_ops.reduce_any(math_ops.is_nan(kl_t))),
["KL calculation between %s and %s returned NaN values "
"(and was called with allow_nan=False). Values:"
% (dist_a.name, dist_b.name), kl_t])]):
return array_ops.identity(kl_t, name="checked_kl")
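# Illustrative usage (a sketch, not part of this module): register a pairwise
# KL implementation and dispatch through kl(). `MyNormal` and the body of
# `_kl_my_normal` are hypothetical.
#
#   @RegisterKL(MyNormal, MyNormal)
#   def _kl_my_normal(dist_a, dist_b, name=None):
#     return ...  # closed-form KL(dist_a || dist_b)
#
#   divergence = kl(dist_a, dist_b)  # resolved via _registered_kl() above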
class RegisterKL(object):
"""Decorator to register a KL divergence implementation function.
Usage:
@distributions.RegisterKL(distributions.Normal, distributions.Normal)
def _kl_normal_mvn(norm_a, norm_b):
# Return KL(norm_a || norm_b)
"""
def __init__(self, dist_cls_a, dist_cls_b):
"""Initialize the KL registrar.
Args:
dist_cls_a: the class of the first argument of the KL divergence.
dist_cls_b: the class of the second argument of the KL divergence.
"""
self._key = (dist_cls_a, dist_cls_b)
def __call__(self, kl_fn):
"""Perform the KL registration.
Args:
kl_fn: The function to use for the KL divergence.
Returns:
kl_fn
Raises:
TypeError: if kl_fn is not a callable.
ValueError: if a KL divergence function has already been registered for
the given argument classes.
"""
if not callable(kl_fn):
raise TypeError("kl_fn must be callable, received: %s" % kl_fn)
if self._key in _DIVERGENCES:
raise ValueError("KL(%s || %s) has already been registered to: %s"
% (self._key[0].__name__, self._key[1].__name__,
_DIVERGENCES[self._key]))
_DIVERGENCES[self._key] = kl_fn
return kl_fn
| apache-2.0 | 2,924,097,993,749,842,400 | 34.097902 | 80 | 0.662483 | false |
kirstymcnaught/SpecialEffectMinecraftMods | src/gen_toml.py | 1 | 1831 |
import subprocess, re, shutil
## Grep all the files for MODID
cmd="find . -iname '*.java' | xargs grep 'MODID = ' -h"
regex = "\"(.*)\""
# shell=True expects the full command string (the pipeline above), not a list
result = subprocess.run(cmd, stdout=subprocess.PIPE, shell=True)
lines = result.stdout.decode("utf-8").splitlines()
mod_ids = []
for line in lines:
match = re.search(regex, line)
if match:
mod_id = match.group(1)
mod_ids.append(mod_id)
print(mod_id)
# Copy base file
fname_base = 'main/resources/META-INF/mods.toml.base'
fname_final = 'main/resources/META-INF/mods.toml'
shutil.copyfile(fname_base, fname_final)
author='Kirsty McNaught'
url = 'https://www.specialeffect.org.uk/eyemine'
# Append mod details
with open(fname_final, "a") as myfile:
for mod_id in mod_ids:
myfile.write('[[mods]]\n')
myfile.write('modId=\"{}\"\n'.format(mod_id))
myfile.write('version=\"${file.jarVersion}\"\n')
myfile.write('displayName=\"{}\"\n'.format(mod_id)) # TODO: nicer display names
myfile.write('displayURL=\"{}\"\n'.format(url))
myfile.write('authors=\"{}\"\n'.format(author))
# myfile.write('\n')
myfile.write('[[dependencies.{}]]\n'.format(mod_id))
myfile.write('\tmodId="forge"\n')
myfile.write('\tmandatory=true\n')
myfile.write('\tversionRange="[25,)"\n')
myfile.write('\tordering="NONE"\n')
myfile.write('\tside="BOTH"\n') # TODO: maybe client only??
# Here's another dependency
myfile.write('[[dependencies.{}]]\n'.format(mod_id))
myfile.write('\tmodId="minecraft"\n')
myfile.write('\tmandatory=true\n')
myfile.write('\tversionRange="[1.14.4]"\n')
myfile.write('\tordering="NONE"\n')
myfile.write('\tside="BOTH"\n')
myfile.write('\n')
| gpl-3.0 | 8,918,407,722,003,720,000 | 29.032787 | 87 | 0.59148 | false |
ericchill/gnofract4d | fract4dgui/painter.py | 1 | 1313 | # GUI for painting colors onto the fractal
import gtk
import dialog
import browser
import utils
def show(parent,f):
PainterDialog.show(parent,f)
class PainterDialog(dialog.T):
def show(parent, f):
dialog.T.reveal(PainterDialog, True, parent, None, f)
show = staticmethod(show)
def __init__(self,main_window,f):
dialog.T.__init__(
self,
_("Painter"),
main_window,
gtk.DIALOG_DESTROY_WITH_PARENT,
(gtk.STOCK_CLOSE, gtk.RESPONSE_CLOSE))
self.main_window = main_window
self.f = f
self.paint_toggle = gtk.ToggleButton(_("Painting"))
self.paint_toggle.set_active(True)
self.paint_toggle.connect('toggled',self.onChangePaintMode)
self.csel = gtk.ColorSelection()
self.vbox.add(self.csel)
self.vbox.add(self.paint_toggle)
self.vbox.show_all()
self.onChangePaintMode()
def onChangePaintMode(self,*args):
self.f.set_paint_mode(self.paint_toggle.get_active(), self.csel)
def onResponse(self,widget,id):
if id == gtk.RESPONSE_CLOSE or \
id == gtk.RESPONSE_NONE or \
id == gtk.RESPONSE_DELETE_EVENT:
self.hide()
self.f.set_paint_mode(False,None)
| bsd-3-clause | -2,438,796,135,652,036,600 | 28.177778 | 72 | 0.593298 | false |
kubernetes-client/python | kubernetes/client/configuration.py | 1 | 13238 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.18
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import copy
import logging
import multiprocessing
import sys
import urllib3
import six
from six.moves import http_client as httplib
class Configuration(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
:param host: Base url
:param api_key: Dict to store API key(s).
Each entry in the dict specifies an API key.
The dict key is the name of the security scheme in the OAS specification.
The dict value is the API key secret.
:param api_key_prefix: Dict to store API prefix (e.g. Bearer)
The dict key is the name of the security scheme in the OAS specification.
The dict value is an API key prefix when generating the auth data.
:param username: Username for HTTP basic authentication
:param password: Password for HTTP basic authentication
:param discard_unknown_keys: Boolean value indicating whether to discard
unknown properties. A server may send a response that includes additional
properties that are not known by the client in the following scenarios:
1. The OpenAPI document is incomplete, i.e. it does not match the server
implementation.
2. The client was generated using an older version of the OpenAPI document
and the server has been upgraded since then.
If a schema in the OpenAPI document defines the additionalProperties attribute,
then all undeclared properties received by the server are injected into the
additional properties map. In that case, there are undeclared properties, and
nothing to discard.
:Example:
API Key Authentication Example.
Given the following security scheme in the OpenAPI specification:
components:
securitySchemes:
cookieAuth: # name for the security scheme
type: apiKey
in: cookie
name: JSESSIONID # cookie name
You can programmatically set the cookie:
conf = client.Configuration(
api_key={'cookieAuth': 'abc123'}
api_key_prefix={'cookieAuth': 'JSESSIONID'}
)
The following cookie will be added to the HTTP request:
Cookie: JSESSIONID abc123
"""
_default = None
def __init__(self, host="http://localhost",
api_key=None, api_key_prefix=None,
username=None, password=None,
discard_unknown_keys=False,
):
"""Constructor
"""
self.host = host
"""Default Base url
"""
self.temp_folder_path = None
"""Temp file folder for downloading files
"""
# Authentication Settings
self.api_key = {}
if api_key:
self.api_key = api_key
"""dict to store API key(s)
"""
self.api_key_prefix = {}
if api_key_prefix:
self.api_key_prefix = api_key_prefix
"""dict to store API prefix (e.g. Bearer)
"""
self.refresh_api_key_hook = None
"""function hook to refresh API key if expired
"""
self.username = username
"""Username for HTTP basic authentication
"""
self.password = password
"""Password for HTTP basic authentication
"""
self.discard_unknown_keys = discard_unknown_keys
self.logger = {}
"""Logging Settings
"""
self.logger["package_logger"] = logging.getLogger("client")
self.logger["urllib3_logger"] = logging.getLogger("urllib3")
self.logger_format = '%(asctime)s %(levelname)s %(message)s'
"""Log format
"""
self.logger_stream_handler = None
"""Log stream handler
"""
self.logger_file_handler = None
"""Log file handler
"""
self.logger_file = None
"""Debug file location
"""
self.debug = False
"""Debug switch
"""
self.verify_ssl = True
"""SSL/TLS verification
Set this to false to skip verifying SSL certificate when calling API
from https server.
"""
self.ssl_ca_cert = None
"""Set this to customize the certificate file to verify the peer.
"""
self.cert_file = None
"""client certificate file
"""
self.key_file = None
"""client key file
"""
self.assert_hostname = None
"""Set this to True/False to enable/disable SSL hostname verification.
"""
self.connection_pool_maxsize = multiprocessing.cpu_count() * 5
"""urllib3 connection pool's maximum number of connections saved
per pool. urllib3 uses 1 connection as default value, but this is
not the best value when you are making a lot of possibly parallel
requests to the same host, which is often the case here.
cpu_count * 5 is used as default value to increase performance.
"""
self.proxy = None
"""Proxy URL
"""
self.proxy_headers = None
"""Proxy headers
"""
self.safe_chars_for_path_param = ''
"""Safe chars for path_param
"""
self.retries = None
"""Adding retries to override urllib3 default value 3
"""
# Disable client side validation
self.client_side_validation = True
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
if k not in ('logger', 'logger_file_handler'):
setattr(result, k, copy.deepcopy(v, memo))
# shallow copy of loggers
result.logger = copy.copy(self.logger)
# use setters to configure loggers
result.logger_file = self.logger_file
result.debug = self.debug
return result
@classmethod
def set_default(cls, default):
"""Set default instance of configuration.
It stores default configuration, which can be
returned by get_default_copy method.
:param default: object of Configuration
"""
cls._default = copy.deepcopy(default)
@classmethod
def get_default_copy(cls):
"""Return new instance of configuration.
This method returns newly created, based on default constructor,
object of Configuration class or returns a copy of default
configuration passed by the set_default method.
:return: The configuration object.
"""
if cls._default is not None:
return copy.deepcopy(cls._default)
return Configuration()
@property
def logger_file(self):
"""The logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
return self.__logger_file
@logger_file.setter
def logger_file(self, value):
"""The logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
self.__logger_file = value
if self.__logger_file:
# If set logging file,
# then add file handler and remove stream handler.
self.logger_file_handler = logging.FileHandler(self.__logger_file)
self.logger_file_handler.setFormatter(self.logger_formatter)
for _, logger in six.iteritems(self.logger):
logger.addHandler(self.logger_file_handler)
@property
def debug(self):
"""Debug status
:param value: The debug status, True or False.
:type: bool
"""
return self.__debug
@debug.setter
def debug(self, value):
"""Debug status
:param value: The debug status, True or False.
:type: bool
"""
self.__debug = value
if self.__debug:
# if debug status is True, turn on debug logging
for _, logger in six.iteritems(self.logger):
logger.setLevel(logging.DEBUG)
# turn on httplib debug
httplib.HTTPConnection.debuglevel = 1
else:
# if debug status is False, turn off debug logging,
# setting log level to default `logging.WARNING`
for _, logger in six.iteritems(self.logger):
logger.setLevel(logging.WARNING)
# turn off httplib debug
httplib.HTTPConnection.debuglevel = 0
@property
def logger_format(self):
"""The logger format.
The logger_formatter will be updated when sets logger_format.
:param value: The format string.
:type: str
"""
return self.__logger_format
@logger_format.setter
def logger_format(self, value):
"""The logger format.
The logger_formatter will be updated when sets logger_format.
:param value: The format string.
:type: str
"""
self.__logger_format = value
self.logger_formatter = logging.Formatter(self.__logger_format)
def get_api_key_with_prefix(self, identifier):
"""Gets API key (with prefix if set).
:param identifier: The identifier of apiKey.
:return: The token for api key authentication.
"""
if self.refresh_api_key_hook is not None:
self.refresh_api_key_hook(self)
key = self.api_key.get(identifier)
if key:
prefix = self.api_key_prefix.get(identifier)
if prefix:
return "%s %s" % (prefix, key)
else:
return key
def get_basic_auth_token(self):
"""Gets HTTP basic authentication header (string).
:return: The token for basic HTTP authentication.
"""
username = ""
if self.username is not None:
username = self.username
password = ""
if self.password is not None:
password = self.password
return urllib3.util.make_headers(
basic_auth=username + ':' + password
).get('authorization')
def auth_settings(self):
"""Gets Auth Settings dict for api client.
:return: The Auth Settings information dict.
"""
auth = {}
if 'authorization' in self.api_key:
auth['BearerToken'] = {
'type': 'api_key',
'in': 'header',
'key': 'authorization',
'value': self.get_api_key_with_prefix('authorization')
}
return auth
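# Illustrative configuration of the BearerToken scheme above (the host and
# token values are placeholders):
#
#   configuration = Configuration(
#       host="https://my-cluster.example:6443",
#       api_key={'authorization': '<service-account-token>'},
#       api_key_prefix={'authorization': 'Bearer'},
#   )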
def to_debug_report(self):
"""Gets the essential information for debugging.
:return: The report for debugging.
"""
return "Python SDK Debug Report:\n"\
"OS: {env}\n"\
"Python Version: {pyversion}\n"\
"Version of the API: release-1.18\n"\
"SDK Package Version: 18.0.0-snapshot".\
format(env=sys.platform, pyversion=sys.version)
def get_host_settings(self):
"""Gets an array of host settings
:return: An array of host settings
"""
return [
{
'url': "/",
'description': "No description provided",
}
]
def get_host_from_settings(self, index, variables=None):
"""Gets host URL based on the index and variables
:param index: array index of the host settings
:param variables: hash of variable and the corresponding value
:return: URL based on host settings
"""
variables = {} if variables is None else variables
servers = self.get_host_settings()
try:
server = servers[index]
except IndexError:
raise ValueError(
"Invalid index {0} when selecting the host settings. "
"Must be less than {1}".format(index, len(servers)))
url = server['url']
# go through variables and replace placeholders
        for variable_name, variable in server.get('variables', {}).items():  # default settings define no variables
used_value = variables.get(
variable_name, variable['default_value'])
if 'enum_values' in variable \
and used_value not in variable['enum_values']:
raise ValueError(
"The variable `{0}` in the host URL has invalid value "
"{1}. Must be {2}.".format(
variable_name, variables[variable_name],
variable['enum_values']))
url = url.replace("{" + variable_name + "}", used_value)
return url
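    # Sketch of the substitution above (this template and variable are
    # assumptions for illustration; the default get_host_settings() entry
    # defines no variables):
    #   server = {'url': 'https://{region}.example.com',
    #             'variables': {'region': {'default_value': 'us',
    #                                      'enum_values': ['us', 'eu']}}}
    #   get_host_from_settings(0, {'region': 'eu'}) -> 'https://eu.example.com'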
| apache-2.0 | 6,432,329,210,058,188,000 | 32.261307 | 124 | 0.585662 | false |
Szkered/BC_2402 | sydb/stocks/admin.py | 1 | 1758 | from django.contrib import admin
from stocks.models import *
class StockAdmin(admin.ModelAdmin):
list_display = ['name', 'unit_measure', 'unit_price', 'category_slug']
ordering = ['name']
search_fields = ('name',)
class DonorAdmin(admin.ModelAdmin):
list_display = ['name', 'contact_no', 'address', 'referral', 'mailing']
ordering = ['name']
class DonateAdmin(admin.ModelAdmin):
list_display = ['stock', 'quantity']
ordering = ['stock']
class DonationAdmin(admin.ModelAdmin):
list_display = ['date', 'donor']
ordering = ['date']
class DestinationAdmin(admin.ModelAdmin):
pass
class VendorAdmin(admin.ModelAdmin):
list_display = ['name', 'contact_no', 'address']
ordering = ['name']
class CategoryAdmin(admin.ModelAdmin):
list_display = ['name', 'stock']
ordering = ['stock']
class DistributeAdmin(admin.ModelAdmin):
list_display = ['quantity', 'stock', 'family_type', 'date']
ordering = ['stock']
class PurchaseInline(admin.StackedInline):
model = Purchase
extra = 0
class OrderAdmin(admin.ModelAdmin):
fieldsets = [
(None, {'fields': ['vendor', 'confirm']}),
('Date Info', {'fields': ['date'], 'classes' : ['collapse']}),
]
inlines = [PurchaseInline]
admin.site.register(Stock, StockAdmin)
admin.site.register(Donor, DonorAdmin)
admin.site.register(Destination, DestinationAdmin)
admin.site.register(Vendor, VendorAdmin)
admin.site.register(Donate, DonateAdmin)
admin.site.register(Donation, DonationAdmin)
# admin.site.register(Purchase, PurchaseAdmin)
admin.site.register(Distribute, DistributeAdmin)
admin.site.register(Transfer)
admin.site.register(Category, CategoryAdmin)
admin.site.register(Order, OrderAdmin)
| apache-2.0 | 4,979,858,879,367,325,000 | 27.354839 | 75 | 0.6843 | false |
Lukasa/urllib3 | test/contrib/test_pyopenssl_dependencies.py | 1 | 1662 | # -*- coding: utf-8 -*-
import unittest
from nose.plugins.skip import SkipTest
try:
from urllib3.contrib.pyopenssl import (inject_into_urllib3,
extract_from_urllib3)
except ImportError as e:
raise SkipTest('Could not import PyOpenSSL: %r' % e)
from mock import patch, Mock
class TestPyOpenSSLInjection(unittest.TestCase):
"""
Tests for error handling in pyopenssl's 'inject_into urllib3'
"""
def test_inject_validate_fail_cryptography(self):
"""
Injection should not be supported if cryptography is too old.
"""
try:
with patch("cryptography.x509.extensions.Extensions") as mock:
del mock.get_extension_for_class
self.assertRaises(ImportError, inject_into_urllib3)
finally:
# `inject_into_urllib3` is not supposed to succeed.
# If it does, this test should fail, but we need to
# clean up so that subsequent tests are unaffected.
extract_from_urllib3()
def test_inject_validate_fail_pyopenssl(self):
"""
Injection should not be supported if pyOpenSSL is too old.
"""
try:
return_val = Mock()
del return_val._x509
with patch("OpenSSL.crypto.X509", return_value=return_val):
self.assertRaises(ImportError, inject_into_urllib3)
finally:
# `inject_into_urllib3` is not supposed to succeed.
# If it does, this test should fail, but we need to
# clean up so that subsequent tests are unaffected.
extract_from_urllib3()
| mit | -2,780,362,765,998,670,300 | 35.130435 | 74 | 0.608303 | false |
mkollaro/destroystack | bin/packstack_deploy.py | 1 | 7940 | #!/usr/bin/env python
# Copyright (c) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import logging
import destroystack.tools.common as common
import destroystack.tools.server_manager as server_manager
import destroystack.tools.servers as server_tools
logging.basicConfig(level=logging.INFO)
LOG = logging.getLogger(__name__)
logging.getLogger("paramiko").setLevel(logging.WARNING)
# local server object on which packstack will be run
LOCALHOST = None
# packages that will be checked for on local host
REQUIRED_PACKAGES = ['openstack-packstack', 'openstack-utils',
'python-novaclient']
# password that will be set to services (like database)
DEFAULT_SERVICE_PASS = "123456"
PACKSTACK_DEFAULT_OPTIONS = {
"CONFIG_GLANCE_INSTALL": "n",
"CONFIG_CINDER_INSTALL": "n",
"CONFIG_NOVA_INSTALL": "n",
"CONFIG_QUANTUM_INSTALL": "n",
"CONFIG_NEUTRON_INSTALL": "n",
"CONFIG_HORIZON_INSTALL": "n",
"CONFIG_SWIFT_INSTALL": "n",
"CONFIG_CEILOMETER_INSTALL": "n",
"CONFIG_NAGIOS_INSTALL": "n",
"CONFIG_CLIENT_INSTALL": "y",
"CONFIG_SWIFT_STORAGE_ZONES": "1",
"CONFIG_SWIFT_STORAGE_REPLICAS": "3",
"CONFIG_SWIFT_STORAGE_FSTYPE": "ext4",
"CONFIG_PROVISION_TEMPEST": "n",
"CONFIG_PROVISION_DEMO": "n",
"CONFIG_KEYSTONE_ADMIN_PW": DEFAULT_SERVICE_PASS,
"CONFIG_NOVA_NETWORK_PUBIF": "eth0",
"CONFIG_NOVA_COMPUTE_PRIVIF": "lo",
"CONFIG_NOVA_NETWORK_PRIVIF": "lo",
}
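# Most components start disabled here; _configure_roles() and _configure_swift()
# flip the relevant *_INSTALL options back to "y" for the roles found in the
# config file.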
# packstack answerfile that will be created locally
ANSWERFILE = 'packstack.answer'
def main():
global LOCALHOST
LOCALHOST = server_tools.LocalServer()
install_packages(REQUIRED_PACKAGES)
create_configuration()
deploy()
def install_packages(packages):
packages = ' '.join(packages)
LOCALHOST.cmd('yum install -y %s' % packages, log_output=True)
def create_configuration():
"""Using the server roles in the config file, create a packstack answerfile
"""
packstack_answers = copy.copy(PACKSTACK_DEFAULT_OPTIONS)
manager = server_manager.ServerManager()
_configure_roles(packstack_answers, manager)
_configure_keystone(packstack_answers, manager)
_configure_swift(packstack_answers, manager)
_create_packstack_answerfile(packstack_answers)
def deploy():
"""Run Packstack and configure components if necessary
"""
manager = server_manager.ServerManager()
LOG.info("Running packstack, this may take a while")
LOCALHOST.cmd("packstack --answer-file=%s" % ANSWERFILE,
collect_stdout=False)
data_servers = list(manager.servers(role='swift_data'))
_set_swift_mount_check(data_servers)
_set_iptables(manager)
def get_ips(host_list):
"""Return string 'address,address,address' from IPs in the host list."""
return ','.join([x.ip for x in host_list])
def _configure_roles(packstack_opt, manager):
compute = manager.get_all(role='compute')
if compute:
packstack_opt["CONFIG_COMPUTE_HOSTS"] = get_ips(compute)
packstack_opt["CONFIG_NOVA_COMPUTE_HOSTS"] = get_ips(compute)
packstack_opt["CONFIG_NOVA_INSTALL"] = "y"
packstack_opt["CONFIG_GLANCE_INSTALL"] = "y"
packstack_opt["CONFIG_CINDER_INSTALL"] = "y"
def _configure_keystone(packstack_opt, manager):
keystone = manager.get_all(role='keystone')
if keystone:
packstack_opt["CONFIG_KEYSTONE_HOST"] = get_ips(keystone)
user = common.CONFIG["keystone"].get("user", "admin")
if user != "admin":
raise Exception("This helper script assumes that you are using the"
" 'admin' keystone user")
password = common.CONFIG["keystone"].get("password", DEFAULT_SERVICE_PASS)
packstack_opt["CONFIG_KEYSTONE_ADMIN_PW"] = password
def _configure_swift(packstack_opt, manager):
"""Add Swift proxy/data servers to packstack answerfile.
Also formats the extra disks provided to the data servers.
"""
proxy_servers = manager.servers(role='swift_proxy')
data_servers = list(manager.servers(role='swift_data'))
if not (proxy_servers and data_servers):
return
data_nodes = server_tools.prepare_swift_disks(data_servers)
packstack_opt["CONFIG_SWIFT_INSTALL"] = "y"
packstack_opt["CONFIG_SWIFT_PROXY_HOSTS"] = get_ips(proxy_servers)
packstack_opt["CONFIG_SWIFT_STORAGE_HOSTS"] = ",".join(data_nodes)
def _get_default_host():
"""Get one of the hosts that will be defaultly used by services.
This is usually the host with the role 'controller' (one of them is
selected), but if there is no such role specified, use the 'keystone' role.
If even that is unavailable, just choose the first host provided.
"""
manager = server_manager.ServerManager()
controller = manager.get(role='controller')
keystone = manager.get(role='keystone')
return controller or keystone or manager.get()
def _set_default_host_in_answerfile():
"""Set all the hosts in the answerfile to the default host (controller).
Packstack by default creates an answerfile that uses localhost for all
services, but since we usually run Packstack from a separate server that
isn't supposed to have OpenStack installed, it is better to choose one from
the servers given in the config. The exception is the server on which
OpenStack clients should be installed, which will remain the same
(localhost).
"""
# save the original host to which OpenStack clients should be installed
res = LOCALHOST.cmd(
"openstack-config --get %s general CONFIG_OSCLIENT_HOST" % ANSWERFILE)
original_client_host = ''.join(res.out)
default_host = _get_default_host().ip
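    # The sed below rewrites every CONFIG_*_HOST / CONFIG_*_HOSTS value in the
    # answerfile to point at the chosen default host.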
LOCALHOST.cmd("sed -ri 's/HOST(S?)\w*=.*/HOST\\1=%s/' %s"
% (default_host, ANSWERFILE))
# restore host for client installation
LOCALHOST.cmd("openstack-config --set %s general CONFIG_OSCLIENT_HOST %s"
% (ANSWERFILE, original_client_host))
def _create_packstack_answerfile(answers):
if not LOCALHOST.file_exists(ANSWERFILE):
LOCALHOST.cmd("packstack --gen-answer-file=%s" % ANSWERFILE)
_set_default_host_in_answerfile()
else:
LOG.info("Reusing existing packstack answer file")
for question, answer in answers.iteritems():
LOCALHOST.cmd("openstack-config --set %s general %s %s"
% (ANSWERFILE, question, answer))
def _set_swift_mount_check(data_servers):
"""Set the parameter mount_check to True in /etc/swift/*-server.conf
If this is not checked True, Swift will replicate files onto the
system disk if the disk is umounted.
"""
for server in data_servers:
server.cmd("""
sed -i -e 's/mount_check.*=.*false/mount_check = true/' \
/etc/swift/*-server.conf""")
for server in data_servers:
server.cmd("swift-init account container object rest restart")
def _set_iptables(manager):
"""Allow all incoming traffic to the OpenStack nodes from local IP."""
ip = _get_localhost_ip()
if not ip:
# since this functionality might not be necessary, just give up
return
for server in manager.get_all():
server.cmd("iptables -I INPUT -s %s -j ACCEPT" % ip)
server.cmd("service iptables save")
def _get_localhost_ip():
return ''.join(LOCALHOST.cmd("hostname --ip-address").out)
if __name__ == '__main__':
main()
| apache-2.0 | -2,189,798,833,146,099,000 | 34.288889 | 79 | 0.68073 | false |
centricular/cerbero | cerbero/commands/check.py | 1 | 2640 | # cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
from cerbero.commands import Command, register_command
from cerbero.build.cookbook import CookBook
from cerbero.errors import FatalError
from cerbero.utils import _, N_, ArgparseArgument
from cerbero.utils import messages as m
class Check(Command):
doc = N_('Run checks on a given recipe')
name = 'check'
def __init__(self):
Command.__init__(self,
[ArgparseArgument('recipe', nargs=1,
help=_('name of the recipe to run checks on')),
ArgparseArgument('--recursive', action='store_true', default=False,
help=_('Recursively run checks on dependencies')),
])
def run(self, config, args):
cookbook = CookBook(config)
recipe_name = args.recipe[0]
recursive = args.recursive
recipe = cookbook.get_recipe(recipe_name)
if recursive:
ordered_recipes = cookbook.list_recipe_deps(recipe_name)
else:
ordered_recipes = [recipe]
for recipe in ordered_recipes:
if cookbook.recipe_needs_build(recipe.name):
raise FatalError(_("Recipe %s is not built yet" % recipe.name))
for recipe in ordered_recipes:
# call step function
stepfunc = None
try:
stepfunc = getattr(recipe, 'check')
            except AttributeError:
m.message('%s has no check step, skipped' % recipe.name)
if stepfunc:
try:
stepfunc()
except FatalError as e:
raise e
except Exception as ex:
raise FatalError(_("Error running %s checks: %s") %
(recipe.name, ex))
register_command(Check)
| lgpl-2.1 | -215,881,122,121,253,340 | 35.666667 | 79 | 0.623864 | false |
Teagan42/home-assistant | homeassistant/components/netatmo/binary_sensor.py | 1 | 6522 | """Support for the Netatmo binary sensors."""
import logging
import pyatmo
from homeassistant.components.binary_sensor import BinarySensorDevice
from .camera import CameraData
from .const import AUTH, DOMAIN, MANUFACTURER
_LOGGER = logging.getLogger(__name__)
# These are the available sensors mapped to binary_sensor class
WELCOME_SENSOR_TYPES = {
"Someone known": "motion",
"Someone unknown": "motion",
"Motion": "motion",
}
PRESENCE_SENSOR_TYPES = {
"Outdoor motion": "motion",
"Outdoor human": "motion",
"Outdoor animal": "motion",
"Outdoor vehicle": "motion",
}
TAG_SENSOR_TYPES = {"Tag Vibration": "vibration", "Tag Open": "opening"}
SENSOR_TYPES = {"NACamera": WELCOME_SENSOR_TYPES, "NOC": PRESENCE_SENSOR_TYPES}
CONF_HOME = "home"
CONF_CAMERAS = "cameras"
CONF_WELCOME_SENSORS = "welcome_sensors"
CONF_PRESENCE_SENSORS = "presence_sensors"
CONF_TAG_SENSORS = "tag_sensors"
DEFAULT_TIMEOUT = 90
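# Look-back window in seconds, passed to pyatmo as the exclude/offset argument
# when deciding whether a motion, vibration or opening event is still current.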
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up the access to Netatmo binary sensor."""
auth = hass.data[DOMAIN][entry.entry_id][AUTH]
def get_entities():
"""Retrieve Netatmo entities."""
entities = []
def get_camera_home_id(data, camera_id):
"""Return the home id for a given camera id."""
for home_id in data.camera_data.cameras:
for camera in data.camera_data.cameras[home_id].values():
if camera["id"] == camera_id:
return home_id
return None
try:
data = CameraData(hass, auth)
for camera in data.get_all_cameras():
home_id = get_camera_home_id(data, camera_id=camera["id"])
sensor_types = {}
sensor_types.update(SENSOR_TYPES[camera["type"]])
# Tags are only supported with Netatmo Welcome indoor cameras
if camera["type"] == "NACamera" and data.get_modules(camera["id"]):
sensor_types.update(TAG_SENSOR_TYPES)
for sensor_name in sensor_types:
entities.append(
NetatmoBinarySensor(data, camera["id"], home_id, sensor_name)
)
except pyatmo.NoDevice:
_LOGGER.debug("No camera entities to add")
return entities
async_add_entities(await hass.async_add_executor_job(get_entities), True)
class NetatmoBinarySensor(BinarySensorDevice):
"""Represent a single binary sensor in a Netatmo Camera device."""
def __init__(self, data, camera_id, home_id, sensor_type, module_id=None):
"""Set up for access to the Netatmo camera events."""
self._data = data
self._camera_id = camera_id
self._module_id = module_id
self._sensor_type = sensor_type
camera_info = data.camera_data.cameraById(cid=camera_id)
self._camera_name = camera_info["name"]
self._camera_type = camera_info["type"]
self._home_id = home_id
self._home_name = self._data.camera_data.getHomeName(home_id=home_id)
self._timeout = DEFAULT_TIMEOUT
if module_id:
self._module_name = data.camera_data.moduleById(mid=module_id)["name"]
self._name = (
f"{MANUFACTURER} {self._camera_name} {self._module_name} {sensor_type}"
)
self._unique_id = (
f"{self._camera_id}-{self._module_id}-"
f"{self._camera_type}-{sensor_type}"
)
else:
self._name = f"{MANUFACTURER} {self._camera_name} {sensor_type}"
self._unique_id = f"{self._camera_id}-{self._camera_type}-{sensor_type}"
self._state = None
@property
def name(self):
"""Return the name of the Netatmo device and this sensor."""
return self._name
@property
def unique_id(self):
"""Return the unique ID for this sensor."""
return self._unique_id
@property
def device_info(self):
"""Return the device info for the sensor."""
return {
"identifiers": {(DOMAIN, self._camera_id)},
"name": self._camera_name,
"manufacturer": MANUFACTURER,
"model": self._camera_type,
}
@property
def is_on(self):
"""Return true if binary sensor is on."""
return self._state
def update(self):
"""Request an update from the Netatmo API."""
self._data.update()
self._data.update_event(camera_type=self._camera_type)
if self._camera_type == "NACamera":
if self._sensor_type == "Someone known":
self._state = self._data.camera_data.someone_known_seen(
cid=self._camera_id, exclude=self._timeout
)
elif self._sensor_type == "Someone unknown":
self._state = self._data.camera_data.someone_unknown_seen(
cid=self._camera_id, exclude=self._timeout
)
elif self._sensor_type == "Motion":
self._state = self._data.camera_data.motion_detected(
cid=self._camera_id, exclude=self._timeout
)
elif self._camera_type == "NOC":
if self._sensor_type == "Outdoor motion":
self._state = self._data.camera_data.outdoor_motion_detected(
cid=self._camera_id, offset=self._timeout
)
elif self._sensor_type == "Outdoor human":
self._state = self._data.camera_data.human_detected(
cid=self._camera_id, offset=self._timeout
)
elif self._sensor_type == "Outdoor animal":
self._state = self._data.camera_data.animal_detected(
cid=self._camera_id, offset=self._timeout
)
elif self._sensor_type == "Outdoor vehicle":
self._state = self._data.camera_data.car_detected(
cid=self._camera_id, offset=self._timeout
)
if self._sensor_type == "Tag Vibration":
self._state = self._data.camera_data.module_motion_detected(
mid=self._module_id, cid=self._camera_id, exclude=self._timeout
)
elif self._sensor_type == "Tag Open":
self._state = self._data.camera_data.module_opened(
mid=self._module_id, cid=self._camera_id, exclude=self._timeout
)
| apache-2.0 | -1,740,085,250,344,084,000 | 36.268571 | 87 | 0.568997 | false |
dksr/REMIND | python/base/utils/LoggerManager.py | 1 | 3182 | #!/usr/bin/env python
import logging
import logging.handlers
from Singleton import Singleton
import os
LOGPATH = '/tmp'
class LoggerManager(Singleton):
def __init__(self):
self.loggers = {}
formatter = logging.Formatter('%(asctime)s:%(levelname)-8s:%(name)-10s:%(lineno)4s: %(message)-80s')
level = 'DEBUG'
nlevel = getattr(logging, level, None)
if nlevel != None:
self.LOGGING_MODE = nlevel
else:
self.LOGGING_MODE = logging.DEBUG
self.LOGGING_HANDLER = logging.handlers.RotatingFileHandler(
os.path.join(LOGPATH, 'log_event.log'),'a',0, 10)
self.LOGGING_HANDLER.doRollover()
self.ERROR_HANDLER = logging.handlers.RotatingFileHandler(
os.path.join(LOGPATH,'log_error.log'),'a',0, 10)
self.ERROR_HANDLER.doRollover()
self.LOGGING_HANDLER.setFormatter(formatter)
self.LOGGING_HANDLER.setLevel(self.LOGGING_MODE)
def getLogger(self, loggername):
if not self.loggers.has_key(loggername):
logger = Logger(loggername,
logging_handler= self.LOGGING_HANDLER,
error_handler = self.ERROR_HANDLER,
logging_mode = self.LOGGING_MODE)
self.loggers[loggername] = logger
return self.loggers[loggername]
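    # Minimal usage sketch (the logger name is an illustrative assumption):
    #   log = LoggerManager().getLogger('remind')
    #   log.debug('parsed input')        # -> log_event.log
    #   log.error('processing failed')   # -> log_error.log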
class Logger:
'''
Implements the christine logging facility.
'''
def __init__(self, loggername, type = 'event', logging_handler= '', error_handler = '', logging_mode = ''):
'''
        Constructor; builds a logger instance.
        @param loggername: Name the logger will have.
        @param type: Logger type. The available values are: event and error;
                    it defaults to event. If anything other than event or
                    error is used, event is assumed.
        '''
        # Create two loggers: one for info, debug and warning messages,
        # the other for errors, criticals and exceptions
self.__Logger = logging.getLogger(loggername)
self.__ErrorLogger = logging.getLogger('Error'+ loggername)
# Setting Logger properties
self.__Logger.addHandler(logging_handler)
self.__Logger.setLevel(logging_mode)
self.__ErrorLogger.addHandler(error_handler)
self.__ErrorLogger.setLevel(logging_mode)
self.info = self.__Logger.info
self.debug = self.__Logger.debug
self.warning = self.__Logger.warning
self.critical = self.__ErrorLogger.critical
self.error = self.__ErrorLogger.error
self.exception = self.__ErrorLogger.exception
| mit | -1,496,373,961,397,045,000 | 48.734375 | 116 | 0.52137 | false |
trevor/calendarserver | txweb2/server.py | 1 | 26937 | # -*- test-case-name: txweb2.test.test_server -*-
##
# Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# Copyright (c) 2010-2014 Apple Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
##
"""
This is a web-server which integrates with the twisted.internet
infrastructure.
"""
from __future__ import print_function
import cgi, time, urlparse
from urllib import quote, unquote
from urlparse import urlsplit
import weakref
from zope.interface import implements
from twisted.internet import defer
from twisted.python import failure
from twext.python.log import Logger
from txweb2 import http, iweb, fileupload, responsecode
from txweb2 import http_headers
from txweb2.filter.range import rangefilter
from txweb2 import error
from txweb2 import __version__ as web2_version
from twisted import __version__ as twisted_version
VERSION = "Twisted/%s TwistedWeb/%s" % (twisted_version, web2_version)
_errorMarker = object()
log = Logger()
def defaultHeadersFilter(request, response):
if not response.headers.hasHeader('server'):
response.headers.setHeader('server', VERSION)
if not response.headers.hasHeader('date'):
response.headers.setHeader('date', time.time())
return response
defaultHeadersFilter.handleErrors = True
def preconditionfilter(request, response):
if request.method in ("GET", "HEAD"):
http.checkPreconditions(request, response)
return response
def doTrace(request):
request = iweb.IRequest(request)
txt = "%s %s HTTP/%d.%d\r\n" % (request.method, request.uri,
request.clientproto[0], request.clientproto[1])
l=[]
for name, valuelist in request.headers.getAllRawHeaders():
for value in valuelist:
l.append("%s: %s\r\n" % (name, value))
txt += ''.join(l)
return http.Response(
responsecode.OK,
{'content-type': http_headers.MimeType('message', 'http')},
txt)
def parsePOSTData(request, maxMem=100*1024, maxFields=1024,
maxSize=10*1024*1024):
"""
Parse data of a POST request.
@param request: the request to parse.
@type request: L{txweb2.http.Request}.
@param maxMem: maximum memory used during the parsing of the data.
@type maxMem: C{int}
@param maxFields: maximum number of form fields allowed.
@type maxFields: C{int}
@param maxSize: maximum size of file upload allowed.
@type maxSize: C{int}
@return: a deferred that will fire when the parsing is done. The deferred
itself doesn't hold a return value, the request is modified directly.
@rtype: C{defer.Deferred}
"""
if request.stream.length == 0:
return defer.succeed(None)
ctype = request.headers.getHeader('content-type')
if ctype is None:
return defer.succeed(None)
def updateArgs(data):
args = data
request.args.update(args)
def updateArgsAndFiles(data):
args, files = data
request.args.update(args)
request.files.update(files)
def error(f):
f.trap(fileupload.MimeFormatError)
raise http.HTTPError(
http.StatusResponse(responsecode.BAD_REQUEST, str(f.value)))
if (ctype.mediaType == 'application'
and ctype.mediaSubtype == 'x-www-form-urlencoded'):
d = fileupload.parse_urlencoded(request.stream)
d.addCallbacks(updateArgs, error)
return d
elif (ctype.mediaType == 'multipart'
and ctype.mediaSubtype == 'form-data'):
boundary = ctype.params.get('boundary')
if boundary is None:
return defer.fail(http.HTTPError(
http.StatusResponse(
responsecode.BAD_REQUEST,
"Boundary not specified in Content-Type.")))
d = fileupload.parseMultipartFormData(request.stream, boundary,
maxMem, maxFields, maxSize)
d.addCallbacks(updateArgsAndFiles, error)
return d
else:
return defer.fail(http.HTTPError(
http.StatusResponse(
responsecode.BAD_REQUEST,
"Invalid content-type: %s/%s" % (
ctype.mediaType, ctype.mediaSubtype))))
class StopTraversal(object):
"""
Indicates to Request._handleSegment that it should stop handling
path segments.
"""
pass
class Request(http.Request):
"""
vars:
site
remoteAddr
scheme
host
port
path
params
querystring
args
files
prepath
postpath
@ivar path: The path only (arguments not included).
@ivar args: All of the arguments, including URL and POST arguments.
@type args: A mapping of strings (the argument names) to lists of values.
i.e., ?foo=bar&foo=baz&quux=spam results in
{'foo': ['bar', 'baz'], 'quux': ['spam']}.
"""
implements(iweb.IRequest)
site = None
_initialprepath = None
responseFilters = [rangefilter, preconditionfilter,
error.defaultErrorHandler, defaultHeadersFilter]
def __init__(self, *args, **kw):
self.timeStamps = [("t", time.time(),)]
if kw.has_key('site'):
self.site = kw['site']
del kw['site']
if kw.has_key('prepathuri'):
self._initialprepath = kw['prepathuri']
del kw['prepathuri']
self._resourcesByURL = {}
self._urlsByResource = {}
# Copy response filters from the class
self.responseFilters = self.responseFilters[:]
self.files = {}
self.resources = []
http.Request.__init__(self, *args, **kw)
try:
self.serverInstance = self.chanRequest.channel.transport.server.port
except AttributeError:
self.serverInstance = "Unknown"
def timeStamp(self, tag):
self.timeStamps.append((tag, time.time(),))
def addResponseFilter(self, filter, atEnd=False, onlyOnce=False):
"""
Add a response filter to this request.
Response filters are applied to the response to this request in order.
@param filter: a callable which takes an response argument and returns
a response object.
@param atEnd: if C{True}, C{filter} is added at the end of the list of
response filters; if C{False}, it is added to the beginning.
@param onlyOnce: if C{True}, C{filter} is not added to the list of
response filters if it already in the list.
"""
if onlyOnce and filter in self.responseFilters:
return
if atEnd:
self.responseFilters.append(filter)
else:
self.responseFilters.insert(0, filter)
def unparseURL(self, scheme=None, host=None, port=None,
path=None, params=None, querystring=None, fragment=None):
"""Turn the request path into a url string. For any pieces of
the url that are not specified, use the value from the
request. The arguments have the same meaning as the same named
attributes of Request."""
if scheme is None: scheme = self.scheme
if host is None: host = self.host
if port is None: port = self.port
if path is None: path = self.path
if params is None: params = self.params
if querystring is None: querystring = self.querystring
if fragment is None: fragment = ''
if port == http.defaultPortForScheme.get(scheme, 0):
hostport = host
else:
hostport = host + ':' + str(port)
return urlparse.urlunparse((
scheme, hostport, path,
params, querystring, fragment))
def _parseURL(self):
if self.uri[0] == '/':
# Can't use urlparse for request_uri because urlparse
# wants to be given an absolute or relative URI, not just
# an abs_path, and thus gets '//foo' wrong.
self.scheme = self.host = self.path = self.params = self.querystring = ''
if '?' in self.uri:
self.path, self.querystring = self.uri.split('?', 1)
else:
self.path = self.uri
if ';' in self.path:
self.path, self.params = self.path.split(';', 1)
else:
# It is an absolute uri, use standard urlparse
(self.scheme, self.host, self.path,
self.params, self.querystring, fragment) = urlparse.urlparse(self.uri)
if self.querystring:
self.args = cgi.parse_qs(self.querystring, True)
else:
self.args = {}
path = map(unquote, self.path[1:].split('/'))
if self._initialprepath:
# We were given an initial prepath -- this is for supporting
# CGI-ish applications where part of the path has already
# been processed
prepath = map(unquote, self._initialprepath[1:].split('/'))
if path[:len(prepath)] == prepath:
self.prepath = prepath
self.postpath = path[len(prepath):]
else:
self.prepath = []
self.postpath = path
else:
self.prepath = []
self.postpath = path
#print("_parseURL", self.uri, (self.uri, self.scheme, self.host, self.path, self.params, self.querystring))
def _schemeFromPort(self, port):
"""
        Try to determine the scheme matching the supplied server port. This is needed in cases
where a device in front of the server is changing the scheme (e.g. decoding SSL) but not
rewriting the scheme in URIs returned in responses (e.g. in Location headers). This could trick
clients into using an inappropriate scheme for subsequent requests. What we should do is
take the port number from the Host header or request-URI and map that to the scheme that
matches the service we configured to listen on that port.
@param port: the port number to test
@type port: C{int}
@return: C{True} if scheme is https (secure), C{False} otherwise
@rtype: C{bool}
"""
#from twistedcaldav.config import config
if hasattr(self.site, "EnableSSL") and self.site.EnableSSL:
if port == self.site.SSLPort:
return True
elif port in self.site.BindSSLPorts:
return True
return False
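    # For example, with a (hypothetical) SSLPort of 8443 configured on the site,
    # a request arriving on port 8443 is reported as https even if the request
    # line or Host header claimed plain http.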
def _fixupURLParts(self):
hostaddr, secure = self.chanRequest.getHostInfo()
if not self.scheme:
self.scheme = ('http', 'https')[secure]
if self.host:
self.host, self.port = http.splitHostPort(self.scheme, self.host)
self.scheme = ('http', 'https')[self._schemeFromPort(self.port)]
else:
# If GET line wasn't an absolute URL
host = self.headers.getHeader('host')
if host:
self.host, self.port = http.splitHostPort(self.scheme, host)
self.scheme = ('http', 'https')[self._schemeFromPort(self.port)]
else:
# When no hostname specified anywhere, either raise an
# error, or use the interface hostname, depending on
# protocol version
if self.clientproto >= (1,1):
raise http.HTTPError(responsecode.BAD_REQUEST)
self.host = hostaddr.host
self.port = hostaddr.port
def process(self):
"Process a request."
log.info("%s %s %s" % (
self.method,
self.uri,
"HTTP/%s.%s" % self.clientproto
))
try:
self.checkExpect()
resp = self.preprocessRequest()
if resp is not None:
self._cbFinishRender(resp).addErrback(self._processingFailed)
return
self._parseURL()
self._fixupURLParts()
self.remoteAddr = self.chanRequest.getRemoteHost()
except:
self._processingFailed(failure.Failure())
return
d = defer.Deferred()
d.addCallback(self._getChild, self.site.resource, self.postpath)
d.addCallback(self._rememberResource, "/" + "/".join(quote(s) for s in self.postpath))
d.addCallback(self._processTimeStamp)
d.addCallback(lambda res, req: res.renderHTTP(req), self)
d.addCallback(self._cbFinishRender)
d.addErrback(self._processingFailed)
d.callback(None)
return d
def _processTimeStamp(self, res):
self.timeStamp("t-req-proc")
return res
def preprocessRequest(self):
"""Do any request processing that doesn't follow the normal
resource lookup procedure. "OPTIONS *" is handled here, for
example. This would also be the place to do any CONNECT
processing."""
if self.method == "OPTIONS" and self.uri == "*":
response = http.Response(responsecode.OK)
response.headers.setHeader('allow', ('GET', 'HEAD', 'OPTIONS', 'TRACE'))
return response
elif self.method == "POST":
# Allow other methods to tunnel through using POST and a request header.
# See http://code.google.com/apis/gdata/docs/2.0/basics.html
if self.headers.hasHeader("X-HTTP-Method-Override"):
                intendedMethod = self.headers.getRawHeaders("X-HTTP-Method-Override")[0]
if intendedMethod:
self.originalMethod = self.method
self.method = intendedMethod
# This is where CONNECT would go if we wanted it
return None
def _getChild(self, _, res, path, updatepaths=True):
"""Call res.locateChild, and pass the result on to _handleSegment."""
self.resources.append(res)
if not path:
return res
result = res.locateChild(self, path)
if isinstance(result, defer.Deferred):
return result.addCallback(self._handleSegment, res, path, updatepaths)
else:
return self._handleSegment(result, res, path, updatepaths)
def _handleSegment(self, result, res, path, updatepaths):
"""Handle the result of a locateChild call done in _getChild."""
newres, newpath = result
# If the child resource is None then display a error page
if newres is None:
raise http.HTTPError(responsecode.NOT_FOUND)
# If we got a deferred then we need to call back later, once the
# child is actually available.
if isinstance(newres, defer.Deferred):
return newres.addCallback(
lambda actualRes: self._handleSegment(
(actualRes, newpath), res, path, updatepaths)
)
if path:
url = quote("/" + "/".join(path))
else:
url = "/"
if newpath is StopTraversal:
# We need to rethink how to do this.
#if newres is res:
return res
#else:
# raise ValueError("locateChild must not return StopTraversal with a resource other than self.")
newres = iweb.IResource(newres)
if newres is res:
assert not newpath is path, "URL traversal cycle detected when attempting to locateChild %r from resource %r." % (path, res)
assert len(newpath) < len(path), "Infinite loop impending..."
if updatepaths:
# We found a Resource... update the request.prepath and postpath
for x in xrange(len(path) - len(newpath)):
self.prepath.append(self.postpath.pop(0))
url = quote("/" + "/".join(self.prepath) + ("/" if self.prepath and self.prepath[-1] else ""))
self._rememberResource(newres, url)
else:
try:
previousURL = self.urlForResource(res)
url = quote(previousURL + path[0] + ("/" if path[0] and len(path) > 1 else ""))
self._rememberResource(newres, url)
except NoURLForResourceError:
pass
child = self._getChild(None, newres, newpath, updatepaths=updatepaths)
return child
_urlsByResource = weakref.WeakKeyDictionary()
def _rememberResource(self, resource, url):
"""
Remember the URL of a visited resource.
"""
self._resourcesByURL[url] = resource
self._urlsByResource[resource] = url
return resource
def _forgetResource(self, resource, url):
"""
        Forget the previously remembered URL of a visited resource.
"""
del self._resourcesByURL[url]
del self._urlsByResource[resource]
def urlForResource(self, resource):
"""
Looks up the URL of the given resource if this resource was found while
processing this request. Specifically, this includes the requested
resource, and resources looked up via L{locateResource}.
Note that a resource may be found at multiple URIs; if the same resource
is visited at more than one location while processing this request,
this method will return one of those URLs, but which one is not defined,
nor whether the same URL is returned in subsequent calls.
@param resource: the resource to find a URI for. This resource must
have been obtained from the request (i.e. via its C{uri} attribute, or
through its C{locateResource} or C{locateChildResource} methods).
@return: a valid URL for C{resource} in this request.
@raise NoURLForResourceError: if C{resource} has no URL in this request
(because it was not obtained from the request).
"""
url = self._urlsByResource.get(resource, None)
if url is None:
raise NoURLForResourceError(resource)
return url
def locateResource(self, url):
"""
Looks up the resource with the given URL.
@param uri: The URL of the desired resource.
@return: a L{Deferred} resulting in the L{IResource} at the
given URL or C{None} if no such resource can be located.
@raise HTTPError: If C{url} is not a URL on the site that this
request is being applied to. The contained response will
have a status code of L{responsecode.BAD_GATEWAY}.
@raise HTTPError: If C{url} contains a query or fragment.
The contained response will have a status code of
L{responsecode.BAD_REQUEST}.
"""
if url is None:
return defer.succeed(None)
#
# Parse the URL
#
(scheme, host, path, query, fragment) = urlsplit(url)
if query or fragment:
raise http.HTTPError(http.StatusResponse(
responsecode.BAD_REQUEST,
"URL may not contain a query or fragment: %s" % (url,)
))
# Look for cached value
cached = self._resourcesByURL.get(path, None)
if cached is not None:
return defer.succeed(cached)
segments = unquote(path).split("/")
assert segments[0] == "", "URL path didn't begin with '/': %s" % (path,)
# Walk the segments up to see if we can find a cached resource to start from
preSegments = segments[:-1]
postSegments = segments[-1:]
cachedParent = None
while(len(preSegments)):
parentPath = "/".join(preSegments) + "/"
cachedParent = self._resourcesByURL.get(parentPath, None)
if cachedParent is not None:
break
else:
postSegments.insert(0, preSegments.pop())
if cachedParent is None:
cachedParent = self.site.resource
postSegments = segments[1:]
def notFound(f):
f.trap(http.HTTPError)
if f.value.response.code != responsecode.NOT_FOUND:
return f
return None
d = defer.maybeDeferred(self._getChild, None, cachedParent, postSegments, updatepaths=False)
d.addCallback(self._rememberResource, path)
d.addErrback(notFound)
return d
def locateChildResource(self, parent, childName):
"""
Looks up the child resource with the given name given the parent
resource. This is similar to locateResource(), but doesn't have to
start the lookup from the root resource, so it is potentially faster.
@param parent: the parent of the resource being looked up. This resource
must have been obtained from the request (i.e. via its C{uri} attribute,
or through its C{locateResource} or C{locateChildResource} methods).
        @param childName: the name of the child of C{parent} to look up.
@return: a L{Deferred} resulting in the L{IResource} at the
given URL or C{None} if no such resource can be located.
@raise NoURLForResourceError: if C{resource} was not obtained from the
request.
"""
if parent is None or childName is None:
return None
assert "/" not in childName, "Child name may not contain '/': %s" % (childName,)
parentURL = self.urlForResource(parent)
if not parentURL.endswith("/"):
parentURL += "/"
url = parentURL + quote(childName)
segment = childName
def notFound(f):
f.trap(http.HTTPError)
if f.value.response.code != responsecode.NOT_FOUND:
return f
return None
d = defer.maybeDeferred(self._getChild, None, parent, [segment], updatepaths=False)
d.addCallback(self._rememberResource, url)
d.addErrback(notFound)
return d
def _processingFailed(self, reason):
if reason.check(http.HTTPError) is not None:
# If the exception was an HTTPError, leave it alone
d = defer.succeed(reason.value.response)
else:
# Otherwise, it was a random exception, so give a
# ICanHandleException implementer a chance to render the page.
def _processingFailed_inner(reason):
handler = iweb.ICanHandleException(self, self)
return handler.renderHTTP_exception(self, reason)
d = defer.maybeDeferred(_processingFailed_inner, reason)
d.addCallback(self._cbFinishRender)
d.addErrback(self._processingReallyFailed, reason)
return d
def _processingReallyFailed(self, reason, origReason):
"""
An error occurred when attempting to report an error to the HTTP
client.
"""
log.failure("Exception rendering error page", reason)
log.failure("Original exception", origReason)
try:
body = (
"<html><head><title>Internal Server Error</title></head>"
"<body><h1>Internal Server Error</h1>"
"An error occurred rendering the requested page. "
"Additionally, an error occurred rendering the error page."
"</body></html>"
)
response = http.Response(
responsecode.INTERNAL_SERVER_ERROR,
{'content-type': http_headers.MimeType('text','html')},
body
)
self.writeResponse(response)
except:
log.failure(
"An error occurred. We tried to report that error. "
"Reporting that error caused an error. "
"In the process of reporting the error-reporting error to "
"the client, there was *yet another* error. Here it is. "
"I give up."
)
self.chanRequest.abortConnection()
def _cbFinishRender(self, result):
def filterit(response, f):
if (hasattr(f, 'handleErrors') or
(response.code >= 200 and response.code < 300)):
return f(self, response)
else:
return response
response = iweb.IResponse(result, None)
if response:
d = defer.Deferred()
for f in self.responseFilters:
d.addCallback(filterit, f)
d.addCallback(self.writeResponse)
d.callback(response)
return d
resource = iweb.IResource(result, None)
if resource:
self.resources.append(resource)
d = defer.maybeDeferred(resource.renderHTTP, self)
d.addCallback(self._cbFinishRender)
return d
raise TypeError("html is not a resource or a response")
def renderHTTP_exception(self, req, reason):
log.failure("Exception rendering request: {request}", reason, request=req)
body = ("<html><head><title>Internal Server Error</title></head>"
"<body><h1>Internal Server Error</h1>An error occurred rendering the requested page. More information is available in the server log.</body></html>")
return http.Response(
responsecode.INTERNAL_SERVER_ERROR,
{'content-type': http_headers.MimeType('text','html')},
body)
class Site(object):
def __init__(self, resource):
"""Initialize.
"""
self.resource = iweb.IResource(resource)
def __call__(self, *args, **kwargs):
return Request(site=self, *args, **kwargs)
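    # A Site instance can therefore serve as the request factory for the HTTP
    # channel: each call returns a Request already bound to this site and its
    # root resource.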
class NoURLForResourceError(RuntimeError):
def __init__(self, resource):
RuntimeError.__init__(self, "Resource %r has no URL in this request." % (resource,))
self.resource = resource
__all__ = ['Request', 'Site', 'StopTraversal', 'VERSION', 'defaultHeadersFilter', 'doTrace', 'parsePOSTData', 'preconditionfilter', 'NoURLForResourceError']
| apache-2.0 | -2,017,913,453,591,127,000 | 36.516713 | 165 | 0.605932 | false |
elthariel/dff | modules/viewer/hexedit/offsetItem.py | 1 | 2183 | # DFF -- An Open Source Digital Forensics Framework
# Copyright (C) 2009-2010 ArxSys
# This program is free software, distributed under the terms of
# the GNU General Public License Version 2. See the LICENSE file
# at the top of the source tree.
#
# See http://www.digital-forensic.org for more information about this
# project. Please do not directly contact any of the maintainers of
# DFF for assistance; the project provides a web site, mailing lists
# and IRC channels for your use.
#
# Author(s):
# Jeremy Mounier <[email protected]>
import binascii
import struct
import string
import time
from PyQt4.QtCore import QString, Qt
from PyQt4.QtGui import QWidget, QFont, QColor, QTextCursor, QGraphicsTextItem
class offsetItem(QGraphicsTextItem):
def __init__(self, whex):
QGraphicsTextItem.__init__(self)
self.initValues(whex)
# self.initShape()
self.initPosition()
self.initFont()
def initPosition(self):
self.setPos(0, 25)
def initValues(self, whex):
self.whex = whex
self.heditor = self.whex.heditor
#Buffer
self.buffer = []
self.bufferLines = 0
#Line
self.currentLine = 0
#Offset
self.startOffset = 0
self.fontPixel = 14
def initFont(self):
self.setDefaultTextColor(QColor(Qt.red))
self.font = QFont("Gothic")
self.font.setFixedPitch(1)
self.font.setBold(False)
self.font.setPixelSize(self.fontPixel)
self.setFont(self.font)
#Print Operations
def printFullOffset(self, start, len):
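        # Emit one zero-padded, 10-digit offset label per 16-byte row of the
        # hex view, in decimal or hexadecimal depending on heditor.decimalview.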
count = 0
fullBuff = QString()
while count <= len:
if self.heditor.decimalview:
fullBuff.append("%.10d" % start)
else:
fullBuff.append("%.10X" % start)
fullBuff.append("\n")
start += 16
count += 1
#Clear and set
cursor = self.textCursor()
cursor.movePosition(QTextCursor.Start)
cursor.movePosition(QTextCursor.End, QTextCursor.KeepAnchor)
self.setPlainText(fullBuff)
cursor.movePosition(QTextCursor.Start)
| gpl-2.0 | 3,857,610,205,408,654,000 | 27.723684 | 78 | 0.63628 | false |
aio-libs/aiohttp | tests/test_multipart.py | 1 | 50388 | # type: ignore
import asyncio
import io
import json
import pathlib
import sys
import zlib
from typing import Any, Optional
from unittest import mock
import pytest
import aiohttp
from aiohttp import payload
from aiohttp.hdrs import (
CONTENT_DISPOSITION,
CONTENT_ENCODING,
CONTENT_TRANSFER_ENCODING,
CONTENT_TYPE,
)
from aiohttp.helpers import parse_mimetype
from aiohttp.multipart import MultipartResponseWrapper
from aiohttp.streams import StreamReader
from aiohttp.test_utils import make_mocked_coro
BOUNDARY: bytes = b"--:"
def pytest_generate_tests(metafunc: Any) -> None: # pragma: no cover
if "newline" in metafunc.fixturenames:
metafunc.parametrize("newline", [b"\r\n", b"\n"], ids=str)
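# Every test that takes a ``newline`` argument therefore runs twice: once with
# CRLF and once with bare LF line endings between MIME parts.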
@pytest.fixture
def buf():
return bytearray()
@pytest.fixture
def stream(buf: Any):
writer = mock.Mock()
async def write(chunk):
buf.extend(chunk)
writer.write.side_effect = write
return writer
@pytest.fixture
def writer():
return aiohttp.MultipartWriter(boundary=":")
class Response:
headers: Any
content: Any
def __init__(self, headers: Any, content: Any) -> None:
self.headers = headers
self.content = content
class Stream:
content: Any
def __init__(self, content: Any) -> None:
self.content = io.BytesIO(content)
async def read(self, size: Optional[Any] = None):
return self.content.read(size)
def at_eof(self):
return self.content.tell() == len(self.content.getbuffer())
async def readline(self):
return self.content.readline()
def unread_data(self, data: Any) -> None:
self.content = io.BytesIO(data + self.content.read())
class StreamWithShortenRead(Stream):
def __init__(self, content: Any) -> None:
self._first = True
super().__init__(content)
async def read(self, size: Optional[Any] = None):
if size is not None and self._first:
self._first = False
size = size // 2
return await super().read(size)
class TestMultipartResponseWrapper:
def test_at_eof(self) -> None:
wrapper = MultipartResponseWrapper(mock.Mock(), mock.Mock())
wrapper.at_eof()
assert wrapper.resp.content.at_eof.called
async def test_next(self) -> None:
wrapper = MultipartResponseWrapper(mock.Mock(), mock.Mock())
wrapper.stream.next = make_mocked_coro(b"")
wrapper.stream.at_eof.return_value = False
await wrapper.next()
assert wrapper.stream.next.called
async def test_release(self) -> None:
wrapper = MultipartResponseWrapper(mock.Mock(), mock.Mock())
wrapper.resp.release = make_mocked_coro(None)
await wrapper.release()
assert wrapper.resp.release.called
async def test_release_when_stream_at_eof(self) -> None:
wrapper = MultipartResponseWrapper(mock.Mock(), mock.Mock())
wrapper.resp.release = make_mocked_coro(None)
wrapper.stream.next = make_mocked_coro(b"")
wrapper.stream.at_eof.return_value = True
await wrapper.next()
assert wrapper.stream.next.called
assert wrapper.resp.release.called
class TestPartReader:
async def test_next(self, newline: Any) -> None:
data = b"Hello, world!%s--:" % newline
obj = aiohttp.BodyPartReader(BOUNDARY, {}, Stream(data), _newline=newline)
result = await obj.next()
assert b"Hello, world!" == result
assert obj.at_eof()
async def test_next_next(self, newline: Any) -> None:
data = b"Hello, world!%s--:" % newline
obj = aiohttp.BodyPartReader(BOUNDARY, {}, Stream(data), _newline=newline)
result = await obj.next()
assert b"Hello, world!" == result
assert obj.at_eof()
result = await obj.next()
assert result is None
async def test_read(self, newline: Any) -> None:
data = b"Hello, world!%s--:" % newline
obj = aiohttp.BodyPartReader(BOUNDARY, {}, Stream(data), _newline=newline)
result = await obj.read()
assert b"Hello, world!" == result
assert obj.at_eof()
async def test_read_chunk_at_eof(self) -> None:
obj = aiohttp.BodyPartReader(BOUNDARY, {}, Stream(b"--:"))
obj._at_eof = True
result = await obj.read_chunk()
assert b"" == result
async def test_read_chunk_without_content_length(self, newline: Any) -> None:
data = b"Hello, world!%s--:" % newline
obj = aiohttp.BodyPartReader(BOUNDARY, {}, Stream(data), _newline=newline)
c1 = await obj.read_chunk(8)
c2 = await obj.read_chunk(8)
c3 = await obj.read_chunk(8)
assert c1 + c2 == b"Hello, world!"
assert c3 == b""
async def test_read_incomplete_chunk(self, newline: Any) -> None:
stream = Stream(b"")
if sys.version_info >= (3, 8, 1):
# Workaround for a weird behavior of patch.object
def prepare(data):
return data
else:
async def prepare(data):
return data
with mock.patch.object(
stream,
"read",
side_effect=[
prepare(b"Hello, "),
prepare(b"World"),
prepare(b"!%s--:" % newline),
prepare(b""),
],
):
obj = aiohttp.BodyPartReader(BOUNDARY, {}, stream, _newline=newline)
c1 = await obj.read_chunk(8)
assert c1 == b"Hello, "
c2 = await obj.read_chunk(8)
assert c2 == b"World"
c3 = await obj.read_chunk(8)
assert c3 == b"!"
async def test_read_all_at_once(self, newline: Any) -> None:
data = b"Hello, World!%s--:--%s" % (newline, newline)
obj = aiohttp.BodyPartReader(BOUNDARY, {}, Stream(data), _newline=newline)
result = await obj.read_chunk()
assert b"Hello, World!" == result
result = await obj.read_chunk()
assert b"" == result
assert obj.at_eof()
async def test_read_incomplete_body_chunked(self, newline: Any) -> None:
data = b"Hello, World!%s--" % newline
obj = aiohttp.BodyPartReader(BOUNDARY, {}, Stream(data), _newline=newline)
result = b""
with pytest.raises(AssertionError):
for _ in range(4):
result += await obj.read_chunk(7)
assert data == result
async def test_read_boundary_with_incomplete_chunk(self, newline: Any) -> None:
stream = Stream(b"")
if sys.version_info >= (3, 8, 1):
# Workaround for weird 3.8.1 patch.object() behavior
def prepare(data):
return data
else:
async def prepare(data):
return data
with mock.patch.object(
stream,
"read",
side_effect=[
prepare(b"Hello, World"),
prepare(b"!%s" % newline),
prepare(b"--:"),
prepare(b""),
],
):
obj = aiohttp.BodyPartReader(BOUNDARY, {}, stream, _newline=newline)
c1 = await obj.read_chunk(12)
assert c1 == b"Hello, World"
c2 = await obj.read_chunk(8)
assert c2 == b"!"
c3 = await obj.read_chunk(8)
assert c3 == b""
async def test_multi_read_chunk(self, newline: Any) -> None:
data = b"Hello,%s--:%s%sworld!%s--:--" % ((newline,) * 4)
obj = aiohttp.BodyPartReader(BOUNDARY, {}, Stream(data), _newline=newline)
result = await obj.read_chunk(8)
assert b"Hello," == result
result = await obj.read_chunk(8)
assert b"" == result
assert obj.at_eof()
async def test_read_chunk_properly_counts_read_bytes(self, newline: Any) -> None:
expected = b"." * 10
tail = b"%s--:--" % newline
size = len(expected)
obj = aiohttp.BodyPartReader(
BOUNDARY,
{"CONTENT-LENGTH": size},
StreamWithShortenRead(expected + tail),
_newline=newline,
)
result = bytearray()
while True:
chunk = await obj.read_chunk()
if not chunk:
break
result.extend(chunk)
assert size == len(result)
assert b"." * size == result
assert obj.at_eof()
async def test_read_does_not_read_boundary(self, newline: Any) -> None:
data = b"Hello, world!%s--:" % newline
stream = Stream(data)
obj = aiohttp.BodyPartReader(BOUNDARY, {}, stream, _newline=newline)
result = await obj.read()
assert b"Hello, world!" == result
assert b"--:" == (await stream.read())
async def test_multiread(self, newline: Any) -> None:
data = b"Hello,%s--:%s%sworld!%s--:--" % ((newline,) * 4)
obj = aiohttp.BodyPartReader(BOUNDARY, {}, Stream(data), _newline=newline)
result = await obj.read()
assert b"Hello," == result
result = await obj.read()
assert b"" == result
assert obj.at_eof()
async def test_read_multiline(self, newline: Any) -> None:
data = b"Hello\n,\r\nworld!%s--:--" % newline
obj = aiohttp.BodyPartReader(BOUNDARY, {}, Stream(data), _newline=newline)
result = await obj.read()
assert b"Hello\n,\r\nworld!" == result
result = await obj.read()
assert b"" == result
assert obj.at_eof()
async def test_read_respects_content_length(self, newline: Any) -> None:
data = b"." * 100500
tail = b"%s--:--" % newline
obj = aiohttp.BodyPartReader(
BOUNDARY,
{"CONTENT-LENGTH": 100500},
Stream(data + tail),
_newline=newline,
)
result = await obj.read()
assert data == result
assert obj.at_eof()
async def test_read_with_content_encoding_gzip(self, newline: Any) -> None:
obj = aiohttp.BodyPartReader(
BOUNDARY,
{CONTENT_ENCODING: "gzip"},
Stream(
b"\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\x03\x0b\xc9\xccMU"
b"(\xc9W\x08J\xcdI\xacP\x04\x00$\xfb\x9eV\x0e\x00\x00\x00"
b"%s--:--" % newline
),
_newline=newline,
)
result = await obj.read(decode=True)
assert b"Time to Relax!" == result
async def test_read_with_content_encoding_deflate(self, newline: Any) -> None:
data = b"\x0b\xc9\xccMU(\xc9W\x08J\xcdI\xacP\x04\x00"
tail = b"%s--:--" % newline
obj = aiohttp.BodyPartReader(
BOUNDARY,
{CONTENT_ENCODING: "deflate"},
Stream(data + tail),
_newline=newline,
)
result = await obj.read(decode=True)
assert b"Time to Relax!" == result
async def test_read_with_content_encoding_identity(self, newline: Any) -> None:
thing = (
b"\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\x03\x0b\xc9\xccMU"
b"(\xc9W\x08J\xcdI\xacP\x04\x00$\xfb\x9eV\x0e\x00\x00\x00"
)
obj = aiohttp.BodyPartReader(
BOUNDARY,
{CONTENT_ENCODING: "identity"},
Stream(thing + b"%s--:--" % newline),
_newline=newline,
)
result = await obj.read(decode=True)
assert thing == result
async def test_read_with_content_encoding_unknown(self, newline: Any) -> None:
obj = aiohttp.BodyPartReader(
BOUNDARY,
{CONTENT_ENCODING: "snappy"},
Stream(b"\x0e4Time to Relax!%s--:--" % newline),
_newline=newline,
)
with pytest.raises(RuntimeError):
await obj.read(decode=True)
async def test_read_with_content_transfer_encoding_base64(
self, newline: Any
) -> None:
obj = aiohttp.BodyPartReader(
BOUNDARY,
{CONTENT_TRANSFER_ENCODING: "base64"},
Stream(b"VGltZSB0byBSZWxheCE=%s--:--" % newline),
_newline=newline,
)
result = await obj.read(decode=True)
assert b"Time to Relax!" == result
async def test_decode_with_content_transfer_encoding_base64(
self, newline: Any
) -> None:
obj = aiohttp.BodyPartReader(
BOUNDARY,
{CONTENT_TRANSFER_ENCODING: "base64"},
Stream(b"VG\r\r\nltZSB0byBSZ\r\nWxheCE=%s--:--" % newline),
_newline=newline,
)
result = b""
while not obj.at_eof():
chunk = await obj.read_chunk(size=6)
result += obj.decode(chunk)
assert b"Time to Relax!" == result
async def test_read_with_content_transfer_encoding_quoted_printable(
self, newline: Any
) -> None:
obj = aiohttp.BodyPartReader(
BOUNDARY,
{CONTENT_TRANSFER_ENCODING: "quoted-printable"},
Stream(
b"=D0=9F=D1=80=D0=B8=D0=B2=D0=B5=D1=82,"
b" =D0=BC=D0=B8=D1=80!%s--:--" % newline
),
_newline=newline,
)
result = await obj.read(decode=True)
expected = (
b"\xd0\x9f\xd1\x80\xd0\xb8\xd0\xb2\xd0\xb5\xd1\x82,"
b" \xd0\xbc\xd0\xb8\xd1\x80!"
)
assert result == expected
@pytest.mark.parametrize("encoding", ("binary", "8bit", "7bit"))
async def test_read_with_content_transfer_encoding_binary(
self, encoding: Any, newline: Any
) -> None:
data = (
b"\xd0\x9f\xd1\x80\xd0\xb8\xd0\xb2\xd0\xb5\xd1\x82,"
b" \xd0\xbc\xd0\xb8\xd1\x80!"
)
obj = aiohttp.BodyPartReader(
BOUNDARY,
{CONTENT_TRANSFER_ENCODING: encoding},
Stream(data + b"%s--:--" % newline),
_newline=newline,
)
result = await obj.read(decode=True)
assert data == result
async def test_read_with_content_transfer_encoding_unknown(
self, newline: Any
) -> None:
obj = aiohttp.BodyPartReader(
BOUNDARY,
{CONTENT_TRANSFER_ENCODING: "unknown"},
Stream(b"\x0e4Time to Relax!%s--:--" % newline),
_newline=newline,
)
with pytest.raises(RuntimeError):
await obj.read(decode=True)
async def test_read_text(self, newline: Any) -> None:
obj = aiohttp.BodyPartReader(
BOUNDARY,
{},
Stream(b"Hello, world!%s--:--" % newline),
_newline=newline,
)
result = await obj.text()
assert "Hello, world!" == result
async def test_read_text_default_encoding(self, newline: Any) -> None:
data = "Привет, Мир!"
tail = b"%s--:--" % newline
obj = aiohttp.BodyPartReader(
BOUNDARY,
{},
Stream(data.encode("utf-8") + tail),
_newline=newline,
)
result = await obj.text()
assert data == result
async def test_read_text_encoding(self, newline: Any) -> None:
data = "Привет, Мир!"
tail = b"%s--:--" % newline
obj = aiohttp.BodyPartReader(
BOUNDARY,
{},
Stream(data.encode("cp1251") + tail),
_newline=newline,
)
result = await obj.text(encoding="cp1251")
assert data == result
async def test_read_text_guess_encoding(self, newline: Any) -> None:
data = "Привет, Мир!"
tail = b"%s--:--" % newline
obj = aiohttp.BodyPartReader(
BOUNDARY,
{CONTENT_TYPE: "text/plain;charset=cp1251"},
Stream(data.encode("cp1251") + tail),
_newline=newline,
)
result = await obj.text()
assert data == result
async def test_read_text_compressed(self, newline: Any) -> None:
data = b"\x0b\xc9\xccMU(\xc9W\x08J\xcdI\xacP\x04\x00" b"%s--:--" % newline
obj = aiohttp.BodyPartReader(
BOUNDARY,
{CONTENT_ENCODING: "deflate", CONTENT_TYPE: "text/plain"},
Stream(data),
_newline=newline,
)
result = await obj.text()
assert "Time to Relax!" == result
async def test_read_text_while_closed(self) -> None:
obj = aiohttp.BodyPartReader(
BOUNDARY, {CONTENT_TYPE: "text/plain"}, Stream(b"")
)
obj._at_eof = True
result = await obj.text()
assert "" == result
async def test_read_json(self, newline: Any) -> None:
obj = aiohttp.BodyPartReader(
BOUNDARY,
{CONTENT_TYPE: "application/json"},
Stream(b'{"test": "passed"}%s--:--' % newline),
_newline=newline,
)
result = await obj.json()
assert {"test": "passed"} == result
async def test_read_json_encoding(self, newline: Any) -> None:
data = '{"тест": "пассед"}'.encode("cp1251")
tail = b"%s--:--" % newline
obj = aiohttp.BodyPartReader(
BOUNDARY,
{CONTENT_TYPE: "application/json"},
Stream(data + tail),
_newline=newline,
)
result = await obj.json(encoding="cp1251")
assert {"тест": "пассед"} == result
async def test_read_json_guess_encoding(self, newline: Any) -> None:
data = '{"тест": "пассед"}'.encode("cp1251")
tail = b"%s--:--" % newline
obj = aiohttp.BodyPartReader(
BOUNDARY,
{CONTENT_TYPE: "application/json; charset=cp1251"},
Stream(data + tail),
_newline=newline,
)
result = await obj.json()
assert {"тест": "пассед"} == result
async def test_read_json_compressed(self, newline: Any) -> None:
obj = aiohttp.BodyPartReader(
BOUNDARY,
{CONTENT_ENCODING: "deflate", CONTENT_TYPE: "application/json"},
Stream(b"\xabV*I-.Q\xb2RP*H,.NMQ\xaa\x05\x00" b"%s--:--" % newline),
_newline=newline,
)
result = await obj.json()
assert {"test": "passed"} == result
async def test_read_json_while_closed(self) -> None:
stream = Stream(b"")
obj = aiohttp.BodyPartReader(
BOUNDARY, {CONTENT_TYPE: "application/json"}, stream
)
obj._at_eof = True
result = await obj.json()
assert result is None
async def test_read_form(self, newline: Any) -> None:
data = b"foo=bar&foo=baz&boo=%s--:--" % newline
obj = aiohttp.BodyPartReader(
BOUNDARY,
{CONTENT_TYPE: "application/x-www-form-urlencoded"},
Stream(data),
_newline=newline,
)
result = await obj.form()
assert [("foo", "bar"), ("foo", "baz"), ("boo", "")] == result
async def test_read_form_encoding(self, newline: Any) -> None:
data = b"foo=bar&foo=baz&boo=%s--:--" % newline
obj = aiohttp.BodyPartReader(
BOUNDARY,
{CONTENT_TYPE: "application/x-www-form-urlencoded"},
Stream(data),
_newline=newline,
)
result = await obj.form(encoding="cp1251")
assert [("foo", "bar"), ("foo", "baz"), ("boo", "")] == result
async def test_read_form_guess_encoding(self, newline: Any) -> None:
data = b"foo=bar&foo=baz&boo=%s--:--" % newline
obj = aiohttp.BodyPartReader(
BOUNDARY,
{CONTENT_TYPE: "application/x-www-form-urlencoded; charset=utf-8"},
Stream(data),
_newline=newline,
)
result = await obj.form()
assert [("foo", "bar"), ("foo", "baz"), ("boo", "")] == result
async def test_read_form_while_closed(self) -> None:
stream = Stream(b"")
obj = aiohttp.BodyPartReader(
BOUNDARY,
{CONTENT_TYPE: "application/x-www-form-urlencoded"},
stream,
)
obj._at_eof = True
result = await obj.form()
assert not result
async def test_readline(self, newline: Any) -> None:
data = b"Hello\n,\r\nworld!%s--:--" % newline
obj = aiohttp.BodyPartReader(
BOUNDARY,
{},
Stream(data),
_newline=newline,
)
result = await obj.readline()
assert b"Hello\n" == result
result = await obj.readline()
assert b",\r\n" == result
result = await obj.readline()
assert b"world!" == result
result = await obj.readline()
assert b"" == result
assert obj.at_eof()
async def test_release(self, newline: Any) -> None:
data = b"Hello,%s--:\r\n\r\nworld!%s--:--" % (newline, newline)
stream = Stream(data)
obj = aiohttp.BodyPartReader(
BOUNDARY,
{},
stream,
_newline=newline,
)
remained = b"--:\r\n\r\nworld!%s--:--" % newline
await obj.release()
assert obj.at_eof()
assert remained == stream.content.read()
async def test_release_respects_content_length(self, newline: Any) -> None:
obj = aiohttp.BodyPartReader(
BOUNDARY,
{"CONTENT-LENGTH": 100500},
Stream(b"." * 100500 + b"%s--:--" % newline),
_newline=newline,
)
result = await obj.release()
assert result is None
assert obj.at_eof()
async def test_release_release(self, newline: Any) -> None:
data = b"Hello,%s--:\r\n\r\nworld!%s--:--" % (newline, newline)
remained = b"--:\r\n\r\nworld!%s--:--" % newline
stream = Stream(data)
obj = aiohttp.BodyPartReader(
BOUNDARY,
{},
stream,
_newline=newline,
)
await obj.release()
await obj.release()
assert remained == stream.content.read()
async def test_filename(self) -> None:
part = aiohttp.BodyPartReader(
BOUNDARY, {CONTENT_DISPOSITION: "attachment; filename=foo.html"}, None
)
assert "foo.html" == part.filename
async def test_reading_long_part(self, newline: Any) -> None:
size = 2 * 2 ** 16
protocol = mock.Mock(_reading_paused=False)
stream = StreamReader(protocol, 2 ** 16, loop=asyncio.get_event_loop())
stream.feed_data(b"0" * size + b"%s--:--" % newline)
stream.feed_eof()
obj = aiohttp.BodyPartReader(BOUNDARY, {}, stream, _newline=newline)
data = await obj.read()
assert len(data) == size
class TestMultipartReader:
def test_from_response(self, newline: Any) -> None:
resp = Response(
{CONTENT_TYPE: 'multipart/related;boundary=":"'},
Stream(b"--:%s\r\nhello%s--:--" % (newline, newline)),
)
res = aiohttp.MultipartReader.from_response(resp)
assert isinstance(res, MultipartResponseWrapper)
assert isinstance(res.stream, aiohttp.MultipartReader)
def test_bad_boundary(self) -> None:
resp = Response(
{CONTENT_TYPE: "multipart/related;boundary=" + "a" * 80}, Stream(b"")
)
with pytest.raises(ValueError):
aiohttp.MultipartReader.from_response(resp)
def test_dispatch(self, newline: Any) -> None:
reader = aiohttp.MultipartReader(
{CONTENT_TYPE: 'multipart/related;boundary=":"'},
Stream(b"--:%s\r\necho%s--:--" % (newline, newline)),
)
res = reader._get_part_reader({CONTENT_TYPE: "text/plain"})
assert isinstance(res, reader.part_reader_cls)
def test_dispatch_bodypart(self, newline: Any) -> None:
reader = aiohttp.MultipartReader(
{CONTENT_TYPE: 'multipart/related;boundary=":"'},
Stream(b"--:%s\r\necho%s--:--" % (newline, newline)),
)
res = reader._get_part_reader({CONTENT_TYPE: "text/plain"})
assert isinstance(res, reader.part_reader_cls)
def test_dispatch_multipart(self, newline: Any) -> None:
reader = aiohttp.MultipartReader(
{CONTENT_TYPE: 'multipart/related;boundary=":"'},
Stream(
newline.join(
[
b"----:--",
b"",
b"test",
b"----:--",
b"",
b"passed",
b"----:----" b"--:--",
]
)
),
)
res = reader._get_part_reader(
{CONTENT_TYPE: "multipart/related;boundary=--:--"}
)
assert isinstance(res, reader.__class__)
def test_dispatch_custom_multipart_reader(self, newline: Any) -> None:
class CustomReader(aiohttp.MultipartReader):
pass
reader = aiohttp.MultipartReader(
{CONTENT_TYPE: 'multipart/related;boundary=":"'},
Stream(
newline.join(
[
b"----:--",
b"",
b"test",
b"----:--",
b"",
b"passed",
b"----:----",
b"--:--",
]
)
),
)
reader.multipart_reader_cls = CustomReader
res = reader._get_part_reader(
{CONTENT_TYPE: "multipart/related;boundary=--:--"}
)
assert isinstance(res, CustomReader)
async def test_emit_next(self, newline: Any) -> None:
reader = aiohttp.MultipartReader(
{CONTENT_TYPE: 'multipart/related;boundary=":"'},
Stream(b"--:%s\r\necho%s--:--" % (newline, newline)),
)
res = await reader.next()
assert isinstance(res, reader.part_reader_cls)
async def test_invalid_boundary(self, newline: Any) -> None:
reader = aiohttp.MultipartReader(
{CONTENT_TYPE: 'multipart/related;boundary=":"'},
Stream(b"---:%s\r\necho%s---:--" % (newline, newline)),
)
with pytest.raises(ValueError):
await reader.next()
async def test_release(self, newline: Any) -> None:
reader = aiohttp.MultipartReader(
{CONTENT_TYPE: 'multipart/mixed;boundary=":"'},
Stream(
newline.join(
[
b"--:",
b"Content-Type: multipart/related;boundary=--:--",
b"",
b"----:--",
b"",
b"test",
b"----:--",
b"",
b"passed",
b"----:----",
b"",
b"--:--",
]
)
),
)
await reader.release()
assert reader.at_eof()
async def test_release_release(self, newline: Any) -> None:
reader = aiohttp.MultipartReader(
{CONTENT_TYPE: 'multipart/related;boundary=":"'},
Stream(b"--:%s\r\necho%s--:--" % (newline, newline)),
)
await reader.release()
assert reader.at_eof()
await reader.release()
assert reader.at_eof()
async def test_release_next(self, newline: Any) -> None:
reader = aiohttp.MultipartReader(
{CONTENT_TYPE: 'multipart/related;boundary=":"'},
Stream(b"--:%s\r\necho%s--:--" % (newline, newline)),
)
await reader.release()
assert reader.at_eof()
res = await reader.next()
assert res is None
async def test_second_next_releases_previous_object(self, newline: Any) -> None:
reader = aiohttp.MultipartReader(
{CONTENT_TYPE: 'multipart/related;boundary=":"'},
Stream(
newline.join(
[
b"--:",
b"",
b"test",
b"--:",
b"",
b"passed",
b"--:--",
]
)
),
)
first = await reader.next()
assert isinstance(first, aiohttp.BodyPartReader)
second = await reader.next()
assert first.at_eof()
assert not second.at_eof()
async def test_release_without_read_the_last_object(self, newline: Any) -> None:
reader = aiohttp.MultipartReader(
{CONTENT_TYPE: 'multipart/related;boundary=":"'},
Stream(
newline.join(
[
b"--:",
b"",
b"test",
b"--:",
b"",
b"passed",
b"--:--",
]
)
),
)
first = await reader.next()
second = await reader.next()
third = await reader.next()
assert first.at_eof()
        assert second.at_eof()
assert third is None
async def test_read_chunk_by_length_doesnt_breaks_reader(
self, newline: Any
) -> None:
reader = aiohttp.MultipartReader(
{CONTENT_TYPE: 'multipart/related;boundary=":"'},
Stream(
newline.join(
[
b"--:",
b"Content-Length: 4",
b"",
b"test",
b"--:",
b"Content-Length: 6",
b"",
b"passed",
b"--:--",
]
)
),
)
body_parts = []
while True:
read_part = b""
part = await reader.next()
if part is None:
break
while not part.at_eof():
read_part += await part.read_chunk(3)
body_parts.append(read_part)
assert body_parts == [b"test", b"passed"]
async def test_read_chunk_from_stream_doesnt_breaks_reader(
self, newline: Any
) -> None:
reader = aiohttp.MultipartReader(
{CONTENT_TYPE: 'multipart/related;boundary=":"'},
Stream(
newline.join(
[
b"--:",
b"",
b"chunk",
b"--:",
b"",
b"two_chunks",
b"--:--",
]
)
),
)
body_parts = []
while True:
read_part = b""
part = await reader.next()
if part is None:
break
while not part.at_eof():
chunk = await part.read_chunk(5)
assert chunk
read_part += chunk
body_parts.append(read_part)
assert body_parts == [b"chunk", b"two_chunks"]
async def test_reading_skips_prelude(self, newline: Any) -> None:
reader = aiohttp.MultipartReader(
{CONTENT_TYPE: 'multipart/related;boundary=":"'},
Stream(
newline.join(
[
b"Multi-part data is not supported.",
b"",
b"--:",
b"",
b"test",
b"--:",
b"",
b"passed",
b"--:--",
]
)
),
)
first = await reader.next()
assert isinstance(first, aiohttp.BodyPartReader)
second = await reader.next()
assert first.at_eof()
assert not second.at_eof()
async def test_read_mixed_newlines(self) -> None:
reader = aiohttp.MultipartReader(
{CONTENT_TYPE: 'multipart/mixed;boundary=":"'},
Stream(
b"".join(
[
b"--:\n",
b"Content-Type: multipart/related;boundary=--:--\n",
b"\n",
b"----:--\r\n",
b"\r\n",
b"test\r\n",
b"----:--\r\n",
b"\r\n",
b"passed\r\n",
b"----:----\r\n",
b"\n",
b"--:--",
]
)
),
)
while True:
part = await reader.next()
if part is None:
break
while True:
subpart = await part.next()
if subpart is None:
break
async def test_writer(writer: Any) -> None:
assert writer.size == 7
assert writer.boundary == ":"
async def test_writer_serialize_io_chunk(buf: Any, stream: Any, writer: Any) -> None:
flo = io.BytesIO(b"foobarbaz")
writer.append(flo)
await writer.write(stream)
assert (
buf == b"--:\r\nContent-Type: application/octet-stream"
b"\r\nContent-Length: 9\r\n\r\nfoobarbaz\r\n--:--\r\n"
)
async def test_writer_serialize_json(buf: Any, stream: Any, writer: Any) -> None:
writer.append_json({"привет": "мир"})
await writer.write(stream)
assert (
b'{"\\u043f\\u0440\\u0438\\u0432\\u0435\\u0442":'
b' "\\u043c\\u0438\\u0440"}' in buf
)
async def test_writer_serialize_form(buf: Any, stream: Any, writer: Any) -> None:
data = [("foo", "bar"), ("foo", "baz"), ("boo", "zoo")]
writer.append_form(data)
await writer.write(stream)
assert b"foo=bar&foo=baz&boo=zoo" in buf
async def test_writer_serialize_form_dict(buf: Any, stream: Any, writer: Any) -> None:
data = {"hello": "мир"}
writer.append_form(data)
await writer.write(stream)
assert b"hello=%D0%BC%D0%B8%D1%80" in buf
async def test_writer_write(buf: Any, stream: Any, writer: Any) -> None:
writer.append("foo-bar-baz")
writer.append_json({"test": "passed"})
writer.append_form({"test": "passed"})
writer.append_form([("one", 1), ("two", 2)])
sub_multipart = aiohttp.MultipartWriter(boundary="::")
sub_multipart.append("nested content")
sub_multipart.headers["X-CUSTOM"] = "test"
writer.append(sub_multipart)
await writer.write(stream)
assert (
b"--:\r\n"
b"Content-Type: text/plain; charset=utf-8\r\n"
b"Content-Length: 11\r\n\r\n"
b"foo-bar-baz"
b"\r\n"
b"--:\r\n"
b"Content-Type: application/json\r\n"
b"Content-Length: 18\r\n\r\n"
b'{"test": "passed"}'
b"\r\n"
b"--:\r\n"
b"Content-Type: application/x-www-form-urlencoded\r\n"
b"Content-Length: 11\r\n\r\n"
b"test=passed"
b"\r\n"
b"--:\r\n"
b"Content-Type: application/x-www-form-urlencoded\r\n"
b"Content-Length: 11\r\n\r\n"
b"one=1&two=2"
b"\r\n"
b"--:\r\n"
b'Content-Type: multipart/mixed; boundary="::"\r\n'
b"X-CUSTOM: test\r\nContent-Length: 93\r\n\r\n"
b"--::\r\n"
b"Content-Type: text/plain; charset=utf-8\r\n"
b"Content-Length: 14\r\n\r\n"
b"nested content\r\n"
b"--::--\r\n"
b"\r\n"
b"--:--\r\n"
) == bytes(buf)
async def test_writer_write_no_close_boundary(buf: Any, stream: Any) -> None:
writer = aiohttp.MultipartWriter(boundary=":")
writer.append("foo-bar-baz")
writer.append_json({"test": "passed"})
writer.append_form({"test": "passed"})
writer.append_form([("one", 1), ("two", 2)])
await writer.write(stream, close_boundary=False)
assert (
b"--:\r\n"
b"Content-Type: text/plain; charset=utf-8\r\n"
b"Content-Length: 11\r\n\r\n"
b"foo-bar-baz"
b"\r\n"
b"--:\r\n"
b"Content-Type: application/json\r\n"
b"Content-Length: 18\r\n\r\n"
b'{"test": "passed"}'
b"\r\n"
b"--:\r\n"
b"Content-Type: application/x-www-form-urlencoded\r\n"
b"Content-Length: 11\r\n\r\n"
b"test=passed"
b"\r\n"
b"--:\r\n"
b"Content-Type: application/x-www-form-urlencoded\r\n"
b"Content-Length: 11\r\n\r\n"
b"one=1&two=2"
b"\r\n"
) == bytes(buf)
async def test_writer_write_no_parts(buf: Any, stream: Any, writer: Any) -> None:
await writer.write(stream)
assert b"--:--\r\n" == bytes(buf)
async def test_writer_serialize_with_content_encoding_gzip(
buf: Any, stream: Any, writer: Any
) -> None:
writer.append("Time to Relax!", {CONTENT_ENCODING: "gzip"})
await writer.write(stream)
headers, message = bytes(buf).split(b"\r\n\r\n", 1)
assert (
b"--:\r\nContent-Type: text/plain; charset=utf-8\r\n"
b"Content-Encoding: gzip" == headers
)
decompressor = zlib.decompressobj(wbits=16 + zlib.MAX_WBITS)
data = decompressor.decompress(message.split(b"\r\n")[0])
data += decompressor.flush()
assert b"Time to Relax!" == data
async def test_writer_serialize_with_content_encoding_deflate(
buf: Any, stream: Any, writer: Any
) -> None:
writer.append("Time to Relax!", {CONTENT_ENCODING: "deflate"})
await writer.write(stream)
headers, message = bytes(buf).split(b"\r\n\r\n", 1)
assert (
b"--:\r\nContent-Type: text/plain; charset=utf-8\r\n"
b"Content-Encoding: deflate" == headers
)
thing = b"\x0b\xc9\xccMU(\xc9W\x08J\xcdI\xacP\x04\x00\r\n--:--\r\n"
assert thing == message
async def test_writer_serialize_with_content_encoding_identity(
buf: Any, stream: Any, writer: Any
) -> None:
thing = b"\x0b\xc9\xccMU(\xc9W\x08J\xcdI\xacP\x04\x00"
writer.append(thing, {CONTENT_ENCODING: "identity"})
await writer.write(stream)
headers, message = bytes(buf).split(b"\r\n\r\n", 1)
assert (
b"--:\r\nContent-Type: application/octet-stream\r\n"
b"Content-Encoding: identity\r\n"
b"Content-Length: 16" == headers
)
assert thing == message.split(b"\r\n")[0]
def test_writer_serialize_with_content_encoding_unknown(
buf: Any, stream: Any, writer: Any
) -> None:
with pytest.raises(RuntimeError):
writer.append("Time to Relax!", {CONTENT_ENCODING: "snappy"})
async def test_writer_with_content_transfer_encoding_base64(
buf: Any, stream: Any, writer: Any
) -> None:
writer.append("Time to Relax!", {CONTENT_TRANSFER_ENCODING: "base64"})
await writer.write(stream)
headers, message = bytes(buf).split(b"\r\n\r\n", 1)
assert (
b"--:\r\nContent-Type: text/plain; charset=utf-8\r\n"
b"Content-Transfer-Encoding: base64" == headers
)
assert b"VGltZSB0byBSZWxheCE=" == message.split(b"\r\n")[0]
async def test_writer_content_transfer_encoding_quote_printable(
buf: Any, stream: Any, writer: Any
) -> None:
writer.append("Привет, мир!", {CONTENT_TRANSFER_ENCODING: "quoted-printable"})
await writer.write(stream)
headers, message = bytes(buf).split(b"\r\n\r\n", 1)
assert (
b"--:\r\nContent-Type: text/plain; charset=utf-8\r\n"
b"Content-Transfer-Encoding: quoted-printable" == headers
)
assert (
b"=D0=9F=D1=80=D0=B8=D0=B2=D0=B5=D1=82,"
b" =D0=BC=D0=B8=D1=80!" == message.split(b"\r\n")[0]
)
def test_writer_content_transfer_encoding_unknown(
buf: Any, stream: Any, writer: Any
) -> None:
with pytest.raises(RuntimeError):
writer.append("Time to Relax!", {CONTENT_TRANSFER_ENCODING: "unknown"})
class TestMultipartWriter:
def test_default_subtype(self, writer: Any) -> None:
mimetype = parse_mimetype(writer.headers.get(CONTENT_TYPE))
assert "multipart" == mimetype.type
assert "mixed" == mimetype.subtype
def test_unquoted_boundary(self) -> None:
writer = aiohttp.MultipartWriter(boundary="abc123")
expected = {CONTENT_TYPE: "multipart/mixed; boundary=abc123"}
assert expected == writer.headers
def test_quoted_boundary(self) -> None:
writer = aiohttp.MultipartWriter(boundary=R"\"")
expected = {CONTENT_TYPE: R'multipart/mixed; boundary="\\\""'}
assert expected == writer.headers
def test_bad_boundary(self) -> None:
with pytest.raises(ValueError):
aiohttp.MultipartWriter(boundary="тест")
with pytest.raises(ValueError):
aiohttp.MultipartWriter(boundary="test\n")
def test_default_headers(self, writer: Any) -> None:
expected = {CONTENT_TYPE: 'multipart/mixed; boundary=":"'}
assert expected == writer.headers
def test_iter_parts(self, writer: Any) -> None:
writer.append("foo")
writer.append("bar")
writer.append("baz")
assert 3 == len(list(writer))
def test_append(self, writer: Any) -> None:
assert 0 == len(writer)
writer.append("hello, world!")
assert 1 == len(writer)
assert isinstance(writer._parts[0][0], payload.Payload)
def test_append_with_headers(self, writer: Any) -> None:
writer.append("hello, world!", {"x-foo": "bar"})
assert 1 == len(writer)
assert "x-foo" in writer._parts[0][0].headers
assert writer._parts[0][0].headers["x-foo"] == "bar"
def test_append_json(self, writer: Any) -> None:
writer.append_json({"foo": "bar"})
assert 1 == len(writer)
part = writer._parts[0][0]
assert part.headers[CONTENT_TYPE] == "application/json"
def test_append_part(self, writer: Any) -> None:
part = payload.get_payload("test", headers={CONTENT_TYPE: "text/plain"})
writer.append(part, {CONTENT_TYPE: "test/passed"})
assert 1 == len(writer)
part = writer._parts[0][0]
assert part.headers[CONTENT_TYPE] == "test/passed"
def test_append_json_overrides_content_type(self, writer: Any) -> None:
writer.append_json({"foo": "bar"}, {CONTENT_TYPE: "test/passed"})
assert 1 == len(writer)
part = writer._parts[0][0]
assert part.headers[CONTENT_TYPE] == "test/passed"
def test_append_form(self, writer: Any) -> None:
writer.append_form({"foo": "bar"}, {CONTENT_TYPE: "test/passed"})
assert 1 == len(writer)
part = writer._parts[0][0]
assert part.headers[CONTENT_TYPE] == "test/passed"
def test_append_multipart(self, writer: Any) -> None:
subwriter = aiohttp.MultipartWriter(boundary=":")
subwriter.append_json({"foo": "bar"})
writer.append(subwriter, {CONTENT_TYPE: "test/passed"})
assert 1 == len(writer)
part = writer._parts[0][0]
assert part.headers[CONTENT_TYPE] == "test/passed"
def test_with(self) -> None:
with aiohttp.MultipartWriter(boundary=":") as writer:
writer.append("foo")
writer.append(b"bar")
writer.append_json({"baz": True})
assert 3 == len(writer)
def test_append_int_not_allowed(self) -> None:
with pytest.raises(TypeError):
with aiohttp.MultipartWriter(boundary=":") as writer:
writer.append(1)
def test_append_float_not_allowed(self) -> None:
with pytest.raises(TypeError):
with aiohttp.MultipartWriter(boundary=":") as writer:
writer.append(1.1)
def test_append_none_not_allowed(self) -> None:
with pytest.raises(TypeError):
with aiohttp.MultipartWriter(boundary=":") as writer:
writer.append(None)
async def test_write_preserves_content_disposition(
self, buf: Any, stream: Any
) -> None:
with aiohttp.MultipartWriter(boundary=":") as writer:
part = writer.append(b"foo", headers={CONTENT_TYPE: "test/passed"})
part.set_content_disposition("form-data", filename="bug")
await writer.write(stream)
headers, message = bytes(buf).split(b"\r\n\r\n", 1)
assert headers == (
b"--:\r\n"
b"Content-Type: test/passed\r\n"
b"Content-Length: 3\r\n"
b"Content-Disposition:"
b' form-data; filename="bug"'
)
assert message == b"foo\r\n--:--\r\n"
async def test_preserve_content_disposition_header(
self, buf: Any, stream: Any
) -> None:
# https://github.com/aio-libs/aiohttp/pull/3475#issuecomment-451072381
with pathlib.Path(__file__).open("rb") as fobj:
with aiohttp.MultipartWriter("form-data", boundary=":") as writer:
part = writer.append(
fobj,
headers={
CONTENT_DISPOSITION: 'attachments; filename="bug.py"',
CONTENT_TYPE: "text/python",
},
)
content_length = part.size
await writer.write(stream)
assert part.headers[CONTENT_TYPE] == "text/python"
assert part.headers[CONTENT_DISPOSITION] == ('attachments; filename="bug.py"')
headers, _ = bytes(buf).split(b"\r\n\r\n", 1)
assert headers == (
b"--:\r\n"
b"Content-Type: text/python\r\n"
b'Content-Disposition: attachments; filename="bug.py"\r\n'
b"Content-Length: %s"
b"" % (str(content_length).encode(),)
)
async def test_set_content_disposition_override(
self, buf: Any, stream: Any
) -> None:
# https://github.com/aio-libs/aiohttp/pull/3475#issuecomment-451072381
with pathlib.Path(__file__).open("rb") as fobj:
with aiohttp.MultipartWriter("form-data", boundary=":") as writer:
part = writer.append(
fobj,
headers={
CONTENT_DISPOSITION: 'attachments; filename="bug.py"',
CONTENT_TYPE: "text/python",
},
)
content_length = part.size
await writer.write(stream)
assert part.headers[CONTENT_TYPE] == "text/python"
assert part.headers[CONTENT_DISPOSITION] == ('attachments; filename="bug.py"')
headers, _ = bytes(buf).split(b"\r\n\r\n", 1)
assert headers == (
b"--:\r\n"
b"Content-Type: text/python\r\n"
b'Content-Disposition: attachments; filename="bug.py"\r\n'
b"Content-Length: %s"
b"" % (str(content_length).encode(),)
)
async def test_reset_content_disposition_header(
self, buf: Any, stream: Any
) -> None:
# https://github.com/aio-libs/aiohttp/pull/3475#issuecomment-451072381
with pathlib.Path(__file__).open("rb") as fobj:
with aiohttp.MultipartWriter("form-data", boundary=":") as writer:
part = writer.append(
fobj,
headers={CONTENT_TYPE: "text/plain"},
)
content_length = part.size
assert CONTENT_DISPOSITION in part.headers
part.set_content_disposition("attachments", filename="bug.py")
await writer.write(stream)
headers, _ = bytes(buf).split(b"\r\n\r\n", 1)
assert headers == (
b"--:\r\n"
b"Content-Type: text/plain\r\n"
b"Content-Disposition:"
b' attachments; filename="bug.py"\r\n'
b"Content-Length: %s"
b"" % (str(content_length).encode(),)
)
async def test_async_for_reader() -> None:
data = [{"test": "passed"}, 42, b"plain text", b"aiohttp\n", b"no epilogue"]
reader = aiohttp.MultipartReader(
headers={CONTENT_TYPE: 'multipart/mixed; boundary=":"'},
content=Stream(
b"\r\n".join(
[
b"--:",
b"Content-Type: application/json",
b"",
json.dumps(data[0]).encode(),
b"--:",
b"Content-Type: application/json",
b"",
json.dumps(data[1]).encode(),
b"--:",
b'Content-Type: multipart/related; boundary="::"',
b"",
b"--::",
b"Content-Type: text/plain",
b"",
data[2],
b"--::",
b'Content-Disposition: attachment; filename="aiohttp"',
b"Content-Type: text/plain",
b"Content-Length: 28",
b"Content-Encoding: gzip",
b"",
b"\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\x03K\xcc\xcc\xcf())"
b"\xe0\x02\x00\xd6\x90\xe2O\x08\x00\x00\x00",
b"--::",
b'Content-Type: multipart/related; boundary=":::"',
b"",
b"--:::",
b"Content-Type: text/plain",
b"",
data[4],
b"--:::--",
b"--::--",
b"",
b"--:--",
b"",
]
)
),
)
idata = iter(data)
async def check(reader):
async for part in reader:
if isinstance(part, aiohttp.BodyPartReader):
if part.headers[CONTENT_TYPE] == "application/json":
assert next(idata) == (await part.json())
else:
assert next(idata) == await part.read(decode=True)
else:
await check(part)
await check(reader)
async def test_async_for_bodypart() -> None:
part = aiohttp.BodyPartReader(
boundary=b"--:", headers={}, content=Stream(b"foobarbaz\r\n--:--")
)
async for data in part:
assert data == b"foobarbaz"
| apache-2.0 | 4,208,057,882,136,613,000 | 32.983784 | 86 | 0.519047 | false |
iakov/margen | margen.py | 1 | 4589 | #!/usr/bin/python2
"""
Copyright 2016 Iakov Kirilenko
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Simple generator for colorful AR tags
"""
import argparse
import errno
import numpy as np
import os
import cv2 as cv
def palette2((row,col)):
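    """Return the color tuple for grid cell (row, col); cells whose row or column is a multiple of 5 (the marker border) get a darker base luma."""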
maxDarkLuma = 140 if row % 5 == 0 or col % 5 == 0 else 250
colors = [(maxDarkLuma * 7 / 10, 0, 0), (0, maxDarkLuma * 6 / 10, 0), (0, 0, maxDarkLuma)]
return colors[(row + col) % len(colors)]
def generate_palette(s):
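    """argparse type converter: map a palette number string to a function from (row, col) to a color tuple."""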
n = int(s)
if n == 0:
return lambda _: (0, 0, 0)
elif n == 1:
maxDarkLuma = 150
colors = [(maxDarkLuma * 7 / 10, 0, 0), (0, maxDarkLuma * 6 / 10, 0), (0, 0, maxDarkLuma)]
return lambda(row,col): colors[(row + col) % len(colors)]
elif n == 2:
return palette2
elif n == 3:
return lambda(row,col): palette2((row,col)) if row % 5 != 0 and col % 5 != 0 else (0, 0, 0)
else:
raise argparse.ArgumentTypeError("palette %r not implemented" % s)
def parse_args():
parser = argparse.ArgumentParser(description='AR marker tag generator')
parser.add_argument('codes', metavar='N[..M]', nargs='+', help='integer code N or range N..M')
parser.add_argument('--force', dest='force', action='store_true',
help='ignore checks & errors (depends on context)')
parser.add_argument('--out-dir', dest='dir', default='.', help='output directory name')
parser.add_argument('--palette', dest='palette', metavar='P', type=generate_palette, default=generate_palette("0"),
help='use palette #P ( 0 -- b/w) ')
parser.add_argument('--box-size', dest='boxSize', type=int, default=50, help='bit box size per side in pixels')
parser.add_argument('--verbose', dest='verbose', action='store_true', help='verbose output')
return parser.parse_args()
class Generator:
def draw_box(self, pos, color=None):
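        """Fill the grid cell at pos=(row, col) with the given color, falling back to the palette color when color is None."""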
if color is None:
color = self.args.palette(pos)
row, col = pos
top_left = ((col + 1) * self.args.boxSize, (row + 1) * self.args.boxSize)
down_right = ((col + 2) * self.args.boxSize - 1, (row + 2) * self.args.boxSize - 1)
points = np.array([top_left, (down_right[0], top_left[1]), down_right, (top_left[0], down_right[1])])
cv.fillConvexPoly(self.img, points, color)
def generate(self, code):
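        """Draw the marker for the given integer code; return the image, or None if the code cannot be encoded."""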
freeBits = (6 - 2) * (6 - 2) - 3
if code < 0 or code >= 1 << freeBits:
return None
binCode = bin(code)[2:].zfill(freeBits)
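        # Frame the 13 payload bits into a 16-bit message for the 4x4 inner grid;
        # bits 0 and 12 are fixed to '1' and the last bit to '0' (hence freeBits = 16 - 3).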
binCode = '1' + binCode[0:11] + '1' + binCode[11:] + '0'
"""Check message (for parity, etc.)"""
if binCode[3] == '1' or binCode[4] == '1' or binCode.count('1') % 2 != 0:
if not self.args.force:
                if self.args.verbose:
                    print '%d\t=> %s (-)' % (code, binCode)
return None
"""Draw border"""
for i in range(0, 6):
for pos in [(0, i), (5, i), (i, 0), (i, 5)]:
self.draw_box(pos)
"""Draw message"""
for i in range(0, len(binCode)):
pos = (i / 4 + 1, i % 4 + 1)
self.draw_box(pos, None if binCode[i] == '1' else (255, 255, 255))
if self.args.verbose: print "%d\t=> %s (+)"%(code,binCode)
return self.img
def __init__(self, args):
self.img = cv.bitwise_not(np.zeros(((6 + 2) * args.boxSize, (6 + 2) * args.boxSize, 3), np.uint8))
self.args = args
def main():
args = parse_args()
try:
os.makedirs(args.dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
g = Generator(args)
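    # Expand each "N" or "N..M" argument into the inclusive list of integer codes to render.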
for code in [z for y in [x.split("..") for x in args.codes]
for z in range(int(y[0]), 1 + int(y[0] if len(y) == 1 else y[1]))]:
marker = g.generate(code)
if marker is None: continue
filename = args.dir + '/{0:04}.png'.format(code)
cv.cvtColor(marker, cv.COLOR_RGB2BGR, marker)
cv.imwrite(filename, marker, [cv.IMWRITE_PNG_COMPRESSION, 9])
if __name__ == "__main__":
main()
| apache-2.0 | -5,827,541,999,847,004,000 | 36.008065 | 119 | 0.577468 | false |
fiete201/qutebrowser | qutebrowser/mainwindow/prompt.py | 1 | 34258 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2016-2021 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <https://www.gnu.org/licenses/>.
"""Showing prompts above the statusbar."""
import os.path
import html
import collections
import functools
import dataclasses
from typing import Deque, MutableSequence, Optional, cast
from PyQt5.QtCore import (pyqtSlot, pyqtSignal, Qt, QTimer, QDir, QModelIndex,
QItemSelectionModel, QObject, QEventLoop)
from PyQt5.QtWidgets import (QWidget, QGridLayout, QVBoxLayout, QLineEdit,
QLabel, QFileSystemModel, QTreeView, QSizePolicy,
QSpacerItem)
from qutebrowser.browser import downloads
from qutebrowser.config import config, configtypes, configexc, stylesheet
from qutebrowser.utils import usertypes, log, utils, qtutils, objreg, message
from qutebrowser.keyinput import modeman
from qutebrowser.api import cmdutils
from qutebrowser.utils import urlmatch
prompt_queue = cast('PromptQueue', None)
@dataclasses.dataclass
class AuthInfo:
"""Authentication info returned by a prompt."""
user: str
password: str
class Error(Exception):
"""Base class for errors in this module."""
class UnsupportedOperationError(Error):
"""Raised when the prompt class doesn't support the requested operation."""
class PromptQueue(QObject):
"""Global manager and queue for upcoming prompts.
The way in which multiple questions are handled deserves some explanation.
If a question is blocking, we *need* to ask it immediately, and can't wait
for previous questions to finish. We could theoretically ask a blocking
question inside of another blocking one, so in ask_question we simply save
the current question on the stack, let the user answer the *most recent*
question, and then restore the previous state.
With a non-blocking question, things are a bit easier. We simply add it to
self._queue if we're still busy handling another question, since it can be
answered at any time.
In either case, as soon as we finished handling a question, we call
_pop_later() which schedules a _pop to ask the next question in _queue. We
schedule it rather than doing it immediately because then the order of how
things happen is clear, e.g. on_mode_left can't happen after we already set
up the *new* question.
Attributes:
_shutting_down: Whether we're currently shutting down the prompter and
should ignore future questions to avoid segfaults.
_loops: A list of local EventLoops to spin in when blocking.
_queue: A deque of waiting questions.
_question: The current Question object if we're handling a question.
Signals:
show_prompts: Emitted with a Question object when prompts should be
shown.
"""
show_prompts = pyqtSignal(usertypes.Question)
def __init__(self, parent=None):
super().__init__(parent)
self._question = None
self._shutting_down = False
self._loops: MutableSequence[qtutils.EventLoop] = []
self._queue: Deque[usertypes.Question] = collections.deque()
message.global_bridge.mode_left.connect(self._on_mode_left)
def __repr__(self):
return utils.get_repr(self, loops=len(self._loops),
queue=len(self._queue), question=self._question)
def _pop_later(self):
"""Helper to call self._pop as soon as everything else is done."""
QTimer.singleShot(0, self._pop)
def _pop(self):
"""Pop a question from the queue and ask it, if there are any."""
log.prompt.debug("Popping from queue {}".format(self._queue))
if self._queue:
question = self._queue.popleft()
if not question.is_aborted:
# the question could already be aborted, e.g. by a cancelled
# download. See
# https://github.com/qutebrowser/qutebrowser/issues/415 and
# https://github.com/qutebrowser/qutebrowser/issues/1249
self.ask_question(question, blocking=False)
def shutdown(self):
"""Cancel all blocking questions.
Quits and removes all running event loops.
Return:
True if loops needed to be aborted,
False otherwise.
"""
log.prompt.debug("Shutting down with loops {}".format(self._loops))
self._shutting_down = True
if self._loops:
for loop in self._loops:
loop.quit()
loop.deleteLater()
return True
else:
return False
@pyqtSlot(usertypes.Question, bool)
def ask_question(self, question, blocking):
"""Display a prompt for a given question.
Args:
question: The Question object to ask.
blocking: If True, this function blocks and returns the result.
Return:
The answer of the user when blocking=True.
None if blocking=False.
"""
log.prompt.debug("Asking question {}, blocking {}, loops {}, queue "
"{}".format(question, blocking, self._loops,
self._queue))
if self._shutting_down:
# If we're currently shutting down we have to ignore this question
# to avoid segfaults - see
# https://github.com/qutebrowser/qutebrowser/issues/95
log.prompt.debug("Ignoring question because we're shutting down.")
question.abort()
return None
if self._question is not None and not blocking:
# We got an async question, but we're already busy with one, so we
# just queue it up for later.
log.prompt.debug("Adding {} to queue.".format(question))
self._queue.append(question)
return None
if blocking:
# If we're blocking we save the old question on the stack, so we
# can restore it after exec, if exec gets called multiple times.
log.prompt.debug("New question is blocking, saving {}".format(
self._question))
old_question = self._question
if old_question is not None:
old_question.interrupted = True
self._question = question
self.show_prompts.emit(question)
if blocking:
loop = qtutils.EventLoop()
self._loops.append(loop)
loop.destroyed.connect(lambda: self._loops.remove(loop))
question.completed.connect(loop.quit)
question.completed.connect(loop.deleteLater)
log.prompt.debug("Starting loop.exec() for {}".format(question))
flags = cast(QEventLoop.ProcessEventsFlags,
QEventLoop.ExcludeSocketNotifiers)
loop.exec(flags)
log.prompt.debug("Ending loop.exec() for {}".format(question))
log.prompt.debug("Restoring old question {}".format(old_question))
self._question = old_question
self.show_prompts.emit(old_question)
if old_question is None:
# Nothing left to restore, so we can go back to popping async
# questions.
if self._queue:
self._pop_later()
return question.answer
else:
question.completed.connect(self._pop_later)
return None
@pyqtSlot(usertypes.KeyMode)
def _on_mode_left(self, mode):
"""Abort question when a prompt mode was left."""
if mode not in [usertypes.KeyMode.prompt, usertypes.KeyMode.yesno]:
return
if self._question is None:
return
log.prompt.debug("Left mode {}, hiding {}".format(
mode, self._question))
self.show_prompts.emit(None)
if self._question.answer is None and not self._question.is_aborted:
log.prompt.debug("Cancelling {} because {} was left".format(
self._question, mode))
self._question.cancel()
self._question = None
class PromptContainer(QWidget):
"""Container for prompts to be shown above the statusbar.
This is a per-window object, however each window shows the same prompt.
Attributes:
_layout: The layout used to show prompts in.
_win_id: The window ID this object is associated with.
Signals:
update_geometry: Emitted when the geometry should be updated.
"""
STYLESHEET = """
QWidget#PromptContainer {
{% if conf.statusbar.position == 'top' %}
border-bottom-left-radius: {{ conf.prompt.radius }}px;
border-bottom-right-radius: {{ conf.prompt.radius }}px;
{% else %}
border-top-left-radius: {{ conf.prompt.radius }}px;
border-top-right-radius: {{ conf.prompt.radius }}px;
{% endif %}
}
QWidget {
font: {{ conf.fonts.prompts }};
color: {{ conf.colors.prompts.fg }};
background-color: {{ conf.colors.prompts.bg }};
}
QLineEdit {
border: {{ conf.colors.prompts.border }};
}
QTreeView {
selection-background-color: {{ conf.colors.prompts.selected.bg }};
border: {{ conf.colors.prompts.border }};
}
QTreeView::branch {
background-color: {{ conf.colors.prompts.bg }};
}
QTreeView::item:selected, QTreeView::item:selected:hover,
QTreeView::branch:selected {
background-color: {{ conf.colors.prompts.selected.bg }};
}
"""
update_geometry = pyqtSignal()
def __init__(self, win_id, parent=None):
super().__init__(parent)
self._layout = QVBoxLayout(self)
self._layout.setContentsMargins(10, 10, 10, 10)
self._win_id = win_id
self._prompt: Optional[_BasePrompt] = None
self.setObjectName('PromptContainer')
self.setAttribute(Qt.WA_StyledBackground, True)
stylesheet.set_register(self)
message.global_bridge.prompt_done.connect(self._on_prompt_done)
prompt_queue.show_prompts.connect(self._on_show_prompts)
message.global_bridge.mode_left.connect(self._on_global_mode_left)
def __repr__(self):
return utils.get_repr(self, win_id=self._win_id)
@pyqtSlot(usertypes.Question)
def _on_show_prompts(self, question):
"""Show a prompt for the given question.
Args:
question: A Question object or None.
"""
item = self._layout.takeAt(0)
if item is not None:
widget = item.widget()
log.prompt.debug("Deleting old prompt {}".format(widget))
widget.hide()
widget.deleteLater()
if question is None:
log.prompt.debug("No prompts left, hiding prompt container.")
self._prompt = None
self.hide()
return
classes = {
usertypes.PromptMode.yesno: YesNoPrompt,
usertypes.PromptMode.text: LineEditPrompt,
usertypes.PromptMode.user_pwd: AuthenticationPrompt,
usertypes.PromptMode.download: DownloadFilenamePrompt,
usertypes.PromptMode.alert: AlertPrompt,
}
klass = classes[question.mode]
prompt = klass(question)
log.prompt.debug("Displaying prompt {}".format(prompt))
self._prompt = prompt
# If this question was interrupted, we already connected the signal
if not question.interrupted:
question.aborted.connect(
functools.partial(self._on_aborted, prompt.KEY_MODE))
modeman.enter(self._win_id, prompt.KEY_MODE, 'question asked')
self.setSizePolicy(prompt.sizePolicy())
self._layout.addWidget(prompt)
prompt.show()
self.show()
prompt.setFocus()
self.update_geometry.emit()
@pyqtSlot()
def _on_aborted(self, key_mode):
"""Leave KEY_MODE whenever a prompt is aborted."""
try:
modeman.leave(self._win_id, key_mode, 'aborted', maybe=True)
except objreg.RegistryUnavailableError:
# window was deleted: ignore
pass
@pyqtSlot(usertypes.KeyMode)
def _on_prompt_done(self, key_mode):
"""Leave the prompt mode in this window if a question was answered."""
modeman.leave(self._win_id, key_mode, ':prompt-accept', maybe=True)
@pyqtSlot(usertypes.KeyMode)
def _on_global_mode_left(self, mode):
"""Leave prompt/yesno mode in this window if it was left elsewhere.
This ensures no matter where a prompt was answered, we leave the prompt
mode and dispose of the prompt object in every window.
"""
if mode not in [usertypes.KeyMode.prompt, usertypes.KeyMode.yesno]:
return
modeman.leave(self._win_id, mode, 'left in other window', maybe=True)
item = self._layout.takeAt(0)
if item is not None:
widget = item.widget()
log.prompt.debug("Deleting prompt {}".format(widget))
widget.hide()
widget.deleteLater()
@cmdutils.register(instance='prompt-container', scope='window',
modes=[usertypes.KeyMode.prompt,
usertypes.KeyMode.yesno])
def prompt_accept(self, value=None, *, save=False):
"""Accept the current prompt.
//
This executes the next action depending on the question mode, e.g. asks
for the password or leaves the mode.
Args:
value: If given, uses this value instead of the entered one.
For boolean prompts, "yes"/"no" are accepted as value.
save: Save the value to the config.
"""
assert self._prompt is not None
question = self._prompt.question
try:
done = self._prompt.accept(value, save=save)
except Error as e:
raise cmdutils.CommandError(str(e))
if done:
message.global_bridge.prompt_done.emit(self._prompt.KEY_MODE)
question.done()
@cmdutils.register(instance='prompt-container', scope='window',
modes=[usertypes.KeyMode.prompt], maxsplit=0)
def prompt_open_download(self, cmdline: str = None,
pdfjs: bool = False) -> None:
"""Immediately open a download.
If no specific command is given, this will use the system's default
application to open the file.
Args:
cmdline: The command which should be used to open the file. A `{}`
is expanded to the temporary file name. If no `{}` is
present, the filename is automatically appended to the
cmdline.
pdfjs: Open the download via PDF.js.
"""
assert self._prompt is not None
try:
self._prompt.download_open(cmdline, pdfjs=pdfjs)
except UnsupportedOperationError:
pass
@cmdutils.register(instance='prompt-container', scope='window',
modes=[usertypes.KeyMode.prompt])
@cmdutils.argument('which', choices=['next', 'prev'])
def prompt_item_focus(self, which):
"""Shift the focus of the prompt file completion menu to another item.
Args:
which: 'next', 'prev'
"""
assert self._prompt is not None
try:
self._prompt.item_focus(which)
except UnsupportedOperationError:
pass
@cmdutils.register(
instance='prompt-container', scope='window',
modes=[usertypes.KeyMode.prompt, usertypes.KeyMode.yesno])
def prompt_yank(self, sel=False):
"""Yank URL to clipboard or primary selection.
Args:
sel: Use the primary selection instead of the clipboard.
"""
assert self._prompt is not None
question = self._prompt.question
if question.url is None:
message.error('No URL found.')
return
if sel and utils.supports_selection():
target = 'primary selection'
else:
sel = False
target = 'clipboard'
utils.set_clipboard(question.url, sel)
message.info("Yanked to {}: {}".format(target, question.url))
class LineEdit(QLineEdit):
"""A line edit used in prompts."""
def __init__(self, parent=None):
super().__init__(parent)
self.setStyleSheet("""
QLineEdit {
background-color: transparent;
}
""")
self.setAttribute(Qt.WA_MacShowFocusRect, False)
def keyPressEvent(self, e):
"""Override keyPressEvent to paste primary selection on Shift + Ins."""
if e.key() == Qt.Key_Insert and e.modifiers() == Qt.ShiftModifier:
try:
text = utils.get_clipboard(selection=True, fallback=True)
except utils.ClipboardError: # pragma: no cover
e.ignore()
else:
e.accept()
self.insert(text)
return
super().keyPressEvent(e)
def __repr__(self):
return utils.get_repr(self)
class _BasePrompt(QWidget):
"""Base class for all prompts."""
KEY_MODE = usertypes.KeyMode.prompt
def __init__(self, question, parent=None):
super().__init__(parent)
self.question = question
self._vbox = QVBoxLayout(self)
self._vbox.setSpacing(15)
self._key_grid = None
def __repr__(self):
return utils.get_repr(self, question=self.question, constructor=True)
def _init_texts(self, question):
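        """Add the question's title and optional text to the layout."""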
assert question.title is not None, question
title = '<font size="4"><b>{}</b></font>'.format(
html.escape(question.title))
title_label = QLabel(title, self)
self._vbox.addWidget(title_label)
if question.text is not None:
# Not doing any HTML escaping here as the text can be formatted
text_label = QLabel(question.text)
text_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self._vbox.addWidget(text_label)
def _init_key_label(self):
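        """Build the grid of key-binding hints shown with the prompt."""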
assert self._key_grid is None, self._key_grid
self._key_grid = QGridLayout()
self._key_grid.setVerticalSpacing(0)
all_bindings = config.key_instance.get_reverse_bindings_for(
self.KEY_MODE.name)
labels = []
for cmd, text in self._allowed_commands():
bindings = all_bindings.get(cmd, [])
if bindings:
binding = None
preferred = ['<enter>', '<escape>']
for pref in preferred:
if pref in bindings:
binding = pref
if binding is None:
binding = bindings[0]
key_label = QLabel('<b>{}</b>'.format(html.escape(binding)))
text_label = QLabel(text)
labels.append((key_label, text_label))
for i, (key_label, text_label) in enumerate(labels):
self._key_grid.addWidget(key_label, i, 0)
self._key_grid.addWidget(text_label, i, 1)
spacer = QSpacerItem(0, 0, QSizePolicy.Expanding)
self._key_grid.addItem(spacer, 0, 2)
self._vbox.addLayout(self._key_grid)
def _check_save_support(self, save):
if save:
raise UnsupportedOperationError("Saving answers is only possible "
"with yes/no prompts.")
def accept(self, value=None, save=False):
raise NotImplementedError
def download_open(self, cmdline, pdfjs):
"""Open the download directly if this is a download prompt."""
utils.unused(cmdline)
utils.unused(pdfjs)
raise UnsupportedOperationError
def item_focus(self, _which):
"""Switch to next file item if this is a filename prompt.."""
raise UnsupportedOperationError
def _allowed_commands(self):
"""Get the commands we could run as response to this message."""
raise NotImplementedError
class LineEditPrompt(_BasePrompt):
"""A prompt for a single text value."""
def __init__(self, question, parent=None):
super().__init__(question, parent)
self._lineedit = LineEdit(self)
self._init_texts(question)
self._vbox.addWidget(self._lineedit)
if question.default:
self._lineedit.setText(question.default)
self._lineedit.selectAll()
self.setFocusProxy(self._lineedit)
self._init_key_label()
def accept(self, value=None, save=False):
self._check_save_support(save)
text = value if value is not None else self._lineedit.text()
self.question.answer = text
return True
def _allowed_commands(self):
return [('prompt-accept', 'Accept'), ('mode-leave', 'Abort')]
class FilenamePrompt(_BasePrompt):
"""A prompt for a filename."""
def __init__(self, question, parent=None):
super().__init__(question, parent)
self._init_texts(question)
self._init_key_label()
self._lineedit = LineEdit(self)
if question.default:
self._lineedit.setText(question.default)
self._lineedit.textEdited.connect(self._set_fileview_root)
self._vbox.addWidget(self._lineedit)
self.setFocusProxy(self._lineedit)
self._init_fileview()
self._set_fileview_root(question.default)
if config.val.prompt.filebrowser:
self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Preferred)
self._to_complete = ''
@pyqtSlot(str)
def _set_fileview_root(self, path, *, tabbed=False):
"""Set the root path for the file display."""
separators = os.sep
if os.altsep is not None:
separators += os.altsep
dirname = os.path.dirname(path)
basename = os.path.basename(path)
if not tabbed:
self._to_complete = ''
try:
if not path:
pass
elif path in separators and os.path.isdir(path):
# Input "/" -> don't strip anything
pass
elif path[-1] in separators and os.path.isdir(path):
# Input like /foo/bar/ -> show /foo/bar/ contents
path = path.rstrip(separators)
elif os.path.isdir(dirname) and not tabbed:
# Input like /foo/ba -> show /foo contents
path = dirname
self._to_complete = basename
else:
return
except OSError:
log.prompt.exception("Failed to get directory information")
return
root = self._file_model.setRootPath(path)
self._file_view.setRootIndex(root)
@pyqtSlot(QModelIndex)
def _insert_path(self, index, *, clicked=True):
"""Handle an element selection.
Args:
index: The QModelIndex of the selected element.
clicked: Whether the element was clicked.
"""
if index == QModelIndex():
path = os.path.join(self._file_model.rootPath(), self._to_complete)
else:
path = os.path.normpath(self._file_model.filePath(index))
if clicked:
path += os.sep
else:
# On Windows, when we have C:\foo and tab over .., we get C:\
path = path.rstrip(os.sep)
log.prompt.debug('Inserting path {}'.format(path))
self._lineedit.setText(path)
self._lineedit.setFocus()
self._set_fileview_root(path, tabbed=True)
if clicked:
# Avoid having a ..-subtree highlighted
self._file_view.setCurrentIndex(QModelIndex())
def _init_fileview(self):
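        """Set up the file system model and tree view used for path completion."""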
self._file_view = QTreeView(self)
self._file_model = QFileSystemModel(self)
self._file_view.setModel(self._file_model)
self._file_view.clicked.connect(self._insert_path)
if config.val.prompt.filebrowser:
self._vbox.addWidget(self._file_view)
else:
self._file_view.hide()
# Only show name
self._file_view.setHeaderHidden(True)
for col in range(1, 4):
self._file_view.setColumnHidden(col, True)
# Nothing selected initially
self._file_view.setCurrentIndex(QModelIndex())
# The model needs to be sorted so we get the correct first/last index
self._file_model.directoryLoaded.connect(
lambda: self._file_model.sort(0))
def accept(self, value=None, save=False):
self._check_save_support(save)
text = value if value is not None else self._lineedit.text()
text = downloads.transform_path(text)
if text is None:
message.error("Invalid filename")
return False
self.question.answer = text
return True
def item_focus(self, which):
# This duplicates some completion code, but I don't see a nicer way...
assert which in ['prev', 'next'], which
selmodel = self._file_view.selectionModel()
parent = self._file_view.rootIndex()
first_index = self._file_model.index(0, 0, parent)
row = self._file_model.rowCount(parent) - 1
last_index = self._file_model.index(row, 0, parent)
if not first_index.isValid():
# No entries
return
assert last_index.isValid()
idx = selmodel.currentIndex()
if not idx.isValid():
# No item selected yet
idx = last_index if which == 'prev' else first_index
elif which == 'prev':
idx = self._file_view.indexAbove(idx)
else:
assert which == 'next', which
idx = self._file_view.indexBelow(idx)
# wrap around if we arrived at beginning/end
if not idx.isValid():
idx = last_index if which == 'prev' else first_index
idx = self._do_completion(idx, which)
selmodel.setCurrentIndex(
idx,
QItemSelectionModel.ClearAndSelect | # type: ignore[arg-type]
QItemSelectionModel.Rows)
self._insert_path(idx, clicked=False)
def _do_completion(self, idx, which):
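        """Move idx in the given direction until a filename matches the typed prefix."""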
filename = self._file_model.fileName(idx)
while not filename.startswith(self._to_complete) and idx.isValid():
if which == 'prev':
idx = self._file_view.indexAbove(idx)
else:
assert which == 'next', which
idx = self._file_view.indexBelow(idx)
filename = self._file_model.fileName(idx)
return idx
def _allowed_commands(self):
return [('prompt-accept', 'Accept'), ('mode-leave', 'Abort')]
class DownloadFilenamePrompt(FilenamePrompt):
"""A prompt for a filename for downloads."""
def __init__(self, question, parent=None):
super().__init__(question, parent)
self._file_model.setFilter(
QDir.AllDirs | QDir.Drives | QDir.NoDot) # type: ignore[arg-type]
def accept(self, value=None, save=False):
done = super().accept(value, save)
answer = self.question.answer
if answer is not None:
self.question.answer = downloads.FileDownloadTarget(answer)
return done
def download_open(self, cmdline, pdfjs):
if pdfjs:
target: 'downloads._DownloadTarget' = downloads.PDFJSDownloadTarget()
else:
target = downloads.OpenFileDownloadTarget(cmdline)
self.question.answer = target
self.question.done()
message.global_bridge.prompt_done.emit(self.KEY_MODE)
def _allowed_commands(self):
cmds = [
('prompt-accept', 'Accept'),
('mode-leave', 'Abort'),
('prompt-open-download', "Open download"),
('prompt-open-download --pdfjs', "Open download via PDF.js"),
('prompt-yank', "Yank URL"),
]
return cmds
class AuthenticationPrompt(_BasePrompt):
"""A prompt for username/password."""
def __init__(self, question, parent=None):
super().__init__(question, parent)
self._init_texts(question)
user_label = QLabel("Username:", self)
self._user_lineedit = LineEdit(self)
password_label = QLabel("Password:", self)
self._password_lineedit = LineEdit(self)
self._password_lineedit.setEchoMode(QLineEdit.Password)
grid = QGridLayout()
grid.addWidget(user_label, 1, 0)
grid.addWidget(self._user_lineedit, 1, 1)
grid.addWidget(password_label, 2, 0)
grid.addWidget(self._password_lineedit, 2, 1)
self._vbox.addLayout(grid)
self._init_key_label()
assert not question.default, question.default
self.setFocusProxy(self._user_lineedit)
def accept(self, value=None, save=False):
self._check_save_support(save)
if value is not None:
if ':' not in value:
raise Error("Value needs to be in the format "
"username:password, but {} was given".format(
value))
username, password = value.split(':', maxsplit=1)
self.question.answer = AuthInfo(username, password)
return True
elif self._user_lineedit.hasFocus():
# Earlier, tab was bound to :prompt-accept, so to still support
# that we simply switch the focus when tab was pressed.
self._password_lineedit.setFocus()
return False
else:
self.question.answer = AuthInfo(self._user_lineedit.text(),
self._password_lineedit.text())
return True
def item_focus(self, which):
"""Support switching between fields with tab."""
assert which in ['prev', 'next'], which
if which == 'next' and self._user_lineedit.hasFocus():
self._password_lineedit.setFocus()
elif which == 'prev' and self._password_lineedit.hasFocus():
self._user_lineedit.setFocus()
def _allowed_commands(self):
return [('prompt-accept', "Accept"),
('mode-leave', "Abort")]
class YesNoPrompt(_BasePrompt):
"""A prompt with yes/no answers."""
KEY_MODE = usertypes.KeyMode.yesno
def __init__(self, question, parent=None):
super().__init__(question, parent)
self._init_texts(question)
self._init_key_label()
def _check_save_support(self, save):
if save and self.question.option is None:
raise Error("No setting available to save the answer for this "
"question.")
def accept(self, value=None, save=False):
self._check_save_support(save)
if value is None:
if self.question.default is None:
raise Error("No default value was set for this question!")
self.question.answer = self.question.default
elif value == 'yes':
self.question.answer = True
elif value == 'no':
self.question.answer = False
else:
raise Error("Invalid value {} - expected yes/no!".format(value))
if save:
opt = config.instance.get_opt(self.question.option)
assert isinstance(opt.typ, configtypes.Bool)
pattern = urlmatch.UrlPattern(self.question.url)
try:
config.instance.set_obj(opt.name, self.question.answer,
pattern=pattern, save_yaml=True)
except configexc.Error as e:
raise Error(str(e))
return True
def _allowed_commands(self):
cmds = []
cmds.append(('prompt-accept yes', "Yes"))
if self.question.option is not None:
cmds.append(('prompt-accept --save yes', "Always"))
cmds.append(('prompt-accept no', "No"))
if self.question.option is not None:
cmds.append(('prompt-accept --save no', "Never"))
if self.question.default is not None:
assert self.question.default in [True, False]
default = 'yes' if self.question.default else 'no'
cmds.append(('prompt-accept', "Use default ({})".format(default)))
cmds.append(('mode-leave', "Abort"))
cmds.append(('prompt-yank', "Yank URL"))
return cmds
class AlertPrompt(_BasePrompt):
"""A prompt without any answer possibility."""
def __init__(self, question, parent=None):
super().__init__(question, parent)
self._init_texts(question)
self._init_key_label()
def accept(self, value=None, save=False):
self._check_save_support(save)
if value is not None:
raise Error("No value is permitted with alert prompts!")
# Simply mark prompt as done without setting self.question.answer
return True
def _allowed_commands(self):
return [('prompt-accept', "Hide")]
def init():
"""Initialize global prompt objects."""
global prompt_queue
prompt_queue = PromptQueue()
message.global_bridge.ask_question.connect( # type: ignore[call-arg]
prompt_queue.ask_question, Qt.DirectConnection)
| gpl-3.0 | 8,010,042,735,672,391,000 | 34.537344 | 81 | 0.594547 | false |
huggingface/transformers | src/transformers/utils/versions.py | 1 | 4381 | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities for working with package versions
"""
import operator
import re
import sys
from typing import Optional
from packaging import version
# The package importlib_metadata is in a different place, depending on the python version.
if sys.version_info < (3, 8):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
ops = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
if got_ver is None:
raise ValueError("got_ver is None")
if want_ver is None:
raise ValueError("want_ver is None")
if not ops[op](version.parse(got_ver), version.parse(want_ver)):
raise ImportError(
f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
)
def require_version(requirement: str, hint: Optional[str] = None) -> None:
"""
Perform a runtime check of the dependency versions, using the exact same syntax used by pip.
The installed module version comes from the `site-packages` dir via `importlib_metadata`.
Args:
requirement (:obj:`str`): pip style definition, e.g., "tokenizers==0.9.4", "tqdm>=4.27", "numpy"
hint (:obj:`str`, `optional`): what suggestion to print in case of requirements not being met
Example::
require_version("pandas>1.1.2")
require_version("numpy>1.18.5", "this is important to have for whatever reason")
"""
hint = f"\n{hint}" if hint is not None else ""
# non-versioned check
if re.match(r"^[\w_\-\d]+$", requirement):
pkg, op, want_ver = requirement, None, None
else:
match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
if not match:
raise ValueError(
f"requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but got {requirement}"
)
pkg, want_full = match[0]
want_range = want_full.split(",") # there could be multiple requirements
wanted = {}
for w in want_range:
match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
if not match:
raise ValueError(
f"requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but got {requirement}"
)
op, want_ver = match[0]
wanted[op] = want_ver
if op not in ops:
raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")
# special case
if pkg == "python":
got_ver = ".".join([str(x) for x in sys.version_info[:3]])
for op, want_ver in wanted.items():
_compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
return
# check if any version is installed
try:
got_ver = importlib_metadata.version(pkg)
except importlib_metadata.PackageNotFoundError:
raise importlib_metadata.PackageNotFoundError(
f"The '{requirement}' distribution was not found and is required by this application. {hint}"
)
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
def require_version_core(requirement):
"""require_version wrapper which emits a core-specific hint on failure"""
hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git master"
return require_version(requirement, hint)
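# Illustrative usage (editor's addition, not part of the upstream module):
# require_version() accepts pip-style specifiers, including several comma-separated
# clauses for one package and the special "python" name handled above. The package
# names below are examples only.
if __name__ == "__main__":
    require_version("python>=3.6")            # checks the running interpreter version
    require_version("packaging>=20.0,<99.0")  # multiple clauses for a single package
    require_version_core("packaging>=20.0")   # same check, with the core install hint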
| apache-2.0 | -2,630,832,435,068,718,000 | 35.508333 | 139 | 0.635471 | false |
fberanizo/neural_network | tests/ibovespa/external_rnn.py | 1 | 5741 | # -*- coding: utf-8 -*-
import sys, os
sys.path.insert(0, os.path.abspath('../..'))
import unittest, pandas, numpy, datetime, itertools, rnn
from sklearn import cross_validation, preprocessing
class ExternalRNN(unittest.TestCase):
"""Test cases for Ibovespa tendency problem."""
grid_search = True
def test_1(self):
"""Tests the accuracy of a External RNN using k-folds validation method."""
# Read data from CSV files
X_train, X_test, y_train, y_test = self.read_data()
# Rescales data
min_max_scaler = preprocessing.MinMaxScaler()
X_train = min_max_scaler.fit_transform(X_train)
X_test = min_max_scaler.fit_transform(X_test)
y_train = min_max_scaler.fit_transform(y_train)
y_test = min_max_scaler.fit_transform(y_test)
n_folds = 5
accuracies = map(lambda x: 0, self.hipergrid())
for idx, hiperparams in enumerate(self.hipergrid()):
skf = cross_validation.StratifiedKFold(y_train.flatten(), n_folds=n_folds)
for fold, (train_index, test_index) in enumerate(skf):
self.progress(((1.0+fold)+n_folds*idx)/(len(self.hipergrid())*n_folds))
X_train2, X_test2 = X_train[train_index], X_train[test_index]
y_train2, y_test2 = y_train[train_index], y_train[test_index]
classifier = rnn.ExternalRNN(**hiperparams).fit(X_train2, y_train2)
accuracies[idx] += classifier.score(X_test2, y_test2)
# Finds which hiperparams give maximum accuracy
best_hiperparams = self.hipergrid()[accuracies.index(numpy.max(accuracies))]
accuracy = classifier.score(X_test, y_test)
        print 'Accuracy on the training set: ' + str(numpy.max(accuracies)/n_folds)
        print 'Accuracy on the test set: ' + str(accuracy)
        print 'Best hyperparameters: ' + str(best_hiperparams)
def read_data(self):
"""Reads and processes financial data from CSV files"""
ibovespa = "%5EBVSP"
america = ["%5EGSPC", "%5EDJI", "%5EMERV", "%5EMXX", "%5EIXIC", "%5EIPSA"]
europe = ["%5EFTSE", "%5EGDAXI", "%5EFCHI", "FTSEMIB.MI", "%5EIBEX"]
asia = ["%5EN225", "%5EHSI", "%5EBSESN", "%5ESSEC", "%5EJKSE"]
continents = 3
stocks_per_continent = 5
time_window = 7 # 7 days
prediction_range = 1 # 1 day
stocks = america + europe + asia
# Request stock data
# data = {}
# url = "http://ichart.finance.yahoo.com/table.csv?s=STOCK_NAME&g=d&a=0&b=1&c=2016&&ignore=.csv"
# for stock_name in america + europe + asia + [ibovespa]:
# print stock_name
# s = requests.get(url.replace("STOCK_NAME", stock_name)).content
# stock = pandas.read_csv(io.StringIO(s.decode('utf-8'))).set_index("Date")
# stock.to_csv('input/' + stock_name + '.csv')
ibovespa_data = pandas.read_csv('input/' + ibovespa + '.csv', parse_dates=['Date'])
stock_data = pandas.DataFrame(data=[], columns=['Date','Open','High','Low','Close','Volume','Adj Close'])
for stock in stocks:
stock_data = stock_data.append(pandas.read_csv('input/' + stock + '.csv', parse_dates=['Date']))
train = pandas.DataFrame(data=[], columns=['Date', 'Trend']).set_index("Date")
test = pandas.DataFrame(data=[], columns=['Date', 'Trend']).set_index("Date")
for idx, ibovespa_data in ibovespa_data.iterrows():
trend = 0 if ibovespa_data["Close"] < ibovespa_data["Open"] else 1
start_date = ibovespa_data["Date"] + pandas.Timedelta('-1 days')
end_date = ibovespa_data["Date"] + pandas.Timedelta('-1 days')
mask = (stock_data['Date'] >= start_date) & (stock_data['Date'] <= end_date)
stocks = stock_data.loc[mask]['Close'].tolist()
columns = ['Date', 'Trend'] + range(len(stocks))
data = [ibovespa_data["Date"], trend] + stocks
row = pandas.DataFrame([data], columns=columns).set_index("Date")
# Data from last 3 months is test, the rest is train
three_months_ago = pandas.to_datetime('today') + pandas.Timedelta('-90 days')
if ibovespa_data["Date"] < three_months_ago:
train = train.append(row)
else:
test = test.append(row)
# Removes rows with NaN columns
train.dropna(axis=0, how='any', inplace=True)
test.dropna(axis=0, how='any', inplace=True)
X_train = train[train.columns.tolist()[:-1]].as_matrix()
y_train = train[train.columns.tolist()[-1:]].as_matrix()
X_test = test[test.columns.tolist()[:-1]].as_matrix()
y_test = test[test.columns.tolist()[-1:]].as_matrix()
return X_train, X_test, y_train, y_test
def hipergrid(self):
"""Hiperparameters for ExternalRNN"""
hidden_layer_size = [{'hidden_layer_size':3},{'hidden_layer_size':5},{'hidden_layer_size':7}]
learning_rate = [{'learning_rate':0.1},{'learning_rate':0.3},{'learning_rate':1}]
grid = []
for hiperparams in itertools.product(hidden_layer_size, learning_rate):
d = {}
for hiperparam in hiperparams:
d.update(hiperparam)
grid.append(d)
return grid
def progress(self, percent):
"""Prints progress in stdout"""
bar_length = 20
hashes = '#' * int(round(percent * bar_length))
spaces = ' ' * (bar_length - len(hashes))
sys.stdout.write("\rPerforming 5-folds grid search: [{0}] {1}%".format(hashes + spaces, int(round(percent * 100))))
sys.stdout.flush()
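# Illustrative sketch (editor's addition, not part of the test): materializes the
# hyperparameter grid built by hipergrid() above -- 3 hidden-layer sizes x 3 learning
# rates give 9 candidate dicts such as {'hidden_layer_size': 3, 'learning_rate': 0.1}.
def _example_show_grid():
    for params in ExternalRNN('test_1').hipergrid():
        print params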
if __name__ == '__main__':
unittest.main()
| bsd-2-clause | -760,951,907,419,125,500 | 42.801527 | 123 | 0.584699 | false |
Microvellum/Fluid-Designer | win64-vc/2.78/Python/bin/2.78/scripts/addons/io_scene_obj/export_obj.py | 1 | 38495 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import os
import bpy
import mathutils
import bpy_extras.io_utils
from progress_report import ProgressReport, ProgressReportSubstep
def name_compat(name):
if name is None:
return 'None'
else:
return name.replace(' ', '_')
def mesh_triangulate(me):
import bmesh
bm = bmesh.new()
bm.from_mesh(me)
bmesh.ops.triangulate(bm, faces=bm.faces)
bm.to_mesh(me)
bm.free()
def write_mtl(scene, filepath, path_mode, copy_set, mtl_dict):
from mathutils import Color, Vector
world = scene.world
if world:
world_amb = world.ambient_color
else:
world_amb = Color((0.0, 0.0, 0.0))
source_dir = os.path.dirname(bpy.data.filepath)
dest_dir = os.path.dirname(filepath)
with open(filepath, "w", encoding="utf8", newline="\n") as f:
fw = f.write
fw('# Blender MTL File: %r\n' % (os.path.basename(bpy.data.filepath) or "None"))
fw('# Material Count: %i\n' % len(mtl_dict))
mtl_dict_values = list(mtl_dict.values())
mtl_dict_values.sort(key=lambda m: m[0])
# Write material/image combinations we have used.
        # Using mtl_dict.values() directly gives an unpredictable order.
for mtl_mat_name, mat, face_img in mtl_dict_values:
# Get the Blender data for the material and the image.
            # Having an image named None would cause a bug, don't do it :)
fw('\nnewmtl %s\n' % mtl_mat_name) # Define a new material: matname_imgname
if mat:
use_mirror = mat.raytrace_mirror.use and mat.raytrace_mirror.reflect_factor != 0.0
# convert from blenders spec to 0 - 1000 range.
if mat.specular_shader == 'WARDISO':
tspec = (0.4 - mat.specular_slope) / 0.0004
else:
tspec = (mat.specular_hardness - 1) / 0.51
fw('Ns %.6f\n' % tspec)
del tspec
# Ambient
if use_mirror:
fw('Ka %.6f %.6f %.6f\n' % (mat.raytrace_mirror.reflect_factor * mat.mirror_color)[:])
else:
fw('Ka %.6f %.6f %.6f\n' % (mat.ambient, mat.ambient, mat.ambient)) # Do not use world color!
fw('Kd %.6f %.6f %.6f\n' % (mat.diffuse_intensity * mat.diffuse_color)[:]) # Diffuse
fw('Ks %.6f %.6f %.6f\n' % (mat.specular_intensity * mat.specular_color)[:]) # Specular
# Emission, not in original MTL standard but seems pretty common, see T45766.
# XXX Blender has no color emission, it's using diffuse color instead...
fw('Ke %.6f %.6f %.6f\n' % (mat.emit * mat.diffuse_color)[:])
if hasattr(mat, "raytrace_transparency") and hasattr(mat.raytrace_transparency, "ior"):
fw('Ni %.6f\n' % mat.raytrace_transparency.ior) # Refraction index
else:
fw('Ni %.6f\n' % 1.0)
fw('d %.6f\n' % mat.alpha) # Alpha (obj uses 'd' for dissolve)
# See http://en.wikipedia.org/wiki/Wavefront_.obj_file for whole list of values...
# Note that mapping is rather fuzzy sometimes, trying to do our best here.
if mat.use_shadeless:
fw('illum 0\n') # ignore lighting
elif mat.specular_intensity == 0:
fw('illum 1\n') # no specular.
elif use_mirror:
if mat.use_transparency and mat.transparency_method == 'RAYTRACE':
if mat.raytrace_mirror.fresnel != 0.0:
fw('illum 7\n') # Reflection, Transparency, Ray trace and Fresnel
else:
fw('illum 6\n') # Reflection, Transparency, Ray trace
elif mat.raytrace_mirror.fresnel != 0.0:
fw('illum 5\n') # Reflection, Ray trace and Fresnel
else:
fw('illum 3\n') # Reflection and Ray trace
elif mat.use_transparency and mat.transparency_method == 'RAYTRACE':
fw('illum 9\n') # 'Glass' transparency and no Ray trace reflection... fuzzy matching, but...
else:
                    fw('illum 2\n')  # light normally
else:
# Write a dummy material here?
fw('Ns 0\n')
fw('Ka %.6f %.6f %.6f\n' % world_amb[:]) # Ambient, uses mirror color,
fw('Kd 0.8 0.8 0.8\n')
fw('Ks 0.8 0.8 0.8\n')
fw('d 1\n') # No alpha
                fw('illum 2\n')  # light normally
# Write images!
if face_img: # We have an image on the face!
filepath = face_img.filepath
if filepath: # may be '' for generated images
# write relative image path
filepath = bpy_extras.io_utils.path_reference(filepath, source_dir, dest_dir,
path_mode, "", copy_set, face_img.library)
fw('map_Kd %s\n' % filepath) # Diffuse mapping image
del filepath
else:
# so we write the materials image.
face_img = None
                if mat:  # No face image. If we have a material, search for an MTex image.
image_map = {}
# backwards so topmost are highest priority
for mtex in reversed(mat.texture_slots):
if mtex and mtex.texture and mtex.texture.type == 'IMAGE':
image = mtex.texture.image
if image:
# texface overrides others
if (mtex.use_map_color_diffuse and (face_img is None) and
(mtex.use_map_warp is False) and (mtex.texture_coords != 'REFLECTION')):
image_map["map_Kd"] = (mtex, image)
if mtex.use_map_ambient:
image_map["map_Ka"] = (mtex, image)
# this is the Spec intensity channel but Ks stands for specular Color
'''
if mtex.use_map_specular:
image_map["map_Ks"] = (mtex, image)
'''
if mtex.use_map_color_spec: # specular color
image_map["map_Ks"] = (mtex, image)
if mtex.use_map_hardness: # specular hardness/glossiness
image_map["map_Ns"] = (mtex, image)
if mtex.use_map_alpha:
image_map["map_d"] = (mtex, image)
if mtex.use_map_translucency:
image_map["map_Tr"] = (mtex, image)
if mtex.use_map_normal:
image_map["map_Bump"] = (mtex, image)
if mtex.use_map_displacement:
image_map["disp"] = (mtex, image)
if mtex.use_map_color_diffuse and (mtex.texture_coords == 'REFLECTION'):
image_map["refl"] = (mtex, image)
if mtex.use_map_emit:
image_map["map_Ke"] = (mtex, image)
for key, (mtex, image) in sorted(image_map.items()):
filepath = bpy_extras.io_utils.path_reference(image.filepath, source_dir, dest_dir,
path_mode, "", copy_set, image.library)
options = []
if key == "map_Bump":
if mtex.normal_factor != 1.0:
options.append('-bm %.6f' % mtex.normal_factor)
if mtex.offset != Vector((0.0, 0.0, 0.0)):
options.append('-o %.6f %.6f %.6f' % mtex.offset[:])
if mtex.scale != Vector((1.0, 1.0, 1.0)):
options.append('-s %.6f %.6f %.6f' % mtex.scale[:])
if options:
fw('%s %s %s\n' % (key, " ".join(options), repr(filepath)[1:-1]))
else:
fw('%s %s\n' % (key, repr(filepath)[1:-1]))
def test_nurbs_compat(ob):
if ob.type != 'CURVE':
return False
for nu in ob.data.splines:
if nu.point_count_v == 1 and nu.type != 'BEZIER': # not a surface and not bezier
return True
return False
def write_nurb(fw, ob, ob_mat):
tot_verts = 0
cu = ob.data
# use negative indices
for nu in cu.splines:
if nu.type == 'POLY':
DEG_ORDER_U = 1
else:
DEG_ORDER_U = nu.order_u - 1 # odd but tested to be correct
if nu.type == 'BEZIER':
print("\tWarning, bezier curve:", ob.name, "only poly and nurbs curves supported")
continue
if nu.point_count_v > 1:
print("\tWarning, surface:", ob.name, "only poly and nurbs curves supported")
continue
if len(nu.points) <= DEG_ORDER_U:
print("\tWarning, order_u is lower then vert count, skipping:", ob.name)
continue
pt_num = 0
do_closed = nu.use_cyclic_u
do_endpoints = (do_closed == 0) and nu.use_endpoint_u
for pt in nu.points:
fw('v %.6f %.6f %.6f\n' % (ob_mat * pt.co.to_3d())[:])
pt_num += 1
tot_verts += pt_num
fw('g %s\n' % (name_compat(ob.name))) # name_compat(ob.getData(1)) could use the data name too
fw('cstype bspline\n') # not ideal, hard coded
fw('deg %d\n' % DEG_ORDER_U) # not used for curves but most files have it still
curve_ls = [-(i + 1) for i in range(pt_num)]
# 'curv' keyword
if do_closed:
if DEG_ORDER_U == 1:
pt_num += 1
curve_ls.append(-1)
else:
pt_num += DEG_ORDER_U
curve_ls = curve_ls + curve_ls[0:DEG_ORDER_U]
fw('curv 0.0 1.0 %s\n' % (" ".join([str(i) for i in curve_ls]))) # Blender has no U and V values for the curve
# 'parm' keyword
tot_parm = (DEG_ORDER_U + 1) + pt_num
tot_parm_div = float(tot_parm - 1)
parm_ls = [(i / tot_parm_div) for i in range(tot_parm)]
if do_endpoints: # end points, force param
for i in range(DEG_ORDER_U + 1):
parm_ls[i] = 0.0
parm_ls[-(1 + i)] = 1.0
fw("parm u %s\n" % " ".join(["%.6f" % i for i in parm_ls]))
fw('end\n')
return tot_verts
def write_file(filepath, objects, scene,
EXPORT_TRI=False,
EXPORT_EDGES=False,
EXPORT_SMOOTH_GROUPS=False,
EXPORT_SMOOTH_GROUPS_BITFLAGS=False,
EXPORT_NORMALS=False,
EXPORT_UV=True,
EXPORT_MTL=True,
EXPORT_APPLY_MODIFIERS=True,
EXPORT_APPLY_MODIFIERS_RENDER=False,
EXPORT_BLEN_OBS=True,
EXPORT_GROUP_BY_OB=False,
EXPORT_GROUP_BY_MAT=False,
EXPORT_KEEP_VERT_ORDER=False,
EXPORT_POLYGROUPS=False,
EXPORT_CURVE_AS_NURBS=True,
EXPORT_GLOBAL_MATRIX=None,
EXPORT_PATH_MODE='AUTO',
progress=ProgressReport(),
):
"""
    Basic write function. The context and options must already be set.
    This can be accessed externally, e.g.:
write( 'c:\\test\\foobar.obj', Blender.Object.GetSelected() ) # Using default options.
"""
if EXPORT_GLOBAL_MATRIX is None:
EXPORT_GLOBAL_MATRIX = mathutils.Matrix()
def veckey3d(v):
return round(v.x, 4), round(v.y, 4), round(v.z, 4)
def veckey2d(v):
return round(v[0], 4), round(v[1], 4)
def findVertexGroupName(face, vWeightMap):
"""
        Searches the vertex weight map to see which groups are assigned to a given face.
        We use a frequency system in order to sort out the name, because a given vertex
        can belong to two or more groups at the same time. To find the right name for the
        face we list all the possible vertex group names with their frequency and then
        sort by frequency in descending order. The top element, the one shared by the
        highest number of vertices, is the face's group.
"""
weightDict = {}
for vert_index in face.vertices:
vWeights = vWeightMap[vert_index]
for vGroupName, weight in vWeights:
weightDict[vGroupName] = weightDict.get(vGroupName, 0.0) + weight
if weightDict:
return max((weight, vGroupName) for vGroupName, weight in weightDict.items())[1]
else:
return '(null)'
with ProgressReportSubstep(progress, 2, "OBJ Export path: %r" % filepath, "OBJ Export Finished") as subprogress1:
with open(filepath, "w", encoding="utf8", newline="\n") as f:
fw = f.write
# Write Header
fw('# Blender v%s OBJ File: %r\n' % (bpy.app.version_string, os.path.basename(bpy.data.filepath)))
fw('# www.blender.org\n')
# Tell the obj file what material file to use.
if EXPORT_MTL:
mtlfilepath = os.path.splitext(filepath)[0] + ".mtl"
# filepath can contain non utf8 chars, use repr
fw('mtllib %s\n' % repr(os.path.basename(mtlfilepath))[1:-1])
# Initialize totals, these are updated each object
totverts = totuvco = totno = 1
face_vert_index = 1
# A Dict of Materials
# (material.name, image.name):matname_imagename # matname_imagename has gaps removed.
mtl_dict = {}
# Used to reduce the usage of matname_texname materials, which can become annoying in case of
# repeated exports/imports, yet keeping unique mat names per keys!
# mtl_name: (material.name, image.name)
mtl_rev_dict = {}
copy_set = set()
# Get all meshes
subprogress1.enter_substeps(len(objects))
for i, ob_main in enumerate(objects):
# ignore dupli children
if ob_main.parent and ob_main.parent.dupli_type in {'VERTS', 'FACES'}:
# XXX
subprogress1.step("Ignoring %s, dupli child..." % ob_main.name)
continue
obs = [(ob_main, ob_main.matrix_world)]
if ob_main.dupli_type != 'NONE':
# XXX
print('creating dupli_list on', ob_main.name)
ob_main.dupli_list_create(scene)
obs += [(dob.object, dob.matrix) for dob in ob_main.dupli_list]
# XXX debug print
print(ob_main.name, 'has', len(obs) - 1, 'dupli children')
subprogress1.enter_substeps(len(obs))
for ob, ob_mat in obs:
with ProgressReportSubstep(subprogress1, 6) as subprogress2:
uv_unique_count = no_unique_count = 0
# Nurbs curve support
if EXPORT_CURVE_AS_NURBS and test_nurbs_compat(ob):
ob_mat = EXPORT_GLOBAL_MATRIX * ob_mat
totverts += write_nurb(fw, ob, ob_mat)
continue
# END NURBS
try:
me = ob.to_mesh(scene, EXPORT_APPLY_MODIFIERS, calc_tessface=False,
settings='RENDER' if EXPORT_APPLY_MODIFIERS_RENDER else 'PREVIEW')
except RuntimeError:
me = None
if me is None:
continue
me.transform(EXPORT_GLOBAL_MATRIX * ob_mat)
if EXPORT_TRI:
# _must_ do this first since it re-allocs arrays
mesh_triangulate(me)
if EXPORT_UV:
faceuv = len(me.uv_textures) > 0
if faceuv:
uv_texture = me.uv_textures.active.data[:]
uv_layer = me.uv_layers.active.data[:]
else:
faceuv = False
me_verts = me.vertices[:]
# Make our own list so it can be sorted to reduce context switching
face_index_pairs = [(face, index) for index, face in enumerate(me.polygons)]
# faces = [ f for f in me.tessfaces ]
if EXPORT_EDGES:
edges = me.edges
else:
edges = []
if not (len(face_index_pairs) + len(edges) + len(me.vertices)): # Make sure there is something to write
# clean up
bpy.data.meshes.remove(me)
continue # dont bother with this mesh.
if EXPORT_NORMALS and face_index_pairs:
me.calc_normals_split()
# No need to call me.free_normals_split later, as this mesh is deleted anyway!
loops = me.loops
if (EXPORT_SMOOTH_GROUPS or EXPORT_SMOOTH_GROUPS_BITFLAGS) and face_index_pairs:
smooth_groups, smooth_groups_tot = me.calc_smooth_groups(EXPORT_SMOOTH_GROUPS_BITFLAGS)
if smooth_groups_tot <= 1:
smooth_groups, smooth_groups_tot = (), 0
else:
smooth_groups, smooth_groups_tot = (), 0
materials = me.materials[:]
material_names = [m.name if m else None for m in materials]
# avoid bad index errors
if not materials:
materials = [None]
material_names = [name_compat(None)]
# Sort by Material, then images
# so we dont over context switch in the obj file.
if EXPORT_KEEP_VERT_ORDER:
pass
else:
if faceuv:
if smooth_groups:
sort_func = lambda a: (a[0].material_index,
hash(uv_texture[a[1]].image),
smooth_groups[a[1]] if a[0].use_smooth else False)
else:
sort_func = lambda a: (a[0].material_index,
hash(uv_texture[a[1]].image),
a[0].use_smooth)
elif len(materials) > 1:
if smooth_groups:
sort_func = lambda a: (a[0].material_index,
smooth_groups[a[1]] if a[0].use_smooth else False)
else:
sort_func = lambda a: (a[0].material_index,
a[0].use_smooth)
else:
# no materials
if smooth_groups:
sort_func = lambda a: smooth_groups[a[1] if a[0].use_smooth else False]
else:
sort_func = lambda a: a[0].use_smooth
face_index_pairs.sort(key=sort_func)
del sort_func
# Set the default mat to no material and no image.
contextMat = 0, 0 # Can never be this, so we will label a new material the first chance we get.
contextSmooth = None # Will either be true or false, set bad to force initialization switch.
if EXPORT_BLEN_OBS or EXPORT_GROUP_BY_OB:
name1 = ob.name
name2 = ob.data.name
if name1 == name2:
obnamestring = name_compat(name1)
else:
obnamestring = '%s_%s' % (name_compat(name1), name_compat(name2))
if EXPORT_BLEN_OBS:
fw('o %s\n' % obnamestring) # Write Object name
else: # if EXPORT_GROUP_BY_OB:
fw('g %s\n' % obnamestring)
subprogress2.step()
# Vert
for v in me_verts:
fw('v %.6f %.6f %.6f\n' % v.co[:])
subprogress2.step()
# UV
if faceuv:
# in case removing some of these dont get defined.
uv = f_index = uv_index = uv_key = uv_val = uv_ls = None
uv_face_mapping = [None] * len(face_index_pairs)
uv_dict = {}
uv_get = uv_dict.get
for f, f_index in face_index_pairs:
uv_ls = uv_face_mapping[f_index] = []
for uv_index, l_index in enumerate(f.loop_indices):
uv = uv_layer[l_index].uv
# include the vertex index in the key so we don't share UV's between vertices,
# allowed by the OBJ spec but can cause issues for other importers, see: T47010.
# this works too, shared UV's for all verts
#~ uv_key = veckey2d(uv)
uv_key = loops[l_index].vertex_index, veckey2d(uv)
uv_val = uv_get(uv_key)
if uv_val is None:
uv_val = uv_dict[uv_key] = uv_unique_count
fw('vt %.6f %.6f\n' % uv[:])
uv_unique_count += 1
uv_ls.append(uv_val)
del uv_dict, uv, f_index, uv_index, uv_ls, uv_get, uv_key, uv_val
# Only need uv_unique_count and uv_face_mapping
subprogress2.step()
# NORMAL, Smooth/Non smoothed.
if EXPORT_NORMALS:
no_key = no_val = None
normals_to_idx = {}
no_get = normals_to_idx.get
loops_to_normals = [0] * len(loops)
for f, f_index in face_index_pairs:
for l_idx in f.loop_indices:
no_key = veckey3d(loops[l_idx].normal)
no_val = no_get(no_key)
if no_val is None:
no_val = normals_to_idx[no_key] = no_unique_count
fw('vn %.4f %.4f %.4f\n' % no_key)
no_unique_count += 1
loops_to_normals[l_idx] = no_val
del normals_to_idx, no_get, no_key, no_val
else:
loops_to_normals = []
if not faceuv:
f_image = None
subprogress2.step()
# XXX
if EXPORT_POLYGROUPS:
# Retrieve the list of vertex groups
vertGroupNames = ob.vertex_groups.keys()
if vertGroupNames:
currentVGroup = ''
# Create a dictionary keyed by face id and listing, for each vertex, the vertex groups it belongs to
vgroupsMap = [[] for _i in range(len(me_verts))]
for v_idx, v_ls in enumerate(vgroupsMap):
v_ls[:] = [(vertGroupNames[g.group], g.weight) for g in me_verts[v_idx].groups]
for f, f_index in face_index_pairs:
f_smooth = f.use_smooth
if f_smooth and smooth_groups:
f_smooth = smooth_groups[f_index]
f_mat = min(f.material_index, len(materials) - 1)
if faceuv:
tface = uv_texture[f_index]
f_image = tface.image
# MAKE KEY
if faceuv and f_image: # Object is always true.
key = material_names[f_mat], f_image.name
else:
key = material_names[f_mat], None # No image, use None instead.
# Write the vertex group
if EXPORT_POLYGROUPS:
if vertGroupNames:
# find what vertext group the face belongs to
vgroup_of_face = findVertexGroupName(f, vgroupsMap)
if vgroup_of_face != currentVGroup:
currentVGroup = vgroup_of_face
fw('g %s\n' % vgroup_of_face)
# CHECK FOR CONTEXT SWITCH
if key == contextMat:
pass # Context already switched, dont do anything
else:
if key[0] is None and key[1] is None:
# Write a null material, since we know the context has changed.
if EXPORT_GROUP_BY_MAT:
# can be mat_image or (null)
fw("g %s_%s\n" % (name_compat(ob.name), name_compat(ob.data.name)))
if EXPORT_MTL:
fw("usemtl (null)\n") # mat, image
else:
mat_data = mtl_dict.get(key)
if not mat_data:
# First add to global dict so we can export to mtl
# Then write mtl
# Make a new names from the mat and image name,
# converting any spaces to underscores with name_compat.
# If none image dont bother adding it to the name
# Try to avoid as much as possible adding texname (or other things)
# to the mtl name (see [#32102])...
mtl_name = "%s" % name_compat(key[0])
if mtl_rev_dict.get(mtl_name, None) not in {key, None}:
if key[1] is None:
tmp_ext = "_NONE"
else:
tmp_ext = "_%s" % name_compat(key[1])
i = 0
while mtl_rev_dict.get(mtl_name + tmp_ext, None) not in {key, None}:
i += 1
tmp_ext = "_%3d" % i
mtl_name += tmp_ext
mat_data = mtl_dict[key] = mtl_name, materials[f_mat], f_image
mtl_rev_dict[mtl_name] = key
if EXPORT_GROUP_BY_MAT:
# can be mat_image or (null)
fw("g %s_%s_%s\n" % (name_compat(ob.name), name_compat(ob.data.name), mat_data[0]))
if EXPORT_MTL:
fw("usemtl %s\n" % mat_data[0]) # can be mat_image or (null)
contextMat = key
if f_smooth != contextSmooth:
if f_smooth: # on now off
if smooth_groups:
f_smooth = smooth_groups[f_index]
fw('s %d\n' % f_smooth)
else:
fw('s 1\n')
else: # was off now on
fw('s off\n')
contextSmooth = f_smooth
f_v = [(vi, me_verts[v_idx], l_idx)
for vi, (v_idx, l_idx) in enumerate(zip(f.vertices, f.loop_indices))]
fw('f')
if faceuv:
if EXPORT_NORMALS:
for vi, v, li in f_v:
fw(" %d/%d/%d" % (totverts + v.index,
totuvco + uv_face_mapping[f_index][vi],
totno + loops_to_normals[li],
)) # vert, uv, normal
else: # No Normals
for vi, v, li in f_v:
fw(" %d/%d" % (totverts + v.index,
totuvco + uv_face_mapping[f_index][vi],
)) # vert, uv
face_vert_index += len(f_v)
else: # No UV's
if EXPORT_NORMALS:
for vi, v, li in f_v:
fw(" %d//%d" % (totverts + v.index, totno + loops_to_normals[li]))
else: # No Normals
for vi, v, li in f_v:
fw(" %d" % (totverts + v.index))
fw('\n')
subprogress2.step()
# Write edges.
if EXPORT_EDGES:
for ed in edges:
if ed.is_loose:
fw('l %d %d\n' % (totverts + ed.vertices[0], totverts + ed.vertices[1]))
# Make the indices global rather then per mesh
totverts += len(me_verts)
totuvco += uv_unique_count
totno += no_unique_count
# clean up
bpy.data.meshes.remove(me)
if ob_main.dupli_type != 'NONE':
ob_main.dupli_list_clear()
subprogress1.leave_substeps("Finished writing geometry of '%s'." % ob_main.name)
subprogress1.leave_substeps()
subprogress1.step("Finished exporting geometry, now exporting materials")
# Now we have all our materials, save them
if EXPORT_MTL:
write_mtl(scene, mtlfilepath, EXPORT_PATH_MODE, copy_set, mtl_dict)
# copy all collected files.
bpy_extras.io_utils.path_reference_copy(copy_set)
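# Illustrative sketch (editor's addition, not part of the original exporter): a minimal
# direct call to write_file() for the currently selected objects. The output path and
# the option subset are placeholder assumptions; inside Blender the save() entry point
# further below is the supported way to drive this module.
def _example_write_selected(filepath="/tmp/example.obj"):
    scene = bpy.context.scene
    write_file(filepath, bpy.context.selected_objects, scene,
               EXPORT_TRI=True, EXPORT_NORMALS=True, EXPORT_UV=True)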
def _write(context, filepath,
EXPORT_TRI, # ok
EXPORT_EDGES,
EXPORT_SMOOTH_GROUPS,
EXPORT_SMOOTH_GROUPS_BITFLAGS,
EXPORT_NORMALS, # ok
EXPORT_UV, # ok
EXPORT_MTL,
EXPORT_APPLY_MODIFIERS, # ok
EXPORT_APPLY_MODIFIERS_RENDER, # ok
EXPORT_BLEN_OBS,
EXPORT_GROUP_BY_OB,
EXPORT_GROUP_BY_MAT,
EXPORT_KEEP_VERT_ORDER,
EXPORT_POLYGROUPS,
EXPORT_CURVE_AS_NURBS,
EXPORT_SEL_ONLY, # ok
EXPORT_ANIMATION,
EXPORT_GLOBAL_MATRIX,
EXPORT_PATH_MODE, # Not used
):
with ProgressReport(context.window_manager) as progress:
base_name, ext = os.path.splitext(filepath)
context_name = [base_name, '', '', ext] # Base name, scene name, frame number, extension
scene = context.scene
# Exit edit mode before exporting, so current object states are exported properly.
if bpy.ops.object.mode_set.poll():
bpy.ops.object.mode_set(mode='OBJECT')
orig_frame = scene.frame_current
# Export an animation?
if EXPORT_ANIMATION:
scene_frames = range(scene.frame_start, scene.frame_end + 1) # Up to and including the end frame.
else:
scene_frames = [orig_frame] # Dont export an animation.
# Loop through all frames in the scene and export.
progress.enter_substeps(len(scene_frames))
for frame in scene_frames:
if EXPORT_ANIMATION: # Add frame to the filepath.
context_name[2] = '_%.6d' % frame
scene.frame_set(frame, 0.0)
if EXPORT_SEL_ONLY:
objects = context.selected_objects
else:
objects = scene.objects
full_path = ''.join(context_name)
# erm... bit of a problem here, this can overwrite files when exporting frames. not too bad.
# EXPORT THE FILE.
progress.enter_substeps(1)
write_file(full_path, objects, scene,
EXPORT_TRI,
EXPORT_EDGES,
EXPORT_SMOOTH_GROUPS,
EXPORT_SMOOTH_GROUPS_BITFLAGS,
EXPORT_NORMALS,
EXPORT_UV,
EXPORT_MTL,
EXPORT_APPLY_MODIFIERS,
EXPORT_APPLY_MODIFIERS_RENDER,
EXPORT_BLEN_OBS,
EXPORT_GROUP_BY_OB,
EXPORT_GROUP_BY_MAT,
EXPORT_KEEP_VERT_ORDER,
EXPORT_POLYGROUPS,
EXPORT_CURVE_AS_NURBS,
EXPORT_GLOBAL_MATRIX,
EXPORT_PATH_MODE,
progress,
)
progress.leave_substeps()
scene.frame_set(orig_frame, 0.0)
progress.leave_substeps()
"""
Currently the exporter lacks these features:
* multiple scene export (only active scene is written)
* particles
"""
def save(context,
filepath,
*,
use_triangles=False,
use_edges=True,
use_normals=False,
use_smooth_groups=False,
use_smooth_groups_bitflags=False,
use_uvs=True,
use_materials=True,
use_mesh_modifiers=True,
use_mesh_modifiers_render=False,
use_blen_objects=True,
group_by_object=False,
group_by_material=False,
keep_vertex_order=False,
use_vertex_groups=False,
use_nurbs=True,
use_selection=True,
use_animation=False,
global_matrix=None,
path_mode='AUTO'
):
_write(context, filepath,
EXPORT_TRI=use_triangles,
EXPORT_EDGES=use_edges,
EXPORT_SMOOTH_GROUPS=use_smooth_groups,
EXPORT_SMOOTH_GROUPS_BITFLAGS=use_smooth_groups_bitflags,
EXPORT_NORMALS=use_normals,
EXPORT_UV=use_uvs,
EXPORT_MTL=use_materials,
EXPORT_APPLY_MODIFIERS=use_mesh_modifiers,
EXPORT_APPLY_MODIFIERS_RENDER=use_mesh_modifiers_render,
EXPORT_BLEN_OBS=use_blen_objects,
EXPORT_GROUP_BY_OB=group_by_object,
EXPORT_GROUP_BY_MAT=group_by_material,
EXPORT_KEEP_VERT_ORDER=keep_vertex_order,
EXPORT_POLYGROUPS=use_vertex_groups,
EXPORT_CURVE_AS_NURBS=use_nurbs,
EXPORT_SEL_ONLY=use_selection,
EXPORT_ANIMATION=use_animation,
EXPORT_GLOBAL_MATRIX=global_matrix,
EXPORT_PATH_MODE=path_mode,
)
return {'FINISHED'}
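# Illustrative sketch (editor's addition): how a script or operator might invoke save().
# The path is a placeholder assumption; the shipped add-on's export operator (not shown
# here) builds these keyword arguments from its UI properties before calling save().
def _example_save_selection(filepath="/tmp/example.obj"):
    return save(bpy.context, filepath, use_selection=True, use_triangles=True)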
| gpl-3.0 | -8,155,557,614,567,815,000 | 44.181925 | 132 | 0.444317 | false |
rwl/PyCIM | CIM15/IEC61970/Informative/InfERPSupport/OrgErpPersonRole.py | 1 | 3445 | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.IEC61970.Informative.InfCommon.Role import Role
class OrgErpPersonRole(Role):
"""Roles played between Persons and Organisations.Roles played between Persons and Organisations.
"""
def __init__(self, clientID='', ErpPerson=None, ErpOrganisation=None, *args, **kw_args):
"""Initialises a new 'OrgErpPersonRole' instance.
@param clientID: Identifiers of the person held by an organisation, such as a government agency (federal, state, province, city, county), financial institutions, etc.
@param ErpPerson:
@param ErpOrganisation:
"""
#: Identifiers of the person held by an organisation, such as a government agency (federal, state, province, city, county), financial institutions, etc.
self.clientID = clientID
self._ErpPerson = None
self.ErpPerson = ErpPerson
self._ErpOrganisation = None
self.ErpOrganisation = ErpOrganisation
super(OrgErpPersonRole, self).__init__(*args, **kw_args)
_attrs = ["clientID"]
_attr_types = {"clientID": str}
_defaults = {"clientID": ''}
_enums = {}
_refs = ["ErpPerson", "ErpOrganisation"]
_many_refs = []
def getErpPerson(self):
return self._ErpPerson
def setErpPerson(self, value):
if self._ErpPerson is not None:
filtered = [x for x in self.ErpPerson.ErpOrganisationRoles if x != self]
self._ErpPerson._ErpOrganisationRoles = filtered
self._ErpPerson = value
if self._ErpPerson is not None:
if self not in self._ErpPerson._ErpOrganisationRoles:
self._ErpPerson._ErpOrganisationRoles.append(self)
ErpPerson = property(getErpPerson, setErpPerson)
def getErpOrganisation(self):
return self._ErpOrganisation
def setErpOrganisation(self, value):
if self._ErpOrganisation is not None:
filtered = [x for x in self.ErpOrganisation.ErpPersonRoles if x != self]
self._ErpOrganisation._ErpPersonRoles = filtered
self._ErpOrganisation = value
if self._ErpOrganisation is not None:
if self not in self._ErpOrganisation._ErpPersonRoles:
self._ErpOrganisation._ErpPersonRoles.append(self)
ErpOrganisation = property(getErpOrganisation, setErpOrganisation)
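# Illustrative sketch (editor's addition): assigning through the two properties above
# keeps both ends of the association in sync. The arguments are assumed to be CIM15
# ErpPerson and ErpOrganisation instances exposing ErpOrganisationRoles / ErpPersonRoles.
def _example_link_role(person, organisation, client_id='12345'):
    role = OrgErpPersonRole(clientID=client_id)
    role.ErpPerson = person              # also appends role to person.ErpOrganisationRoles
    role.ErpOrganisation = organisation  # also appends role to organisation.ErpPersonRoles
    return role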
| mit | -5,857,759,180,858,380,000 | 40.506024 | 175 | 0.697823 | false |
OpenMined/PySyft | packages/grid/apps/worker/src/main/core/groups/group_ops.py | 1 | 2683 | # stdlib
from datetime import datetime
from datetime import timedelta
from json import dumps
from json import loads
from json.decoder import JSONDecodeError
import logging
from secrets import token_hex
# grid relative
from ..codes import RESPONSE_MSG
from ..database import Group
from ..database import Role
from ..database import User
from ..database import UserGroup
from ..database import db
from ..database.utils import model_to_json
from ..exceptions import AuthorizationError
from ..exceptions import GroupNotFoundError
from ..exceptions import InvalidCredentialsError
from ..exceptions import MissingRequestKeyError
from ..exceptions import PyGridError
from ..exceptions import RoleNotFoundError
from ..exceptions import UserNotFoundError
def create_group(current_user, name):
user_role = Role.query.get(current_user.role)
if user_role is None:
raise RoleNotFoundError
if not user_role.can_create_groups:
raise AuthorizationError
new_group = Group(name=name)
db.session.add(new_group)
db.session.commit()
return model_to_json(new_group)
def get_group(current_user, group_id):
user_role = Role.query.get(current_user.role)
if user_role is None:
raise RoleNotFoundError
if not user_role.can_triage_requests:
raise AuthorizationError
group = Group.query.get(group_id)
if group is None:
raise GroupNotFoundError
return model_to_json(group)
def get_all_groups(current_user):
user_role = Role.query.get(current_user.role)
if user_role is None:
raise RoleNotFoundError
if not user_role.can_triage_requests:
raise AuthorizationError
groups = Group.query.all()
groups = [model_to_json(g) for g in groups]
return groups
def put_group(current_user, group_id, new_fields):
user_role = db.session.query(Role).get(current_user.role)
if user_role is None:
raise RoleNotFoundError
if not user_role.can_create_groups:
raise AuthorizationError
group = db.session.query(Group).get(group_id)
if group is None:
raise GroupNotFoundError
for key, value in new_fields.items():
setattr(group, key, value)
db.session.commit()
return model_to_json(group)
def delete_group(current_user, group_id):
user_role = db.session.query(Role).get(current_user.role)
if user_role is None:
raise RoleNotFoundError
if not user_role.can_create_groups:
raise AuthorizationError
group = db.session.query(Group).get(group_id)
if group is None:
raise GroupNotFoundError
db.session.delete(group)
db.session.commit()
return model_to_json(group)
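# Illustrative sketch (editor's addition): each helper above follows the same pattern --
# resolve the caller's Role, check the relevant permission flag, then act on the Group
# table. A hypothetical caller could chain them as below; `current_user` is assumed to
# be an authenticated User row, and the "id" key is assumed to be present in the dict
# returned by model_to_json.
def _example_group_lifecycle(current_user):
    group = create_group(current_user, "data-scientists")  # requires can_create_groups
    group = put_group(current_user, group["id"], {"name": "ds-team"})
    return delete_group(current_user, group["id"])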
| apache-2.0 | -8,161,439,248,849,288,000 | 24.798077 | 61 | 0.71748 | false |
priendeau/PyNOAAGeoMagIndiceHandler | build/lib.linux-x86_64-2.6/PyNOAAGeoMagIndiceHandler/GeoMagReferences.py | 1 | 8642 | from __future__ import with_statement
import os, sys, re, pynav, time, datetime, pytz, pyaeso, spharm, matplotlib, xml_marshaller, xmlbuilder
from xml_marshaller import xml_marshaller
from xml_marshaller.xml_marshaller import *
from xmlbuilder import XMLBuilder
import numpy as np
from pynav import Pynav
from pyaeso import ets
from bctc import BC_TZ
from bctc.load import yield_load_points
from PyNOAAGeoMagIndiceHandler import decorator
from decorator import DictAssign
class GeoMagReferences( object ):
NodeUpdate=None
class GeoMagReferenceImpl( object ):
FieldReference={ }
SatelliteName=None
LapsInterleave=None
DictReference={
'field':{
'name':'dict',
'value':[ 'RealTimeSolarIndiceReference' ],
'dict':{
'name':'position',
'value':[ 'system' ],
'position':{
'name':'localtion',
'value':[ 'earth','sonde','satellite' ], },
'localtion':{
'name':'site',
'value':[ 'sk-ta3','ace','stereo-a','stereo-b' ] },
'site':{
'name':'detector',
'value':['neutronmonitor','swepam','magnetometer'],
'detector':{
'name':['stringfield','listfield','collectionfield'],
'value':[ 'title','field','laps','url','1m','5m','1h','12h','24h','1w','1m','1y','2y' ],
'stringfield':{
'name':'str',
'value':[ 'title', 'url'] },
'listfield':{
'name':'list',
'value':['field'] },
'collectionfield':{
'name':'dict',
'value':['laps','1m','5m','1h','12h','24h','1w','1m','1y','2y'] }
}
}
}
}
}
RealTimeSolarIndiceReference={
'system':{
'earth':{
'sk-ta3':{
'neutronmonitor':{
'laps':{
'1m':{ 'url':'http://neutronmonitor.ta3.sk/ascii.php?filename=/data/6h.dat' },
'5m':{ 'url':'http://neutronmonitor.ta3.sk/ascii.php?filename=/data/24h.dat'},
'1h':{ 'url':'http://neutronmonitor.ta3.sk/ascii.php?filename=/data/30d.dat' }
}
}
}
},
'satellite':{
'ace':{
'swepam':{
'title':'Solar Wind Electron Proton Alpha Monitor',
'field':['UT Date YR', 'UT Date MO','UT Date DA','UT Date HHMM','Modified Julian Day','Seconds of the Day','S','Density','Speed','Temperature' ],
'laps':{
'1m':{ 'url':"http://www.swpc.noaa.gov/ftpdir/lists/ace/ace_swepam_1m.txt" }
}
}
},
'stereo-a':{
'name':{
'a':{
'field':[ 'UT Date YR', 'UT Date MO', 'UT Date DA', 'UT Date HHMM', 'Modified Julian Day','Seconds of the Day','S','BR','BT','BN','Bt','Lat.','Long.' ],
'magnetometer':{
'laps':{
'1m':{
'url':"http://www.swpc.noaa.gov/ftpdir/lists/stereo/sta_mag_1m.txt" }
}
}
}
}
},
'stereo-b':{
'name':{
'a':{
'field':[ 'UT Date YR', 'UT Date MO', 'UT Date DA', 'UT Date HHMM', 'Modified Julian Day','Seconds of the Day','S','BR','BT','BN','Bt','Lat.','Long.' ],
'magnetometer':{
'laps':{
'1m':{
'url':"http://www.swpc.noaa.gov/ftpdir/lists/stereo/stb_mag_1m.txt" }
}
}
}
}
}
}
}
}
RootName=None
RootNameContent=None
@DictAssign( 'RealTimeSolarIndiceReference' )
def GetRoot( self ):
return self.RootName, self.RootNameContent
@DictAssign( 'RealTimeSolarIndiceReference' )
def SetRoot( self, value ):
DictRef=self.PropertyDictName
self.RootName = value
self.RootNameContent=self.RealTimeSolarIndiceReference[self.RootName]
PropertyRoot=property( GetRoot, SetRoot )
CollectionType=None
CollectionTypeContent=None
@DictAssign( 'RealTimeSolarIndiceReference' )
def GetCollectionType( self ):
return self.CollectionType, self.CollectionTypeContent
@DictAssign( 'RealTimeSolarIndiceReference' )
def SetCollectionType( self, value ):
self.CollectionType = value
self.CollectionTypeContent=self.RealTimeSolarIndiceReference[self.RootName][self.CollectionType]
PropertyCollectionType=property( GetCollectionType, SetCollectionType )
CollectionName=None
CollectionNameContent=None
@DictAssign( 'RealTimeSolarIndiceReference' )
def GetCollectionName( self ):
    return self.CollectionName, self.CollectionNameContent
@DictAssign( 'RealTimeSolarIndiceReference' )
def SetCollectionName( self, value ):
self.CollectionName = value
self.CollectionNameContent=self.RealTimeSolarIndiceReference[self.RootName][self.CollectionType][self.CollectionName]
PropertyCollectionName=property( GetCollectionName, SetCollectionName )
CollectionSection=None
CollectionSectionContent=None
@DictAssign( 'RealTimeSolarIndiceReference' )
def GetCollectionSection( self ):
return self.CollectionSection, self.CollectionSectionContent
@DictAssign( 'RealTimeSolarIndiceReference' )
def SetCollectionSection( self, value ):
self.CollectionSection = value
self.CollectionSectionContent = self.RealTimeSolarIndiceReference[self.RootName][self.CollectionType][self.CollectionName][self.CollectionSection]
PropertyCollectionSection=property( GetCollectionSection, SetCollectionSection )
InstrumentName=None
InstrumentNameContent=None
@DictAssign( 'RealTimeSolarIndiceReference' )
def GetInstrumentName( self ):
return self.InstrumentName, self.InstrumentNameContent
@DictAssign( 'RealTimeSolarIndiceReference' )
def SetInstrumentName( self, value ):
self.InstrumentName = value
self.InstrumentNameContent = self.RealTimeSolarIndiceReference[self.RootName][self.CollectionType][self.CollectionName][self.CollectionSection]
PropertyInstrumentName=property( GetInstrumentName, SetInstrumentName )
RTSIR=None
RTSIRContent=None
@DictAssign( 'RealTimeSolarIndiceReference' )
def GetRTSIR( self ):
return self.RTSIR
@DictAssign( 'RealTimeSolarIndiceReference' )
def SetRTSIR( self, value ):
self.PropertyRoot, self.PropertyCollectionType, self.PropertyCollectionName, self.PropertyCollectionSection = value
    self.RTSIR = self.RealTimeSolarIndiceReference[self.RootName][self.CollectionType][self.CollectionName][self.CollectionSection]
PropertyRTSIR=property( GetRTSIR, SetRTSIR )
### Property By Instrument:
FieldName=None
def GetFieldName( self ):
return self.RTSIR['field']
def SetFieldName( self, value ):
self.FieldName = value
self.RTSIR['field']=self.FieldName
PropertyFieldName=property( GetFieldName, SetFieldName )
LapsValue=None
def GetLapsValue( self ):
return self.RTSIR['laps'][self.LapsValue]
def SetLapsValue( self, value ):
self.LapsValue = value
PropertyLapsValue=property( GetLapsValue, SetLapsValue )
UrlName=None
UrlContent=None
def GetUrlName( self ):
return self.UrlContent
def SetUrlName( self, value ):
if len( value ) == 2 :
self.LapsValue, self.UrlName = value
else:
self.UrlName = value
if self.UrlName:
self.UrlContent = self.RTSIR['laps'][self.LapsValue]['url']
PropertyUrlName=property( GetUrlName, SetUrlName )
Title=None
def GetTitle( self ):
return self.Title
def SetTitle( self, value ):
if value:
self.RTSIR['laps'][self.LapsValue]['title']
PropertyTitle=property( GetTitle, SetTitle )
###self.PropertyInstrumentName, self.PropertyFieldName, self.PropertyLapsValue
def __init__( self , **Kargs ):
    self.Node=self.GeoMagReferenceImpl()
for ItemKey in Kargs.keys( ):
setattr( self.Node, ItemKey, Kargs[ItemKey] )
def UpdateReference( self ):
self.UrlNav=pynav.Pynav()
if __name__.__eq__( '__main__' ):
AGeoLocE=GeoMagReferences()
| bsd-3-clause | -6,327,240,926,643,782,000 | 33.293651 | 171 | 0.5891 | false |
tarasane/h2o-3 | h2o-py/tests/testdir_algos/glm/pyunit_NOPASS_random_attack_medium.py | 1 | 4940 | import sys
sys.path.insert(1, "../../../")
import h2o, tests
import random
def random_attack():
def attack(family, train, valid, x, y):
kwargs = {}
kwargs['family'] = family
gaussian_links = ["inverse", "log", "identity"]
binomial_links = ["logit"]
poisson_links = ["log", "identity"]
gamma_links = ["inverse", "log", "identity"]
# randomly select parameters and their corresponding values
if random.randint(0,1): kwargs['max_iterations'] = random.randint(1,50)
if random.random() > 0.8: kwargs['beta_epsilon'] = random.random()
if random.randint(0,1): kwargs['solver'] = ["IRLSM", "L_BFGS"][random.randint(0,1)]
if random.randint(0,1): kwargs['standardize'] = [True, False][random.randint(0,1)]
if random.randint(0,1):
if family == "gaussian": kwargs['link'] = gaussian_links[random.randint(0,2)]
elif family == "binomial": kwargs['link'] = binomial_links[random.randint(0,0)]
elif family == "poisson" : kwargs['link'] = poisson_links[random.randint(0,1)]
elif family == "gamma" : kwargs['link'] = gamma_links[random.randint(0,2)]
if random.randint(0,1): kwargs['alpha'] = [random.random()]
if family == "binomial":
if random.randint(0,1): kwargs['prior'] = random.random()
if random.randint(0,1): kwargs['lambda_search'] = [True, False][random.randint(0,1)]
if 'lambda_search' in kwargs.keys():
if random.randint(0,1): kwargs['nlambdas'] = random.randint(2,10)
do_validation = [True, False][random.randint(0,1)]
# beta constraints
if random.randint(0,1):
bc = []
for n in x:
name = train.names[n]
lower_bound = random.uniform(-1,1)
upper_bound = lower_bound + random.random()
bc.append([name, lower_bound, upper_bound])
beta_constraints = h2o.H2OFrame(python_obj=bc)
beta_constraints.setNames(['names', 'lower_bounds', 'upper_bounds'])
kwargs['beta_constraints'] = beta_constraints.send_frame()
# display the parameters and their corresponding values
print "-----------------------"
print "x: {0}".format(x)
print "y: {0}".format(y)
print "validation: {0}".format(do_validation)
for k, v in zip(kwargs.keys(), kwargs.values()):
if k == 'beta_constraints':
print k + ": "
beta_constraints.show()
else:
print k + ": {0}".format(v)
if do_validation: h2o.glm(x=train[x], y=train[y], validation_x=valid[x], validation_y=valid[y], **kwargs)
else: h2o.glm(x=train[x], y=train[y], **kwargs)
print "-----------------------"
print "Import and data munging..."
pros = h2o.upload_file(h2o.locate("smalldata/prostate/prostate.csv.zip"))
pros[1] = pros[1].asfactor()
r = pros[0].runif() # a column of length pros.nrow with values between 0 and 1
# ~80/20 train/validation split
pros_train = pros[r > .2]
pros_valid = pros[r <= .2]
cars = h2o.upload_file(h2o.locate("smalldata/junit/cars.csv"))
r = cars[0].runif()
cars_train = cars[r > .2]
cars_valid = cars[r <= .2]
print
print "======================================================================"
print "============================== Binomial =============================="
print "======================================================================"
for i in range(10):
attack("binomial", pros_train, pros_valid, random.sample([2,3,4,5,6,7,8],random.randint(1,7)), 1)
print
print "======================================================================"
print "============================== Gaussian =============================="
print "======================================================================"
for i in range(10):
attack("gaussian", cars_train, cars_valid, random.sample([2,3,4,5,6,7],random.randint(1,6)), 1)
print
print "======================================================================"
print "============================== Poisson =============================="
print "======================================================================"
for i in range(10):
attack("poisson", cars_train, cars_valid, random.sample([1,3,4,5,6,7],random.randint(1,6)), 2)
print
print "======================================================================"
print "============================== Gamma =============================="
print "======================================================================"
for i in range(10):
attack("gamma", pros_train, pros_valid, random.sample([1,2,3,5,6,7,8],random.randint(1,7)), 4)
if __name__ == "__main__":
tests.run_test(sys.argv, random_attack)
| apache-2.0 | -440,998,212,565,106,400 | 46.5 | 113 | 0.470445 | false |
GoogleCloudPlatform/declarative-resource-client-library | python/services/networkservices/beta/endpoint_config_selector.py | 1 | 21391 | # Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from connector import channel
from google3.cloud.graphite.mmv2.services.google.network_services import (
endpoint_config_selector_pb2,
)
from google3.cloud.graphite.mmv2.services.google.network_services import (
endpoint_config_selector_pb2_grpc,
)
from typing import List
class EndpointConfigSelector(object):
def __init__(
self,
name: str = None,
create_time: str = None,
update_time: str = None,
labels: dict = None,
type: str = None,
authorization_policy: str = None,
http_filters: dict = None,
endpoint_matcher: dict = None,
traffic_port_selector: dict = None,
description: str = None,
server_tls_policy: str = None,
client_tls_policy: str = None,
project: str = None,
location: str = None,
service_account_file: str = "",
):
channel.initialize()
self.name = name
self.labels = labels
self.type = type
self.authorization_policy = authorization_policy
self.http_filters = http_filters
self.endpoint_matcher = endpoint_matcher
self.traffic_port_selector = traffic_port_selector
self.description = description
self.server_tls_policy = server_tls_policy
self.client_tls_policy = client_tls_policy
self.project = project
self.location = location
self.service_account_file = service_account_file
def apply(self):
stub = endpoint_config_selector_pb2_grpc.NetworkservicesBetaEndpointConfigSelectorServiceStub(
channel.Channel()
)
request = (
endpoint_config_selector_pb2.ApplyNetworkservicesBetaEndpointConfigSelectorRequest()
)
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.labels):
request.resource.labels = Primitive.to_proto(self.labels)
if EndpointConfigSelectorTypeEnum.to_proto(self.type):
request.resource.type = EndpointConfigSelectorTypeEnum.to_proto(self.type)
if Primitive.to_proto(self.authorization_policy):
request.resource.authorization_policy = Primitive.to_proto(
self.authorization_policy
)
if EndpointConfigSelectorHttpFilters.to_proto(self.http_filters):
request.resource.http_filters.CopyFrom(
EndpointConfigSelectorHttpFilters.to_proto(self.http_filters)
)
else:
request.resource.ClearField("http_filters")
if EndpointConfigSelectorEndpointMatcher.to_proto(self.endpoint_matcher):
request.resource.endpoint_matcher.CopyFrom(
EndpointConfigSelectorEndpointMatcher.to_proto(self.endpoint_matcher)
)
else:
request.resource.ClearField("endpoint_matcher")
if EndpointConfigSelectorTrafficPortSelector.to_proto(
self.traffic_port_selector
):
request.resource.traffic_port_selector.CopyFrom(
EndpointConfigSelectorTrafficPortSelector.to_proto(
self.traffic_port_selector
)
)
else:
request.resource.ClearField("traffic_port_selector")
if Primitive.to_proto(self.description):
request.resource.description = Primitive.to_proto(self.description)
if Primitive.to_proto(self.server_tls_policy):
request.resource.server_tls_policy = Primitive.to_proto(
self.server_tls_policy
)
if Primitive.to_proto(self.client_tls_policy):
request.resource.client_tls_policy = Primitive.to_proto(
self.client_tls_policy
)
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
if Primitive.to_proto(self.location):
request.resource.location = Primitive.to_proto(self.location)
request.service_account_file = self.service_account_file
response = stub.ApplyNetworkservicesBetaEndpointConfigSelector(request)
self.name = Primitive.from_proto(response.name)
self.create_time = Primitive.from_proto(response.create_time)
self.update_time = Primitive.from_proto(response.update_time)
self.labels = Primitive.from_proto(response.labels)
self.type = EndpointConfigSelectorTypeEnum.from_proto(response.type)
self.authorization_policy = Primitive.from_proto(response.authorization_policy)
self.http_filters = EndpointConfigSelectorHttpFilters.from_proto(
response.http_filters
)
self.endpoint_matcher = EndpointConfigSelectorEndpointMatcher.from_proto(
response.endpoint_matcher
)
self.traffic_port_selector = EndpointConfigSelectorTrafficPortSelector.from_proto(
response.traffic_port_selector
)
self.description = Primitive.from_proto(response.description)
self.server_tls_policy = Primitive.from_proto(response.server_tls_policy)
self.client_tls_policy = Primitive.from_proto(response.client_tls_policy)
self.project = Primitive.from_proto(response.project)
self.location = Primitive.from_proto(response.location)
def delete(self):
stub = endpoint_config_selector_pb2_grpc.NetworkservicesBetaEndpointConfigSelectorServiceStub(
channel.Channel()
)
request = (
endpoint_config_selector_pb2.DeleteNetworkservicesBetaEndpointConfigSelectorRequest()
)
request.service_account_file = self.service_account_file
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.labels):
request.resource.labels = Primitive.to_proto(self.labels)
if EndpointConfigSelectorTypeEnum.to_proto(self.type):
request.resource.type = EndpointConfigSelectorTypeEnum.to_proto(self.type)
if Primitive.to_proto(self.authorization_policy):
request.resource.authorization_policy = Primitive.to_proto(
self.authorization_policy
)
if EndpointConfigSelectorHttpFilters.to_proto(self.http_filters):
request.resource.http_filters.CopyFrom(
EndpointConfigSelectorHttpFilters.to_proto(self.http_filters)
)
else:
request.resource.ClearField("http_filters")
if EndpointConfigSelectorEndpointMatcher.to_proto(self.endpoint_matcher):
request.resource.endpoint_matcher.CopyFrom(
EndpointConfigSelectorEndpointMatcher.to_proto(self.endpoint_matcher)
)
else:
request.resource.ClearField("endpoint_matcher")
if EndpointConfigSelectorTrafficPortSelector.to_proto(
self.traffic_port_selector
):
request.resource.traffic_port_selector.CopyFrom(
EndpointConfigSelectorTrafficPortSelector.to_proto(
self.traffic_port_selector
)
)
else:
request.resource.ClearField("traffic_port_selector")
if Primitive.to_proto(self.description):
request.resource.description = Primitive.to_proto(self.description)
if Primitive.to_proto(self.server_tls_policy):
request.resource.server_tls_policy = Primitive.to_proto(
self.server_tls_policy
)
if Primitive.to_proto(self.client_tls_policy):
request.resource.client_tls_policy = Primitive.to_proto(
self.client_tls_policy
)
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
if Primitive.to_proto(self.location):
request.resource.location = Primitive.to_proto(self.location)
response = stub.DeleteNetworkservicesBetaEndpointConfigSelector(request)
@classmethod
def list(self, project, location, service_account_file=""):
stub = endpoint_config_selector_pb2_grpc.NetworkservicesBetaEndpointConfigSelectorServiceStub(
channel.Channel()
)
request = (
endpoint_config_selector_pb2.ListNetworkservicesBetaEndpointConfigSelectorRequest()
)
request.service_account_file = service_account_file
request.Project = project
request.Location = location
return stub.ListNetworkservicesBetaEndpointConfigSelector(request).items
def to_proto(self):
resource = (
endpoint_config_selector_pb2.NetworkservicesBetaEndpointConfigSelector()
)
if Primitive.to_proto(self.name):
resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.labels):
resource.labels = Primitive.to_proto(self.labels)
if EndpointConfigSelectorTypeEnum.to_proto(self.type):
resource.type = EndpointConfigSelectorTypeEnum.to_proto(self.type)
if Primitive.to_proto(self.authorization_policy):
resource.authorization_policy = Primitive.to_proto(
self.authorization_policy
)
if EndpointConfigSelectorHttpFilters.to_proto(self.http_filters):
resource.http_filters.CopyFrom(
EndpointConfigSelectorHttpFilters.to_proto(self.http_filters)
)
else:
resource.ClearField("http_filters")
if EndpointConfigSelectorEndpointMatcher.to_proto(self.endpoint_matcher):
resource.endpoint_matcher.CopyFrom(
EndpointConfigSelectorEndpointMatcher.to_proto(self.endpoint_matcher)
)
else:
resource.ClearField("endpoint_matcher")
if EndpointConfigSelectorTrafficPortSelector.to_proto(
self.traffic_port_selector
):
resource.traffic_port_selector.CopyFrom(
EndpointConfigSelectorTrafficPortSelector.to_proto(
self.traffic_port_selector
)
)
else:
resource.ClearField("traffic_port_selector")
if Primitive.to_proto(self.description):
resource.description = Primitive.to_proto(self.description)
if Primitive.to_proto(self.server_tls_policy):
resource.server_tls_policy = Primitive.to_proto(self.server_tls_policy)
if Primitive.to_proto(self.client_tls_policy):
resource.client_tls_policy = Primitive.to_proto(self.client_tls_policy)
if Primitive.to_proto(self.project):
resource.project = Primitive.to_proto(self.project)
if Primitive.to_proto(self.location):
resource.location = Primitive.to_proto(self.location)
return resource
class EndpointConfigSelectorHttpFilters(object):
def __init__(self, http_filters: list = None):
self.http_filters = http_filters
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
endpoint_config_selector_pb2.NetworkservicesBetaEndpointConfigSelectorHttpFilters()
)
if Primitive.to_proto(resource.http_filters):
res.http_filters.extend(Primitive.to_proto(resource.http_filters))
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return EndpointConfigSelectorHttpFilters(
http_filters=Primitive.from_proto(resource.http_filters),
)
class EndpointConfigSelectorHttpFiltersArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [EndpointConfigSelectorHttpFilters.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [EndpointConfigSelectorHttpFilters.from_proto(i) for i in resources]
class EndpointConfigSelectorEndpointMatcher(object):
def __init__(self, metadata_label_matcher: dict = None):
self.metadata_label_matcher = metadata_label_matcher
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
endpoint_config_selector_pb2.NetworkservicesBetaEndpointConfigSelectorEndpointMatcher()
)
if EndpointConfigSelectorEndpointMatcherMetadataLabelMatcher.to_proto(
resource.metadata_label_matcher
):
res.metadata_label_matcher.CopyFrom(
EndpointConfigSelectorEndpointMatcherMetadataLabelMatcher.to_proto(
resource.metadata_label_matcher
)
)
else:
res.ClearField("metadata_label_matcher")
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return EndpointConfigSelectorEndpointMatcher(
metadata_label_matcher=EndpointConfigSelectorEndpointMatcherMetadataLabelMatcher.from_proto(
resource.metadata_label_matcher
),
)
class EndpointConfigSelectorEndpointMatcherArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [EndpointConfigSelectorEndpointMatcher.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [EndpointConfigSelectorEndpointMatcher.from_proto(i) for i in resources]
class EndpointConfigSelectorEndpointMatcherMetadataLabelMatcher(object):
def __init__(
self, metadata_label_match_criteria: str = None, metadata_labels: list = None
):
self.metadata_label_match_criteria = metadata_label_match_criteria
self.metadata_labels = metadata_labels
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
endpoint_config_selector_pb2.NetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcher()
)
if EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelMatchCriteriaEnum.to_proto(
resource.metadata_label_match_criteria
):
res.metadata_label_match_criteria = EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelMatchCriteriaEnum.to_proto(
resource.metadata_label_match_criteria
)
if EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelsArray.to_proto(
resource.metadata_labels
):
res.metadata_labels.extend(
EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelsArray.to_proto(
resource.metadata_labels
)
)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return EndpointConfigSelectorEndpointMatcherMetadataLabelMatcher(
metadata_label_match_criteria=EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelMatchCriteriaEnum.from_proto(
resource.metadata_label_match_criteria
),
metadata_labels=EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelsArray.from_proto(
resource.metadata_labels
),
)
class EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
EndpointConfigSelectorEndpointMatcherMetadataLabelMatcher.to_proto(i)
for i in resources
]
@classmethod
def from_proto(self, resources):
return [
EndpointConfigSelectorEndpointMatcherMetadataLabelMatcher.from_proto(i)
for i in resources
]
class EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabels(object):
def __init__(self, label_name: str = None, label_value: str = None):
self.label_name = label_name
self.label_value = label_value
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
endpoint_config_selector_pb2.NetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabels()
)
if Primitive.to_proto(resource.label_name):
res.label_name = Primitive.to_proto(resource.label_name)
if Primitive.to_proto(resource.label_value):
res.label_value = Primitive.to_proto(resource.label_value)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabels(
label_name=Primitive.from_proto(resource.label_name),
label_value=Primitive.from_proto(resource.label_value),
)
class EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelsArray(
object
):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabels.to_proto(
i
)
for i in resources
]
@classmethod
def from_proto(self, resources):
return [
EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabels.from_proto(
i
)
for i in resources
]
class EndpointConfigSelectorTrafficPortSelector(object):
def __init__(self, ports: list = None):
self.ports = ports
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
endpoint_config_selector_pb2.NetworkservicesBetaEndpointConfigSelectorTrafficPortSelector()
)
if Primitive.to_proto(resource.ports):
res.ports.extend(Primitive.to_proto(resource.ports))
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return EndpointConfigSelectorTrafficPortSelector(
ports=Primitive.from_proto(resource.ports),
)
class EndpointConfigSelectorTrafficPortSelectorArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
EndpointConfigSelectorTrafficPortSelector.to_proto(i) for i in resources
]
@classmethod
def from_proto(self, resources):
return [
EndpointConfigSelectorTrafficPortSelector.from_proto(i) for i in resources
]
class EndpointConfigSelectorTypeEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return endpoint_config_selector_pb2.NetworkservicesBetaEndpointConfigSelectorTypeEnum.Value(
"NetworkservicesBetaEndpointConfigSelectorTypeEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return endpoint_config_selector_pb2.NetworkservicesBetaEndpointConfigSelectorTypeEnum.Name(
resource
)[
len("NetworkservicesBetaEndpointConfigSelectorTypeEnum") :
]
class EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelMatchCriteriaEnum(
object
):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return endpoint_config_selector_pb2.NetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelMatchCriteriaEnum.Value(
"NetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelMatchCriteriaEnum%s"
% resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return endpoint_config_selector_pb2.NetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelMatchCriteriaEnum.Name(
resource
)[
len(
"NetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelMatchCriteriaEnum"
) :
]
class Primitive(object):
@classmethod
def to_proto(self, s):
if not s:
return ""
return s
@classmethod
def from_proto(self, s):
return s
| apache-2.0 | 6,402,314,300,991,686,000 | 36.201739 | 157 | 0.663457 | false |
romana/networking-romana | networking_romana/_i18n.py | 1 | 1404 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import oslo_i18n
DOMAIN = "networking_romana"
_translators = oslo_i18n.TranslatorFactory(domain=DOMAIN)
# The primary translation function using the well-known name "_"
_ = _translators.primary
# The contextual translation function using the name "_C"
# requires oslo.i18n >=2.1.0
_C = _translators.contextual_form
# The plural translation function using the name "_P"
# requires oslo.i18n >=2.1.0
_P = _translators.plural_form
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
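# Illustrative sketch (not part of the original module): typical usage of the
# translators defined above elsewhere in the package might look like:
#
#   from networking_romana._i18n import _, _LW
#   LOG.warning(_LW("Segment %s not found"), segment_id)
#   raise ValueError(_("Invalid CIDR: %s") % cidr)
#
# LOG, segment_id and cidr are hypothetical names used only for this example.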
def get_available_languages():
return oslo_i18n.get_available_languages(DOMAIN)
| apache-2.0 | 6,573,460,745,959,770,000 | 31.651163 | 78 | 0.730057 | false |
caronc/nzbget-subliminal | Subliminal/pkg_resources.py | 1 | 87985 | """Package resource API
--------------------
A resource is a logical file contained within a package, or a logical
subdirectory thereof. The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is. Do not use os.path operations to manipulate resource
names being passed into the API.
The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files. It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
"""
import sys, os, zipimport, time, re, imp, types
try:
frozenset
except NameError:
from sets import ImmutableSet as frozenset
# capture these to bypass sandboxing
from os import utime
try:
from os import mkdir, rename, unlink
WRITE_SUPPORT = True
except ImportError:
# no write support, probably under GAE
WRITE_SUPPORT = False
from os import open as os_open
from os.path import isdir, split
# This marker is used to simplify the process that checks is the
# setuptools package was installed by the Setuptools project
# or by the Distribute project, in case Setuptools creates
# a distribution with the same version.
#
# The bootstrapping script for instance, will check if this
# attribute is present to decide whether to reinstall the package
_distribute = True
def _bypass_ensure_directory(name, mode=0777):
# Sandbox-bypassing version of ensure_directory()
if not WRITE_SUPPORT:
raise IOError('"os.mkdir" not supported on this platform.')
dirname, filename = split(name)
if dirname and filename and not isdir(dirname):
_bypass_ensure_directory(dirname)
mkdir(dirname, mode)
def get_supported_platform():
"""Return this platform's maximum compatible version.
distutils.util.get_platform() normally reports the minimum version
of Mac OS X that would be required to *use* extensions produced by
distutils. But what we want when checking compatibility is to know the
version of Mac OS X that we are *running*. To allow usage of packages that
explicitly require a newer version of Mac OS X, we must also know the
current version of the OS.
If this condition occurs for any other platform with a version in its
platform strings, this function should be extended accordingly.
"""
plat = get_build_platform(); m = macosVersionString.match(plat)
if m is not None and sys.platform == "darwin":
try:
plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
except ValueError:
pass # not Mac OS X
return plat
__all__ = [
# Basic resource access and distribution/entry point discovery
'require', 'run_script', 'get_provider', 'get_distribution',
'load_entry_point', 'get_entry_map', 'get_entry_info', 'iter_entry_points',
'resource_string', 'resource_stream', 'resource_filename',
'resource_listdir', 'resource_exists', 'resource_isdir',
# Environmental control
'declare_namespace', 'working_set', 'add_activation_listener',
'find_distributions', 'set_extraction_path', 'cleanup_resources',
'get_default_cache',
# Primary implementation classes
'Environment', 'WorkingSet', 'ResourceManager',
'Distribution', 'Requirement', 'EntryPoint',
# Exceptions
'ResolutionError','VersionConflict','DistributionNotFound','UnknownExtra',
'ExtractionError',
# Parsing functions and string utilities
'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
'safe_extra', 'to_filename',
# filesystem utilities
'ensure_directory', 'normalize_path',
# Distribution "precedence" constants
'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
# "Provider" interfaces, implementations, and registration/lookup APIs
'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
'register_finder', 'register_namespace_handler', 'register_loader_type',
'fixup_namespace_packages', 'get_importer',
# Deprecated/backward compatibility only
'run_main', 'AvailableDistributions',
]
class ResolutionError(Exception):
"""Abstract base for dependency resolution errors"""
def __repr__(self):
return self.__class__.__name__+repr(self.args)
class VersionConflict(ResolutionError):
"""An already-installed version conflicts with the requested version"""
class DistributionNotFound(ResolutionError):
"""A requested distribution was not found"""
class UnknownExtra(ResolutionError):
"""Distribution doesn't have an "extra feature" of the given name"""
_provider_factories = {}
PY_MAJOR = sys.version[:3]
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1
def register_loader_type(loader_type, provider_factory):
"""Register `provider_factory` to make providers for `loader_type`
`loader_type` is the type or class of a PEP 302 ``module.__loader__``,
and `provider_factory` is a function that, passed a *module* object,
returns an ``IResourceProvider`` for that module.
"""
_provider_factories[loader_type] = provider_factory
def get_provider(moduleOrReq):
"""Return an IResourceProvider for the named module or requirement"""
if isinstance(moduleOrReq,Requirement):
return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
try:
module = sys.modules[moduleOrReq]
except KeyError:
__import__(moduleOrReq)
module = sys.modules[moduleOrReq]
loader = getattr(module, '__loader__', None)
return _find_adapter(_provider_factories, loader)(module)
def _macosx_vers(_cache=[]):
if not _cache:
import platform
version = platform.mac_ver()[0]
# fallback for MacPorts
if version == '':
import plistlib
plist = '/System/Library/CoreServices/SystemVersion.plist'
if os.path.exists(plist):
plist_content = plistlib.readPlist(plist)
if 'ProductVersion' in plist_content:
version = plist_content['ProductVersion']
_cache.append(version.split('.'))
return _cache[0]
def _macosx_arch(machine):
return {'PowerPC':'ppc', 'Power_Macintosh':'ppc'}.get(machine,machine)
def get_build_platform():
"""Return this platform's string for platform-specific distributions
XXX Currently this is the same as ``distutils.util.get_platform()``, but it
needs some hacks for Linux and Mac OS X.
"""
from distutils.util import get_platform
plat = get_platform()
if sys.platform == "darwin" and not plat.startswith('macosx-'):
try:
version = _macosx_vers()
machine = os.uname()[4].replace(" ", "_")
return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]),
_macosx_arch(machine))
except ValueError:
# if someone is running a non-Mac darwin system, this will fall
# through to the default implementation
pass
return plat
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
get_platform = get_build_platform # XXX backward compat
def compatible_platforms(provided,required):
"""Can code for the `provided` platform run on the `required` platform?
Returns true if either platform is ``None``, or the platforms are equal.
XXX Needs compatibility checks for Linux and other unixy OSes.
"""
if provided is None or required is None or provided==required:
return True # easy case
# Mac OS X special cases
reqMac = macosVersionString.match(required)
if reqMac:
provMac = macosVersionString.match(provided)
# is this a Mac package?
if not provMac:
# this is backwards compatibility for packages built before
# setuptools 0.6. All packages built after this point will
# use the new macosx designation.
provDarwin = darwinVersionString.match(provided)
if provDarwin:
dversion = int(provDarwin.group(1))
macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
if dversion == 7 and macosversion >= "10.3" or \
dversion == 8 and macosversion >= "10.4":
#import warnings
#warnings.warn("Mac eggs should be rebuilt to "
# "use the macosx designation instead of darwin.",
# category=DeprecationWarning)
return True
return False # egg isn't macosx or legacy darwin
# are they the same major version and machine type?
if provMac.group(1) != reqMac.group(1) or \
provMac.group(3) != reqMac.group(3):
return False
# is the required OS major update >= the provided one?
if int(provMac.group(2)) > int(reqMac.group(2)):
return False
return True
# XXX Linux and other platforms' special cases should go here
return False
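# Illustrative sketch (not part of the original module) of the rules above:
#
#   compatible_platforms('win32', 'win32')                       # True (equal)
#   compatible_platforms(None, 'linux-x86_64')                   # True (None matches anything)
#   compatible_platforms('macosx-10.3-ppc', 'macosx-10.5-ppc')   # True  (older build, newer OS)
#   compatible_platforms('macosx-10.5-ppc', 'macosx-10.3-ppc')   # False (build needs newer OS)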
def run_script(dist_spec, script_name):
"""Locate distribution `dist_spec` and run its `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
require(dist_spec)[0].run_script(script_name, ns)
run_main = run_script # backward compatibility
def get_distribution(dist):
"""Return a current distribution object for a Requirement or string"""
if isinstance(dist,basestring): dist = Requirement.parse(dist)
if isinstance(dist,Requirement): dist = get_provider(dist)
if not isinstance(dist,Distribution):
raise TypeError("Expected string, Requirement, or Distribution", dist)
return dist
def load_entry_point(dist, group, name):
"""Return `name` entry point of `group` for `dist` or raise ImportError"""
return get_distribution(dist).load_entry_point(group, name)
def get_entry_map(dist, group=None):
"""Return the entry point map for `group`, or the full entry map"""
return get_distribution(dist).get_entry_map(group)
def get_entry_info(dist, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return get_distribution(dist).get_entry_info(group, name)
class IMetadataProvider:
def has_metadata(name):
"""Does the package's distribution contain the named metadata?"""
def get_metadata(name):
"""The named metadata resource as a string"""
def get_metadata_lines(name):
"""Yield named metadata resource as list of non-blank non-comment lines
Leading and trailing whitespace is stripped from each line, and lines
with ``#`` as the first non-blank character are omitted."""
def metadata_isdir(name):
"""Is the named metadata a directory? (like ``os.path.isdir()``)"""
def metadata_listdir(name):
"""List of metadata names in the directory (like ``os.listdir()``)"""
def run_script(script_name, namespace):
"""Execute the named script in the supplied namespace dictionary"""
class IResourceProvider(IMetadataProvider):
"""An object that provides access to package resources"""
def get_resource_filename(manager, resource_name):
"""Return a true filesystem path for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_stream(manager, resource_name):
"""Return a readable file-like object for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_string(manager, resource_name):
"""Return a string containing the contents of `resource_name`
`manager` must be an ``IResourceManager``"""
def has_resource(resource_name):
"""Does the package contain the named resource?"""
def resource_isdir(resource_name):
"""Is the named resource a directory? (like ``os.path.isdir()``)"""
def resource_listdir(resource_name):
"""List of resource names in the directory (like ``os.listdir()``)"""
class WorkingSet(object):
"""A collection of active distributions on sys.path (or a similar list)"""
def __init__(self, entries=None):
"""Create working set from list of path entries (default=sys.path)"""
self.entries = []
self.entry_keys = {}
self.by_key = {}
self.callbacks = []
if entries is None:
entries = sys.path
for entry in entries:
self.add_entry(entry)
def add_entry(self, entry):
"""Add a path item to ``.entries``, finding any distributions on it
``find_distributions(entry,True)`` is used to find distributions
corresponding to the path entry, and they are added. `entry` is
always appended to ``.entries``, even if it is already present.
(This is because ``sys.path`` can contain the same value more than
once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
equal ``sys.path``.)
"""
self.entry_keys.setdefault(entry, [])
self.entries.append(entry)
for dist in find_distributions(entry, True):
self.add(dist, entry, False)
def __contains__(self,dist):
"""True if `dist` is the active distribution for its project"""
return self.by_key.get(dist.key) == dist
def find(self, req):
"""Find a distribution matching requirement `req`
If there is an active distribution for the requested project, this
returns it as long as it meets the version requirement specified by
`req`. But, if there is an active distribution for the project and it
does *not* meet the `req` requirement, ``VersionConflict`` is raised.
If there is no active distribution for the requested project, ``None``
is returned.
"""
dist = self.by_key.get(req.key)
if dist is not None and dist not in req:
raise VersionConflict(dist,req) # XXX add more info
else:
return dist
def iter_entry_points(self, group, name=None):
"""Yield entry point objects from `group` matching `name`
If `name` is None, yields all entry points in `group` from all
distributions in the working set, otherwise only ones matching
both `group` and `name` are yielded (in distribution order).
"""
for dist in self:
entries = dist.get_entry_map(group)
if name is None:
for ep in entries.values():
yield ep
elif name in entries:
yield entries[name]
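# Illustrative sketch (not part of the original module): the module-level
# iter_entry_points() wrapper built on this method is commonly used for
# plugin discovery, e.g.:
#
#   for ep in iter_entry_points(group='console_scripts'):
#       func = ep.load()    # import and return the advertised object
#
# 'console_scripts' is just an example group name.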
def run_script(self, requires, script_name):
"""Locate distribution for `requires` and run `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
self.require(requires)[0].run_script(script_name, ns)
def __iter__(self):
"""Yield distributions for non-duplicate projects in the working set
The yield order is the order in which the items' path entries were
added to the working set.
"""
seen = {}
for item in self.entries:
for key in self.entry_keys[item]:
if key not in seen:
seen[key]=1
yield self.by_key[key]
def add(self, dist, entry=None, insert=True):
"""Add `dist` to working set, associated with `entry`
If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
On exit from this routine, `entry` is added to the end of the working
set's ``.entries`` (if it wasn't already present).
`dist` is only added to the working set if it's for a project that
doesn't already have a distribution in the set. If it's added, any
callbacks registered with the ``subscribe()`` method will be called.
"""
if insert:
dist.insert_on(self.entries, entry)
if entry is None:
entry = dist.location
keys = self.entry_keys.setdefault(entry,[])
keys2 = self.entry_keys.setdefault(dist.location,[])
if dist.key in self.by_key:
return # ignore hidden distros
self.by_key[dist.key] = dist
if dist.key not in keys:
keys.append(dist.key)
if dist.key not in keys2:
keys2.append(dist.key)
self._added_new(dist)
def resolve(self, requirements, env=None, installer=None, replacement=True):
"""List all distributions needed to (recursively) meet `requirements`
`requirements` must be a sequence of ``Requirement`` objects. `env`,
if supplied, should be an ``Environment`` instance. If
not supplied, it defaults to all distributions available within any
entry or distribution in the working set. `installer`, if supplied,
will be invoked with each requirement that cannot be met by an
already-installed distribution; it should return a ``Distribution`` or
``None``.
"""
requirements = list(requirements)[::-1] # set up the stack
processed = {} # set of processed requirements
best = {} # key -> dist
to_activate = []
while requirements:
req = requirements.pop(0) # process dependencies breadth-first
if _override_setuptools(req) and replacement:
req = Requirement.parse('distribute')
if req in processed:
# Ignore cyclic or redundant dependencies
continue
dist = best.get(req.key)
if dist is None:
# Find the best distribution and add it to the map
dist = self.by_key.get(req.key)
if dist is None:
if env is None:
env = Environment(self.entries)
dist = best[req.key] = env.best_match(req, self, installer)
if dist is None:
#msg = ("The '%s' distribution was not found on this "
# "system, and is required by this application.")
#raise DistributionNotFound(msg % req)
# unfortunately, zc.buildout uses a str(err)
# to get the name of the distribution here..
raise DistributionNotFound(req)
to_activate.append(dist)
if dist not in req:
# Oops, the "best" so far conflicts with a dependency
raise VersionConflict(dist,req) # XXX put more info here
requirements.extend(dist.requires(req.extras)[::-1])
processed[req] = True
return to_activate # return list of distros to activate
def find_plugins(self,
plugin_env, full_env=None, installer=None, fallback=True
):
"""Find all activatable distributions in `plugin_env`
Example usage::
distributions, errors = working_set.find_plugins(
Environment(plugin_dirlist)
)
map(working_set.add, distributions) # add plugins+libs to sys.path
print 'Could not load', errors # display errors
The `plugin_env` should be an ``Environment`` instance that contains
only distributions that are in the project's "plugin directory" or
directories. The `full_env`, if supplied, should be an ``Environment`` that
contains all currently-available distributions. If `full_env` is not
supplied, one is created automatically from the ``WorkingSet`` this
method is called on, which will typically mean that every directory on
``sys.path`` will be scanned for distributions.
`installer` is a standard installer callback as used by the
``resolve()`` method. The `fallback` flag indicates whether we should
attempt to resolve older versions of a plugin if the newest version
cannot be resolved.
This method returns a 2-tuple: (`distributions`, `error_info`), where
`distributions` is a list of the distributions found in `plugin_env`
that were loadable, along with any other distributions that are needed
to resolve their dependencies. `error_info` is a dictionary mapping
unloadable plugin distributions to an exception instance describing the
error that occurred. Usually this will be a ``DistributionNotFound`` or
``VersionConflict`` instance.
"""
plugin_projects = list(plugin_env)
plugin_projects.sort() # scan project names in alphabetic order
error_info = {}
distributions = {}
if full_env is None:
env = Environment(self.entries)
env += plugin_env
else:
env = full_env + plugin_env
shadow_set = self.__class__([])
map(shadow_set.add, self) # put all our entries in shadow_set
for project_name in plugin_projects:
for dist in plugin_env[project_name]:
req = [dist.as_requirement()]
try:
resolvees = shadow_set.resolve(req, env, installer)
except ResolutionError,v:
error_info[dist] = v # save error info
if fallback:
continue # try the next older version of project
else:
break # give up on this project, keep going
else:
map(shadow_set.add, resolvees)
distributions.update(dict.fromkeys(resolvees))
# success, no need to try any more versions of this project
break
distributions = list(distributions)
distributions.sort()
return distributions, error_info
def require(self, *requirements):
"""Ensure that distributions matching `requirements` are activated
`requirements` must be a string or a (possibly-nested) sequence
thereof, specifying the distributions and versions required. The
return value is a sequence of the distributions that needed to be
activated to fulfill the requirements; all relevant distributions are
included, even if they were already activated in this working set.
"""
needed = self.resolve(parse_requirements(requirements))
for dist in needed:
self.add(dist)
return needed
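# Illustrative sketch (not part of the original module): the module-level
# require() wrapper built on this method activates distributions by
# requirement string, e.g.:
#
#   require("SomeProject>=1.2", "OtherProject[extra]")
#
# "SomeProject" and "OtherProject" are hypothetical project names.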
def subscribe(self, callback):
"""Invoke `callback` for all distributions (including existing ones)"""
if callback in self.callbacks:
return
self.callbacks.append(callback)
for dist in self:
callback(dist)
def _added_new(self, dist):
for callback in self.callbacks:
callback(dist)
class Environment(object):
"""Searchable snapshot of distributions on a search path"""
def __init__(self, search_path=None, platform=get_supported_platform(), python=PY_MAJOR):
"""Snapshot distributions available on a search path
Any distributions found on `search_path` are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used.
`platform` is an optional string specifying the name of the platform
that platform-specific distributions must be compatible with. If
unspecified, it defaults to the current platform. `python` is an
optional string naming the desired version of Python (e.g. ``'2.4'``);
it defaults to the current version.
You may explicitly set `platform` (and/or `python`) to ``None`` if you
wish to map *all* distributions, not just those compatible with the
running platform or Python version.
"""
self._distmap = {}
self._cache = {}
self.platform = platform
self.python = python
self.scan(search_path)
def can_add(self, dist):
"""Is distribution `dist` acceptable for this environment?
The distribution must match the platform and python version
requirements specified when this environment was created, or False
is returned.
"""
return (self.python is None or dist.py_version is None
or dist.py_version==self.python) \
and compatible_platforms(dist.platform,self.platform)
def remove(self, dist):
"""Remove `dist` from the environment"""
self._distmap[dist.key].remove(dist)
def scan(self, search_path=None):
"""Scan `search_path` for distributions usable in this environment
Any distributions found are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used. Only distributions conforming to
the platform/python version defined at initialization are added.
"""
if search_path is None:
search_path = sys.path
for item in search_path:
for dist in find_distributions(item):
self.add(dist)
def __getitem__(self,project_name):
"""Return a newest-to-oldest list of distributions for `project_name`
"""
try:
return self._cache[project_name]
except KeyError:
project_name = project_name.lower()
if project_name not in self._distmap:
return []
if project_name not in self._cache:
dists = self._cache[project_name] = self._distmap[project_name]
_sort_dists(dists)
return self._cache[project_name]
def add(self,dist):
"""Add `dist` if we ``can_add()`` it and it isn't already added"""
if self.can_add(dist) and dist.has_version():
dists = self._distmap.setdefault(dist.key,[])
if dist not in dists:
dists.append(dist)
if dist.key in self._cache:
_sort_dists(self._cache[dist.key])
def best_match(self, req, working_set, installer=None):
"""Find distribution best matching `req` and usable on `working_set`
This calls the ``find(req)`` method of the `working_set` to see if a
suitable distribution is already active. (This may raise
``VersionConflict`` if an unsuitable version of the project is already
active in the specified `working_set`.) If a suitable distribution
isn't active, this method returns the newest distribution in the
environment that meets the ``Requirement`` in `req`. If no suitable
distribution is found, and `installer` is supplied, then the result of
calling the environment's ``obtain(req, installer)`` method will be
returned.
"""
dist = working_set.find(req)
if dist is not None:
return dist
for dist in self[req.key]:
if dist in req:
return dist
return self.obtain(req, installer) # try and download/install
def obtain(self, requirement, installer=None):
"""Obtain a distribution matching `requirement` (e.g. via download)
Obtain a distro that matches requirement (e.g. via download). In the
base ``Environment`` class, this routine just returns
``installer(requirement)``, unless `installer` is None, in which case
None is returned instead. This method is a hook that allows subclasses
to attempt other ways of obtaining a distribution before falling back
to the `installer` argument."""
if installer is not None:
return installer(requirement)
def __iter__(self):
"""Yield the unique project names of the available distributions"""
for key in self._distmap.keys():
if self[key]: yield key
def __iadd__(self, other):
"""In-place addition of a distribution or environment"""
if isinstance(other,Distribution):
self.add(other)
elif isinstance(other,Environment):
for project in other:
for dist in other[project]:
self.add(dist)
else:
raise TypeError("Can't add %r to environment" % (other,))
return self
def __add__(self, other):
"""Add an environment or distribution to an environment"""
new = self.__class__([], platform=None, python=None)
for env in self, other:
new += env
return new
AvailableDistributions = Environment # XXX backward compatibility
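# Illustrative sketch (not part of the original module): a typical Environment
# query, per the __getitem__ and best_match() docstrings above:
#
#   env = Environment(['/opt/myapp/plugins'])    # hypothetical plugin path
#   for dist in env['myplugin']:                 # newest-to-oldest Distributions
#       print dist.version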
class ExtractionError(RuntimeError):
"""An error occurred extracting a resource
The following attributes are available from instances of this exception:
manager
The resource manager that raised this exception
cache_path
The base directory for resource extraction
original_error
The exception instance that caused extraction to fail
"""
class ResourceManager:
"""Manage resource extraction and packages"""
extraction_path = None
def __init__(self):
self.cached_files = {}
def resource_exists(self, package_or_requirement, resource_name):
"""Does the named resource exist?"""
return get_provider(package_or_requirement).has_resource(resource_name)
def resource_isdir(self, package_or_requirement, resource_name):
"""Is the named resource an existing directory?"""
return get_provider(package_or_requirement).resource_isdir(
resource_name
)
def resource_filename(self, package_or_requirement, resource_name):
"""Return a true filesystem path for specified resource"""
return get_provider(package_or_requirement).get_resource_filename(
self, resource_name
)
def resource_stream(self, package_or_requirement, resource_name):
"""Return a readable file-like object for specified resource"""
return get_provider(package_or_requirement).get_resource_stream(
self, resource_name
)
def resource_string(self, package_or_requirement, resource_name):
"""Return specified resource as a string"""
return get_provider(package_or_requirement).get_resource_string(
self, resource_name
)
def resource_listdir(self, package_or_requirement, resource_name):
"""List the contents of the named resource directory"""
return get_provider(package_or_requirement).resource_listdir(
resource_name
)
def extraction_error(self):
"""Give an error message for problems extracting file(s)"""
old_exc = sys.exc_info()[1]
cache_path = self.extraction_path or get_default_cache()
err = ExtractionError("""Can't extract file(s) to egg cache
The following error occurred while trying to extract file(s) to the Python egg
cache:
%s
The Python egg cache directory is currently set to:
%s
Perhaps your account does not have write access to this directory? You can
change the cache directory by setting the PYTHON_EGG_CACHE environment
variable to point to an accessible directory.
""" % (old_exc, cache_path)
)
err.manager = self
err.cache_path = cache_path
err.original_error = old_exc
raise err
def get_cache_path(self, archive_name, names=()):
"""Return absolute location in cache for `archive_name` and `names`
The parent directory of the resulting path will be created if it does
not already exist. `archive_name` should be the base filename of the
enclosing egg (which may not be the name of the enclosing zipfile!),
including its ".egg" extension. `names`, if provided, should be a
sequence of path name parts "under" the egg's extraction location.
This method should only be called by resource providers that need to
obtain an extraction location, and only for names they intend to
extract, as it tracks the generated names for possible cleanup later.
"""
extract_path = self.extraction_path or get_default_cache()
target_path = os.path.join(extract_path, archive_name+'-tmp', *names)
try:
_bypass_ensure_directory(target_path)
except:
self.extraction_error()
self.cached_files[target_path] = 1
return target_path
def postprocess(self, tempname, filename):
"""Perform any platform-specific postprocessing of `tempname`
This is where Mac header rewrites should be done; other platforms don't
need any special handling.
Resource providers should call this method ONLY after successfully
extracting a compressed resource. They must NOT call it on resources
that are already in the filesystem.
`tempname` is the current (temporary) name of the file, and `filename`
is the name it will be renamed to by the caller after this routine
returns.
"""
if os.name == 'posix':
# Make the resource executable
mode = ((os.stat(tempname).st_mode) | 0555) & 07777
os.chmod(tempname, mode)
def set_extraction_path(self, path):
"""Set the base path where resources will be extracted to, if needed.
If you do not call this routine before any extractions take place, the
path defaults to the return value of ``get_default_cache()``. (Which
is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
platform-specific fallbacks. See that routine's documentation for more
details.)
Resources are extracted to subdirectories of this path based upon
information given by the ``IResourceProvider``. You may set this to a
temporary directory, but then you must call ``cleanup_resources()`` to
delete the extracted files when done. There is no guarantee that
``cleanup_resources()`` will be able to remove all extracted files.
(Note: you may not change the extraction path for a given resource
manager once resources have been extracted, unless you first call
``cleanup_resources()``.)
"""
if self.cached_files:
raise ValueError(
"Can't change extraction path, files already extracted"
)
self.extraction_path = path
def cleanup_resources(self, force=False):
"""
Delete all extracted resource files and directories, returning a list
of the file and directory names that could not be successfully removed.
This function does not have any concurrency protection, so it should
generally only be called when the extraction path is a temporary
directory exclusive to a single process. This method is not
automatically called; you must call it explicitly or register it as an
``atexit`` function if you wish to ensure cleanup of a temporary
directory used for extractions.
"""
# XXX
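# Illustrative sketch (not part of the original module): callers that want
# extraction into a throwaway directory typically combine the two methods
# above via the module-level wrappers, e.g.:
#
#   import atexit, tempfile
#   set_extraction_path(tempfile.mkdtemp())
#   atexit.register(cleanup_resources)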
def get_default_cache():
"""Determine the default cache location
This returns the ``PYTHON_EGG_CACHE`` environment variable, if set.
Otherwise, on Windows, it returns a "Python-Eggs" subdirectory of the
"Application Data" directory. On all other systems, it's "~/.python-eggs".
"""
try:
return os.environ['PYTHON_EGG_CACHE']
except KeyError:
pass
if os.name!='nt':
return os.path.expanduser('~/.python-eggs')
app_data = 'Application Data' # XXX this may be locale-specific!
app_homes = [
(('APPDATA',), None), # best option, should be locale-safe
(('USERPROFILE',), app_data),
(('HOMEDRIVE','HOMEPATH'), app_data),
(('HOMEPATH',), app_data),
(('HOME',), None),
(('WINDIR',), app_data), # 95/98/ME
]
for keys, subdir in app_homes:
dirname = ''
for key in keys:
if key in os.environ:
dirname = os.path.join(dirname, os.environ[key])
else:
break
else:
if subdir:
dirname = os.path.join(dirname,subdir)
return os.path.join(dirname, 'Python-Eggs')
else:
raise RuntimeError(
"Please set the PYTHON_EGG_CACHE enviroment variable"
)
def safe_name(name):
"""Convert an arbitrary string to a standard distribution name
Any runs of non-alphanumeric/. characters are replaced with a single '-'.
"""
return re.sub('[^A-Za-z0-9.]+', '-', name)
def safe_version(version):
"""Convert an arbitrary string to a standard version string
Spaces become dots, and all other non-alphanumeric characters become
dashes, with runs of multiple dashes condensed to a single dash.
"""
version = version.replace(' ','.')
return re.sub('[^A-Za-z0-9.]+', '-', version)
def safe_extra(extra):
"""Convert an arbitrary string to a standard 'extra' name
Any runs of non-alphanumeric characters are replaced with a single '_',
and the result is always lowercased.
"""
return re.sub('[^A-Za-z0-9.]+', '_', extra).lower()
def to_filename(name):
"""Convert a project or version name to its filename-escaped form
Any '-' characters are currently replaced with '_'.
"""
return name.replace('-','_')
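# Illustrative sketch (not part of the original module) of the four helpers
# above:
#
#   safe_name('hello_world')      # -> 'hello-world'
#   safe_version('2.1 beta 3')    # -> '2.1.beta.3'
#   safe_extra('Fast-Math')       # -> 'fast_math'
#   to_filename('my-project')     # -> 'my_project'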
class NullProvider:
"""Try to implement resources and metadata for arbitrary PEP 302 loaders"""
egg_name = None
egg_info = None
loader = None
def __init__(self, module):
self.loader = getattr(module, '__loader__', None)
self.module_path = os.path.dirname(getattr(module, '__file__', ''))
def get_resource_filename(self, manager, resource_name):
return self._fn(self.module_path, resource_name)
def get_resource_stream(self, manager, resource_name):
return StringIO(self.get_resource_string(manager, resource_name))
def get_resource_string(self, manager, resource_name):
return self._get(self._fn(self.module_path, resource_name))
def has_resource(self, resource_name):
return self._has(self._fn(self.module_path, resource_name))
def has_metadata(self, name):
return self.egg_info and self._has(self._fn(self.egg_info,name))
if sys.version_info <= (3,):
def get_metadata(self, name):
if not self.egg_info:
return ""
return self._get(self._fn(self.egg_info,name))
else:
def get_metadata(self, name):
if not self.egg_info:
return ""
return self._get(self._fn(self.egg_info,name)).decode("utf-8")
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
def resource_isdir(self,resource_name):
return self._isdir(self._fn(self.module_path, resource_name))
def metadata_isdir(self,name):
return self.egg_info and self._isdir(self._fn(self.egg_info,name))
def resource_listdir(self,resource_name):
return self._listdir(self._fn(self.module_path,resource_name))
def metadata_listdir(self,name):
if self.egg_info:
return self._listdir(self._fn(self.egg_info,name))
return []
def run_script(self,script_name,namespace):
script = 'scripts/'+script_name
if not self.has_metadata(script):
raise ResolutionError("No script named %r" % script_name)
script_text = self.get_metadata(script).replace('\r\n','\n')
script_text = script_text.replace('\r','\n')
script_filename = self._fn(self.egg_info,script)
namespace['__file__'] = script_filename
if os.path.exists(script_filename):
execfile(script_filename, namespace, namespace)
else:
from linecache import cache
cache[script_filename] = (
len(script_text), 0, script_text.split('\n'), script_filename
)
script_code = compile(script_text,script_filename,'exec')
exec script_code in namespace, namespace
def _has(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _isdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _listdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _fn(self, base, resource_name):
if resource_name:
return os.path.join(base, *resource_name.split('/'))
return base
def _get(self, path):
if hasattr(self.loader, 'get_data'):
return self.loader.get_data(path)
raise NotImplementedError(
"Can't perform this operation for loaders without 'get_data()'"
)
register_loader_type(object, NullProvider)
class EggProvider(NullProvider):
"""Provider based on a virtual filesystem"""
def __init__(self,module):
NullProvider.__init__(self,module)
self._setup_prefix()
def _setup_prefix(self):
# we assume here that our metadata may be nested inside a "basket"
# of multiple eggs; that's why we use module_path instead of .archive
path = self.module_path
old = None
while path!=old:
if path.lower().endswith('.egg'):
self.egg_name = os.path.basename(path)
self.egg_info = os.path.join(path, 'EGG-INFO')
self.egg_root = path
break
old = path
path, base = os.path.split(path)
class DefaultProvider(EggProvider):
"""Provides access to package resources in the filesystem"""
def _has(self, path):
return os.path.exists(path)
def _isdir(self,path):
return os.path.isdir(path)
def _listdir(self,path):
return os.listdir(path)
def get_resource_stream(self, manager, resource_name):
return open(self._fn(self.module_path, resource_name), 'rb')
def _get(self, path):
stream = open(path, 'rb')
try:
return stream.read()
finally:
stream.close()
register_loader_type(type(None), DefaultProvider)
class EmptyProvider(NullProvider):
"""Provider that returns nothing for all requests"""
_isdir = _has = lambda self,path: False
_get = lambda self,path: ''
_listdir = lambda self,path: []
module_path = None
def __init__(self):
pass
empty_provider = EmptyProvider()
class ZipProvider(EggProvider):
"""Resource support for zips and eggs"""
eagers = None
def __init__(self, module):
EggProvider.__init__(self,module)
self.zipinfo = zipimport._zip_directory_cache[self.loader.archive]
self.zip_pre = self.loader.archive+os.sep
def _zipinfo_name(self, fspath):
# Convert a virtual filename (full path to file) into a zipfile subpath
# usable with the zipimport directory cache for our target archive
if fspath.startswith(self.zip_pre):
return fspath[len(self.zip_pre):]
raise AssertionError(
"%s is not a subpath of %s" % (fspath,self.zip_pre)
)
def _parts(self,zip_path):
# Convert a zipfile subpath into an egg-relative path part list
fspath = self.zip_pre+zip_path # pseudo-fs path
if fspath.startswith(self.egg_root+os.sep):
return fspath[len(self.egg_root)+1:].split(os.sep)
raise AssertionError(
"%s is not a subpath of %s" % (fspath,self.egg_root)
)
def get_resource_filename(self, manager, resource_name):
if not self.egg_name:
raise NotImplementedError(
"resource_filename() only supported for .egg, not .zip"
)
# no need to lock for extraction, since we use temp names
zip_path = self._resource_to_zip(resource_name)
eagers = self._get_eager_resources()
if '/'.join(self._parts(zip_path)) in eagers:
for name in eagers:
self._extract_resource(manager, self._eager_to_zip(name))
return self._extract_resource(manager, zip_path)
def _extract_resource(self, manager, zip_path):
if zip_path in self._index():
for name in self._index()[zip_path]:
last = self._extract_resource(
manager, os.path.join(zip_path, name)
)
return os.path.dirname(last) # return the extracted directory name
zip_stat = self.zipinfo[zip_path]
t,d,size = zip_stat[5], zip_stat[6], zip_stat[3]
date_time = (
(d>>9)+1980, (d>>5)&0xF, d&0x1F, # ymd
(t&0xFFFF)>>11, (t>>5)&0x3F, (t&0x1F) * 2, 0, 0, -1 # hms, etc.
)
timestamp = time.mktime(date_time)
try:
if not WRITE_SUPPORT:
raise IOError('"os.rename" and "os.unlink" are not supported '
'on this platform')
real_path = manager.get_cache_path(
self.egg_name, self._parts(zip_path)
)
if os.path.isfile(real_path):
stat = os.stat(real_path)
if stat.st_size==size and stat.st_mtime==timestamp:
# size and stamp match, don't bother extracting
return real_path
outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path))
os.write(outf, self.loader.get_data(zip_path))
os.close(outf)
utime(tmpnam, (timestamp,timestamp))
manager.postprocess(tmpnam, real_path)
try:
rename(tmpnam, real_path)
except os.error:
if os.path.isfile(real_path):
stat = os.stat(real_path)
if stat.st_size==size and stat.st_mtime==timestamp:
# size and stamp match, somebody did it just ahead of
# us, so we're done
return real_path
elif os.name=='nt': # Windows, del old file and retry
unlink(real_path)
rename(tmpnam, real_path)
return real_path
raise
except os.error:
manager.extraction_error() # report a user-friendly error
return real_path
def _get_eager_resources(self):
if self.eagers is None:
eagers = []
for name in ('native_libs.txt', 'eager_resources.txt'):
if self.has_metadata(name):
eagers.extend(self.get_metadata_lines(name))
self.eagers = eagers
return self.eagers
def _index(self):
try:
return self._dirindex
except AttributeError:
ind = {}
for path in self.zipinfo:
parts = path.split(os.sep)
while parts:
parent = os.sep.join(parts[:-1])
if parent in ind:
ind[parent].append(parts[-1])
break
else:
ind[parent] = [parts.pop()]
self._dirindex = ind
return ind
def _has(self, fspath):
zip_path = self._zipinfo_name(fspath)
return zip_path in self.zipinfo or zip_path in self._index()
def _isdir(self,fspath):
return self._zipinfo_name(fspath) in self._index()
def _listdir(self,fspath):
return list(self._index().get(self._zipinfo_name(fspath), ()))
def _eager_to_zip(self,resource_name):
return self._zipinfo_name(self._fn(self.egg_root,resource_name))
def _resource_to_zip(self,resource_name):
return self._zipinfo_name(self._fn(self.module_path,resource_name))
register_loader_type(zipimport.zipimporter, ZipProvider)
class FileMetadata(EmptyProvider):
"""Metadata handler for standalone PKG-INFO files
Usage::
metadata = FileMetadata("/path/to/PKG-INFO")
This provider rejects all data and metadata requests except for PKG-INFO,
which is treated as existing, and will be the contents of the file at
the provided location.
"""
def __init__(self,path):
self.path = path
def has_metadata(self,name):
return name=='PKG-INFO'
def get_metadata(self,name):
if name=='PKG-INFO':
f = open(self.path,'rU')
metadata = f.read()
f.close()
return metadata
raise KeyError("No metadata except PKG-INFO is available")
def get_metadata_lines(self,name):
return yield_lines(self.get_metadata(name))
class PathMetadata(DefaultProvider):
"""Metadata provider for egg directories
Usage::
# Development eggs:
egg_info = "/path/to/PackageName.egg-info"
base_dir = os.path.dirname(egg_info)
metadata = PathMetadata(base_dir, egg_info)
dist_name = os.path.splitext(os.path.basename(egg_info))[0]
dist = Distribution(basedir,project_name=dist_name,metadata=metadata)
# Unpacked egg directories:
egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
dist = Distribution.from_filename(egg_path, metadata=metadata)
"""
def __init__(self, path, egg_info):
self.module_path = path
self.egg_info = egg_info
class EggMetadata(ZipProvider):
"""Metadata provider for .egg files"""
def __init__(self, importer):
"""Create a metadata provider from a zipimporter"""
self.zipinfo = zipimport._zip_directory_cache[importer.archive]
self.zip_pre = importer.archive+os.sep
self.loader = importer
if importer.prefix:
self.module_path = os.path.join(importer.archive, importer.prefix)
else:
self.module_path = importer.archive
self._setup_prefix()
class ImpWrapper:
"""PEP 302 Importer that wraps Python's "normal" import algorithm"""
def __init__(self, path=None):
self.path = path
def find_module(self, fullname, path=None):
subname = fullname.split(".")[-1]
if subname != fullname and self.path is None:
return None
if self.path is None:
path = None
else:
path = [self.path]
try:
file, filename, etc = imp.find_module(subname, path)
except ImportError:
return None
return ImpLoader(file, filename, etc)
class ImpLoader:
"""PEP 302 Loader that wraps Python's "normal" import algorithm"""
def __init__(self, file, filename, etc):
self.file = file
self.filename = filename
self.etc = etc
def load_module(self, fullname):
try:
mod = imp.load_module(fullname, self.file, self.filename, self.etc)
finally:
if self.file: self.file.close()
# Note: we don't set __loader__ because we want the module to look
# normal; i.e. this is just a wrapper for standard import machinery
return mod
def get_importer(path_item):
"""Retrieve a PEP 302 "importer" for the given path item
If there is no importer, this returns a wrapper around the builtin import
machinery. The returned importer is only cached if it was created by a
path hook.
"""
try:
importer = sys.path_importer_cache[path_item]
except KeyError:
for hook in sys.path_hooks:
try:
importer = hook(path_item)
except ImportError:
pass
else:
break
else:
importer = None
sys.path_importer_cache.setdefault(path_item,importer)
if importer is None:
try:
importer = ImpWrapper(path_item)
except ImportError:
pass
return importer
try:
from pkgutil import get_importer, ImpImporter
except ImportError:
pass # Python 2.3 or 2.4, use our own implementation
else:
ImpWrapper = ImpImporter # Python 2.5, use pkgutil's implementation
del ImpLoader, ImpImporter
_distribution_finders = {}
def register_finder(importer_type, distribution_finder):
"""Register `distribution_finder` to find distributions in sys.path items
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `distribution_finder` is a callable that, passed a path
item and the importer instance, yields ``Distribution`` instances found on
that path item. See ``pkg_resources.find_on_path`` for an example."""
_distribution_finders[importer_type] = distribution_finder
def find_distributions(path_item, only=False):
"""Yield distributions accessible via `path_item`"""
importer = get_importer(path_item)
finder = _find_adapter(_distribution_finders, importer)
return finder(importer, path_item, only)
def find_in_zip(importer, path_item, only=False):
metadata = EggMetadata(importer)
if metadata.has_metadata('PKG-INFO'):
yield Distribution.from_filename(path_item, metadata=metadata)
if only:
return # don't yield nested distros
for subitem in metadata.resource_listdir('/'):
if subitem.endswith('.egg'):
subpath = os.path.join(path_item, subitem)
for dist in find_in_zip(zipimport.zipimporter(subpath), subpath):
yield dist
register_finder(zipimport.zipimporter, find_in_zip)
def StringIO(*args, **kw):
"""Thunk to load the real StringIO on demand"""
global StringIO
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
return StringIO(*args,**kw)
def find_nothing(importer, path_item, only=False):
return ()
register_finder(object,find_nothing)
def find_on_path(importer, path_item, only=False):
"""Yield distributions accessible on a sys.path directory"""
path_item = _normalize_cached(path_item)
if os.path.isdir(path_item) and os.access(path_item, os.R_OK):
if path_item.lower().endswith('.egg'):
# unpacked egg
yield Distribution.from_filename(
path_item, metadata=PathMetadata(
path_item, os.path.join(path_item,'EGG-INFO')
)
)
else:
# scan for .egg and .egg-info in directory
for entry in os.listdir(path_item):
lower = entry.lower()
if lower.endswith('.egg-info'):
fullpath = os.path.join(path_item, entry)
if os.path.isdir(fullpath):
# egg-info directory, allow getting metadata
metadata = PathMetadata(path_item, fullpath)
else:
metadata = FileMetadata(fullpath)
yield Distribution.from_location(
path_item,entry,metadata,precedence=DEVELOP_DIST
)
elif not only and lower.endswith('.egg'):
for dist in find_distributions(os.path.join(path_item, entry)):
yield dist
elif not only and lower.endswith('.egg-link'):
for line in open(os.path.join(path_item, entry)):
if not line.strip(): continue
for item in find_distributions(os.path.join(path_item,line.rstrip())):
yield item
break
register_finder(ImpWrapper,find_on_path)
_namespace_handlers = {}
_namespace_packages = {}
def register_namespace_handler(importer_type, namespace_handler):
"""Register `namespace_handler` to declare namespace packages
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `namespace_handler` is a callable like this::
def namespace_handler(importer,path_entry,moduleName,module):
# return a path_entry to use for child packages
Namespace handlers are only called if the importer object has already
agreed that it can handle the relevant path item, and they should only
return a subpath if the module __path__ does not already contain an
equivalent subpath. For an example namespace handler, see
``pkg_resources.file_ns_handler``.
"""
_namespace_handlers[importer_type] = namespace_handler
def _handle_ns(packageName, path_item):
"""Ensure that named package includes a subpath of path_item (if needed)"""
importer = get_importer(path_item)
if importer is None:
return None
loader = importer.find_module(packageName)
if loader is None:
return None
module = sys.modules.get(packageName)
if module is None:
module = sys.modules[packageName] = types.ModuleType(packageName)
module.__path__ = []; _set_parent_ns(packageName)
elif not hasattr(module,'__path__'):
raise TypeError("Not a package:", packageName)
handler = _find_adapter(_namespace_handlers, importer)
subpath = handler(importer,path_item,packageName,module)
if subpath is not None:
path = module.__path__; path.append(subpath)
loader.load_module(packageName); module.__path__ = path
return subpath
def declare_namespace(packageName):
"""Declare that package 'packageName' is a namespace package"""
imp.acquire_lock()
try:
if packageName in _namespace_packages:
return
path, parent = sys.path, None
if '.' in packageName:
parent = '.'.join(packageName.split('.')[:-1])
declare_namespace(parent)
__import__(parent)
try:
path = sys.modules[parent].__path__
except AttributeError:
raise TypeError("Not a package:", parent)
# Track what packages are namespaces, so when new path items are added,
# they can be updated
_namespace_packages.setdefault(parent,[]).append(packageName)
_namespace_packages.setdefault(packageName,[])
for path_item in path:
# Ensure all the parent's path items are reflected in the child,
# if they apply
_handle_ns(packageName, path_item)
finally:
imp.release_lock()
def fixup_namespace_packages(path_item, parent=None):
"""Ensure that previously-declared namespace packages include path_item"""
imp.acquire_lock()
try:
for package in _namespace_packages.get(parent,()):
subpath = _handle_ns(package, path_item)
if subpath: fixup_namespace_packages(subpath,package)
finally:
imp.release_lock()
def file_ns_handler(importer, path_item, packageName, module):
"""Compute an ns-package subpath for a filesystem or zipfile importer"""
subpath = os.path.join(path_item, packageName.split('.')[-1])
normalized = _normalize_cached(subpath)
for item in module.__path__:
if _normalize_cached(item)==normalized:
break
else:
# Only return the path if it's not already there
return subpath
register_namespace_handler(ImpWrapper,file_ns_handler)
register_namespace_handler(zipimport.zipimporter,file_ns_handler)
def null_ns_handler(importer, path_item, packageName, module):
return None
register_namespace_handler(object,null_ns_handler)
def normalize_path(filename):
"""Normalize a file/dir name for comparison purposes"""
return os.path.normcase(os.path.realpath(filename))
def _normalize_cached(filename,_cache={}):
try:
return _cache[filename]
except KeyError:
_cache[filename] = result = normalize_path(filename)
return result
def _set_parent_ns(packageName):
parts = packageName.split('.')
name = parts.pop()
if parts:
parent = '.'.join(parts)
setattr(sys.modules[parent], name, sys.modules[packageName])
def yield_lines(strs):
"""Yield non-empty/non-comment lines of a ``basestring`` or sequence"""
if isinstance(strs,basestring):
for s in strs.splitlines():
s = s.strip()
if s and not s.startswith('#'): # skip blank lines/comments
yield s
else:
for ss in strs:
for s in yield_lines(ss):
yield s
LINE_END = re.compile(r"\s*(#.*)?$").match # whitespace and comment
CONTINUE = re.compile(r"\s*\\\s*(#.*)?$").match # line continuation
DISTRO = re.compile(r"\s*((\w|[-.])+)").match # Distribution or extra
VERSION = re.compile(r"\s*(<=?|>=?|==|!=)\s*((\w|[-.])+)").match # ver. info
COMMA = re.compile(r"\s*,").match # comma between items
OBRACKET = re.compile(r"\s*\[").match
CBRACKET = re.compile(r"\s*\]").match
MODULE = re.compile(r"\w+(\.\w+)*$").match
EGG_NAME = re.compile(
r"(?P<name>[^-]+)"
r"( -(?P<ver>[^-]+) (-py(?P<pyver>[^-]+) (-(?P<plat>.+))? )? )?",
re.VERBOSE | re.IGNORECASE
).match
component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE)
replace = {'pre':'c', 'preview':'c','-':'final-','rc':'c','dev':'@'}.get
def _parse_version_parts(s):
for part in component_re.split(s):
part = replace(part,part)
if not part or part=='.':
continue
if part[:1] in '0123456789':
yield part.zfill(8) # pad for numeric comparison
else:
yield '*'+part
yield '*final' # ensure that alpha/beta/candidate are before final
def parse_version(s):
"""Convert a version string to a chronologically-sortable key
This is a rough cross between distutils' StrictVersion and LooseVersion;
if you give it versions that would work with StrictVersion, then it behaves
the same; otherwise it acts like a slightly-smarter LooseVersion. It is
*possible* to create pathological version coding schemes that will fool
this parser, but they should be very rare in practice.
The returned value will be a tuple of strings. Numeric portions of the
version are padded to 8 digits so they will compare numerically, but
without relying on how numbers compare relative to strings. Dots are
dropped, but dashes are retained. Trailing zeros between alpha segments
or dashes are suppressed, so that e.g. "2.4.0" is considered the same as
"2.4". Alphanumeric parts are lower-cased.
    The algorithm assumes that strings like "-" and any alpha string that
    alphabetically follows "final" represent a "patch level".  So, "2.4-1"
is assumed to be a branch or patch of "2.4", and therefore "2.4.1" is
considered newer than "2.4-1", which in turn is newer than "2.4".
Strings like "a", "b", "c", "alpha", "beta", "candidate" and so on (that
come before "final" alphabetically) are assumed to be pre-release versions,
so that the version "2.4" is considered newer than "2.4a1".
Finally, to handle miscellaneous cases, the strings "pre", "preview", and
"rc" are treated as if they were "c", i.e. as though they were release
candidates, and therefore are not as new as a version string that does not
    contain them, and "dev" is replaced with an '@' so that it sorts lower
    than any other pre-release tag.
"""
parts = []
for part in _parse_version_parts(s.lower()):
if part.startswith('*'):
if part<'*final': # remove '-' before a prerelease tag
while parts and parts[-1]=='*final-': parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1]=='00000000':
parts.pop()
parts.append(part)
return tuple(parts)
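# A few illustrative comparisons of the scheme described above (a sketch, not
# exercised anywhere in this module; assumes it is importable as pkg_resources):
#
#   parse_version("2.4")    < parse_version("2.4.1")    # dotted release is newer
#   parse_version("2.4a1")  < parse_version("2.4")      # pre-release sorts lower
#   parse_version("2.4-1")  > parse_version("2.4")      # patch level sorts higher
#   parse_version("1.0rc1") == parse_version("1.0c1")   # "rc" is treated as "c"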
class EntryPoint(object):
"""Object representing an advertised importable object"""
def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
if not MODULE(module_name):
raise ValueError("Invalid module name", module_name)
self.name = name
self.module_name = module_name
self.attrs = tuple(attrs)
self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras
self.dist = dist
def __str__(self):
s = "%s = %s" % (self.name, self.module_name)
if self.attrs:
s += ':' + '.'.join(self.attrs)
if self.extras:
s += ' [%s]' % ','.join(self.extras)
return s
def __repr__(self):
return "EntryPoint.parse(%r)" % str(self)
def load(self, require=True, env=None, installer=None):
if require: self.require(env, installer)
entry = __import__(self.module_name, globals(),globals(), ['__name__'])
for attr in self.attrs:
try:
entry = getattr(entry,attr)
except AttributeError:
raise ImportError("%r has no %r attribute" % (entry,attr))
return entry
def require(self, env=None, installer=None):
if self.extras and not self.dist:
raise UnknownExtra("Can't require() without a distribution", self)
map(working_set.add,
working_set.resolve(self.dist.requires(self.extras),env,installer))
#@classmethod
def parse(cls, src, dist=None):
"""Parse a single entry point from string `src`
Entry point syntax follows the form::
name = some.module:some.attr [extra1,extra2]
The entry name and module name are required, but the ``:attrs`` and
``[extras]`` parts are optional
"""
try:
attrs = extras = ()
name,value = src.split('=',1)
if '[' in value:
value,extras = value.split('[',1)
req = Requirement.parse("x["+extras)
if req.specs: raise ValueError
extras = req.extras
if ':' in value:
value,attrs = value.split(':',1)
if not MODULE(attrs.rstrip()):
raise ValueError
attrs = attrs.rstrip().split('.')
except ValueError:
raise ValueError(
"EntryPoint must be in 'name=module:attrs [extras]' format",
src
)
else:
return cls(name.strip(), value.strip(), attrs, extras, dist)
parse = classmethod(parse)
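    # Illustrative parse of the syntax described above (a sketch; the module and
    # attribute names are made up):
    #
    #   ep = EntryPoint.parse("console = mypkg.cli:main [extra1,extra2]")
    #   # ep.name == "console", ep.module_name == "mypkg.cli",
    #   # ep.attrs == ("main",), ep.extras == ("extra1", "extra2")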
#@classmethod
def parse_group(cls, group, lines, dist=None):
"""Parse an entry point group"""
if not MODULE(group):
raise ValueError("Invalid group name", group)
this = {}
for line in yield_lines(lines):
ep = cls.parse(line, dist)
if ep.name in this:
raise ValueError("Duplicate entry point", group, ep.name)
this[ep.name]=ep
return this
parse_group = classmethod(parse_group)
#@classmethod
def parse_map(cls, data, dist=None):
"""Parse a map of entry point groups"""
if isinstance(data,dict):
data = data.items()
else:
data = split_sections(data)
maps = {}
for group, lines in data:
if group is None:
if not lines:
continue
raise ValueError("Entry points must be listed in groups")
group = group.strip()
if group in maps:
raise ValueError("Duplicate group name", group)
maps[group] = cls.parse_group(group, lines, dist)
return maps
parse_map = classmethod(parse_map)
class Distribution(object):
"""Wrap an actual or potential sys.path entry w/metadata"""
def __init__(self,
location=None, metadata=None, project_name=None, version=None,
py_version=PY_MAJOR, platform=None, precedence = EGG_DIST
):
self.project_name = safe_name(project_name or 'Unknown')
if version is not None:
self._version = safe_version(version)
self.py_version = py_version
self.platform = platform
self.location = location
self.precedence = precedence
self._provider = metadata or empty_provider
#@classmethod
def from_location(cls,location,basename,metadata=None,**kw):
project_name, version, py_version, platform = [None]*4
basename, ext = os.path.splitext(basename)
if ext.lower() in (".egg",".egg-info"):
match = EGG_NAME(basename)
if match:
project_name, version, py_version, platform = match.group(
'name','ver','pyver','plat'
)
return cls(
location, metadata, project_name=project_name, version=version,
py_version=py_version, platform=platform, **kw
)
from_location = classmethod(from_location)
hashcmp = property(
lambda self: (
getattr(self,'parsed_version',()), self.precedence, self.key,
-len(self.location or ''), self.location, self.py_version,
self.platform
)
)
def __hash__(self): return hash(self.hashcmp)
def __lt__(self, other):
return self.hashcmp < other.hashcmp
def __le__(self, other):
return self.hashcmp <= other.hashcmp
def __gt__(self, other):
return self.hashcmp > other.hashcmp
def __ge__(self, other):
return self.hashcmp >= other.hashcmp
def __eq__(self, other):
if not isinstance(other, self.__class__):
# It's not a Distribution, so they are not equal
return False
return self.hashcmp == other.hashcmp
def __ne__(self, other):
return not self == other
# These properties have to be lazy so that we don't have to load any
# metadata until/unless it's actually needed. (i.e., some distributions
# may not know their name or version without loading PKG-INFO)
#@property
def key(self):
try:
return self._key
except AttributeError:
self._key = key = self.project_name.lower()
return key
key = property(key)
#@property
def parsed_version(self):
try:
return self._parsed_version
except AttributeError:
self._parsed_version = pv = parse_version(self.version)
return pv
parsed_version = property(parsed_version)
#@property
def version(self):
try:
return self._version
except AttributeError:
for line in self._get_metadata('PKG-INFO'):
if line.lower().startswith('version:'):
self._version = safe_version(line.split(':',1)[1].strip())
return self._version
else:
raise ValueError(
"Missing 'Version:' header and/or PKG-INFO file", self
)
version = property(version)
#@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
dm = self.__dep_map = {None: []}
for name in 'requires.txt', 'depends.txt':
for extra,reqs in split_sections(self._get_metadata(name)):
if extra: extra = safe_extra(extra)
dm.setdefault(extra,[]).extend(parse_requirements(reqs))
return dm
_dep_map = property(_dep_map)
def requires(self,extras=()):
"""List of Requirements needed for this distro if `extras` are used"""
dm = self._dep_map
deps = []
deps.extend(dm.get(None,()))
for ext in extras:
try:
deps.extend(dm[safe_extra(ext)])
except KeyError:
raise UnknownExtra(
"%s has no such extra feature %r" % (self, ext)
)
return deps
def _get_metadata(self,name):
if self.has_metadata(name):
for line in self.get_metadata_lines(name):
yield line
def activate(self,path=None):
"""Ensure distribution is importable on `path` (default=sys.path)"""
if path is None: path = sys.path
self.insert_on(path)
if path is sys.path:
fixup_namespace_packages(self.location)
map(declare_namespace, self._get_metadata('namespace_packages.txt'))
def egg_name(self):
"""Return what this distribution's standard .egg filename should be"""
filename = "%s-%s-py%s" % (
to_filename(self.project_name), to_filename(self.version),
self.py_version or PY_MAJOR
)
if self.platform:
filename += '-'+self.platform
return filename
def __repr__(self):
if self.location:
return "%s (%s)" % (self,self.location)
else:
return str(self)
def __str__(self):
try: version = getattr(self,'version',None)
except ValueError: version = None
version = version or "[unknown version]"
return "%s %s" % (self.project_name,version)
def __getattr__(self,attr):
"""Delegate all unrecognized public attributes to .metadata provider"""
if attr.startswith('_'):
raise AttributeError,attr
return getattr(self._provider, attr)
#@classmethod
def from_filename(cls,filename,metadata=None, **kw):
return cls.from_location(
_normalize_cached(filename), os.path.basename(filename), metadata,
**kw
)
from_filename = classmethod(from_filename)
def as_requirement(self):
"""Return a ``Requirement`` that matches this distribution exactly"""
return Requirement.parse('%s==%s' % (self.project_name, self.version))
def load_entry_point(self, group, name):
"""Return the `name` entry point of `group` or raise ImportError"""
ep = self.get_entry_info(group,name)
if ep is None:
raise ImportError("Entry point %r not found" % ((group,name),))
return ep.load()
def get_entry_map(self, group=None):
"""Return the entry point map for `group`, or the full entry map"""
try:
ep_map = self._ep_map
except AttributeError:
ep_map = self._ep_map = EntryPoint.parse_map(
self._get_metadata('entry_points.txt'), self
)
if group is not None:
return ep_map.get(group,{})
return ep_map
def get_entry_info(self, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return self.get_entry_map(group).get(name)
def insert_on(self, path, loc = None):
"""Insert self.location in path before its nearest parent directory"""
loc = loc or self.location
if self.project_name == 'setuptools':
try:
version = self.version
except ValueError:
version = ''
if not loc:
return
if path is sys.path:
self.check_version_conflict()
nloc = _normalize_cached(loc)
bdir = os.path.dirname(nloc)
npath= map(_normalize_cached, path)
bp = None
for p, item in enumerate(npath):
if item==nloc:
break
elif item==bdir and self.precedence==EGG_DIST:
# if it's an .egg, give it precedence over its directory
path.insert(p, loc)
npath.insert(p, nloc)
break
else:
path.append(loc)
return
# p is the spot where we found or inserted loc; now remove duplicates
while 1:
try:
np = npath.index(nloc, p+1)
except ValueError:
break
else:
del npath[np], path[np]
p = np # ha!
return
def check_version_conflict(self):
if self.key=='distribute':
return # ignore the inevitable setuptools self-conflicts :(
nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
loc = normalize_path(self.location)
for modname in self._get_metadata('top_level.txt'):
if (modname not in sys.modules or modname in nsp
or modname in _namespace_packages
):
continue
if modname in ('pkg_resources', 'setuptools', 'site'):
continue
fn = getattr(sys.modules[modname], '__file__', None)
if fn and normalize_path(fn).startswith(loc):
continue
issue_warning(
"Module %s was already imported from %s, but %s is being added"
" to sys.path" % (modname, fn, self.location),
)
def has_version(self):
try:
self.version
except ValueError:
issue_warning("Unbuilt egg for "+repr(self))
return False
return True
def clone(self,**kw):
"""Copy this distribution, substituting in any changed keyword args"""
for attr in (
'project_name', 'version', 'py_version', 'platform', 'location',
'precedence'
):
kw.setdefault(attr, getattr(self,attr,None))
kw.setdefault('metadata', self._provider)
return self.__class__(**kw)
#@property
def extras(self):
return [dep for dep in self._dep_map if dep]
extras = property(extras)
def issue_warning(*args,**kw):
level = 1
g = globals()
try:
# find the first stack frame that is *not* code in
# the pkg_resources module, to use for the warning
while sys._getframe(level).f_globals is g:
level += 1
except ValueError:
pass
from warnings import warn
warn(stacklevel = level+1, *args, **kw)
def parse_requirements(strs):
"""Yield ``Requirement`` objects for each specification in `strs`
`strs` must be an instance of ``basestring``, or a (possibly-nested)
iterable thereof.
"""
# create a steppable iterator, so we can handle \-continuations
lines = iter(yield_lines(strs))
def scan_list(ITEM,TERMINATOR,line,p,groups,item_name):
items = []
while not TERMINATOR(line,p):
if CONTINUE(line,p):
try:
line = lines.next(); p = 0
except StopIteration:
raise ValueError(
"\\ must not appear on the last nonblank line"
)
match = ITEM(line,p)
if not match:
raise ValueError("Expected "+item_name+" in",line,"at",line[p:])
items.append(match.group(*groups))
p = match.end()
match = COMMA(line,p)
if match:
p = match.end() # skip the comma
elif not TERMINATOR(line,p):
raise ValueError(
"Expected ',' or end-of-list in",line,"at",line[p:]
)
match = TERMINATOR(line,p)
if match: p = match.end() # skip the terminator, if any
return line, p, items
for line in lines:
match = DISTRO(line)
if not match:
raise ValueError("Missing distribution spec", line)
project_name = match.group(1)
p = match.end()
extras = []
match = OBRACKET(line,p)
if match:
p = match.end()
line, p, extras = scan_list(
DISTRO, CBRACKET, line, p, (1,), "'extra' name"
)
line, p, specs = scan_list(VERSION,LINE_END,line,p,(1,2),"version spec")
specs = [(op,safe_version(val)) for op,val in specs]
yield Requirement(project_name, specs, extras)
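# Illustrative sketch of the accepted requirement syntax (the project names
# below are made up):
#
#   for req in parse_requirements("FooPkg[bar]>=1.2,<2.0\nBarPkg==0.5"):
#       print str(req)   # -> "FooPkg[bar]>=1.2,<2.0", then "BarPkg==0.5"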
def _sort_dists(dists):
tmp = [(dist.hashcmp,dist) for dist in dists]
tmp.sort()
dists[::-1] = [d for hc,d in tmp]
class Requirement:
def __init__(self, project_name, specs, extras):
"""DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
self.unsafe_name, project_name = project_name, safe_name(project_name)
self.project_name, self.key = project_name, project_name.lower()
index = [(parse_version(v),state_machine[op],op,v) for op,v in specs]
index.sort()
self.specs = [(op,ver) for parsed,trans,op,ver in index]
self.index, self.extras = index, tuple(map(safe_extra,extras))
self.hashCmp = (
self.key, tuple([(op,parsed) for parsed,trans,op,ver in index]),
frozenset(self.extras)
)
self.__hash = hash(self.hashCmp)
def __str__(self):
specs = ','.join([''.join(s) for s in self.specs])
extras = ','.join(self.extras)
if extras: extras = '[%s]' % extras
return '%s%s%s' % (self.project_name, extras, specs)
def __eq__(self,other):
return isinstance(other,Requirement) and self.hashCmp==other.hashCmp
def __contains__(self,item):
if isinstance(item,Distribution):
if item.key <> self.key: return False
if self.index: item = item.parsed_version # only get if we need it
elif isinstance(item,basestring):
item = parse_version(item)
last = None
compare = lambda a, b: (a > b) - (a < b) # -1, 0, 1
for parsed,trans,op,ver in self.index:
action = trans[compare(item,parsed)] # Indexing: 0, 1, -1
if action=='F': return False
elif action=='T': return True
elif action=='+': last = True
elif action=='-' or last is None: last = False
if last is None: last = True # no rules encountered
return last
def __hash__(self):
return self.__hash
def __repr__(self): return "Requirement.parse(%r)" % str(self)
#@staticmethod
def parse(s, replacement=True):
reqs = list(parse_requirements(s))
if reqs:
if len(reqs) == 1:
founded_req = reqs[0]
# if asked for setuptools distribution
# and if distribute is installed, we want to give
# distribute instead
if _override_setuptools(founded_req) and replacement:
distribute = list(parse_requirements('distribute'))
if len(distribute) == 1:
return distribute[0]
return founded_req
else:
return founded_req
raise ValueError("Expected only one requirement", s)
raise ValueError("No requirements found", s)
parse = staticmethod(parse)
state_machine = {
# =><
'<' : '--T',
'<=': 'T-T',
'>' : 'F+F',
'>=': 'T+F',
'==': 'T..',
'!=': 'F++',
}
def _override_setuptools(req):
"""Return True when distribute wants to override a setuptools dependency.
We want to override when the requirement is setuptools and the version is
a variant of 0.6.
"""
if req.project_name == 'setuptools':
if not len(req.specs):
# Just setuptools: ok
return True
for comparator, version in req.specs:
if comparator in ['==', '>=', '>']:
if '0.7' in version:
# We want some setuptools not from the 0.6 series.
return False
return True
return False
def _get_mro(cls):
"""Get an mro for a type or classic class"""
if not isinstance(cls,type):
class cls(cls,object): pass
return cls.__mro__[1:]
return cls.__mro__
def _find_adapter(registry, ob):
"""Return an adapter factory for `ob` from `registry`"""
for t in _get_mro(getattr(ob, '__class__', type(ob))):
if t in registry:
return registry[t]
def ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
def split_sections(s):
"""Split a string or iterable thereof into (section,content) pairs
Each ``section`` is a stripped version of the section header ("[section]")
and each ``content`` is a list of stripped lines excluding blank lines and
comment-only lines. If there are any such lines before the first section
header, they're returned in a first ``section`` of ``None``.
"""
section = None
content = []
for line in yield_lines(s):
if line.startswith("["):
if line.endswith("]"):
if section or content:
yield section, content
section = line[1:-1].strip()
content = []
else:
raise ValueError("Invalid section heading", line)
else:
content.append(line)
# wrap up last segment
yield section, content
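# For instance (a sketch; the section name and entry line are made up):
#
#   list(split_sections("[console_scripts]\nfoo = mypkg:main"))
#   # -> [('console_scripts', ['foo = mypkg:main'])]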
def _mkstemp(*args,**kw):
from tempfile import mkstemp
old_open = os.open
try:
os.open = os_open # temporarily bypass sandboxing
return mkstemp(*args,**kw)
finally:
os.open = old_open # and then put it back
# Set up global resource manager
_manager = ResourceManager()
def _initialize(g):
for name in dir(_manager):
if not name.startswith('_'):
g[name] = getattr(_manager, name)
_initialize(globals())
# Prepare the master working set and make the ``require()`` API available
working_set = WorkingSet()
try:
# Does the main program list any requirements?
from __main__ import __requires__
except ImportError:
pass # No: just use the default working set based on sys.path
else:
# Yes: ensure the requirements are met, by prefixing sys.path if necessary
try:
working_set.require(__requires__)
except VersionConflict: # try it without defaults already on sys.path
working_set = WorkingSet([]) # by starting with an empty path
for dist in working_set.resolve(
parse_requirements(__requires__), Environment()
):
working_set.add(dist)
for entry in sys.path: # add any missing entries from sys.path
if entry not in working_set.entries:
working_set.add_entry(entry)
sys.path[:] = working_set.entries # then copy back to sys.path
require = working_set.require
iter_entry_points = working_set.iter_entry_points
add_activation_listener = working_set.subscribe
run_script = working_set.run_script
run_main = run_script # backward compatibility
# Activate all distributions already on sys.path, and ensure that
# all distributions added to the working set in the future (e.g. by
# calling ``require()``) will get activated as well.
add_activation_listener(lambda dist: dist.activate())
working_set.entries=[]; map(working_set.add_entry,sys.path) # match order
| gpl-3.0 | 6,505,110,880,996,614,000 | 31.916199 | 94 | 0.602159 | false |
ethz-asl/segmatch | segmappy/segmappy/models/model_groups_tf.py | 1 | 6314 | import tensorflow as tf
# define the cnn model
def init_model(input_shape, n_classes):
with tf.name_scope("InputScope") as scope:
cnn_input = tf.placeholder(
dtype=tf.float32, shape=(None,) + input_shape + (1,), name="input"
)
# base convolutional layers
y_true = tf.placeholder(dtype=tf.float32, shape=(None, n_classes), name="y_true")
scales = tf.placeholder(dtype=tf.float32, shape=(None, 3), name="scales")
training = tf.placeholder_with_default(
tf.constant(False, dtype=tf.bool), shape=(), name="training"
)
conv1 = tf.layers.conv3d(
inputs=cnn_input,
filters=32,
kernel_size=(3, 3, 3),
padding="same",
activation=tf.nn.relu,
use_bias=True,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name="conv1",
)
pool1 = tf.layers.max_pooling3d(
inputs=conv1, pool_size=(2, 2, 2), strides=(2, 2, 2), name="pool1"
)
conv2 = tf.layers.conv3d(
inputs=pool1,
filters=64,
kernel_size=(3, 3, 3),
padding="same",
activation=tf.nn.relu,
use_bias=True,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name="conv3",
)
pool2 = tf.layers.max_pooling3d(
inputs=conv2, pool_size=(2, 2, 2), strides=(2, 2, 2), name="pool2"
)
conv3 = tf.layers.conv3d(
inputs=pool2,
filters=64,
kernel_size=(3, 3, 3),
padding="same",
activation=tf.nn.relu,
use_bias=True,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name="conv5",
)
flatten = tf.contrib.layers.flatten(inputs=conv3)
flatten = tf.concat([flatten, scales], axis=1, name="flatten")
# classification network
dense1 = tf.layers.dense(
inputs=flatten,
units=512,
activation=tf.nn.relu,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
use_bias=True,
name="dense1",
)
bn_dense1 = tf.layers.batch_normalization(
dense1, training=training, name="bn_dense1"
)
dropout_dense1 = tf.layers.dropout(
bn_dense1, rate=0.5, training=training, name="dropout_dense1"
)
descriptor = tf.layers.dense(
inputs=dropout_dense1,
units=64,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
activation=tf.nn.relu,
use_bias=True,
name="descriptor",
)
bn_descriptor = tf.layers.batch_normalization(
descriptor, training=training, name="bn_descriptor"
)
with tf.name_scope("OutputScope") as scope:
tf.add(bn_descriptor, 0, name="descriptor_bn_read")
tf.add(descriptor, 0, name="descriptor_read")
dropout_descriptor = tf.layers.dropout(
bn_descriptor, rate=0.35, training=training, name="dropout_descriptor"
)
y_pred = tf.layers.dense(
inputs=dropout_descriptor,
units=n_classes,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
activation=None,
use_bias=True,
name="classes",
)
loss_c = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits_v2(logits=y_pred, labels=y_true),
name="loss_c",
)
# reconstruction network
dec_dense1 = tf.layers.dense(
inputs=descriptor,
units=8192,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
activation=tf.nn.relu,
use_bias=True,
name="dec_dense1",
)
reshape = tf.reshape(dec_dense1, (tf.shape(cnn_input)[0], 8, 8, 4, 32))
dec_conv1 = tf.layers.conv3d_transpose(
inputs=reshape,
filters=32,
kernel_size=(3, 3, 3),
strides=(2, 2, 2),
padding="same",
use_bias=False,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
activation=tf.nn.relu,
name="dec_conv1",
)
dec_conv2 = tf.layers.conv3d_transpose(
inputs=dec_conv1,
filters=32,
kernel_size=(3, 3, 3),
strides=(2, 2, 2),
padding="same",
use_bias=False,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
activation=tf.nn.relu,
name="dec_conv2",
)
dec_reshape = tf.layers.conv3d_transpose(
inputs=dec_conv2,
filters=1,
kernel_size=(3, 3, 3),
padding="same",
use_bias=False,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
activation=tf.nn.sigmoid,
name="dec_reshape",
)
reconstruction = dec_reshape
with tf.name_scope("ReconstructionScopeAE") as scope:
tf.add(reconstruction, 0, name="ae_reconstruction_read")
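    # Reconstruction loss: voxel-wise weighted binary cross-entropy between the
    # input grid and its reconstruction. Missed occupied voxels (false negatives)
    # are weighted by FN_TO_FP_WEIGHT, wrongly filled empty voxels (false
    # positives) by 1 - FN_TO_FP_WEIGHT; the 1e-10 terms guard against log(0).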
FN_TO_FP_WEIGHT = 0.9
loss_r = -tf.reduce_mean(
FN_TO_FP_WEIGHT * cnn_input * tf.log(reconstruction + 1e-10)
+ (1 - FN_TO_FP_WEIGHT) * (1 - cnn_input) * tf.log(1 - reconstruction + 1e-10)
)
tf.identity(loss_r, "loss_r")
# training
LOSS_R_WEIGHT = 200
LOSS_C_WEIGHT = 1
loss = tf.add(LOSS_C_WEIGHT * loss_c, LOSS_R_WEIGHT * loss_r, name="loss")
global_step = tf.Variable(0, trainable=False, name="global_step")
update_step = tf.assign(
global_step, tf.add(global_step, tf.constant(1)), name="update_step"
)
optimizer = tf.train.AdamOptimizer(learning_rate=0.0001)
# add batch normalization updates to the training operation
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(loss, name="train_op")
# statistics
y_prob = tf.nn.softmax(y_pred, name="y_prob")
correct_pred = tf.equal(tf.argmax(y_pred, 1), tf.argmax(y_true, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name="accuracy")
roc_auc = tf.placeholder(dtype=tf.float32, shape=(), name="roc_auc")
with tf.name_scope("summary"):
tf.summary.scalar("loss", loss, collections=["summary_batch"])
tf.summary.scalar("loss_c", loss_c, collections=["summary_batch"])
tf.summary.scalar("loss_r", loss_r, collections=["summary_batch"])
tf.summary.scalar("accuracy", accuracy, collections=["summary_batch"])
tf.summary.scalar("roc_auc", roc_auc, collections=["summary_epoch"])
| bsd-3-clause | -7,130,928,450,581,762,000 | 29.8 | 86 | 0.606589 | false |
ivanlyon/exercises | kattis/k_rockscissorspaper.py | 1 | 3696 | '''
Deliver the state of a Rock-Paper-Scissors variant of Conway's Game of Life
Status: Accepted
'''
###############################################################################
class RockPaperScissorsGrid():
'''Conway life-style grid'''
def __init__(self, grid):
self._grid = grid
self._rows = len(grid)
self._cols = len(grid[0])
def neighbors(self, row, col):
"List valid neighbors for grid location"
results = []
if row > 0:
results.append(self._grid[row - 1][col])
if col > 0:
results.append(self._grid[row][col - 1])
if row < self._rows - 1:
results.append(self._grid[row + 1][col])
if col < self._cols - 1:
results.append(self._grid[row][col + 1])
return ''.join(results)
def compete(self, days):
"Perform all the changes resulting from competition over some days"
trumped_by = {'S': 'R', 'P': 'S', 'R': 'P'}
newline = [''] * self._cols
for _ in range(days):
newgrid = [''] * self._rows
for row, text in enumerate(self._grid):
for col, glyph in enumerate(text):
if trumped_by[glyph] in self.neighbors(row, col):
newline[col] = trumped_by[glyph]
else:
newline[col] = glyph
newgrid[row] = ''.join(newline)
self._grid = newgrid
def __repr__(self):
return '\n'.join(self._grid)
###############################################################################
def main():
"""Read input and print output"""
for test_case in range(int(input())):
if test_case:
print()
rows, _, days = [int(i) for i in input().split()]
grid = []
for _ in range(rows):
grid.append(input())
rpsg = RockPaperScissorsGrid(grid)
rpsg.compete(days)
print(rpsg)
###############################################################################
def demo():
'''RPS animation over random grid'''
import matplotlib.pyplot as plt
from matplotlib import animation
from random import choice
# Test case data
rows, columns = 20, 20
matrix = []
for _r in range(rows):
matrix.append(''.join([choice('RRRRRPS') for _ in range(columns)]))
days = 24
# Console display
rpsg = RockPaperScissorsGrid(matrix)
print("Random input:")
print(rpsg)
print("\nDay", days, "Output:")
rpsg.compete(days)
print(rpsg)
_fig, axes = plt.subplots()
plt.suptitle('Rock-Paper-Scissors Demonstration (Random Grid)')
axes.axis('off')
rpsg2 = None
bg_color = {'R': '#FFBBBB', 'P': '#BBFFBB', 'S': '#BBBBFF'}
def animate(i):
'''Show Rock Paper Scissors grid per day'''
nonlocal rpsg2
if i:
rpsg2.compete(1)
else:
rpsg2 = RockPaperScissorsGrid(matrix)
table = plt.table(cellText=rpsg2._grid, loc='center', cellLoc='center')
axes.title.set_text('Day {:d}'.format(i))
for the_cell in table.get_children():
the_cell.set_facecolor(bg_color[the_cell.get_text().get_text()])
_ = animation.FuncAnimation(_fig, animate, frames=days+1, interval=500, blit=False)
plt.show()
###############################################################################
if __name__ == '__main__':
import argparse
PARSER = argparse.ArgumentParser()
PARSER.add_argument("--demo", help="demonstration of rock-paper-scissors", action="store_true")
ARGS = PARSER.parse_args()
if ARGS.demo:
demo()
else:
main()
| mit | 4,729,091,299,634,914,000 | 29.295082 | 99 | 0.502706 | false |
CollabQ/CollabQ | invite/views.py | 1 | 2741 | # Copyright 2010 http://www.collabq.com
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django import http
from django import template
from django.conf import settings
from django.template import loader
from common import api
from common import display
from common import util
from common import views as common_views
def invite_email(request, code):
  """User has received the invite email, and has followed the link to accept
  or refuse it."""
if request.user:
handled = common_views.handle_view_action(
request,
{'invite_accept': request.user.url('/overview'),
'invite_reject': request.user.url('/overview')
}
)
if handled:
return handled
# Retrieve the invite
invite = api.invite_get(api.ROOT, code)
from_actor = invite.from_actor
# Translate the from_actor into a display name
from_actor_ref = api.actor_get(api.ROOT, from_actor)
view = from_actor_ref
if not from_actor_ref:
# Corner case: from_actor was deleted since the invite was sent.
# In this case, do we want to consider the invitation invalid?
# (probably we do, because it's more likely that it was spam)
return util.RedirectError("That invite is no longer valid")
# We use api.ROOT in the next set of functions because the
# invite is giving possibly private access to the user
inbox = api.inbox_get_actor_contacts(api.ROOT,
view.nick,
limit=5)
entries = api.entry_get_entries(api.ROOT, inbox)
stream_keys = [e.stream for e in entries]
streams = api.stream_get_streams(api.ROOT, stream_keys)
actor_nicks = ([view.nick] +
[s.owner for s in streams.values() if s] +
[e.owner for e in entries] +
[e.actor for e in entries])
actors = api.actor_get_actors(api.ROOT, actor_nicks)
streams = display.prep_stream_dict(streams, actors)
entries = display.prep_entry_list(entries, streams, actors)
sidebar_green_top = True
c = template.RequestContext(request, locals())
t = loader.get_template('invite/templates/email.html')
return http.HttpResponse(t.render(c)) | apache-2.0 | -7,093,654,507,555,927,000 | 33.708861 | 79 | 0.684422 | false |
kayhayen/Nuitka | nuitka/nodes/ReturnNodes.py | 1 | 6750 | # Copyright 2021, Kay Hayen, mailto:[email protected]
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Return node
This one exits functions. The only other exit is the default exit of a function, which returns a 'None' value when no return statement is executed.
"""
from abc import abstractmethod
from .NodeBases import StatementBase, StatementChildHavingBase
class StatementReturn(StatementChildHavingBase):
kind = "STATEMENT_RETURN"
named_child = "expression"
nice_child = "return value"
def __init__(self, expression, source_ref):
assert expression
StatementChildHavingBase.__init__(self, value=expression, source_ref=source_ref)
@staticmethod
def mayReturn():
return True
@staticmethod
def isStatementAborting():
return True
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseException(exception_type)
def computeStatement(self, trace_collection):
expression = trace_collection.onExpression(self.subnode_expression)
if expression.mayRaiseException(BaseException):
trace_collection.onExceptionRaiseExit(BaseException)
if expression.willRaiseException(BaseException):
from .NodeMakingHelpers import (
makeStatementExpressionOnlyReplacementNode,
)
result = makeStatementExpressionOnlyReplacementNode(
expression=expression, node=self
)
return (
result,
"new_raise",
"""\
Return statement raises in returned expression, removed return.""",
)
trace_collection.onFunctionReturn()
if expression.isExpressionConstantRef():
result = makeStatementReturnConstant(
constant=expression.getCompileTimeConstant(), source_ref=self.source_ref
)
del self.parent
return (
result,
"new_statements",
"""\
Return value is constant.""",
)
return self, None, None
class StatementReturnConstantBase(StatementBase):
__slots__ = ()
def __init__(self, source_ref):
StatementBase.__init__(self, source_ref=source_ref)
@staticmethod
def isStatementReturn():
return True
@staticmethod
def isStatementReturnConstant():
return True
@staticmethod
def isStatementAborting():
return True
@staticmethod
def mayReturn():
return True
@staticmethod
def mayRaiseException(exception_type):
return False
def computeStatement(self, trace_collection):
trace_collection.onFunctionReturn()
return self, None, None
@abstractmethod
def getConstant(self):
"""The returned constant value."""
@staticmethod
def getStatementNiceName():
return "return statement"
class StatementReturnNone(StatementReturnConstantBase):
kind = "STATEMENT_RETURN_NONE"
__slots__ = ()
def __init__(self, source_ref):
StatementReturnConstantBase.__init__(self, source_ref=source_ref)
def finalize(self):
del self.parent
def getConstant(self):
return None
class StatementReturnFalse(StatementReturnConstantBase):
kind = "STATEMENT_RETURN_FALSE"
__slots__ = ()
def __init__(self, source_ref):
StatementReturnConstantBase.__init__(self, source_ref=source_ref)
def finalize(self):
del self.parent
def getConstant(self):
return False
class StatementReturnTrue(StatementReturnConstantBase):
kind = "STATEMENT_RETURN_TRUE"
__slots__ = ()
def __init__(self, source_ref):
StatementReturnConstantBase.__init__(self, source_ref=source_ref)
def finalize(self):
del self.parent
def getConstant(self):
return True
class StatementReturnConstant(StatementReturnConstantBase):
kind = "STATEMENT_RETURN_CONSTANT"
__slots__ = ("constant",)
def __init__(self, constant, source_ref):
StatementReturnConstantBase.__init__(self, source_ref=source_ref)
self.constant = constant
def finalize(self):
del self.parent
del self.constant
def getConstant(self):
return self.constant
def getDetails(self):
return {"constant": self.constant}
class StatementReturnReturnedValue(StatementBase):
kind = "STATEMENT_RETURN_RETURNED_VALUE"
__slots__ = ()
def __init__(self, source_ref):
StatementBase.__init__(self, source_ref=source_ref)
def finalize(self):
del self.parent
@staticmethod
def isStatementReturnReturnedValue():
return True
@staticmethod
def isStatementReturn():
return True
@staticmethod
def isStatementAborting():
return True
@staticmethod
def mayReturn():
return True
@staticmethod
def mayRaiseException(exception_type):
return False
def computeStatement(self, trace_collection):
trace_collection.onFunctionReturn()
return self, None, None
@staticmethod
def getStatementNiceName():
return "rereturn statement"
def makeStatementReturnConstant(constant, source_ref):
if constant is None:
return StatementReturnNone(source_ref=source_ref)
elif constant is True:
return StatementReturnTrue(source_ref=source_ref)
elif constant is False:
return StatementReturnFalse(source_ref=source_ref)
else:
return StatementReturnConstant(constant=constant, source_ref=source_ref)
def makeStatementReturn(expression, source_ref):
"""Create the best return statement variant."""
if expression is None:
return StatementReturnNone(source_ref=source_ref)
elif expression.isCompileTimeConstant():
return makeStatementReturnConstant(
constant=expression.getCompileTimeConstant(), source_ref=source_ref
)
else:
return StatementReturn(expression=expression, source_ref=source_ref)
| apache-2.0 | 6,049,847,874,321,408,000 | 25.162791 | 119 | 0.661185 | false |
cblop/tropic | instal-linux/instal/firstprinciples/cliinvocation/TestInstalSolveCLIInvocation.py | 1 | 2757 | from instal.firstprinciples.TestEngine import InstalTestCase, InstalCompareJSONTestCase
from instal.instalexceptions import InstalTestNotImplemented
import subprocess
import os
from instal.instalsolve import instal_solve_keyword
from instal.instalutility import temporary_text_file
import tempfile
class SolveCLIInvocation(InstalTestCase):
def test_solve_cli_runs(self):
return_code = subprocess.call(["../../instalsolve.py", "-i", "cliinvocation/inst.ial", "-d",
"cliinvocation/domain.idc", "-q", "cliinvocation/query.iaq"], stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)
self.assertTrue(return_code == 0)
def test_solve_cli_json_out(self):
out_txt = temporary_text_file("", ".json")
return_code = subprocess.call(["../../instalsolve.py", "-i", "cliinvocation/inst.ial", "-d", "cliinvocation/domain.idc",
"-q", "cliinvocation/query.iaq", "-j", out_txt.name], stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)
self.assertTrue(return_code == 0)
self.assertTrue(len(out_txt.read()) > 0)
def test_solve_cli_json_dir(self):
out_dir = tempfile.TemporaryDirectory()
return_code = subprocess.call(["../../instalsolve.py", "-i", "cliinvocation/inst.ial", "-d", "cliinvocation/domain.idc",
"-q", "cliinvocation/query.iaq", "-j", out_dir.name], stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)
self.assertTrue(return_code == 0)
contents = os.listdir(out_dir.name)
self.assertTrue(len(contents) == 1)
def test_solve_invalid_ial(self):
return_code = subprocess.call(["../../instalsolve.py", "-i", "cliinvocation/invalid.ial", "-d",
"cliinvocation/domain.idc", "-q", "cliinvocation/query.iaq"], stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)
self.assertTrue(return_code != 0)
def test_solve_cli_compare_to_keyword(self):
out_1_json = temporary_text_file("", ".json")
out_2_json = temporary_text_file("", ".json")
return_code = subprocess.call(["../../instalsolve.py", "-i", "cliinvocation/inst.ial", "-d", "cliinvocation/domain.idc",
"-q", "cliinvocation/query.iaq", "-j", out_1_json.name], stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)
self.assertTrue(return_code == 0)
instal_solve_keyword(input_files=["cliinvocation/inst.ial"], domain_files=[
"cliinvocation/domain.idc"], query="cliinvocation/query.iaq", json_file=out_2_json.name)
test_runner = InstalCompareJSONTestCase(out_1_json, out_2_json)
test_runner.run_test()
| epl-1.0 | -1,831,571,274,394,504,700 | 53.058824 | 154 | 0.630758 | false |
pdinges/python-schoof | support/profiling.py | 1 | 11632 | # -*- coding: utf-8 -*-
# Copyright (c) 2010--2012 Peter Dinges <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Class decorators for more expressive call profiles.
The Python profiler in the @c cProfile module registers methods as elements
of the class that contains their source code. Thus, @c cProfile merges calling
statistics of the defining class and subclasses that do not re-implement the
methods (such as template class specializations). The decorator
@c local_method_names() addresses this problem. Also, in case of nested template
classes (say, elliptic_curves.polynomials.naive.CurvePolynomials),
the template parameters bloat the type name; the decorator @c profiling_name()
alleviates the issue.
@package support.profiling
@author Peter Dinges <[email protected]>
"""
def profiling_name(name):
"""
A class decorator that sets the @p name that will show up in profiles
generated with the @c cProfile module.
Usage example:
@code
@local_method_names
@profiling_name( "GF<{_size}>" )
class FiniteField( Field, metaclass=template( "_size" ) ):
...
@endcode
@param name A string that contains the class name (which is usually
shorter than the original name). For template classes, the
string may contain format-string variables for the
parameters.
@note The function requires the @c local_method_name() decorator to show
an effect.
@see The sources of rings.quotients.naive.QuotientRing for a usage
example and support.types.template() for information about template
classes.
"""
def class_wrapper(cls):
setattr(cls, "__profiling_name__", name)
return cls
return class_wrapper
from .types import is_incomplete
def local_method_names(cls):
"""
A class decorator that makes the function names used by the @c cProfile
module local to the class of use (rather than the class of definition).
The @c cProfile module uses the name of a function's code object as the
profile name. Therefore, calls to methods in subclasses that do not
re-implement the method are counted as calls to the parent class. In
template classes, this makes the call profile too coarse.
Use profiling_name() to get shorter class names.
@note The decorator copies the code objects; it "flattens" the class.
Therefore, re-assigning methods will annihilate the decorator
effects for the method.
@see The sources of rings.quotients.naive.QuotientRing for a usage
example and support.types.template() for information about template
classes.
"""
if getattr( cls.__class__, "__operation_renaming__", False ):
return cls
original_new = cls.__class__.__new__
def prefixing_new(meta_class, class_name, bases, class_dict, **kw_arguments):
class_object = original_new( meta_class, class_name, bases, class_dict, **kw_arguments )
if is_incomplete( class_object ):
return class_object
if "__operation_renaming__" in class_object.__dict__:
return class_object
profiling_name = __profiling_str( class_object )
__localize_method_names(
class_object.__class__,
[ "__new__", "__call__" ],
"{cls}::<meta>{{meth}}".format( cls = profiling_name )
)
__flatten_methods( class_object )
__localize_method_names(
class_object,
__method_names( class_object ),
"{cls}::{{meth}}".format( cls = profiling_name )
)
setattr( class_object, "__operation_renaming__", True )
return class_object
cls.__class__.__new__ = prefixing_new
setattr( cls.__class__, "__operation_renaming__", True )
return cls
def rename_function( function, name, filename=None, firstlineno=-1 ):
"""
Rename a function and its associated code object.
This is handy when using the @c profile and @c cProfile modules:
both retrieve the function names from the code object
(the @c co_name attribute); @c __name__ is ignored.
"""
# Renaming a function in the profile thus requires generating a new
# code object. As CodeType.__doc__ notes, this is not for the
# faint of heart.
# Modify the unbound function object of methods
if hasattr( function, "__func__" ):
function = function.__func__
try:
code = function.__code__
except AttributeError:
message = "expected '{func}' to have an associated code object ('__code__' attribute)"
raise ValueError( message.format( function ) )
# Copy old values if unspecified
if filename is None:
filename = code.co_filename
if firstlineno == -1:
firstlineno = code.co_firstlineno
renamed_code = types.CodeType(
code.co_argcount,
code.co_kwonlyargcount,
code.co_nlocals,
code.co_stacksize,
code.co_flags,
code.co_code,
code.co_consts,
code.co_names,
code.co_varnames,
str( filename ),
str( name ),
int( firstlineno ),
code.co_lnotab,
code.co_freevars,
code.co_cellvars
)
function.__name__ = str( name )
function.__code__ = renamed_code
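# A minimal usage sketch (the function and the label below are made up):
#
#   def helper( x ):
#       return x * x
#   rename_function( helper, "MyClass::helper" )
#   assert helper.__code__.co_name == "MyClass::helper"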
def __method_names( class_object ):
"""
Return a dictionary with the methods defined in the class (not counting
inherited methods).
This function is not intended for direct use.
"""
return [ key for key, value in class_object.__dict__.items() \
if type( value ) in function_types ]
def __localize_method_names( class_object, method_names, format_string ):
"""
Make all inherited (and not re-implemented) methods local to the class
and rename them accordingly. That way, the @c cProfile module
distinguishes between calls the original and the inherited implementation.
This function is not intended for direct use.
"""
for method_name in method_names:
method = __get_dict_item( class_object, method_name )
method_copy = __copy_function( method )
new_name = format_string.format( meth = method_name )
rename_function( method_copy, new_name )
setattr( class_object, method_name, method_copy )
def __flatten_methods( class_object ):
"""
Copy all inherited (and not re-implemented) methods to the local
class dictionary.
This function is not intended for direct use.
"""
for attribute_name in dir( class_object ):
# Skip local attributes
if attribute_name in class_object.__dict__:
continue
# Skip non-method attributes (for example class variables)
method = __get_dict_item( class_object, attribute_name )
if type( method ) not in function_types:
continue
method_copy = __copy_function( __unwrap( method ) )
setattr(class_object, attribute_name, method_copy )
def __get_dict_item( class_object, key ):
"""
Return the class dictionary entry with key @p key; traverse the parent
classes until a matching entry was found. Otherwise raise an
@c AttributeError.
This function is not intended for direct use.
"""
for cls in class_object.__mro__:
if key in cls.__dict__:
return cls.__dict__[ key ]
message = "object '{name}' has no attribute '{key}'"
raise AttributeError(
message.format( name = class_object.__name__, key = key )
)
def __copy_function( function ):
"""
Create a completely independent copy of @p function.
The function also copies the code object of @p function.
This function is not intended for direct use.
"""
if type( function ) in [ staticmethod, classmethod ]:
return type( function )( __copy_function( function.__func__ ) )
if type( function ) not in [ types.FunctionType, types.MethodType ]:
message = "expected function or method type (got {0})"
raise ValueError( message.format( function ) )
if type( function ) is types.MethodType:
function = function.__func__
code = function.__code__
code_copy = types.CodeType(
code.co_argcount,
code.co_kwonlyargcount,
code.co_nlocals,
code.co_stacksize,
code.co_flags,
code.co_code,
code.co_consts,
code.co_names,
code.co_varnames,
code.co_filename,
code.co_name,
code.co_firstlineno,
code.co_lnotab,
code.co_freevars,
code.co_cellvars
)
function_copy = types.FunctionType(
code_copy,
function.__globals__,
function.__name__,
function.__defaults__,
function.__closure__
)
# Re-bind methods to their instance
if type( function ) is types.MethodType:
return types.MethodType( function_copy, function.__self__)
return function_copy
def __unwrap( method ):
"""
Return the original function inside a method wrapper.
This function is not intended for direct use.
@see support.operators.operand_casting()
"""
if hasattr( method, "__wrapped_method__" ):
return __unwrap( getattr( method, "__wrapped_method__" ) )
return method
def __profiling_str(obj):
"""
Return the formatted name of @p obj as set by the @c profiling_name()
decorator. Fall back to __str__() if necessary.
This function is not intended for direct use.
"""
# FIXME: Endless recursion for cyclic dependencies.
if isinstance(obj, type) and hasattr(obj, "__profiling_name__"):
if hasattr( obj.__class__, "__parameter_map__" ):
args = [ (k, __profiling_str(v)) for k,v in obj.__class__.__parameter_map__.items() ]
else:
args = []
try:
return obj.__profiling_name__.format( **dict( args ) )
except KeyError:
pass
return str(obj)
import types
function_types = [
types.FunctionType,
types.MethodType,
staticmethod,
classmethod,
]
| gpl-3.0 | -6,382,795,003,153,329,000 | 33.11437 | 97 | 0.588635 | false |
OxES/OxKeplerSC | src/cbvc/VBLinRegARD.py | 1 | 3845 | import numpy, sys
import scipy.linalg, scipy.special
import warnings
'''
VBLinRegARD: Linear basis regression with automatic relevance priors
using Variational Bayes.
For more details on the algorithm see Apprendix of
Roberts, McQuillan, Reece & Aigrain, 2013, MNRAS, 354, 3639.
History:
2011: Translated by Thomas Evans from original Matlab code by Stephen J Roberts
2013: Documentation added by Suzanne Aigrain
'''
def logdet(a):
'''
Compute log of determinant of matrix a using Cholesky decomposition
'''
# First make sure that matrix is symmetric:
if numpy.allclose(a.T,a) == False:
print ('MATRIX NOT SYMMETRIC')
# Second make sure that matrix is positive definite:
eigenvalues = scipy.linalg.eigvalsh(a)
if min(eigenvalues) <=0:
print ('Matrix is NOT positive-definite')
print (' min eigv = %.16f' % min(eigenvalues))
step1 = scipy.linalg.cholesky(a)
step2 = numpy.diag(step1.T)
out = 2. * numpy.sum(numpy.log(step2), axis=0)
return out
def bayes_linear_fit_ard(X, y):
'''
Fit linear basis model with design matrix X to data y.
Calling sequence:
w, V, invV, logdetV, an, bn, E_a, L = bayes_linear_fit_ard(X, y)
Inputs:
X: design matrix
y: target data
    Outputs:
       w: posterior mean of the basis function weights
       V: posterior covariance matrix of the weights
       invV: inverse of V (posterior precision matrix)
       logdetV: log-determinant of V
       an, bn: shape and rate parameters of the Gamma posterior over the
               noise precision (its posterior mean is an / bn)
       E_a: expected ARD precision of each weight
       L: final value of the variational lower bound
    '''
# uninformative priors
a0 = 1e-2
b0 = 1e-4
c0 = 1e-2
d0 = 1e-4
# pre-process data
[N, D] = X.shape
X_corr = X.T * X
Xy_corr = X.T * y
an = a0 + N / 2.
gammaln_an = scipy.special.gammaln(an)
cn = c0 + 1 / 2.
D_gammaln_cn = D * scipy.special.gammaln(cn)
# iterate to find hyperparameters
L_last = -sys.float_info.max
max_iter = 500
E_a = numpy.matrix(numpy.ones(D) * c0 / d0).T
for iter in range(max_iter):
# covariance and weight of linear model
invV = numpy.matrix(numpy.diag(numpy.array(E_a)[:,0])) + X_corr
V = numpy.matrix(scipy.linalg.inv(invV))
logdetV = -logdet(invV)
w = numpy.dot(V, Xy_corr)[:,0]
# parameters of noise model (an remains constant)
sse = numpy.sum(numpy.power(X*w-y, 2), axis=0)
if numpy.imag(sse)==0:
sse = numpy.real(sse)[0]
else:
print ('Something went wrong')
bn = b0 + 0.5 * (sse + numpy.sum((numpy.array(w)[:,0]**2) * numpy.array(E_a)[:,0], axis=0))
E_t = an / bn
# hyperparameters of covariance prior (cn remains constant)
dn = d0 + 0.5 * (E_t * (numpy.array(w)[:,0]**2) + numpy.diag(V))
E_a = numpy.matrix(cn / dn).T
# variational bound, ignoring constant terms for now
L = -0.5 * (E_t*sse + numpy.sum(numpy.multiply(X, X*V))) + \
0.5 * logdetV - b0 * E_t + gammaln_an - an * numpy.log(bn) + an + \
D_gammaln_cn - cn * numpy.sum(numpy.log(dn))
# variational bound must grow!
if L_last > L:
# if this happens, then something has gone wrong....
file = open('ERROR_LOG','w')
file.write('Last bound %6.6f, current bound %6.6f' % (L, L_last))
file.close()
raise Exception('Variational bound should not reduce - see ERROR_LOG')
return
# stop if change in variation bound is < 0.001%
if abs(L_last - L) < abs(0.00001 * L):
break
# print L, L_last
L_last = L
if iter == max_iter - 1:
warnings.warn('Bayes:maxIter ... Bayesian linear regression reached maximum number of iterations.')
# augment variational bound with constant terms
L = L - 0.5 * (N * numpy.log(2 * numpy.pi) - D) - scipy.special.gammaln(a0) + \
a0 * numpy.log(b0) + D * (-scipy.special.gammaln(c0) + c0 * numpy.log(d0))
return w, V, invV, logdetV, an, bn, E_a, L
| gpl-3.0 | 4,759,305,985,593,180,000 | 35.971154 | 108 | 0.584135 | false |
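A minimal usage sketch for bayes_linear_fit_ard from the record above. The module name VBLinRegARD and the synthetic data are assumptions for illustration; numpy.matrix inputs are used because the routine relies on matrix semantics (X.T * y).

import numpy
import VBLinRegARD

numpy.random.seed(0)
N, D = 200, 5
X = numpy.matrix(numpy.random.randn(N, D))
true_w = numpy.matrix([[1.0], [0.0], [-2.0], [0.0], [0.5]])
y = X * true_w + 0.1 * numpy.matrix(numpy.random.randn(N, 1))

w, V, invV, logdetV, an, bn, E_a, L = VBLinRegARD.bayes_linear_fit_ard(X, y)
print(w)        # posterior mean weights; the irrelevant columns shrink towards zero
print(an / bn)  # posterior mean of the noise precision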
EternalPo/PythonTest | main/pythonmain.py | 1 | 1646 | #!/usr/bin/env python
#-*- coding: UTF-8 -*-
# Import hello.py and the other local helper modules
import hello
import pythontest
import pythondef
#hello.print_func("World")
# Reading and writing files in Python
# 1. Opening a file: after opening a file with open(), always remember to call the file object's close() method; a try/finally block can be used to make sure the file is closed.
file_object = open("testfile")
print(file_object.name)
try:
all_the_test = file_object.read()
hello.print_func(all_the_test)
finally:
file_object.close()
#2. Reading files: open a text file for reading
input = open("data","r")
# The second argument defaults to "r"
input = open("data")
# Open a binary file for reading
input = open("data","rb")
# Read the entire contents
file_object = open("thefile.txt")
try:
# print (file_object.readline())
# print(file_object.encoding)
all_the_test = file_object.read()
# hello.print_func(all_the_test)
finally:
file_object.close()
# Read a fixed number of bytes at a time
file_object = open("abinfile","rb")
try:
while True:
chunk = file_object.read(100)
# hello.print_func(chunk)
if not chunk:
break
# print("not chunk:", chunk)
finally:
file_object.close()
# Writing files: open a text file for writing
output = open("data","w")
# Open a binary file for writing
output = open("data","wb")
# Open for reading and writing ("w+" truncates the file; use "a" to append)
output = open("data","w+")
# Write data
all_the_text = "1. Opening a file: after calling open(), always remember to call the file object's close() method; a try/finally block can be used to make sure the file is closed."
file_object = open("thefile.tex","w+")
#print (file_object.readline())
file_object.write(all_the_text)
file_object.writelines(all_the_text)
file_object.seek(0)
print (file_object.readline())
file_object.close()
| apache-2.0 | 6,295,898,723,262,873,000 | 14.54023 | 90 | 0.677515 | false |
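The tutorial script above leans on try/finally to close files. A shorter equivalent using context managers is sketched below; the file names are placeholders taken from the script.

# Read and rewrite a file with automatic closing.
with open("testfile", "r") as source:
    contents = source.read()

with open("thefile.txt", "w") as target:
    target.write(contents)
# Both files are closed when the with-blocks exit, even if an exception is raised.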
google/material-design-icons | update/venv/lib/python3.9/site-packages/fontTools/misc/xmlReader.py | 5 | 4764 | from fontTools import ttLib
from fontTools.misc.textTools import safeEval
from fontTools.ttLib.tables.DefaultTable import DefaultTable
import sys
import os
import logging
log = logging.getLogger(__name__)
class TTXParseError(Exception): pass
BUFSIZE = 0x4000
class XMLReader(object):
def __init__(self, fileOrPath, ttFont, progress=None, quiet=None, contentOnly=False):
if fileOrPath == '-':
fileOrPath = sys.stdin
if not hasattr(fileOrPath, "read"):
self.file = open(fileOrPath, "rb")
self._closeStream = True
else:
# assume readable file object
self.file = fileOrPath
self._closeStream = False
self.ttFont = ttFont
self.progress = progress
if quiet is not None:
from fontTools.misc.loggingTools import deprecateArgument
deprecateArgument("quiet", "configure logging instead")
self.quiet = quiet
self.root = None
self.contentStack = []
self.contentOnly = contentOnly
self.stackSize = 0
def read(self, rootless=False):
if rootless:
self.stackSize += 1
if self.progress:
self.file.seek(0, 2)
fileSize = self.file.tell()
self.progress.set(0, fileSize // 100 or 1)
self.file.seek(0)
self._parseFile(self.file)
if self._closeStream:
self.close()
if rootless:
self.stackSize -= 1
def close(self):
self.file.close()
def _parseFile(self, file):
from xml.parsers.expat import ParserCreate
parser = ParserCreate()
parser.StartElementHandler = self._startElementHandler
parser.EndElementHandler = self._endElementHandler
parser.CharacterDataHandler = self._characterDataHandler
pos = 0
while True:
chunk = file.read(BUFSIZE)
if not chunk:
parser.Parse(chunk, 1)
break
pos = pos + len(chunk)
if self.progress:
self.progress.set(pos // 100)
parser.Parse(chunk, 0)
def _startElementHandler(self, name, attrs):
if self.stackSize == 1 and self.contentOnly:
# We already know the table we're parsing, skip
# parsing the table tag and continue to
# stack '2' which begins parsing content
self.contentStack.append([])
self.stackSize = 2
return
stackSize = self.stackSize
self.stackSize = stackSize + 1
subFile = attrs.get("src")
if subFile is not None:
if hasattr(self.file, 'name'):
# if file has a name, get its parent directory
dirname = os.path.dirname(self.file.name)
else:
# else fall back to using the current working directory
dirname = os.getcwd()
subFile = os.path.join(dirname, subFile)
if not stackSize:
if name != "ttFont":
raise TTXParseError("illegal root tag: %s" % name)
sfntVersion = attrs.get("sfntVersion")
if sfntVersion is not None:
if len(sfntVersion) != 4:
sfntVersion = safeEval('"' + sfntVersion + '"')
self.ttFont.sfntVersion = sfntVersion
self.contentStack.append([])
elif stackSize == 1:
if subFile is not None:
subReader = XMLReader(subFile, self.ttFont, self.progress)
subReader.read()
self.contentStack.append([])
return
tag = ttLib.xmlToTag(name)
msg = "Parsing '%s' table..." % tag
if self.progress:
self.progress.setLabel(msg)
log.info(msg)
if tag == "GlyphOrder":
tableClass = ttLib.GlyphOrder
elif "ERROR" in attrs or ('raw' in attrs and safeEval(attrs['raw'])):
tableClass = DefaultTable
else:
tableClass = ttLib.getTableClass(tag)
if tableClass is None:
tableClass = DefaultTable
if tag == 'loca' and tag in self.ttFont:
# Special-case the 'loca' table as we need the
# original if the 'glyf' table isn't recompiled.
self.currentTable = self.ttFont[tag]
else:
self.currentTable = tableClass(tag)
self.ttFont[tag] = self.currentTable
self.contentStack.append([])
elif stackSize == 2 and subFile is not None:
subReader = XMLReader(subFile, self.ttFont, self.progress, contentOnly=True)
subReader.read()
self.contentStack.append([])
self.root = subReader.root
elif stackSize == 2:
self.contentStack.append([])
self.root = (name, attrs, self.contentStack[-1])
else:
l = []
self.contentStack[-1].append((name, attrs, l))
self.contentStack.append(l)
def _characterDataHandler(self, data):
if self.stackSize > 1:
self.contentStack[-1].append(data)
def _endElementHandler(self, name):
self.stackSize = self.stackSize - 1
del self.contentStack[-1]
if not self.contentOnly:
if self.stackSize == 1:
self.root = None
elif self.stackSize == 2:
name, attrs, content = self.root
self.currentTable.fromXML(name, attrs, content, self.ttFont)
self.root = None
class ProgressPrinter(object):
def __init__(self, title, maxval=100):
print(title)
def set(self, val, maxval=None):
pass
def increment(self, val=1):
pass
def setLabel(self, text):
print(text)
| apache-2.0 | 1,176,087,762,665,243,600 | 27.023529 | 86 | 0.690386 | false |
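A usage sketch for the XMLReader above, assuming a complete TTX dump on disk; the file names are placeholders, and TTFont.save is the standard fontTools entry point for writing the rebuilt binary.

from fontTools.ttLib import TTFont
from fontTools.misc.xmlReader import XMLReader

font = TTFont()
reader = XMLReader("font.ttx", font)  # placeholder path to a complete TTX dump
reader.read()
font.save("rebuilt.ttf")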
pytroll/pytroll-db | trolldb/sqltypes.py | 1 | 1645 | import sqlalchemy
from shapely import geometry, wkb


class Geography(sqlalchemy.types.TypeEngine):
    """PostGIS Geometry Type."""

    def __init__(self, type_, dimension):
        super(Geography, self).__init__()
        self.SRID = 4326
        self.type = type_.upper()
        self.dimension = dimension

    def bind_processor(self, dialect):
        """Convert from Python type to database type."""
        def process(value):
            """``value`` is a Python/Shapely geometry object."""
            if value is None:
                return None
            else:
                return 'SRID=%s;%s' % (self.SRID, value)
        return process

    def result_processor(self, dialect, *args):
        """Convert from database type to Python type."""
        def process(value):
            """``value`` is a hex-encoded WKB string."""
            if value is None:
                return None
            else:
                return wkb.loads(value.decode('hex'))
        return process

    def get_col_spec(self):
        return 'GEOGRAPHY'


class POINT(Geography):
    def __init__(self):
        super(POINT, self).__init__('POINT', 2)


class LINESTRING(Geography):
    def __init__(self):
        super(LINESTRING, self).__init__('LINESTRING', 2)


class MULTILINESTRING(Geography):
    def __init__(self):
        super(MULTILINESTRING, self).__init__('MULTILINESTRING', 2)


class MULTIPOLYGON(Geography):
    def __init__(self):
        super(MULTIPOLYGON, self).__init__('MULTIPOLYGON', 2)


class POLYGON(Geography):
    def __init__(self):
        super(POLYGON, self).__init__('POLYGON', 2)
| gpl-3.0 | 2,036,326,957,917,313,500 | 25.967213 | 67 | 0.564134 | false |
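A hypothetical table definition using the POINT type above. It assumes the legacy SQLAlchemy versions this module appears to target, where get_col_spec() supplies the GEOGRAPHY column DDL; the table, column names and module path are invented.

import sqlalchemy
from sqltypes import POINT  # module name assumed from the path above

metadata = sqlalchemy.MetaData()
stations = sqlalchemy.Table(
    "stations", metadata,
    sqlalchemy.Column("id", sqlalchemy.Integer, primary_key=True),
    sqlalchemy.Column("location", POINT()),
)
# CREATE TABLE would emit "location GEOGRAPHY" through Geography.get_col_spec().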
mainakibui/dkobo | dkobo/koboform/views/survey_draft_views.py | 1 | 11283 | import json
import requests
import pyxform.survey_from
from guardian.shortcuts import assign_perm
from django.http import HttpResponseBadRequest, HttpResponse, HttpResponseRedirect
from django.conf import settings
from django.utils.encoding import smart_unicode
from django.contrib.auth.decorators import login_required
from django.forms.models import model_to_dict
from rest_framework import status
from rest_framework.response import Response
from rest_framework.decorators import api_view
from rest_framework.authtoken.models import Token
from dkobo.koboform.models import SurveyDraft
from dkobo.koboform.serializers import ListSurveyDraftSerializer, DetailSurveyDraftSerializer
from dkobo.koboform.kobo_to_xlsform import convert_any_kobo_features_to_xlsform_survey_structure
from dkobo.koboform import pyxform_utils, kobocat_integration, xlform
def export_form(request, id):
survey_draft = SurveyDraft.objects.get(pk=id)
file_format = request.GET.get('format', 'xml')
if file_format == "xml":
contents = survey_draft.to_xml()
mimetype = 'application/force-download'
# content_length = len(contents) + 2 # the length of the string != the length of the file
elif file_format == "xls":
contents = survey_draft.to_xls()
mimetype = 'application/vnd.ms-excel; charset=utf-8'
# contents.read()
# content_length = contents.tell()
# contents.seek(0)
elif file_format == "csv":
contents = survey_draft.body
mimetype = 'text/csv; charset=utf-8'
# content_length = len(contents)
else:
return HttpResponseBadRequest(
"Format not supported: '%s'. Supported formats are [xml,xls,csv]." % file_format)
response = HttpResponse(contents, mimetype=mimetype)
response['Content-Disposition'] = 'attachment; filename=%s.%s' % (survey_draft.id_string,
file_format)
# response['Content-Length'] = content_length
return response
# def export_all_questions(request):
# queryset = SurveyDraft.objects.filter(user=request.user)
# queryset = queryset.exclude(asset_type=None)
# from dkobo.koboform import pyxform_utils
# response = HttpResponse(pyxform_utils.convert_csv_to_xls(concentrated_csv), mimetype='application/vnd.ms-excel; charset=utf-8')
# response['Content-Disposition'] = 'attachment; filename=all_questions.xls'
# return response
@login_required
def create_survey_draft(request):
raw_draft = json.loads(request.body)
name = raw_draft.get('title', raw_draft.get('name'))
csv_details = {u'user': request.user,
u'body': raw_draft.get("body"),
u'description': raw_draft.get("description"),
u'name': name}
survey_draft = SurveyDraft.objects.create(**csv_details)
return HttpResponse(json.dumps(model_to_dict(survey_draft)))
@login_required
@api_view(['GET', 'PUT', 'DELETE', 'PATCH'])
def survey_draft_detail(request, pk, format=None):
kwargs = {'pk': pk}
if not request.user.is_superuser:
kwargs['user'] = request.user
try:
survey_draft = SurveyDraft.objects.get(**kwargs)
except SurveyDraft.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'GET':
serializer = DetailSurveyDraftSerializer(survey_draft)
return Response(serializer.data)
elif request.method == 'PUT':
serializer = DetailSurveyDraftSerializer(survey_draft, data=request.DATA)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
elif request.method == 'PATCH':
for key, value in request.DATA.items():
if key == 'tags':
survey_draft.tags.clear()
for val in value: survey_draft.tags.add(val)
else:
survey_draft.__setattr__(key, value)
survey_draft.save()
return Response(DetailSurveyDraftSerializer(survey_draft).data)
elif request.method == 'DELETE':
survey_draft.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
XLS_CONTENT_TYPES = [
"application/vnd.ms-excel",
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
"application/octet-stream",
]
@login_required
def bulk_delete_questions(request):
question_ids = json.loads(request.body)
SurveyDraft.objects.filter(user=request.user).filter(id__in=question_ids).delete()
return HttpResponse('')
@login_required
def import_survey_draft(request):
"""
Imports an XLS or CSV file into the user's SurveyDraft list.
Returns an error in JSON if the survey was not valid.
"""
output = {}
posted_file = request.FILES.get(u'files')
response_code = 200
if not posted_file:
response_code = 204 # Error 204: No input
output[u'error'] = "No file posted"
elif posted_file.name.endswith('.xml'):
warnings = []
try:
survey_object = pyxform.survey_from.xform(filelike_obj=posted_file, warnings=warnings)
_csv = survey_object.to_csv(warnings=warnings, koboform=True).read()
new_survey_draft = SurveyDraft.objects.create(**{
u'body': smart_unicode(_csv),
u'name': posted_file.name,
u'user': request.user
})
output[u'survey_draft_id'] = new_survey_draft.id
except Exception, err:
response_code = 500
output[u'error'] = err.message or str(err)
output[u'warnings'] = warnings
else:
try:
# create and validate the xform but ignore the results
warnings = []
pyxform_utils.validate_kobo_xlsform(posted_file, warnings=warnings)
output[u'xlsform_valid'] = True
posted_file.seek(0)
if posted_file.content_type in XLS_CONTENT_TYPES:
_csv = pyxform_utils.convert_xls_to_csv_string(posted_file)
elif posted_file.content_type == "text/csv":
_csv = posted_file.read()
else:
raise Exception("Content-type not recognized: '%s'" % posted_file.content_type)
new_survey_draft = SurveyDraft.objects.create(**{
u'body': smart_unicode(_csv),
u'name': posted_file.name,
u'user': request.user
})
output[u'survey_draft_id'] = new_survey_draft.id
except Exception, err:
response_code = 500
output[u'error'] = err.message or str(err)
return HttpResponse(json.dumps(output), content_type="application/json", status=response_code)
@login_required
def import_questions(request):
"""
Imports an XLS or CSV file into the user's SurveyDraft list.
Returns an error in JSON if the survey was not valid.
"""
output = {}
posted_file = request.FILES.get(u'files')
response_code = 200
if posted_file:
posted_file.seek(0)
if posted_file.content_type in XLS_CONTENT_TYPES:
imported_sheets_as_csv = pyxform_utils.convert_xls_to_csv_string(posted_file)
elif posted_file.content_type == "text/csv":
imported_sheets_as_csv = posted_file.read()
else:
raise Exception("Content-type not recognized: '%s'" % posted_file.content_type)
split_surveys = xlform.split_apart_survey(imported_sheets_as_csv)
new_survey_drafts = []
for _split_survey in split_surveys:
sd = SurveyDraft(name='New Form',
body=_split_survey[0],
user=request.user,
asset_type='question')
sd._summarize()
new_survey_drafts.append(sd)
SurveyDraft.objects.bulk_create(new_survey_drafts)
output[u'survey_draft_id'] = -1
else:
response_code = 204 # Error 204: No input
output[u'error'] = "No file posted"
return HttpResponse(json.dumps(output), content_type="application/json", status=response_code)
@login_required
@api_view(['GET', 'POST'])
def publish_survey_draft(request, pk, format=None):
if not kobocat_integration._is_enabled():
return Response({'error': 'KoBoCat Server not specified'}, status=status.HTTP_503_SERVICE_UNAVAILABLE)
try:
survey_draft = SurveyDraft.objects.get(pk=pk, user=request.user)
except SurveyDraft.DoesNotExist:
return Response({'error': 'SurveyDraft not found'}, status=status.HTTP_404_NOT_FOUND)
# convert csv to ss_struct
ss_struct = pyxform_utils.convert_csv_to_ss_structure(survey_draft.body)
form_id_string = request.DATA.get('id_string', False)
# set the form_id based on the payload
if 'settings' not in ss_struct:
ss_struct['settings'] = []
if len(ss_struct['settings']) == 0:
ss_struct['settings'].append({})
ss_struct['settings'][0]['form_id'] = form_id_string
# convert kobo-specific data structures into valid xlsform (e.g. score, rank)
xlsform_ss_struct = convert_any_kobo_features_to_xlsform_survey_structure(ss_struct)
valid_xlsform_csv_repr = pyxform_utils.convert_ss_structure_to_csv(xlsform_ss_struct)
_set_necessary_permissions(request.user)
(token, is_new) = Token.objects.get_or_create(user=request.user)
headers = {u'Authorization':'Token ' + token.key}
payload = {u'text_xls_form': valid_xlsform_csv_repr}
try:
url = kobocat_integration._kobocat_url('/api/v1/forms', internal=True)
response = requests.post(url, headers=headers, data=payload)
status_code = response.status_code
resp = response.json()
except Exception, e:
resp = {'status_code': 504, 'detail': str(e)}
status_code = 504
if 'formid' in resp:
survey_draft.kobocat_published_form_id = resp[u'formid']
survey_draft.save()
serializer = DetailSurveyDraftSerializer(survey_draft)
resp.update({
u'message': 'Successfully published form',
u'published_form_url': kobocat_integration._kobocat_url('/%s/forms/%s' % (request.user.username, resp.get('id_string')))
})
return Response(resp, status=status_code)
def _set_necessary_permissions(user):
"""
defeats the point of permissions, yes. But might get things working for now until we understand
the way kobocat uses permissions.
"""
necessary_perms = {'logger': ['add_datadictionary', 'add_xform', 'change_datadictionary', \
'change_xform', 'delete_datadictionary', 'delete_xform', \
'report_xform', 'view_xform',]}
for app, perms in necessary_perms.items():
for perm in perms:
assign_perm('%s.%s' % (app, perm), user)
def published_survey_draft_url(request, pk):
try:
survey_draft = SurveyDraft.objects.get(pk=pk, user=request.user)
except SurveyDraft.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
username = survey_draft.user.username
return HttpResponseRedirect(kobocat_integration._kobocat_url("/%s" % username))
| agpl-3.0 | -7,562,220,105,088,083,000 | 38.869258 | 133 | 0.639901 | false |
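A possible urls.py wiring for the views above, written against the pre-2.0 Django API this code base uses (django.conf.urls.url); the URL patterns and prefixes are assumptions.

from django.conf.urls import url
from dkobo.koboform.views import survey_draft_views as views

urlpatterns = [
    url(r'^survey_drafts/(?P<pk>\d+)$', views.survey_draft_detail),
    url(r'^survey_drafts/(?P<id>\d+)/export$', views.export_form),
    url(r'^survey_drafts/import$', views.import_survey_draft),
    url(r'^survey_drafts/(?P<pk>\d+)/publish$', views.publish_survey_draft),
]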
CollectQT/qapc | test/test_utils.py | 1 | 5877 | # builtin
import os
import sys
############################################################
# utils / setup
############################################################
base_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')
sys.path.append(base_dir)
from lib import utils, file_load
############################################################
# tests
############################################################
def test_true(): assert True
def test_video_add_worker_and_roles():
shoot_roles = file_load.load_shoot_roles()
video = list(file_load.get_table().items())[0][1]
assert video.get('Workers') is None
video = utils.video_add_worker_and_roles(video, shoot_roles)
assert isinstance(video['Workers'], dict)
def test_video_add_role_unscaled_percents():
shoot_roles = file_load.load_shoot_roles()
role_percents = file_load.load_role_percents()
video = list(file_load.get_table().items())[0][1]
video = utils.video_add_worker_and_roles(video, shoot_roles)
assert video.get('role percents unscaled') is None
video = utils.video_add_role_unscaled_percents(video, role_percents)
assert 0 <= video['role percents unscaled']['QAPC'] <= 100
def test_video_create_scaling_factor():
shoot_roles = file_load.load_shoot_roles()
role_percents = file_load.load_role_percents()
video = list(file_load.get_table().items())[0][1]
video = utils.video_add_worker_and_roles(video, shoot_roles)
video = utils.video_add_role_unscaled_percents(video, role_percents)
assert video.get('scaling factor') is None
video = utils.video_create_scaling_factor(video)
assert 0 <= video.get('scaling factor') <= 1
def test_video_scale_role_percents():
shoot_roles = file_load.load_shoot_roles()
role_percents = file_load.load_role_percents()
video = list(file_load.get_table().items())[0][1]
video = utils.video_add_worker_and_roles(video, shoot_roles)
video = utils.video_add_role_unscaled_percents(video, role_percents)
video = utils.video_create_scaling_factor(video)
assert video.get('role percents') is None
video = utils.video_scale_role_percents(video)
assert 0 <= video['role percents']['QAPC'] <= 100
def test_scaling_factor_applies_properly():
shoot_roles = file_load.load_shoot_roles()
role_percents = file_load.load_role_percents()
video = list(file_load.get_table().items())[0][1]
video = utils.video_add_worker_and_roles(video, shoot_roles)
video = utils.video_add_role_unscaled_percents(video, role_percents)
video = utils.video_create_scaling_factor(video)
video = utils.video_scale_role_percents(video)
expected_scaled_percent = video['role percents unscaled']['QAPC'] * video['scaling factor']
scaled_percent = video['role percents']['QAPC']
assert expected_scaled_percent == scaled_percent
def test_video_get_total_earnings():
shoot_roles = file_load.load_shoot_roles()
role_percents = file_load.load_role_percents()
video = list(file_load.get_table().items())[0][1]
video = utils.video_add_worker_and_roles(video, shoot_roles)
video = utils.video_add_role_unscaled_percents(video, role_percents)
video = utils.video_create_scaling_factor(video)
video = utils.video_scale_role_percents(video)
assert video.get('total earnings') is None
video = utils.video_get_total_earnings(video)
assert video.get('total earnings') is not None
def test_video_get_worker_earnings():
shoot_roles = file_load.load_shoot_roles()
role_percents = file_load.load_role_percents()
video = list(file_load.get_table().items())[0][1]
video = utils.video_add_worker_and_roles(video, shoot_roles)
video = utils.video_add_role_unscaled_percents(video, role_percents)
video = utils.video_create_scaling_factor(video)
video = utils.video_scale_role_percents(video)
video = utils.video_get_total_earnings(video)
assert video.get('earnings') is None
video = utils.video_get_worker_earnings(video)
assert isinstance(video.get('earnings'), dict)
def test_validate_earnings():
shoot_roles = file_load.load_shoot_roles()
role_percents = file_load.load_role_percents()
video = list(file_load.get_table().items())[0][1]
video = utils.video_add_worker_and_roles(video, shoot_roles)
video = utils.video_add_role_unscaled_percents(video, role_percents)
video = utils.video_create_scaling_factor(video)
video = utils.video_scale_role_percents(video)
video = utils.video_get_total_earnings(video)
video = utils.video_get_worker_earnings(video)
total_earnings = video['total earnings']
sum_all_earnings = 0
for earning in video['earnings'].values():
sum_all_earnings += earning
assert round(total_earnings, 2) == round(sum_all_earnings, 2)
def test_all_videos():
shoot_roles = file_load.load_shoot_roles()
role_percents = file_load.load_role_percents()
for video in file_load.get_table().values():
video = utils.video_add_worker_and_roles(video, shoot_roles)
video = utils.video_add_role_unscaled_percents(video, role_percents)
video = utils.video_create_scaling_factor(video)
video = utils.video_scale_role_percents(video)
video = utils.video_get_total_earnings(video)
video = utils.video_get_worker_earnings(video)
total_earnings = video['total earnings']
sum_all_earnings = 0
for earning in video['earnings'].values():
sum_all_earnings += earning
assert round(total_earnings, 2) == round(sum_all_earnings, 2)
def test_video_add_image():
video = list(file_load.get_table().items())[0][1]
images = file_load.get_images()
assert video.get('image') is None
video = utils.video_add_images(video, images)
assert video.get('image') is not None
| agpl-3.0 | -8,849,307,184,144,119,000 | 34.403614 | 95 | 0.65884 | false |
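The tests above repeat the same multi-step pipeline; a pytest fixture could collapse that setup, as sketched below under the assumption that the same repository layout (lib/ next to test/) is available.

import os, sys
import pytest

sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), '..'))
from lib import utils, file_load

@pytest.fixture
def processed_video():
    shoot_roles = file_load.load_shoot_roles()
    role_percents = file_load.load_role_percents()
    video = list(file_load.get_table().items())[0][1]
    video = utils.video_add_worker_and_roles(video, shoot_roles)
    video = utils.video_add_role_unscaled_percents(video, role_percents)
    video = utils.video_create_scaling_factor(video)
    video = utils.video_scale_role_percents(video)
    video = utils.video_get_total_earnings(video)
    return utils.video_get_worker_earnings(video)

def test_earnings_sum_matches_total(processed_video):
    total = processed_video['total earnings']
    assert round(total, 2) == round(sum(processed_video['earnings'].values()), 2)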
diath/pyfsw | pyfsw/models/shop.py | 1 | 2866 | from sqlalchemy import Column, Integer, String, Text, ForeignKey
from pyfsw import db
class ShopCategory(db.Model):
__tablename__ = 'shop_category'
# Standard columns
id = Column(Integer, primary_key=True, unique=True)
name = Column(String(32))
enabled = Column(Integer, default=1)
# Relationship
items = db.relationship('ShopItem', backref='shop_category')
# Methods
def __init__(self):
pass
def __repr__(self):
return '<ShopCategory.{}>'.format(self.id)
class ShopItem(db.Model):
__tablename__ = 'shop_item'
# Constants
Type = {
'Item': 1,
'Container': 2,
'Addon': 3,
'Mount': 4
}
# Standard columns
id = Column(Integer, primary_key=True, unique=True)
name = Column(String(32))
description = Column(Text)
category_id = Column(Integer, ForeignKey('shop_category.id'))
type = Column(Integer)
key = Column(Integer)
value = Column(Integer)
price = Column(Integer)
custom_image = Column(String(128), default='')
enabled = Column(Integer, default=1)
# Methods
def __init__(self):
pass
def __repr__(self):
return '<ShopItem.{}>'.format(self.id)
class ShopOrder(db.Model):
__tablename__ = 'shop_order'
# Standard columns
id = Column(Integer, primary_key=True, unique=True)
name = Column(String(32))
type = Column(Integer)
key = Column(Integer)
value = Column(Integer)
price = Column(Integer)
ordered = Column(Integer)
character_id = Column(Integer)
# Methods
def __init__(self):
pass
def __repr__(self):
return '<ShopOrder.{}>'.format(self.id)
class ShopHistory(db.Model):
__tablename__ = 'shop_history'
# Standard columns
id = Column(Integer, primary_key=True, unique=True)
name = Column(String(32))
type = Column(Integer)
key = Column(Integer)
value = Column(Integer)
price = Column(Integer)
ordered = Column(Integer)
delivered = Column(Integer)
character_id = Column(Integer)
account_id = Column(Integer)
# Methods
def __init__(self):
pass
def __repr__(self):
return '<ShopHistory.{}>'.format(self.id)
class PayPalHistory(db.Model):
__tablename__ = 'paypal_history'
# Standard columns
id = Column(Integer, primary_key=True, unique=True)
account_id = Column(Integer)
timestamp = Column(Integer)
status = Column(String(32))
test = Column(Integer)
origin = Column(String(64))
amount = Column(String(16))
points = Column(Integer)
# Methods
def __init__(self):
pass
def __repr__(self):
return '<PayPalHistory.{}>'.format(self.id)
class ZayPayHistory(db.Model):
__tablename__ = 'zaypay_history'
# Standard columns
id = Column(Integer, primary_key=True, unique=True)
account_id = Column(Integer)
timestamp = Column(Integer)
payment_id = Column(Integer)
price_setting_id = Column(Integer)
amount = Column(Integer)
points = Column(Integer)
# Methods
def __init__(self):
pass
def __repr__(self):
return '<ZayPayHistory.{}>'.format(self.id)
| mit | -5,246,374,000,673,775,000 | 20.877863 | 64 | 0.684229 | false |
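A hypothetical query helper built on the models above. It assumes the pyfsw application uses Flask-SQLAlchemy (so Model.query is available inside an application context); the helper function itself is not part of the source.

from pyfsw import db
from pyfsw.models.shop import ShopCategory, ShopItem

def enabled_items_by_category(category_name):
    # Return the enabled items of an enabled category, or an empty list.
    category = ShopCategory.query.filter_by(name=category_name, enabled=1).first()
    if not category:
        return []
    return [item for item in category.items if item.enabled]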
justinvforvendetta/test1234 | src/blockchain_processor.py | 1 | 27894 | import ast
import hashlib
from json import dumps, loads
import os
from Queue import Queue
import random
import sys
import time
import threading
import urllib
import deserialize
from processor import Processor, print_log
from utils import *
from storage import Storage
from utils import logger
class BlockchainProcessor(Processor):
def __init__(self, config, shared):
Processor.__init__(self)
self.mtimes = {} # monitoring
self.shared = shared
self.config = config
self.up_to_date = False
self.watch_lock = threading.Lock()
self.watch_blocks = []
self.watch_headers = []
self.watched_addresses = {}
self.history_cache = {}
self.max_cache_size = 100000
self.chunk_cache = {}
self.cache_lock = threading.Lock()
self.headers_data = ''
self.headers_path = config.get('leveldb', 'path')
self.mempool_values = {}
self.mempool_addresses = {}
self.mempool_hist = {}
self.mempool_hashes = set([])
self.mempool_lock = threading.Lock()
self.address_queue = Queue()
try:
self.test_reorgs = config.getboolean('leveldb', 'test_reorgs') # simulate random blockchain reorgs
except:
self.test_reorgs = False
self.storage = Storage(config, shared, self.test_reorgs)
self.dblock = threading.Lock()
self.reecoind_url = 'http://%s:%s@%s:%s/' % (
config.get('bitcoind', 'bitcoind_user'),
config.get('bitcoind', 'bitcoind_password'),
config.get('bitcoind', 'bitcoind_host'),
config.get('bitcoind', 'bitcoind_port'))
self.sent_height = 0
self.sent_header = None
# catch_up headers
self.init_headers(self.storage.height)
self.blockchain_thread = threading.Thread(target = self.do_catch_up)
self.blockchain_thread.start()
def do_catch_up(self):
self.header = self.block2header(self.reecoind('getblock', [self.storage.last_hash]))
self.header['utxo_root'] = self.storage.get_root_hash().encode('hex')
self.catch_up(sync=False)
print_log("Blockchain is up to date.")
self.memorypool_update()
print_log("Memory pool initialized.")
while not self.shared.stopped():
self.main_iteration()
if self.shared.paused():
print_log("reecoind is responding")
self.shared.unpause()
time.sleep(10)
def mtime(self, name):
now = time.time()
if name != '':
delta = now - self.now
t = self.mtimes.get(name, 0)
self.mtimes[name] = t + delta
self.now = now
def print_mtime(self):
s = ''
for k, v in self.mtimes.items():
s += k+':'+"%.2f"%v+' '
print_log(s)
def reecoind(self, method, params=[]):
postdata = dumps({"method": method, 'params': params, 'id': 'jsonrpc'})
while True:
try:
respdata = urllib.urlopen(self.reecoind_url, postdata).read()
break
except:
print_log("cannot reach reecoind...")
self.shared.pause()
time.sleep(10)
if self.shared.stopped():
# this will end the thread
raise
continue
r = loads(respdata)
if r['error'] is not None:
raise BaseException(r['error'])
return r.get('result')
def block2header(self, b):
return {
"block_height": b.get('height'),
"version": b.get('version'),
"prev_block_hash": b.get('previousblockhash'),
"merkle_root": b.get('merkleroot'),
"timestamp": b.get('time'),
"bits": int(b.get('bits'), 16),
"nonce": b.get('nonce'),
}
def get_header(self, height):
block_hash = self.reecoind('getblockhash', [height])
b = self.reecoind('getblock', [block_hash])
return self.block2header(b)
def init_headers(self, db_height):
self.chunk_cache = {}
self.headers_filename = os.path.join(self.headers_path, 'blockchain_headers')
if os.path.exists(self.headers_filename):
height = os.path.getsize(self.headers_filename)/80 - 1 # the current height
if height > 0:
prev_hash = self.hash_header(self.read_header(height))
else:
prev_hash = None
else:
open(self.headers_filename, 'wb').close()
prev_hash = None
height = -1
if height < db_height:
print_log("catching up missing headers:", height, db_height)
try:
while height < db_height:
height += 1
header = self.get_header(height)
if height > 1:
if prev_hash != header.get('prev_block_hash'):
# The prev_hash block is orphaned, go back
print_log("reorganizing, a block in file is orphaned:", prev_hash)
# Go to the parent of the orphaned block
height -= 2
prev_hash = self.hash_header(self.read_header(height))
continue
self.write_header(header, sync=False)
prev_hash = self.hash_header(header)
if (height % 1000) == 0:
print_log("headers file:", height)
except KeyboardInterrupt:
self.flush_headers()
sys.exit()
self.flush_headers()
def hash_header(self, header):
return rev_hex(HashX11(header_to_string(header).decode('hex')).encode('hex'))
def read_header(self, block_height):
if os.path.exists(self.headers_filename):
with open(self.headers_filename, 'rb') as f:
f.seek(block_height * 80)
h = f.read(80)
if len(h) == 80:
h = header_from_string(h)
return h
def read_chunk(self, index):
with open(self.headers_filename, 'rb') as f:
f.seek(index*2016*80)
chunk = f.read(2016*80)
return chunk.encode('hex')
def write_header(self, header, sync=True):
if not self.headers_data:
self.headers_offset = header.get('block_height')
self.headers_data += header_to_string(header).decode('hex')
if sync or len(self.headers_data) > 40*100:
self.flush_headers()
with self.cache_lock:
chunk_index = header.get('block_height')/2016
if self.chunk_cache.get(chunk_index):
self.chunk_cache.pop(chunk_index)
def pop_header(self):
# we need to do this only if we have not flushed
if self.headers_data:
self.headers_data = self.headers_data[:-40]
def flush_headers(self):
if not self.headers_data:
return
with open(self.headers_filename, 'rb+') as f:
f.seek(self.headers_offset*80)
f.write(self.headers_data)
self.headers_data = ''
def get_chunk(self, i):
# store them on disk; store the current chunk in memory
with self.cache_lock:
chunk = self.chunk_cache.get(i)
if not chunk:
chunk = self.read_chunk(i)
self.chunk_cache[i] = chunk
return chunk
def get_mempool_transaction(self, txid):
try:
raw_tx = self.reecoind('getrawtransaction', [txid, 0])
except:
return None
vds = deserialize.BCDataStream()
vds.write(raw_tx.decode('hex'))
try:
return deserialize.parse_Transaction(vds, is_coinbase=False)
except:
print_log("ERROR: cannot parse", txid)
return None
def get_history(self, addr, cache_only=False):
with self.cache_lock:
hist = self.history_cache.get(addr)
if hist is not None:
return hist
if cache_only:
return -1
with self.dblock:
hist = self.storage.get_history(addr)
# add memory pool
with self.mempool_lock:
for txid, delta in self.mempool_hist.get(addr, []):
hist.append({'tx_hash':txid, 'height':0})
with self.cache_lock:
if len(self.history_cache) > self.max_cache_size:
logger.info("clearing cache")
self.history_cache.clear()
self.history_cache[addr] = hist
return hist
def get_unconfirmed_value(self, addr):
v = 0
with self.mempool_lock:
for txid, delta in self.mempool_hist.get(addr, []):
v += delta
return v
def get_status(self, addr, cache_only=False):
tx_points = self.get_history(addr, cache_only)
if cache_only and tx_points == -1:
return -1
if not tx_points:
return None
if tx_points == ['*']:
return '*'
status = ''
for tx in tx_points:
status += tx.get('tx_hash') + ':%d:' % tx.get('height')
return hashlib.sha256(status).digest().encode('hex')
def get_merkle(self, tx_hash, height):
block_hash = self.reecoind('getblockhash', [height])
b = self.reecoind('getblock', [block_hash])
tx_list = b.get('tx')
tx_pos = tx_list.index(tx_hash)
merkle = map(hash_decode, tx_list)
target_hash = hash_decode(tx_hash)
s = []
while len(merkle) != 1:
if len(merkle) % 2:
merkle.append(merkle[-1])
n = []
while merkle:
new_hash = Hash(merkle[0] + merkle[1])
if merkle[0] == target_hash:
s.append(hash_encode(merkle[1]))
target_hash = new_hash
elif merkle[1] == target_hash:
s.append(hash_encode(merkle[0]))
target_hash = new_hash
n.append(new_hash)
merkle = merkle[2:]
merkle = n
return {"block_height": height, "merkle": s, "pos": tx_pos}
def add_to_history(self, addr, tx_hash, tx_pos, tx_height):
# keep it sorted
s = self.serialize_item(tx_hash, tx_pos, tx_height) + 40*chr(0)
assert len(s) == 80
serialized_hist = self.batch_list[addr]
l = len(serialized_hist)/80
for i in range(l-1, -1, -1):
item = serialized_hist[80*i:80*(i+1)]
item_height = int(rev_hex(item[36:39].encode('hex')), 16)
if item_height <= tx_height:
serialized_hist = serialized_hist[0:80*(i+1)] + s + serialized_hist[80*(i+1):]
break
else:
serialized_hist = s + serialized_hist
self.batch_list[addr] = serialized_hist
# backlink
txo = (tx_hash + int_to_hex(tx_pos, 4)).decode('hex')
self.batch_txio[txo] = addr
def deserialize_block(self, block):
txlist = block.get('tx')
tx_hashes = [] # ordered txids
txdict = {} # deserialized tx
is_coinbase = True
for raw_tx in txlist:
tx_hash = hash_encode(Hash(raw_tx.decode('hex')))
vds = deserialize.BCDataStream()
vds.write(raw_tx.decode('hex'))
try:
tx = deserialize.parse_Transaction(vds, is_coinbase)
except:
print_log("ERROR: cannot parse", tx_hash)
continue
tx_hashes.append(tx_hash)
txdict[tx_hash] = tx
is_coinbase = False
return tx_hashes, txdict
def import_block(self, block, block_hash, block_height, sync, revert=False):
touched_addr = set([])
# deserialize transactions
tx_hashes, txdict = self.deserialize_block(block)
# undo info
if revert:
undo_info = self.storage.get_undo_info(block_height)
tx_hashes.reverse()
else:
undo_info = {}
for txid in tx_hashes: # must be ordered
tx = txdict[txid]
if not revert:
undo = self.storage.import_transaction(txid, tx, block_height, touched_addr)
undo_info[txid] = undo
else:
undo = undo_info.pop(txid)
self.storage.revert_transaction(txid, tx, block_height, touched_addr, undo)
if revert:
assert undo_info == {}
# add undo info
if not revert:
self.storage.write_undo_info(block_height, self.reecoind_height, undo_info)
# add the max
self.storage.db_undo.put('height', repr( (block_hash, block_height, self.storage.db_version) ))
for addr in touched_addr:
self.invalidate_cache(addr)
self.storage.update_hashes()
def add_request(self, session, request):
# see if we can get if from cache. if not, add request to queue
message_id = request.get('id')
try:
result = self.process(request, cache_only=True)
except BaseException as e:
self.push_response(session, {'id': message_id, 'error': str(e)})
return
if result == -1:
self.queue.put((session, request))
else:
self.push_response(session, {'id': message_id, 'result': result})
def do_subscribe(self, method, params, session):
with self.watch_lock:
if method == 'blockchain.numblocks.subscribe':
if session not in self.watch_blocks:
self.watch_blocks.append(session)
elif method == 'blockchain.headers.subscribe':
if session not in self.watch_headers:
self.watch_headers.append(session)
elif method == 'blockchain.address.subscribe':
address = params[0]
l = self.watched_addresses.get(address)
if l is None:
self.watched_addresses[address] = [session]
elif session not in l:
l.append(session)
def do_unsubscribe(self, method, params, session):
with self.watch_lock:
if method == 'blockchain.numblocks.subscribe':
if session in self.watch_blocks:
self.watch_blocks.remove(session)
elif method == 'blockchain.headers.subscribe':
if session in self.watch_headers:
self.watch_headers.remove(session)
elif method == "blockchain.address.subscribe":
addr = params[0]
l = self.watched_addresses.get(addr)
if not l:
return
if session in l:
l.remove(session)
if session in l:
print_log("error rc!!")
self.shared.stop()
if l == []:
self.watched_addresses.pop(addr)
def process(self, request, cache_only=False):
message_id = request['id']
method = request['method']
params = request.get('params', [])
result = None
error = None
if method == 'blockchain.numblocks.subscribe':
result = self.storage.height
elif method == 'blockchain.headers.subscribe':
result = self.header
elif method == 'blockchain.address.subscribe':
address = str(params[0])
result = self.get_status(address, cache_only)
elif method == 'blockchain.address.get_history':
address = str(params[0])
result = self.get_history(address, cache_only)
elif method == 'blockchain.address.get_mempool':
address = str(params[0])
result = self.get_unconfirmed_history(address, cache_only)
elif method == 'blockchain.address.get_balance':
address = str(params[0])
confirmed = self.storage.get_balance(address)
unconfirmed = self.get_unconfirmed_value(address)
result = { 'confirmed':confirmed, 'unconfirmed':unconfirmed }
elif method == 'blockchain.address.get_proof':
address = str(params[0])
result = self.storage.get_proof(address)
elif method == 'blockchain.address.listunspent':
address = str(params[0])
result = self.storage.listunspent(address)
elif method == 'blockchain.utxo.get_address':
txid = str(params[0])
pos = int(params[1])
txi = (txid + int_to_hex(pos, 4)).decode('hex')
result = self.storage.get_address(txi)
elif method == 'blockchain.block.get_header':
if cache_only:
result = -1
else:
height = int(params[0])
result = self.get_header(height)
elif method == 'blockchain.block.get_chunk':
if cache_only:
result = -1
else:
index = int(params[0])
result = self.get_chunk(index)
elif method == 'blockchain.transaction.broadcast':
try:
txo = self.reecoind('sendrawtransaction', params)
print_log("sent tx:", txo)
result = txo
except BaseException, e:
result = str(e) # do not send an error
print_log("error:", result, params)
elif method == 'blockchain.transaction.get_merkle':
if cache_only:
result = -1
else:
tx_hash = params[0]
tx_height = params[1]
result = self.get_merkle(tx_hash, tx_height)
elif method == 'blockchain.transaction.get':
tx_hash = params[0]
result = self.reecoind('getrawtransaction', [tx_hash, 0])
elif method == 'blockchain.estimatefee':
num = int(params[0])
result = self.reecoind('estimatefee', [num])
else:
raise BaseException("unknown method:%s" % method)
if cache_only and result == -1:
return -1
return result
def getfullblock(self, block_hash):
block = self.reecoind('getblock', [block_hash])
rawtxreq = []
i = 0
for txid in block['tx']:
rawtxreq.append({
"method": "getrawtransaction",
"params": [txid],
"id": i,
})
i += 1
postdata = dumps(rawtxreq)
try:
respdata = urllib.urlopen(self.reecoind_url, postdata).read()
except:
logger.error("reecoind error (getfullblock)",exc_info=True)
self.shared.stop()
r = loads(respdata)
rawtxdata = []
for ir in r:
if ir['error'] is not None:
self.shared.stop()
print_log("Error: make sure you run reecoind with txindex=1; use -reindex if needed.")
raise BaseException(ir['error'])
rawtxdata.append(ir['result'])
block['tx'] = rawtxdata
return block
def catch_up(self, sync=True):
prev_root_hash = None
while not self.shared.stopped():
self.mtime('')
# are we done yet?
info = self.reecoind('getinfo')
self.reecoind_height = info.get('blocks')
reecoind_block_hash = self.reecoind('getblockhash', [self.reecoind_height])
if self.storage.last_hash == reecoind_block_hash:
self.up_to_date = True
break
# fixme: this is unsafe, if we revert when the undo info is not yet written
revert = (random.randint(1, 100) == 1) if self.test_reorgs else False
# not done..
self.up_to_date = False
try:
next_block_hash = self.reecoind('getblockhash', [self.storage.height + 1])
next_block = self.getfullblock(next_block_hash)
except BaseException, e:
revert = True
next_block = self.getfullblock(self.storage.last_hash)
self.mtime('daemon')
if (next_block.get('previousblockhash') == self.storage.last_hash) and not revert:
prev_root_hash = self.storage.get_root_hash()
self.import_block(next_block, next_block_hash, self.storage.height+1, sync)
self.storage.height = self.storage.height + 1
self.write_header(self.block2header(next_block), sync)
self.storage.last_hash = next_block_hash
self.mtime('import')
if self.storage.height % 1000 == 0 and not sync:
t_daemon = self.mtimes.get('daemon')
t_import = self.mtimes.get('import')
print_log("catch_up: block %d (%.3fs %.3fs)" % (self.storage.height, t_daemon, t_import), self.storage.get_root_hash().encode('hex'))
self.mtimes['daemon'] = 0
self.mtimes['import'] = 0
else:
# revert current block
block = self.getfullblock(self.storage.last_hash)
print_log("blockchain reorg", self.storage.height, block.get('previousblockhash'), self.storage.last_hash)
self.import_block(block, self.storage.last_hash, self.storage.height, sync, revert=True)
self.pop_header()
self.flush_headers()
self.storage.height -= 1
# read previous header from disk
self.header = self.read_header(self.storage.height)
self.storage.last_hash = self.hash_header(self.header)
if prev_root_hash:
assert prev_root_hash == self.storage.get_root_hash()
prev_root_hash = None
self.header = self.block2header(self.reecoind('getblock', [self.storage.last_hash]))
self.header['utxo_root'] = self.storage.get_root_hash().encode('hex')
if self.shared.stopped():
print_log( "closing database" )
self.storage.close()
def memorypool_update(self):
mempool_hashes = set(self.reecoind('getrawmempool'))
touched_addresses = set([])
# get new transactions
new_tx = {}
for tx_hash in mempool_hashes:
if tx_hash in self.mempool_hashes:
continue
tx = self.get_mempool_transaction(tx_hash)
if not tx:
continue
new_tx[tx_hash] = tx
self.mempool_hashes.add(tx_hash)
# remove older entries from mempool_hashes
self.mempool_hashes = mempool_hashes
# check all tx outputs
for tx_hash, tx in new_tx.items():
mpa = self.mempool_addresses.get(tx_hash, {})
out_values = []
for x in tx.get('outputs'):
out_values.append( x['value'] )
addr = x.get('address')
if not addr:
continue
v = mpa.get(addr,0)
v += x['value']
mpa[addr] = v
touched_addresses.add(addr)
self.mempool_addresses[tx_hash] = mpa
self.mempool_values[tx_hash] = out_values
# check all inputs
for tx_hash, tx in new_tx.items():
mpa = self.mempool_addresses.get(tx_hash, {})
for x in tx.get('inputs'):
# we assume that the input address can be parsed by deserialize(); this is true for Electrum transactions
addr = x.get('address')
if not addr:
continue
v = self.mempool_values.get(x.get('prevout_hash'))
if v:
value = v[ x.get('prevout_n')]
else:
txi = (x.get('prevout_hash') + int_to_hex(x.get('prevout_n'), 4)).decode('hex')
try:
value = self.storage.get_utxo_value(addr,txi)
except:
print_log("utxo not in database; postponing mempool update")
return
v = mpa.get(addr,0)
v -= value
mpa[addr] = v
touched_addresses.add(addr)
self.mempool_addresses[tx_hash] = mpa
# remove deprecated entries from mempool_addresses
for tx_hash, addresses in self.mempool_addresses.items():
if tx_hash not in self.mempool_hashes:
self.mempool_addresses.pop(tx_hash)
self.mempool_values.pop(tx_hash)
for addr in addresses:
touched_addresses.add(addr)
# rebuild mempool histories
new_mempool_hist = {}
for tx_hash, addresses in self.mempool_addresses.items():
for addr, delta in addresses.items():
h = new_mempool_hist.get(addr, [])
if tx_hash not in h:
h.append((tx_hash, delta))
new_mempool_hist[addr] = h
with self.mempool_lock:
self.mempool_hist = new_mempool_hist
# invalidate cache for touched addresses
for addr in touched_addresses:
self.invalidate_cache(addr)
def invalidate_cache(self, address):
with self.cache_lock:
if address in self.history_cache:
print_log("cache: invalidating", address)
self.history_cache.pop(address)
with self.watch_lock:
sessions = self.watched_addresses.get(address)
if sessions:
# TODO: update cache here. if new value equals cached value, do not send notification
self.address_queue.put((address,sessions))
def close(self):
self.blockchain_thread.join()
print_log("Closing database...")
self.storage.close()
print_log("Database is closed")
def main_iteration(self):
if self.shared.stopped():
print_log("Stopping timer")
return
with self.dblock:
t1 = time.time()
self.catch_up()
t2 = time.time()
self.memorypool_update()
if self.sent_height != self.storage.height:
self.sent_height = self.storage.height
for session in self.watch_blocks:
self.push_response(session, {
'id': None,
'method': 'blockchain.numblocks.subscribe',
'params': [self.storage.height],
})
if self.sent_header != self.header:
print_log("blockchain: %d (%.3fs)" % (self.storage.height, t2 - t1))
self.sent_header = self.header
for session in self.watch_headers:
self.push_response(session, {
'id': None,
'method': 'blockchain.headers.subscribe',
'params': [self.header],
})
while True:
try:
addr, sessions = self.address_queue.get(False)
except:
break
status = self.get_status(addr)
for session in sessions:
self.push_response(session, {
'id': None,
'method': 'blockchain.address.subscribe',
'params': [addr, status],
})
| agpl-3.0 | -8,479,303,598,396,417,000 | 32.688406 | 153 | 0.525203 | false |
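A client-side sketch for exercising a running server that exposes the blockchain.* methods handled above. It assumes the stratum-style newline-delimited JSON transport on TCP port 50001; host and port are placeholders.

import json
import socket

def electrum_request(host, port, method, params):
    # One newline-delimited JSON request, one JSON reply.
    payload = json.dumps({"id": 0, "method": method, "params": params}) + "\n"
    connection = socket.create_connection((host, port))
    try:
        connection.sendall(payload.encode("utf-8"))
        return json.loads(connection.makefile().readline())
    finally:
        connection.close()

print(electrum_request("localhost", 50001, "blockchain.numblocks.subscribe", []))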
ericmjl/bokeh | bokeh/core/property/string.py | 1 | 3831 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide the Regex property.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import base64
import re
# Bokeh imports
from .primitive import String
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'Regex',
'Base64String',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class Regex(String):
''' Accept strings that match a given regular expression.
Args:
default (string or None, optional) :
A default value for attributes created from this property to
have (default: None)
help (str or None, optional) :
A documentation string for this property. It will be automatically
used by the :ref:`bokeh.sphinxext.bokeh_prop` extension when
generating Spinx documentation. (default: None)
serialized (bool, optional) :
Whether attributes created from this property should be included
in serialization (default: True)
readonly (bool, optional) :
Whether attributes created from this property are read-only.
(default: False)
Example:
.. code-block:: python
>>> class RegexModel(HasProps):
... prop = Regex("foo[0-9]+bar")
...
>>> m = RegexModel()
>>> m.prop = "foo123bar"
>>> m.prop = "foo" # ValueError !!
>>> m.prop = [1, 2, 3] # ValueError !!
'''
def __init__(self, regex, default=None, help=None):
self.regex = re.compile(regex)
super().__init__(default=default, help=help)
def __str__(self):
return "%s(%r)" % (self.__class__.__name__, self.regex.pattern)
def validate(self, value, detail=True):
super().validate(value, detail)
if not (value is None or self.regex.match(value) is not None):
msg = "" if not detail else "expected a string matching %r pattern, got %r" % (self.regex.pattern, value)
raise ValueError(msg)
class Base64String(String):
def serialize_value(self, value):
''' Encode an ASCII string using Base64.
Args:
value : a string to encode
Returns:
string
'''
if isinstance(value, str):
value = base64.b64encode(value.encode("utf-8")).decode("utf-8")
return value
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause | 7,637,204,481,124,638,000 | 30.661157 | 117 | 0.394936 | false |
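A usage sketch for the Regex property above; the HexColorModel class is an invented example, and HasProps is imported from bokeh.core.has_props as elsewhere in Bokeh.

from bokeh.core.has_props import HasProps
from bokeh.core.property.string import Regex

class HexColorModel(HasProps):
    color = Regex(r"^#[0-9a-fA-F]{6}$", default="#000000")

m = HexColorModel()
m.color = "#ff8800"   # accepted
# m.color = "red"     # would raise ValueError: no match for the pattern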
jacobzelek/ffprobe | ffprobe/ffprobe.py | 1 | 12463 | #!/usr/bin/python
# Filename: ffprobe.py
"""
Python wrapper for ffprobe command line tool. ffprobe must exist in the path or in a common installation path
"""
version = '0.4'
import subprocess
import re
import os
import sys
import platform
from os import listdir
from os.path import isfile, join
import json
import mimetypes
class FFProbe(object):
"""
FFProbe wraps the ffprobe command and pulls the data into an object form::
metadata = FFProbe('multimedia-file.mov')
OR
metadata = FFProbe(file_contents)
OR
metadata = FFProbe('multimedia-file.mov', ffprobe_path='/usr/local/bin/ffprobe')
"""
def __init__(self, source, ffprobe_path=None):
ffprobe_cmd = None
if ffprobe_path is not None and os.path.exists(ffprobe_path):
ffprobe_cmd = ffprobe_path
else:
ffprobe_cmd = os.environ.get('FFPROBE', 'ffprobe')
try:
with open(os.devnull, 'w') as tempf:
subprocess.check_call([ffprobe_cmd, "-h"], stdout=tempf,
stderr=tempf)
except:
paths = {
"Windows": ["ffprobe.exe"],
"Darwin": ["ffprobe", "/opt/local/bin/ffprobe", "/usr/local/bin/ffprobe"],
"Linux": ["ffprobe", "/opt/local/bin/ffprobe", "/usr/local/bin/ffprobe"]
}
# Find path of transcoder
found = False
for path in paths[platform.system()]:
if os.path.exists(path):
ffprobe_cmd = path
found = True
if not found:
raise IOError('ffprobe not found')
self.streams = []
self.video = []
self.audio = []
self.duration = 0.0
self.mimetype = None
self.returncode = None
# If source is file and it exists the use path, otherwise
# open file and send contents to ffprobe through stdin
DEVNULL = open(os.devnull, 'wb')
args = [ffprobe_cmd, "-show_streams", "-print_format", "json", "-show_format", "-i"]
if os.path.isfile(source):
try:
type, encoding = mimetypes.guess_type(source)
self.mimetype = type
except:
pass
args.append(source)
proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=DEVNULL)
else:
args.append("-")
proc = subprocess.Popen(args, stdin=source, stdout=subprocess.PIPE, stderr=DEVNULL)
raw_out = ""
while self.returncode is None:
for line in proc.stdout:
raw_out += line
self.returncode = proc.poll()
proc.stdout.close()
if self.returncode != 0:
raise IOError('ffprobe failed')
json_out = json.loads(raw_out)
for key in json_out["format"]:
self.__dict__[key] = json_out["format"][key]
for stream in json_out["streams"]:
self.streams.append(FFStream(stream))
for stream in self.streams:
if stream.isAudio() or stream.isVideo():
if "duration" not in stream.__dict__ or stream.__dict__["duration"] == 0.0:
stream.__dict__["duration"] = self.duration
if stream.isAudio():
self.audio.append(stream)
if stream.isVideo():
self.video.append(stream)
# @todo If mp4 extension but no video stream then set mimetype to audio/mp4
# @todo Needs to follow http://tools.ietf.org/html/rfc6381
# @todo Need to add mp4v and mp4a (aac)
def html5SourceType(self):
string = ''
if self.mimetype is not None:
if self.mimetype == 'audio/mpeg':
return self.mimetype
string += self.mimetype
video = None
audio = None
if len(self.video) > 0:
video = self.video[0]
if len(self.audio) > 0:
audio = self.audio[0]
if video is not None or audio is not None:
string += '; codecs="'
codecs = []
if video is not None:
if video.codec() == 'h264':
codec = 'avc1.'
profile = video.__dict__["profile"]
if profile == 'High':
codec += '6400'
elif profile == 'Baseline':
codec += '42E0'
elif profile == 'Constrained Baseline':
codec += '42E0'
elif profile == 'Main':
codec += '4D40'
elif profile == 'Extended':
codec += '58A0'
codec += hex(int(video.__dict__["level"]))[2:].upper()
codecs.append(codec)
else:
codecs.append(video.codec())
if audio is not None:
if audio.codec() == 'aac':
codecs.append('mp4a.40.2')
else:
codecs.append(audio.codec())
string += ', '.join(codecs)
string += '"'
return string
def durationSeconds(self):
"""
Returns the runtime duration of the file as a floating point number of seconds.
Returns 0.0 if value not found
"""
f = 0.0
if 'duration' in self.__dict__:
try:
f = float(self.__dict__['duration'])
except Exception as e:
pass
return f
def bitrate(self):
"""
Returns bitrate as an integer in bps
"""
b = 0
if 'bit_rate' in self.__dict__:
try:
b = int(self.__dict__['bit_rate'])
except Exception as e:
pass
return b
class FFStream(object):
"""
An object representation of an individual stream in a multimedia file.
"""
def __init__(self, obj):
for key in obj.keys():
self.__dict__[key] = obj[key]
def isData(self):
"""
Is this stream labelled as an data stream?
"""
val = False
if 'codec_type' in self.__dict__:
if str(self.__dict__['codec_type']) == 'data':
val = True
return val
def isAudio(self):
"""
Is this stream labelled as an audio stream?
"""
val = False
if 'codec_type' in self.__dict__:
if str(self.__dict__['codec_type']) == 'audio':
val = True
return val
def isVideo(self):
"""
Is the stream labelled as a video stream.
"""
val = False
if 'codec_type' in self.__dict__:
if self.codec_type == 'video':
val = True
return val
def isSubtitle(self):
"""
Is the stream labelled as a subtitle stream.
"""
val = False
if 'codec_type' in self.__dict__:
if str(self.codec_type)=='subtitle':
val = True
return val
def frameSize(self):
"""
Returns the pixel frame size as an integer tuple (width,height) if the stream is a video stream.
Returns None if it is not a video stream.
"""
size = None
if self.isVideo():
if 'width' in self.__dict__ and 'height' in self.__dict__:
try:
size = (int(self.__dict__['width']),int(self.__dict__['height']))
except Exception as e:
pass
size = (0,0)
return size
def pixelFormat(self):
"""
Returns a string representing the pixel format of the video stream. e.g. yuv420p.
Returns none is it is not a video stream.
"""
f = None
if self.isVideo():
if 'pix_fmt' in self.__dict__:
f = self.__dict__['pix_fmt']
return f
def frames(self):
"""
Returns the length of a video stream in frames. Returns 0 if not a video stream.
"""
f = 0
if self.isVideo() or self.isAudio():
if 'nb_frames' in self.__dict__:
try:
f = int(self.__dict__['nb_frames'])
except Exception as e:
pass
return f
def durationSeconds(self):
"""
Returns the runtime duration of the video stream as a floating point number of seconds.
Returns 0.0 if not a video stream.
"""
f = 0.0
if self.isVideo() or self.isAudio():
if 'duration' in self.__dict__:
try:
f = float(self.__dict__['duration'])
except Exception as e:
pass
return f
def language(self):
"""
Returns language tag of stream. e.g. eng
"""
lang = None
if 'TAG:language' in self.__dict__:
lang = self.__dict__['TAG:language']
return lang
def codec(self):
"""
Returns a string representation of the stream codec.
"""
codec_name = None
if 'codec_name' in self.__dict__:
codec_name = self.__dict__['codec_name']
return codec_name
def codecDescription(self):
"""
Returns a long representation of the stream codec.
"""
codec_d = None
if 'codec_long_name' in self.__dict__:
codec_d = self.__dict__['codec_long_name']
return codec_d
def codecTag(self):
"""
Returns a short representative tag of the stream codec.
"""
codec_t = None
if 'codec_tag_string' in self.__dict__:
codec_t = self.__dict__['codec_tag_string']
return codec_t
def bitrate(self):
"""
Returns bitrate as an integer in bps
"""
b = 0
if 'bit_rate' in self.__dict__:
try:
b = int(self.__dict__['bit_rate'])
except Exception as e:
pass
return b
def frameRate(self):
"""
Returns the framerate as an float in frames/second
"""
f = 0.0
if 'codec_type' in self.__dict__:
if str(self.__dict__['codec_type']) == 'video':
try:
if 'r_frame_rate' in self.__dict__:
values = self.__dict__['r_frame_rate']
values = values.split('/')
try:
f = float(values[0])/float(values[1])
except Exception as e:
pass
else:
if 'nb_frames' in self.__dict__ and 'duration' in self.__dict__:
try:
f = float(self.__dict__['nb_frames'])/float(self.__dict__['duration'])
except Exception as e:
pass
except Exception as e:
pass
return f
def printMeta(path):
m = FFProbe(path)
name = os.path.split(path)[1]
stream_count = 1
for s in m.streams:
        type = "Video" if s.isVideo() else "Audio"
print "[ %s - Stream #%s - %s ]" % (name, stream_count, type)
stream_count += 1
if s.isVideo():
print "Framerate: %f" % s.frameRate()
print "Frames: %i" % s.frames()
print "Width: %i" % s.frameSize()[0]
print "Height: %i" % s.frameSize()[1]
print "Duration: %f" % s.durationSeconds()
print "Bitrate: %i" % s.bitrate()
print ""
if __name__ == '__main__':
if len(sys.argv) == 2:
path = sys.argv[1]
if os.path.isfile(path):
printMeta(path)
elif os.path.isdir(path):
files = [ f for f in listdir(path) if isfile(join(path,f)) ]
for file in files:
if not file.startswith("."):
printMeta(path + file)
else:
sys.exit(1)
else:
print "Usage: python ffprobe.py <file>|<directory>"
| mit | 3,546,136,138,872,157,700 | 31.371429 | 109 | 0.473482 | false |
Frky/scat | src/shell/chart/alloc.py | 1 | 6907 | #-*- coding: utf-8 -*-
import os
import sys
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import pylab as P
import pandas as pd
from .chart import Chart
from .entry.alloc import AllocEntry
class AllocChart(Chart):
def __init__(self, oracle, *args, **kwargs):
super(AllocChart, self).__init__(*args, **kwargs)
with open("test/coreutils.txt", "r") as f:
self.__coreutils = [line[:-1] for line in f.readlines()]
self._analysis = "alloc"
self.__parse_log()
self._data = sum(self._data.values(), list())
self.__oracle = oracle
def __parse_log(self):
if not os.path.exists(self._log):
return
with open(self._log, "r") as f:
for line in f.readlines():
pgm = line[:-1].split(":")[0]
self._data.setdefault(pgm, list())
entry = AllocEntry(line)
self._data[pgm].append(entry)
def __ok_or_ko(self, pgm, res, entry):
if res == "None":
return "n.c."
try:
if self.__oracle[pgm][entry] is not None and res in self.__oracle[pgm][entry]:
return "\\checked"
else:
return "\\texttimes"
except KeyError:
return "n.c."
def get(self, pgm=None):
if pgm is None:
return self._data
else:
return filter(lambda a: a.pgm == pgm, self._data)
def table(self):
tot = {
"alloc": {
"\\texttimes": 0,
"n.c.": 0,
"\\checked": 0,
},
"free": {
"\\texttimes": 0,
"n.c.": 0,
"\\checked": 0,
},
}
for e in sorted(self._data, key=lambda a:a.pgm):
if e.pgm not in self.__coreutils:
continue
if e.alloc == "None":
continue
print "{{\\tt {}}} & {}/{} & {:.3g} & {:.3g} & {:.3g}/{:.3g} \\\\".format(
e.pgm,
self.__ok_or_ko(e.pgm, e.alloc, "alloc"),
self.__ok_or_ko(e.pgm, e.free, "free"),
e.error_rate,
e.online,
e.offline[0],
e.offline[1],
)
tot["alloc"][self.__ok_or_ko(e.pgm, e.alloc, "alloc")] += 1
tot["free"][self.__ok_or_ko(e.pgm, e.free, "free")] += 1
for e in sorted(self._data, key=lambda a:a.pgm):
if e.pgm in self.__coreutils:
continue
print "{{\\tt {}}} & {}/{} & {:.3g} & {:.3g} & {:.3g}/{:.3g} \\\\".format(
e.pgm,
self.__ok_or_ko(e.pgm, e.alloc, "alloc"),
self.__ok_or_ko(e.pgm, e.free, "free"),
e.error_rate,
e.online,
e.offline[0],
e.offline[1],
)
print tot
def table_cmp(self, other):
for c in sorted(self._data, key=lambda a:a.pgm):
t = other.get(c.pgm)[0]
if c.pgm in self.__coreutils:
continue
print "{{\\tt {}}} & {}/{} & {}/{} & {:.3g} & {:.3g} & {:.3g}/{:.3g} & {:.3g}/{:.3g} \\\\".format(
c.pgm,
self.__ok_or_ko(c.pgm, c.alloc, "alloc"),
self.__ok_or_ko(c.pgm, c.free, "free"),
self.__ok_or_ko(t.pgm, t.alloc, "alloc"),
self.__ok_or_ko(t.pgm, t.free, "free"),
c.online,
t.online,
c.offline[0],
c.offline[1],
t.offline[0],
t.offline[1],
)
def draw_consistency(self):
data = dict()
for entry in self._data:
data.setdefault(entry.pgm, list())
data[entry.pgm].append(entry)
plt.figure(figsize=(12, 9))
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.ylabel("consistency rate")
ax = plt.subplot(111)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["left"].set_visible(True)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.xaxis.set_ticklabels([])
plt.tick_params(axis="both", which="both", bottom="off", top="off",
labelbottom="on", left="off", right="off", labelleft="on")
N = 0
for rank, (pgm, entries) in enumerate(data.items()):
consistency_rate = map(lambda a: a.consistency, entries)
color = Chart.generic_colors[rank % len(Chart.generic_colors)]
# Plot each line separately with its own color, using the Tableau 20
# color set in order.
for e in entries:
if self.__ok_or_ko(e.pgm, e.alloc, "alloc") == "\\checked" and \
self.__ok_or_ko(e.pgm, e.free, "free") == "\\checked":
if e.consistency >= 0.95:
color = Chart.colors["acc"]
else:
color = Chart.colors["fn"]
else:
if e.consistency > 0.95:
if self.__ok_or_ko(e.pgm, e.alloc, "alloc") == "\\checked":
color = Chart.generic_colors[-1]
else:
print e.pgm, e.alloc, e.free
color = Chart.colors["tot"]
else:
color = Chart.colors["acc"]
plt.plot(N, e.consistency, 'o', color=color, mec=color)
N += 1
# plt.plot(range(N, N + len(error_rate)), error_rate, 'o',
# lw=0, color=color, label=pgm, mec=color)
# plt.text(N, -0.05 * (1 + ((1 + rank) % 2)), pgm, color=color, fontsize=18)
N += 1
if rank < len(data.keys()) - 1:
plt.plot((N - 1, N - 1), (0, 1), '--', color="black", alpha=0.3)
xmin, xmax = -1, N
ymin, ymax = -0.1, 1.1
plt.ylim(ymin, ymax)
plt.xlim(xmin, xmax)
plt.plot([xmin, xmax], [0.95, 0.95], "-", lw=0.5, color="black", alpha=0.5)
plt.plot([xmin, xmax], [0, 0], "-", lw=1, color="black")
plt.savefig("test/chart/alloc_consistency.png", bbox_inches="tight")
| mit | -6,983,431,459,954,425,000 | 37.586592 | 110 | 0.413638 | false |
hjoliver/cylc | cylc/flow/scripts/trigger.py | 1 | 2594 | #!/usr/bin/env python3
# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""cylc trigger [OPTIONS] ARGS
Manually trigger tasks.
Examples:
$ cylc trigger REG # trigger all tasks in a running workflow
$ cylc trigger REG TASK_GLOB ... # trigger some tasks in a running workflow
NOTE waiting tasks that are queue-limited will be queued if triggered, to
submit as normal when released by the queue; queued tasks will submit
immediately if triggered, even if that violates the queue limit (so you may
need to trigger a queue-limited task twice to get it to submit immediately).
"""
import os.path
from cylc.flow.network.client_factory import get_client
from cylc.flow.option_parsers import CylcOptionParser as COP
from cylc.flow.terminal import cli_function
MUTATION = '''
mutation (
$wFlows: [WorkflowID]!,
$tasks: [NamespaceIDGlob]!,
$reflow: Boolean,
) {
trigger (
workflows: $wFlows,
tasks: $tasks,
reflow: $reflow
) {
result
}
}
'''
def get_option_parser():
parser = COP(
__doc__, comms=True, multitask_nocycles=True,
argdoc=[
('REG', 'Workflow name'),
('[TASK_GLOB ...]', 'Task matching patterns')])
parser.add_option(
"-r", "--reflow",
help="Start a new flow from the triggered task.",
action="store_true", default=False, dest="reflow")
return parser
@cli_function(get_option_parser)
def main(parser, options, workflow, *task_globs):
"""CLI for "cylc trigger"."""
workflow = os.path.normpath(workflow)
pclient = get_client(workflow, timeout=options.comms_timeout)
mutation_kwargs = {
'request_string': MUTATION,
'variables': {
'wFlows': [workflow],
'tasks': list(task_globs),
'reflow': options.reflow,
}
}
pclient('graphql', mutation_kwargs)
if __name__ == "__main__":
main()
| gpl-3.0 | -334,009,959,158,072,200 | 27.822222 | 78 | 0.676561 | false |
quarkslab/irma | probe/modules/custom/skeleton/plugin.py | 1 | 2240 | #
# Copyright (c) 2013-2018 Quarkslab.
# This file is part of IRMA project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the top-level directory
# of this distribution and at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# No part of the project, including this file, may be copied,
# modified, propagated, or distributed except according to the
# terms contained in the LICENSE file.
from datetime import datetime
from irma.common.utils.utils import timestamp
from irma.common.plugins import PluginBase
from irma.common.plugin_result import PluginResult
from irma.common.base.utils import IrmaProbeType
from irma.common.plugins.exceptions import PluginLoadError
class SkeletonPlugin(PluginBase):
class SkeletonResult:
ERROR = -1
FAILURE = 0
SUCCESS = 1
# =================
# plugin metadata
# =================
_plugin_name_ = "Skeleton"
_plugin_display_name_ = "Skeleton Display Name"
_plugin_author_ = "<author name>"
_plugin_version_ = "<version>"
_plugin_category_ = "custom"
_plugin_description_ = "Plugin skeleton"
_plugin_dependencies_ = []
_mimetype_regexp = None
# =============
# constructor
# =============
def __init__(self):
pass
@classmethod
def verify(cls):
raise PluginLoadError("Skeleton plugin is not meant to be loaded")
# ==================
# probe interfaces
# ==================
def run(self, paths):
response = PluginResult(name=type(self).plugin_display_name,
type=type(self).plugin_category,
version=None)
try:
started = timestamp(datetime.utcnow())
response.results = "Main analysis call here"
stopped = timestamp(datetime.utcnow())
response.duration = stopped - started
response.status = self.SkeletonResult.SUCCESS
except Exception as e:
response.status = self.SkeletonResult.ERROR
response.results = type(e).__name__ + " : " + str(e)
return response
| apache-2.0 | 5,575,655,391,303,937,000 | 30.549296 | 74 | 0.615179 | false |
jabber-at/hp | hp/core/tests/tests_views.py | 1 | 1386 | # -*- coding: utf-8 -*-
#
# This file is part of the jabber.at homepage (https://github.com/jabber-at/hp).
#
# This project is free software: you can redistribute it and/or modify it under the terms of the
# GNU General Public License as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This project is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with this project. If
# not, see <http://www.gnu.org/licenses/>.
from django.urls import reverse
from .base import SeleniumTestCase
class SetLangTestCase(SeleniumTestCase):
def test_basic(self):
self.selenium.get('%s%s' % (self.live_server_url, reverse('blog:home')))
self.wait_for_page_load()
de_selector = '.lang-item-de a'
en_selector = '.lang-item-en a'
de_link = self.find(de_selector)
self.assertDisplayed(de_link)
self.assertNoElementExists(en_selector)
de_link.click()
self.wait_for_page_load()
en_link = self.find(en_selector)
self.assertDisplayed(en_link)
self.assertNoElementExists(de_selector)
| gpl-3.0 | -114,297,983,326,547,660 | 36.459459 | 99 | 0.701299 | false |
ZeitOnline/zeit.cms | src/zeit/cms/workingcopy/browser/preview.py | 1 | 2690 | import urllib2
import urlparse
import zeit.cms.browser.interfaces
import zeit.cms.browser.preview
import zeit.cms.interfaces
import zeit.cms.workingcopy.interfaces
import zeit.connector.interfaces
import zope.app.appsetup.product
import zope.component
class WorkingcopyPreview(zeit.cms.browser.preview.Preview):
"""Preview for workingcopy versions of content objects.
This supports two modes of operation:
1. Upload the workingcopy version of an object to the repository, retrieve
the html and return it (proxying the result).
2. Give the workingcopy URL to the preview service (for those who can
traverse it directly) and redirect to it as for the repository preview.
"""
def __call__(self):
url = self.get_preview_url_for(self.context)
if self.should_upload(url):
return self.proxied_preview()
else:
return self.redirect(self.workingcopy_url(url), trusted=True)
def should_upload(self, url):
return 'friedbert' not in url # XXX Really kludgy heuristics
def proxied_preview(self):
preview_obj = self.temporary_checkin()
url = self.get_preview_url_for(preview_obj)
preview_request = urllib2.urlopen(url)
del preview_obj.__parent__[preview_obj.__name__]
return preview_request.read()
def get_preview_url_for(self, preview_context):
url = zope.component.getMultiAdapter(
(preview_context, self.preview_type),
zeit.cms.browser.interfaces.IPreviewURL)
querystring = self.request.environment['QUERY_STRING']
if querystring:
url = '%s?%s' % (url, querystring)
return url
def temporary_checkin(self):
content = zeit.cms.interfaces.ICMSContent(
zeit.connector.interfaces.IResource(self.context))
content.uniqueId = None
target_folder = zeit.cms.interfaces.ICMSContent(
self.context.uniqueId).__parent__
temp_id = self.get_temp_id(self.context.__name__)
target_folder[temp_id] = content
return content
def get_temp_id(self, name):
return 'preview-%s-%s' % (
self.request.principal.id, name)
def workingcopy_url(self, url):
repository_path = urlparse.urlparse(self.context.uniqueId).path
fullpath = self.url(self.context)
workingcopy = self.url(zope.component.getUtility(
zeit.cms.workingcopy.interfaces.IWorkingcopyLocation))
workingcopy_path = fullpath.replace(workingcopy, '')
config = zope.app.appsetup.product.getProductConfiguration('zeit.cms')
workingcopy_path = config[
'friebert-wc-preview-prefix'] + workingcopy_path
url = url.replace(repository_path, workingcopy_path)
return url
| bsd-3-clause | -3,901,331,997,260,017,700 | 35.351351 | 78 | 0.662825 | false |
mluscon/ci-dnf-stack | dnf-docker-test/features/steps/repo_steps.py | 1 | 5554 | from __future__ import absolute_import
from __future__ import unicode_literals
import glob
import os
import tempfile
from behave import given
from behave import register_type
from behave import when
from behave.model import Table
import jinja2
import parse
from whichcraft import which
from command_steps import step_i_successfully_run_command
from file_steps import HEADINGS_INI
from file_steps import conf2table
from file_steps import step_a_file_filepath_with
from file_steps import step_an_ini_file_filepath_with
import file_utils
import table_utils
PKG_TMPL = """
Name: {{ name }}
Summary: {{ summary|default("Empty") }}
Version: {{ version|default("1") }}
Release: {{ release|default("1") }}%{?dist}
License: {{ license|default("Public Domain") }}
BuildArch: noarch
{%- if buildrequires is defined %}
{% for buildreq in buildrequires %}
BuildRequires: {{ buildreq }}
{%- endfor %}
{%- endif %}
{%- if requires is defined %}
{% for req in requires %}
Requires: {{ req }}
{%- endfor %}
{%- endif %}
{%- if obsoletes is defined %}
{% for obs in obsoletes %}
Obsoletes: {{ obs }}
{%- endfor %}
{%- endif %}
{%- if provides is defined %}
{% for prv in provides %}
Provides: {{ prv }}
{%- endfor %}
{%- endif %}
%description
%{summary}.
%files
"""
REPO_TMPL = "/etc/yum.repos.d/{!s}.repo"
HEADINGS_REPO = ["Package", "Tag", "Value"]
PKG_TAGS_REPEATING = ["BuildRequires", "Requires", "Obsoletes", "Provides"]
PKG_TAGS = ["Summary", "Version", "Release", "License"] + PKG_TAGS_REPEATING
JINJA_ENV = jinja2.Environment(undefined=jinja2.StrictUndefined)
@parse.with_pattern(r"enable|disable")
def parse_enable_disable(text):
if text == "enable":
return True
if text == "disable":
return False
assert False
register_type(enable_disable=parse_enable_disable)
@when('I remove all repositories')
def step_i_remove_all_repositories(ctx):
"""
Remove all ``*.repo`` files in ``/etc/yum.repos.d/``.
"""
for f in glob.glob("/etc/yum.repos.d/*.repo"):
os.remove(f)
@given('repository "{repository}" with packages')
def given_repository_with_packages(ctx, repository):
"""
Builds dummy noarch packages, creates repo and *.repo* file.
.. note::
Requires *rpmbuild* and *createrepo_c*.
Requires table with following headers:
========= ===== =======
Package Tag Value
========= ===== =======
*Tag* is tag in RPM. Supported ones are:
============= ===============
Tag Default value
============= ===============
Summary Empty
Version 1
Release 1
License Public Domain
BuildRequires []
Requires []
Obsoletes []
Provides []
============= ===============
All packages are built during step execution.
.. note::
*BuildRequires* are ignored for build-time (*rpmbuild* is executed
with ``--nodeps`` option).
Examples:
.. code-block:: gherkin
Feature: Working with repositories
Background: Repository base with dummy package
Given repository base with packages
| Package | Tag | Value |
| foo | | |
Scenario: Installing dummy package from background
When I enable repository base
Then I successfully run "dnf -y install foo"
"""
packages = table_utils.parse_skv_table(ctx, HEADINGS_REPO,
PKG_TAGS, PKG_TAGS_REPEATING)
rpmbuild = which("rpmbuild")
ctx.assertion.assertIsNotNone(rpmbuild, "rpmbuild is required")
createrepo = which("createrepo_c")
ctx.assertion.assertIsNotNone(createrepo, "createrepo_c is required")
tmpdir = tempfile.mkdtemp()
template = JINJA_ENV.from_string(PKG_TMPL)
for name, settings in packages.items():
settings = {k.lower(): v for k, v in settings.items()}
ctx.text = template.render(name=name, **settings)
fname = "{!s}/{!s}.spec".format(tmpdir, name)
step_a_file_filepath_with(ctx, fname)
cmd = "{!s} --define '_rpmdir {!s}' -bb {!s}".format(
rpmbuild, tmpdir, fname)
step_i_successfully_run_command(ctx, cmd)
cmd = "{!s} {!s}".format(createrepo, tmpdir)
step_i_successfully_run_command(ctx, cmd)
repofile = REPO_TMPL.format(repository)
ctx.table = Table(HEADINGS_INI)
ctx.table.add_row([repository, "name", repository])
ctx.table.add_row(["", "enabled", "False"])
ctx.table.add_row(["", "gpgcheck", "False"])
ctx.table.add_row(["", "baseurl", "file://{!s}".format(tmpdir)])
step_an_ini_file_filepath_with(ctx, repofile)
@given('empty repository "{repository}"')
def given_empty_repository(ctx, repository):
"""
Same as :ref:`Given repository "{repository}" with packages`, but without
packages (empty).
"""
ctx.table = Table(HEADINGS_REPO)
given_repository_with_packages(ctx, repository)
@when('I {state:enable_disable} repository "{repository}"')
def i_enable_disable_repository(ctx, state, repository):
"""
Enable/Disable repository with given name.
"""
repofile = REPO_TMPL.format(repository)
conf = file_utils.read_ini_file(repofile)
conf.set(repository, "enabled", str(state))
ctx.table = conf2table(conf)
step_an_ini_file_filepath_with(ctx, repofile)
| gpl-3.0 | -8,378,289,381,050,230,000 | 28.700535 | 77 | 0.601728 | false |
NSasquatch/vocoder | toolset.py | 1 | 2495 | # -*- coding: latin-1 -*-
"""
This module was created by Silas Gyger, [email protected].
It stands under CC BY 4.0 License.
http://creativecommons.org/licenses/by/4.0/
"""
import thinkdsp
import numpy as np
from matplotlib import pyplot
class Wave(thinkdsp.Wave):
def __getitem__(self, key):
return self.ys[key]
def __setitem__(self, key, value):
self.ys[key] = value
return self
@property
def length(self):
return len(self.ys)
def multiply_with(self, obj):
"""
		Multiplies all y-values by the y-values of another wave or an array.
:param obj: toolset.Wave, thinkdsp.Wave or np.array
:return: self
"""
assert isinstance(obj, (Wave, thinkdsp.Wave, np.ndarray)), "The object this Wave should be multiplied with must" \
" either be a %s, %s or a %s instance." % (Wave, thinkdsp.Wave, np.ndarray)
if isinstance(obj, (Wave, thinkdsp.Wave)):
self.ys *= obj.ys
else:
self.ys *= obj
return self
def make_average_amp_curve(self, buffer_size=800):
"""
Creates an amp-curve using the "average" algorithm.
:return: array containing amps
"""
positive = lambda x: 0 if x < 0 else x
amps = np.zeros(self.length)
for i, y in enumerate(self.ys):
buffer_ys = self.ys[positive(i-buffer_size/2):i+buffer_size/2]
amps[i] = abs(np.sum(np.abs(buffer_ys))/buffer_size)
return amps
def make_spectrum(self):
"""Computes the spectrum using FFT.
returns: Spectrum
"""
hs = np.fft.rfft(self.ys)
return Spectrum(hs, self.framerate)
class Spectrum(thinkdsp.Spectrum):
def band_pass(self, position, range, factor=0):
"""
Attenuate all frequencies except the ones inside the cutoffs.
low_cutoff: frequency in Hz
high_cutoff: frequency in Hz
factor: what to multiply the magnitude by
"""
low_cutoff, high_cutoff = position-range, position+range
self.high_pass(low_cutoff, factor)
self.low_pass(high_cutoff, factor)
def make_wave(self):
"""Transforms to the time domain.
returns: Wave
"""
ys = np.fft.irfft(self.hs)
return Wave(ys, self.framerate)
def to_wave(obj, framerate=None):
"""
Converts a thinkdsp-Wave or a numpy-Array to a toolset.Wave.
:param obj: The wave/array
:param framerate: Framerate of wanted wave if obj is a numpy array
"""
if isinstance(obj, thinkdsp.Wave):
return Wave(obj.ys, obj.framerate)
if isinstance(obj, np.ndarray):
if framerate is None:
			raise ValueError("Missing framerate to convert numpy array to wave.")
else:
return Wave(obj, framerate)
| cc0-1.0 | 4,411,227,925,583,878,000 | 24.20202 | 116 | 0.694188 | false |
nugget/python-insteonplm | insteonplm/devices/climateControl.py | 1 | 3689 | """INSTEON Climate Control Device Class."""
import logging
from insteonplm.devices import Device
from insteonplm.constants import COMMAND_EXTENDED_GET_SET_0X2E_0X00
from insteonplm.messages.extendedSend import ExtendedSend
from insteonplm.messages.userdata import Userdata
from insteonplm.states.thermostat import (
Temperature,
Humidity,
SystemMode,
FanMode,
CoolSetPoint,
HeatSetPoint,
)
from insteonplm.states.statusReport import StatusReport
_LOGGER = logging.getLogger(__name__)
class ClimateControl_2441th(Device):
"""Thermostat model 2441TH."""
def __init__(
self, plm, address, cat, subcat, product_key=None, description=None, model=None
):
"""Init the DimmableLightingControl Class."""
Device.__init__(
self, plm, address, cat, subcat, product_key, description, model
)
self._stateList[0x01] = CoolSetPoint(
self._address,
"coolSetPoint",
0x01,
self._send_msg,
self._message_callbacks,
0x00,
)
self._stateList[0x02] = HeatSetPoint(
self._address,
"heatSetPoint",
0x02,
self._send_msg,
self._message_callbacks,
0x00,
)
self._stateList[0xEF] = StatusReport(
self._address,
"statusReport",
0xEF,
self._send_msg,
self._message_callbacks,
0x00,
)
self._system_mode = SystemMode(
self._address,
"systemMode",
0x10,
self._send_msg,
self._message_callbacks,
0x00,
)
self._fan_mode = FanMode(
self._address,
"fanMode",
0x11,
self._send_msg,
self._message_callbacks,
0x00,
)
self._temp = Temperature(
self._address,
"temperature",
0x12,
self._send_msg,
self._message_callbacks,
0x00,
)
self._humidity = Humidity(
self._address,
"humidity",
0x13,
self._send_msg,
self._message_callbacks,
0x00,
)
@property
def cool_set_point(self):
"""Return the cool set point state."""
return self._stateList[0x01]
@property
def heat_set_point(self):
"""Return the heat set point state."""
return self._stateList[0x02]
@property
def system_mode(self):
"""Return the mode state."""
return self._system_mode
@property
def fan_mode(self):
"""Return the mode state."""
return self._fan_mode
@property
def temperature(self):
"""Return the temperature state."""
return self._temp
@property
def humidity(self):
"""Return the humidity state."""
return self._humidity
def async_refresh_state(self):
"""Request each state to provide status update."""
_LOGGER.debug("Setting up extended status")
ext_status = ExtendedSend(
address=self._address,
commandtuple=COMMAND_EXTENDED_GET_SET_0X2E_0X00,
cmd2=0x02,
userdata=Userdata(),
)
ext_status.set_crc()
_LOGGER.debug("Sending ext status: %s", ext_status)
self._send_msg(ext_status)
_LOGGER.debug("Sending temp status request")
self.temperature.async_refresh_state()
# pylint: disable=unused-argument
def _mode_changed(self, addr, group, val):
self.async_refresh_state()
| mit | -4,059,959,734,203,768,000 | 24.978873 | 87 | 0.548658 | false |
johan92/yafpgatetris | string_stuff/big_string_to_mif.py | 1 | 1180 | #!/usr/bin/python
import sys
def print_usage( argv ):
print "Creates mif file for ROM for strings.\n Input: raw file with zeros and ones.\n Output: MIF file \nUsage: %s STRING_FILE_NAME MIF_FILE_NAME" % ( sys.argv[0] )
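# Illustrative example (hypothetical input, not from the original script): an input
# file with the two rows "10" and "01" gives MSG_X = 2, MSG_Y = 2 and a MIF body of
#   0000 : 01;
#   0001 : 10;
# i.e. each MIF word is one column of the input, read with the bottom row first.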
if __name__ == "__main__":
if len( sys.argv ) < 3:
print_usage( sys.argv )
exit( -1 )
if sys.argv[1] == "--help" or sys.argv[1] == "-h":
print_usage( sys.argv )
exit( 0 )
f1 = open( sys.argv[1], "r" )
line_num = 0
lines = []
for line in f1:
print line
lines.append( line[:-1] ) # minus /n
orig_x = len( lines[0] )
orig_y = len( lines )
print "MSG_X = %d" % ( orig_x )
print "MSG_Y = %d" % ( orig_y )
rev_lines = []
for x in xrange( orig_x ):
l = ""
for y in xrange( orig_y ):
l = lines[y][x] + l
rev_lines.append( l )
rom_width = orig_y
rom_depth = orig_x
f2 = open( sys.argv[2], "w" )
f2.write("WIDTH=%d;\n" % rom_width )
f2.write("DEPTH=%d;\n" % rom_depth )
f2.write("ADDRESS_RADIX=HEX;\nDATA_RADIX=BIN;\nCONTENT BEGIN\n" )
for (i, l) in enumerate( rev_lines ):
f2.write( "%s : %s;\n" % ( "{:04x}".format(i), l ) )
f2.write("END;")
| mit | 5,125,996,493,227,969,000 | 20.454545 | 169 | 0.534746 | false |
california-civic-data-coalition/django-calaccess-processed-data | calaccess_processed_flatfiles/managers/candidacies.py | 1 | 1893 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Managers for generating flatfiles that combine multiple table into a simplified file.
"""
from __future__ import unicode_literals
from calaccess_processed.postgres import (
JSONArrayLength,
JSONExtractPath,
MaxFromJSONIntegerArray,
)
from django.db.models import F, Q
from django.db.models import Count, Max
from calaccess_processed.managers import BulkLoadSQLManager
class OCDFlatCandidacyManager(BulkLoadSQLManager):
"""
Custom manager for flattening the contents of the OCD Candidacy model.
"""
def get_queryset(self):
"""
Returns the custom QuerySet for this manager.
"""
return super(
OCDFlatCandidacyManager, self
).get_queryset().filter(
Q(person__identifiers__scheme='calaccess_filer_id')
| Q(person__identifiers__isnull=True)
).annotate(
name=F('candidate_name'),
office=F('post__label'),
party_name=F('party__name'),
election_name=F('contest__election__name'),
election_date=F('contest__election__date'),
special_election=F('contest__previous_term_unexpired'),
ocd_person_id=F('person__id'),
ocd_candidacy_id=F('id'),
ocd_election_id=F('contest__election'),
ocd_post_id=F('post__id'),
ocd_contest_id=F('contest'),
ocd_party_id=F('party'),
latest_calaccess_filer_id=Max('person__identifiers__identifier'),
calaccess_filer_id_count=Count('person__identifiers__identifier'),
latest_form501_filing_id=MaxFromJSONIntegerArray(
'extras',
'form501_filing_ids'
),
form501_filing_count=JSONArrayLength(
JSONExtractPath('extras', 'form501_filing_ids')
),
)
| mit | -4,057,872,674,531,777,000 | 35.403846 | 85 | 0.603275 | false |
ajyoon/brown | brown/core/brace.py | 1 | 3241 | from brown.core.multi_staff_object import MultiStaffObject
from brown.core.music_font import MusicFontGlyphNotFoundError
from brown.core.music_text import MusicText
from brown.core.staff_object import StaffObject
from brown.utils.point import Point
from brown.utils.units import GraphicUnit
class Brace(MultiStaffObject, StaffObject, MusicText):
"""A brace spanning staves, recurring at line beginnings.
The brace is drawn at the beginning of every line
after its initial x position until the end of the staff.
A brace will be drawn on the first line it appears on
if and only if it is placed *exactly* at the line beginning.
Consequently, `Brace(Mm(0), Mm(1000), some_staves)` will appear
on the first line of the flowable, while
`Brace(Mm(1), Mm(1000), some_staves)` will not begin drawing
until the second line.
"""
def __init__(self, pos_x, staves):
"""
Args:
pos_x (Unit): Where this brace goes into effect
staves (set(Staff)): The staves this brace spans
"""
MultiStaffObject.__init__(self, staves)
StaffObject.__init__(self, self.highest_staff)
# Calculate the height of the brace in highest_staff staff units
scale = self.vertical_span / self.highest_staff.unit(4)
if self.vertical_span > self.highest_staff.unit(50):
text = ('brace', 4)
elif self.vertical_span > self.highest_staff.unit(30):
text = ('brace', 3)
elif self.vertical_span > self.highest_staff.unit(15):
text = ('brace', 2)
elif self.vertical_span > self.highest_staff.unit(4):
text = 'brace'
else:
text = ('brace', 1)
try:
# Attempt to use size-specific optional glyph
MusicText.__init__(self,
(pos_x, self.vertical_span),
text,
self.highest_staff,
scale_factor=scale)
except MusicFontGlyphNotFoundError:
# Default to non-optional glyph
MusicText.__init__(self,
(pos_x, self.vertical_span),
'brace',
self.highest_staff,
scale_factor=scale)
######## PUBLIC PROPERTIES ########
@property
def length(self):
"""Unit: The breakable width of the object.
This is used to determine how and where rendering cuts should be made.
"""
return (self.staff.length
- self.flowable.map_between_locally(self.staff, self).x)
######## PRIVATE METHODS ########
def _render_before_break(self, local_start_x, start, stop, dist_to_line_start):
if start.x == GraphicUnit(0):
self._render_complete(Point(start.x - self.bounding_rect.width, start.y))
def _render_after_break(self, local_start_x, start, stop):
self._render_complete(Point(start.x - self.bounding_rect.width, start.y))
def _render_spanning_continuation(self, local_start_x, start, stop):
self._render_complete(Point(start.x - self.bounding_rect.width, start.y))
| gpl-3.0 | 2,192,175,412,026,653,000 | 39.012346 | 85 | 0.59241 | false |
yw374cornell/e-mission-server | emission/analysis/intake/cleaning/cleaning_methods/speed_outlier_detection.py | 1 | 1286 | # Techniques for outlier detection of speeds. Each of these returns a speed threshold that
# can be used with outlier detection techniques.
# Standard imports
import logging
class BoxplotOutlier(object):
MINOR = 1.5
MAJOR = 3
def __init__(self, multiplier = MAJOR, ignore_zeros = False):
self.multiplier = multiplier
self.ignore_zeros = ignore_zeros
def get_threshold(self, with_speeds_df):
if self.ignore_zeros:
df_to_use = with_speeds_df[with_speeds_df.speed > 0]
else:
df_to_use = with_speeds_df
quartile_vals = df_to_use.quantile([0.25, 0.75]).speed
logging.debug("quartile values are %s" % quartile_vals)
iqr = quartile_vals.iloc[1] - quartile_vals.iloc[0]
logging.debug("iqr %s" % iqr)
return quartile_vals.iloc[1] + self.multiplier * iqr
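# Illustrative usage (assumes `with_speeds_df` is a pandas DataFrame with a 'speed'
# column; the variable names are placeholders, not from the original module):
#   threshold = BoxplotOutlier(multiplier=BoxplotOutlier.MAJOR).get_threshold(with_speeds_df)
#   outliers = with_speeds_df[with_speeds_df.speed > threshold]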
class SimpleQuartileOutlier(object):
def __init__(self, quantile = 0.99, ignore_zeros = False):
self.quantile = quantile
self.ignore_zeros = ignore_zeros
def get_threshold(self, with_speeds_df):
if self.ignore_zeros:
df_to_use = with_speeds_df[with_speeds_df.speed > 0]
else:
df_to_use = with_speeds_df
return df_to_use.speed.quantile(self.quantile)
| bsd-3-clause | 4,354,731,408,748,637,700 | 34.722222 | 91 | 0.635303 | false |
smurn/ifaddr | setup.py | 1 | 1701 | # Copyright (c) 2014 Stefan C. Mueller
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import sys
import os.path
from setuptools import setup, find_packages
if os.path.exists('README.rst'):
with open('README.rst') as f:
long_description = f.read()
else:
long_description = ""
setup(
name = 'ifaddr',
version = '0.1.6',
description='Enumerates all IP addresses on all network adapters of the system.',
long_description=long_description,
author='Stefan C. Mueller',
author_email='[email protected]',
url='https://github.com/pydron/ifaddr',
packages = find_packages(),
install_requires = ['ipaddress'] if sys.version_info[:2] < (3, 3) else [],
) | mit | 8,916,418,490,596,393,000 | 40.512195 | 85 | 0.736626 | false |
snower/forsun | forsun/servers/server.py | 1 | 2811 | # -*- coding: utf-8 -*-
# 15/6/10
# create by: snower
import logging
import threading
from tornado.ioloop import IOLoop, asyncio
from thrift.protocol.TBinaryProtocol import TBinaryProtocolAcceleratedFactory
from torthrift.transport import TIOStreamTransportFactory
from torthrift.server import TTornadoServer as BaseTTornadoServer
from .processor.Forsun import Processor
from .handler import Handler
from .http import HTTPServer, Application
from .. import timer
from ..status import forsun_status
from .. import config
class TTornadoServer(BaseTTornadoServer):
def process(self, *args, **kwargs):
try:
forsun_status.connecting_count += 1
forsun_status.connected_count += 1
return super(TTornadoServer, self).process(*args, **kwargs)
finally:
forsun_status.connecting_count -= 1
class Server(object):
def __init__(self, forsun):
self.forsun = forsun
self.server = None
self.http_server = None
self.thread = None
def serve_thrift(self):
handler = Handler(self.forsun)
processor = Processor(handler)
tfactory = TIOStreamTransportFactory()
protocol = TBinaryProtocolAcceleratedFactory()
bind_address = config.get("BIND_ADDRESS", "127.0.0.1")
port = config.get("PORT", 6458)
self.server = TTornadoServer(processor, tfactory, protocol)
self.server.bind(port, bind_address)
self.server.start(1)
logging.info("starting server by %s:%s", bind_address, port)
def serve_http(self):
http_bind = config.get("HTTP_BIND")
if not http_bind:
return
(address, port) = http_bind.split(":")
application = Application(self.forsun, debug=False, autoreload=False)
self.http_server = HTTPServer(application, xheaders=True)
self.http_server.bind(int(port), address)
self.http_server.start(1)
logging.info("starting http server by %s", http_bind)
def start(self, init_callback):
def _():
try:
if asyncio is not None:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
self.serve_thrift()
self.serve_http()
ioloop = IOLoop.instance()
ioloop.add_callback(init_callback)
ioloop.start()
except Exception as e:
logging.error("server error: %s", e)
self.forsun.read_event.set()
timer.stop()
self.thread = threading.Thread(target=_)
self.thread.setDaemon(True)
self.thread.start()
def stop(self):
IOLoop.current().add_callback(lambda :IOLoop.current().stop())
logging.info("server stoping") | mit | 2,440,367,594,320,221,000 | 33.292683 | 77 | 0.622199 | false |
kakapocoin/kakapocoin-old | contrib/pyminer/pyminer.py | 1 | 6434 | #!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
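# Illustrative CONFIG-FILE contents (values are examples only; the keys match the
# key=value parsing and defaults in __main__ below):
#   host=127.0.0.1
#   port=9131
#   threads=1
#   hashmeter=1
#   scantime=30
#   rpcuser=someuser
#   rpcpass=somepassword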
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 9131
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
| mit | 3,967,279,225,010,473,500 | 24.531746 | 84 | 0.648896 | false |
yaybu/touchdown | touchdown/ssh/terminal.py | 1 | 3244 | # Copyright 2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import tempfile
import time
from touchdown.core import plan, serializers
from .connection import Connection
try:
from .agent import PosixAgentServer
except ImportError:
PosixAgentServer = None
class SshMixin(object):
def get_proxy_command(self):
kwargs = serializers.Resource().render(self.runner, self.resource)
cmd = [
"/usr/bin/ssh",
"-o",
'User="{username}"'.format(**kwargs),
"-o",
'Port="{port}"'.format(**kwargs),
"-W",
"%h:%p",
kwargs["hostname"],
]
return ["-o", "ProxyCommand={}".format(" ".join(cmd))]
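    # Illustrative result (hypothetical host values, not from the original source):
    # for a proxy host "bastion.example.com" with user "admin" on port 22 this returns
    #   ['-o', 'ProxyCommand=/usr/bin/ssh -o User="admin" -o Port="22" -W %h:%p bastion.example.com']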
def get_command_and_args(self):
kwargs = serializers.Resource().render(self.runner, self.resource)
cmd = [
self.get_command(),
"-o",
'User="{username}"'.format(**kwargs),
"-o",
'Port="{port}"'.format(**kwargs),
"-o",
'HostName="{hostname}"'.format(**kwargs),
]
if self.resource.proxy:
proxy = self.runner.get_plan(self.resource.proxy)
cmd.extend(proxy.get_proxy_command())
return cmd
def run(self, args):
cmd = self.get_command_and_args()
cmd.extend(args)
environ = os.environ.copy()
if self.resource.private_key and PosixAgentServer:
socket_dir = tempfile.mkdtemp(prefix="ssh-")
socket_file = os.path.join(socket_dir, "agent.{}".format(os.getpid()))
environ["SSH_AUTH_SOCK"] = socket_file
del environ["SHELL"]
child_pid = os.fork()
if child_pid:
a = PosixAgentServer(socket_file)
a.add(self.resource.private_key, "touchdown.pem")
try:
a.serve_while_pid(child_pid)
finally:
shutil.rmtree(socket_dir)
return
while not os.path.exists(socket_file):
time.sleep(0.5)
os.execvpe(cmd[0], cmd, environ)
class SshPlan(plan.Plan, SshMixin):
name = "ssh"
resource = Connection
def get_command(self):
return "/usr/bin/ssh"
def get_command_and_args(self):
cmd = super(SshPlan, self).get_command_and_args()
cmd.append("remote")
return cmd
def execute(self, args):
self.run(args)
class ScpPlan(plan.Plan, SshMixin):
name = "scp"
resource = Connection
def get_command(self):
return "/usr/bin/scp"
def execute(self, source, destination):
self.run([source, destination])
| apache-2.0 | -4,779,550,030,188,554,000 | 26.965517 | 82 | 0.579531 | false |
HumanDynamics/openbadge-analysis | openbadge_analysis/visualization/contribution.py | 1 | 4081 | import datetime
from bokeh.charts import Area, output_file, show
from bokeh.models.formatters import DatetimeTickFormatter
from bokeh.models.widgets import Panel, Tabs
def unix_time_ms(dt):
"""
Converts datetime to timestamp float (milliseconds) for plotting
:param dt: datetime
:return: timestamp float (ms)
"""
epoch = datetime.datetime.utcfromtimestamp(0)
return (dt - epoch).total_seconds()*1000
def contribution_plot(df_stitched, meeting_name, rolling=True, member_names=None):
"""
Creates a collection of 4 stacked area graphs that show seconds of contribution per member per minute for a meeting.
The four graphs are: 1 min, 30 sec, 10 sec, 5 sec resampling frequencies.
:param df_stitched: DataFrame whose values are boolean and indicate whether a badge wearer (column name) was
speaking at a particular timestamp, has columns: datetime, member1, member2, etc.
:param meeting_name: Name of meeting (usually uuid), i.e. the part of the log file before '.txt'
:param rolling: True or False. Whether or not to generate the graph with a rolling mean (which makes the graph
smoother but might not most accurately represent the data). True by default
:param member_names: A dictionary mapping member keys to member names (First Last format)
:return: bokeh Tabs holding 4 stacked area graphs
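    Illustrative usage (hypothetical names, not from the original module):
        plots = contribution_plot(df_stitched, "meeting_uuid", rolling=True)
        output_file("contribution.html")  # bokeh's output_file/show
        show(plots)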
"""
def area_chart(df, interval, rolling):
# re-sampling
        df = df.resample(str(interval)+'S').sum().fillna(0)/(1000/50)*(60/interval) # Each sample is 50ms
# Gives number of seconds spoken per min
# rename columns if names were given
if member_names:
            df.rename(columns=member_names, inplace=True)
if rolling:
df = df.rolling(min_periods=1, window=5, center=True).mean() # To smooth graph
start = unix_time_ms(df.index[0])
start_datetime = datetime.datetime.utcfromtimestamp(start/1000)
end = unix_time_ms(df.index[len(df.index)-1])
end_datetime = datetime.datetime.utcfromtimestamp(end/1000)
df.reset_index(level='datetime', inplace=True) # To input x values into area chart
if rolling:
graph_title = 'Contribution per Minute per Member for Meeting ' + meeting_name + ' (with rolling mean) \
from ' + start_datetime.strftime('%I:%M %p')+' to '+end_datetime.strftime('%I:%M %p')
else:
graph_title = 'Contribution per Minute per Member for Meeting ' + meeting_name + ' (without rolling mean) \
from ' + start_datetime.strftime('%I:%M %p')+' to '+end_datetime.strftime('%I:%M %p')
area = Area(
df,
x='datetime', # Column name
title=graph_title, legend='top_left',
stack=True, xlabel='Time of Day', ylabel='Number of Seconds',
xscale='datetime',
width=1700, height=400,
tools='xpan, xwheel_zoom, box_zoom, reset, resize',
)
# Format tick labels on x-axis
area.below[0].formatter = DatetimeTickFormatter()
area.below[0].formatter.formats = dict(years=['%Y'], months=['%b %Y'], days=['%d %b %Y'],
hours=['%I:%M %P'], hourmin=['%I:%M %P'],
minutes=['%I:%M %P'], minsec=['%I:%M:%S %P'],
seconds=['%I:%M:%S %P'])
return area
area5 = area_chart(df_stitched, 5, rolling)
tab5 = Panel(child=area5, title='5 Second Intervals')
area10 = area_chart(df_stitched, 10, rolling)
tab10 = Panel(child=area10, title='10 Second Intervals')
area30 = area_chart(df_stitched, 30, rolling)
tab30 = Panel(child=area30, title='30 Second Intervals')
area60 = area_chart(df_stitched, 60, rolling)
tab60 = Panel(child=area60, title='60 Second Intervals')
plots = Tabs(tabs=[tab60, tab30, tab10, tab5])
return plots | mit | 410,644,883,875,665,500 | 44.865169 | 120 | 0.616025 | false |
ludobox/ludobox-ui | server/ludobox/history.py | 2 | 5286 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Record and manage file changes and keep track of history.
Key concepts are:
- events : every time something is changed, an event is recorded
- history : the whole thread of events that applies to a page
For each event, a unique SHA id is created (like git https://stackoverflow.com/questions/29106996/git-what-is-a-git-commit-id )
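Illustrative lifecycle (hypothetical content, not from the original module; the
helpers below need a Flask application context for logging):
    event = make_create_event({"title": "Chess"}, user="alice")
    content = add_event_to_history({"title": "Chess"}, event)
    # `content` now carries a "history" list whose single entry is the create event.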
"""
import hashlib
import time
import json
from flask import current_app
from jsonpatch import make_patch, JsonPatch
# TODO : implement state changes (draft -> reviewed, etc.)
event_types = ["create", "update", "delete", "change_state"]
# hashing changes to create an id
sha_1 = hashlib.sha1()
def new_event(event_type, content, user=None):
if event_type not in event_types:
raise ValueError(
"Event type should be one of the following %s"%", ".join(event_types))
if type(content) is not dict:
raise ValueError(
"Event content should be a JSON-compatible object.")
# timestamp
ts = int(time.time())
# generate unique ID using the whole content
sha_1.update("%s - %s - %s - %s"%(event_type, content, user, ts) )
sha_id = sha_1.hexdigest()
return {
"type" : event_type,
"content" : content,
"user" : user,
"id" : sha_id,
"ts" : ts
}
def is_valid_event(event):
assert type(event) is dict
    assert isinstance(event["id"], (str, unicode))
    assert len(event["id"]) == 40
assert type(event["content"]) is dict
assert type(event["ts"]) is int
assert event["type"] in event_types
return True
def add_event_to_history(content_previous_version, event):
"""
    Does 3 things:
- create threaded history of events if empty
- add current event to history
- replace old content by the new
"""
assert is_valid_event(event)
# immutable: clone original reference
content_with_updated_history = content_previous_version.copy()
# init history if empty
if "history" not in content_with_updated_history.keys():
content_with_updated_history["history"] = []
# re-apply changes and store last version
if event["type"] == "update":
content_with_updated_history = apply_update_patch(content_with_updated_history, event)
elif event["type"] == "change_state":
new_state = event["content"]["to"]
content_with_updated_history["state"] = new_state
# add event to history
content_with_updated_history["history"].append(event)
current_app.logger.debug("Event : %s - %s"%(event["type"], content_with_updated_history))
return content_with_updated_history
def make_create_event(content, user=None):
# make sure there is no prior history
if "history" in content.keys() and len(content["history"]) !=0:
        raise ValueError("You are trying to use the CREATE action on a game that already has a history.")
# check if there is actual changes
if content is None or len(content.keys()) == 0:
return None
# create a new event and add it to history
event = new_event("create", content.copy(), user)
return event
def make_update_event(old_content, new_content, user=None):
# make things immutable
new = new_content.copy()
old = old_content.copy()
# ignore keys we don't want to track in the history events
ignored_keys = ["history", "files", "errors", "has_errors"]
for k in ignored_keys:
new.pop(k, None)
old.pop(k, None)
# create json diff
patch = make_patch(new, old)
# check if there is actual changes
if not len(list(patch)) :
return None
# create a new event and add it to history
event = new_event("update", { "changes" : list(patch) }, user)
return event
def make_update_state_event(old_content, updated_content_state, user=None):
"""Store an event reflecting content update"""
original_state = old_content["state"]
state_change = { "from" : original_state, "to" : updated_content_state}
# create a new event and add it to history
event = new_event("change_state", state_change, user)
return event
def apply_update_patch(content, event):
"""Apply JSON diff patches to content"""
patch = JsonPatch(event["content"]["changes"])
final_content = patch.apply(content)
return final_content
def apply_history(history, selected_id):
"""
Re-apply the chain of events from the history until selected id
returns the content *without* the history
"""
# check the hash format
    assert isinstance(selected_id, (str, unicode))
    assert len(selected_id) == 40
# filter history
final_content = {}
# run again the course of events
for event in history:
if not is_valid_event(event) :
raise ValueError("Event does not follow a proper format.")
# check event type
if event["type"] == "create": # init with full content
final_content = event["content"]
elif event["type"] == "update":
final_content = apply_update_patch(final_content, event)
elif event["type"] == "change_state":
new_state = event["content"]["to"]
# run until last is
if event["id"] == selected_id :
return final_content
| agpl-3.0 | -5,003,481,011,867,409,000 | 29.37931 | 127 | 0.644344 | false |
cineuse/CNCGToolKit | cgtkLibs/cgtk_os/TemporaryDirectory.py | 1 | 2998 | # coding=utf8
# Copyright (c) 2016 Strack
import os
import warnings
from tempfile import mkdtemp
import logging
import cgtk_log
log = cgtk_log.cgtk_log(level=logging.INFO)
class TemporaryDirectory(object):
"""
Create and return a temporary directory. This has the same
behavior as mkdtemp but can be used as a context manager. For
Examples:
>>> with TemporaryDirectory() as tmp_dir:
>>> ...
Upon exiting the context, the directory and everything contained
in it are removed.
"""
def __init__(self, suffix="", prefix="tmp", dir_=None):
self._closed = False
self.name = None # Handle mkdtemp raising an exception
self.name = mkdtemp(suffix, prefix, dir_)
def __repr__(self):
return "<{} {!r}>".format(self.__class__.__name__, self.name)
def __enter__(self):
return self.name
def cleanup(self, _warn=False):
if self.name and not self._closed:
try:
self._rmtree(self.name)
except (TypeError, AttributeError) as ex:
                # Issue #10188: Emit a warning
                # if the directory could not be cleaned
                # up due to missing globals
if "None" not in str(ex):
log.info("ERROR: {!r} while cleaning up {!r}".format(ex, self, ))
raise
return
self._closed = True
if _warn:
log.warning("Implicitly cleaning up {!r}".format(self))
def __exit__(self, exc, value, tb):
self.cleanup()
def __del__(self):
# Issue a ResourceWarning if implicit cleanup needed
self.cleanup(_warn=True)
# XXX (ncoghlan): The following code attempts to make
# this class tolerant of the module nulling out process
# that happens during CPython interpreter shutdown
# Alas, it doesn't actually manage it. See issue #10188
_listdir = staticmethod(os.listdir)
_path_join = staticmethod(os.path.join)
_isdir = staticmethod(os.path.isdir)
_islink = staticmethod(os.path.islink)
_remove = staticmethod(os.remove)
_rmdir = staticmethod(os.rmdir)
_warn = warnings.warn
def _rmtree(self, path):
# Essentially a stripped down version of shutil.rmtree. We can't
# use globals because they may be None'ed out at shutdown.
for name in self._listdir(path):
fullname = self._path_join(path, name)
try:
isdir = self._isdir(fullname) and not self._islink(fullname)
except OSError:
isdir = False
if isdir:
self._rmtree(fullname)
else:
try:
self._remove(fullname)
except OSError:
pass
try:
self._rmdir(path)
except OSError:
pass
if __name__ == "__main__":
with TemporaryDirectory() as tmp_dir:
print tmp_dir
| mit | -9,125,791,930,769,588,000 | 29.907216 | 85 | 0.571714 | false |
modcloth/tory-client | tory_client/unregister.py | 1 | 2146 | # vim:fileencoding=utf-8
import argparse
import logging
import os
import sys
from . import __version__
from .client import delete_host
from .junkdrawer import HelpFormatter
USAGE = """%(prog)s [options]
Unregister host(s) in tory.
"""
EPILOGUE = """\
Examples:
# Unregister a machine by name
%(prog)s --name foo-bar.example.com
# Unregister a machine by ipv4
%(prog)s --name 192.168.113.29
# Unregister a whole bunch of machines with hostnames that
# start with "generic-"
tory-inventory | \\
jq -r '._meta | .hostvars | .[] |
select(.hostname | startswith("generic-")) |
.hostname' | \\
xargs %(prog)s -n
"""
DEFAULT_TORY_SERVER = 'http://localhost:9462/ansible/hosts'
def main(sysargs=sys.argv[:]):
parser = argparse.ArgumentParser(
usage=USAGE,
formatter_class=HelpFormatter,
epilog=EPILOGUE,
)
parser.add_argument('--version', action='version', version=__version__)
parser.add_argument(
'-n', '--name',
nargs='+',
metavar='TORY_HOSTNAME',
default=list(filter(lambda s: s != '', [
_s.strip() for _s in os.environ.get('TORY_HOSTNAME', '').split()
])),
        help='host name(s) or ip(s) to unregister',
)
parser.add_argument(
'-s', '--tory-server',
default=os.environ.get('TORY_SERVER', DEFAULT_TORY_SERVER),
help='tory inventory server (including path)'
)
parser.add_argument(
'-A', '--auth-token',
default=os.environ.get('TORY_AUTH_TOKEN', 'swordfish'),
metavar='TORY_AUTH_TOKEN',
help='tory server auth token'
)
args = parser.parse_args(sysargs[1:])
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
log = logging.getLogger('tory-unregister')
n_failures = 0
for identifier in args.name:
status = delete_host(args.tory_server, args.auth_token, identifier)
if status == 204:
log.info('Removed host %s', identifier)
else:
log.warn('Failed to remove host %s: %s',
identifier, status)
n_failures += 1
return n_failures
| mit | -5,951,089,021,157,053,000 | 25.493827 | 76 | 0.602982 | false |
bitcoinfees/bitcoin-feemodel | feemodel/txmempool.py | 1 | 32024 | from __future__ import division
import os
import threading
import sqlite3
import decimal
import logging
from time import time
from copy import copy
from itertools import groupby
from operator import attrgetter, itemgetter
from bitcoin.core import b2lx
from feemodel.config import config, datadir, MINRELAYTXFEE, PRIORITYTHRESH
from feemodel.util import (proxy, StoppableThread, get_feerate, WorkerThread,
cumsum_gen, BlockMetadata, StepFunction)
from feemodel.stranding import tx_preprocess, calc_stranding_feerate
from feemodel.simul.simul import SimEntry
logger = logging.getLogger(__name__)
db_lock = threading.Lock()
MEMBLOCK_SCHEMA = {
"blocks": [
'height INTEGER PRIMARY KEY',
'size INTEGER',
'time INTEGER'
],
"txs": [
"id INTEGER PRIMARY KEY",
"txid TEXT",
"size INTEGER",
"fee TEXT",
"startingpriority TEXT",
"time INTEGER",
"height INTEGER",
"depends TEXT",
"feerate INTEGER",
"heightremoved INTEGER"
],
"blocktxs": [
"blockheight INTEGER",
"txrowid INTEGER",
"currentpriority TEXT",
"isconflict INTEGER",
"inblock INTEGER"
]
}
# TODO: remove this when transition to new DB is complete
OLD_MEMBLOCK_TABLE_SCHEMA = {
'blocks': [
'height INTEGER UNIQUE',
'size INTEGER',
'time INTEGER'
],
'txs': [
'blockheight INTEGER',
'txid TEXT',
'size INTEGER',
'fee TEXT',
'startingpriority TEXT',
'currentpriority TEXT',
'time INTEGER',
'height INTEGER',
'depends TEXT',
'feerate INTEGER',
'leadtime INTEGER',
'isconflict INTEGER',
'inblock INTEGER'
]
}
MEMBLOCK_DBFILE = os.path.join(datadir, 'memblock.db')
class TxMempool(StoppableThread):
'''Thread that tracks the mempool state at points of block discovery.
When the thread is running, Bitcoin Core is polled every poll_period
seconds over JSON-RPC for:
1. The current block count, via getblockcount().
2. The transactions in the mempool, via getrawmempool(verbose=True)
If the block count has increased in between polls, we record:
1. Transactions in the mempool just prior to block discovery
2. For each transaction, whether or not it was included in the block.
The goal is to make inferences about the transaction selection policies
of miners.
    The polling is done via a batch call; however, the two requests are not
    processed atomically by Bitcoin Core - there is a chance of a race
    condition in which the block count increases in between the processing of
    the two requests. In this case the statistics for that block will be
    somewhat degraded.
In addition, chain re-orgs are not handled. If a re-org happens, the
transactions that we record are not necessarily representative of the
pool of valid transactions seen by the miner. Any inference algorithm
must be tolerant of such errors, in addition to any other kinds of network
errors.
'''
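    # Illustrative use of this thread (not executed here; the stop() call is
    # assumed from StoppableThread's interface rather than shown in this file):
    #
    #   mempool = TxMempool(dbfile=MEMBLOCK_DBFILE)
    #   mempool.start()     # begin polling Bitcoin Core
    #   ...                 # a MemBlock is written at each new block
    #   mempool.stop()      # signal the thread to finish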
def __init__(self, dbfile=MEMBLOCK_DBFILE,
blocks_to_keep=config.getint("txmempool", "blocks_to_keep"),
poll_period=config.getfloat("txmempool", "poll_period")):
self.state = None
self.blockworker = None
self.dbfile = dbfile
self.blocks_to_keep = blocks_to_keep
self.poll_period = poll_period
super(TxMempool, self).__init__()
@StoppableThread.auto_restart(60)
def run(self):
"""Target function of the thread.
Updates mempool every poll_period seconds.
"""
logger.info("Starting TxMempool with {} blocks_to_keep.".
format(self.blocks_to_keep))
logger.info("memblock dbfile is at {}".format(self.dbfile))
self.blockworker = WorkerThread(self.process_blocks)
self.blockworker.start()
try:
self.state = get_mempool_state()
while not self.is_stopped():
self.update()
self.sleep(self.poll_period)
finally:
self.blockworker.stop()
self.state = None
logger.info("TxMempool stopped.")
def update(self):
"""Update the mempool state.
If block height has increased, call self.process_blocks through
blockworker thread.
"""
newstate = get_mempool_state()
if newstate.height > self.state.height:
self.blockworker.put(self.state, newstate)
self.state = newstate
logger.debug(repr(newstate))
return newstate
def process_blocks(self, prevstate, newstate):
"""Record the mempool state in a MemBlock.
This is called in self.blockworker.run.
"""
# Make a copy because we are going to mutate it
prevstate = copy(prevstate)
memblocks = []
while prevstate.height < newstate.height:
memblock = MemBlock()
memblock.record_block(prevstate)
memblocks.append(memblock)
# The set of transactions that were removed from the mempool, yet
# were not included in a block.
conflicts = (prevstate - newstate).entries
conflicts_size = sum([entry.size for entry in conflicts.values()])
for txid in conflicts:
# For the first block, label the MemBlock entries that are
# conflicts. Assume the conflict was removed after the first
# block, so remove them from the remaining blocks.
memblocks[0].entries[txid].isconflict = True
for memblock in memblocks[1:]:
del memblock.entries[txid]
if len(conflicts):
logger.info("process_blocks: {} conflicts ({} bytes) removed.".
format(len(conflicts), conflicts_size))
if conflicts_size > 10000:
# If many conflicts are removed, it can screw up the txsource
# estimation; so log a warning.
logger.warning("process_blocks: {} bytes of conflicts removed.".
format(conflicts_size))
if self.dbfile and self.is_alive():
for memblock in memblocks:
try:
memblock.write(self.dbfile, self.blocks_to_keep)
except Exception:
logger.exception("MemBlock write/del exception.")
return memblocks
def get_stats(self):
stats = {
"params": {
"poll_period": self.poll_period,
"blocks_to_keep": self.blocks_to_keep
},
"num_memblocks": len(MemBlock.get_heights())
}
state = self.state
if state is not None:
stats.update(state.get_stats())
return stats
def __nonzero__(self):
return self.state is not None
class MempoolState(object):
"""Mempool state.
Comprised of:
height - the block height
entries - dictionary of mempool entries
time - time in seconds
"""
def __init__(self, height, rawmempool):
self.height = height
self.entries = {txid: MemEntry.from_rawentry(rawentry)
for txid, rawentry in rawmempool.iteritems()}
self.time = int(time())
def get_sizefn(self):
entries = sorted(self.entries.values(), key=attrgetter("feerate"),
reverse=True)
sizebyfee = [
(feerate, sum([entry.size for entry in feegroup]))
for feerate, feegroup in groupby(entries, attrgetter("feerate"))]
if not sizebyfee:
return StepFunction([0, 1], [0, 0])
feerates_rev, sizes = zip(*sizebyfee)
cumsize_rev = list(cumsum_gen(sizes))
feerates = list(reversed(feerates_rev))
cumsize = list(reversed(cumsize_rev))
sizefn = StepFunction(feerates, cumsize)
sizefn.addpoint(feerates[-1]+1, 0)
return sizefn
def get_stats(self):
sizefn = self.get_sizefn()
approxfn = sizefn.approx()
feerates_approx, cumsize_approx = zip(*approxfn)
size_with_fee = sizefn(MINRELAYTXFEE)
stats = {
"cumsize": {
"feerates": feerates_approx,
"size": cumsize_approx,
},
"currheight": self.height,
"numtxs": len(self.entries),
"sizewithfee": size_with_fee
}
return stats
def __copy__(self):
cpy = MempoolState(self.height, {})
cpy.entries = {txid: copy(entry)
for txid, entry in self.entries.iteritems()}
cpy.time = self.time
return cpy
def __sub__(self, other):
if not isinstance(other, MempoolState):
raise TypeError("Operands must both be MempoolState instances.")
result = MempoolState(self.height - other.height, {})
result.time = self.time - other.time
result.entries = {
txid: self.entries[txid]
for txid in set(self.entries) - set(other.entries)
}
return result
def __repr__(self):
return "MempoolState(height: {}, entries: {}, time: {})".format(
self.height, len(self.entries), self.time)
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __ne__(self, other):
return self.__dict__ != other.__dict__
class BaseMemBlock(MempoolState):
"""Independent of DB format."""
def __init__(self):
# The attributes inherited from MempoolState
self.height = None
self.entries = None
self.time = None
# MemBlock specific attributes
self.blockheight = None
self.blocksize = None
def record_block(self, state):
self.height = state.height
self.entries = {txid: copy(entry)
for txid, entry in state.entries.iteritems()}
self.time = state.time
for entry in self.entries.values():
entry.inblock = False
entry.isconflict = False
entry.leadtime = self.time - entry.time
self.blockheight = state.height + 1
block = proxy.getblock(proxy.getblockhash(self.blockheight))
self.blocksize = len(block.serialize())
blockname = BlockMetadata(self.blockheight).get_poolname()
blocktxids = [b2lx(tx.GetHash()) for tx in block.vtx]
entries_inblock = set(self.entries) & set(blocktxids)
for txid in entries_inblock:
self.entries[txid].inblock = True
# Delete it, because state.entries will be used for the next block
# if there are > 1 blocks in this update cycle.
del state.entries[txid]
# Get rid of broken deps, for multiple blocks
for entry in state.entries.values():
entry.depends = filter(lambda dep: dep in state.entries,
entry.depends)
stats = self.calc_stranding_feerate(bootstrap=False)
if stats:
stranding_feerate = stats['sfr']
abovekn = stats['abovekn']
belowkn = stats['belowkn']
else:
stranding_feerate = None
abovekn = None
belowkn = None
blocktext = (
'Block {} ({} bytes) by {}: {}/{} in mempool, '
'SFR/akn/bkn: {}/{}/{}'.format(
self.blockheight, self.blocksize, blockname,
len(entries_inblock), len(blocktxids)-1,
stranding_feerate, abovekn, belowkn))
logger.info(blocktext)
# As a measure of our node's connectivity, we want to note the
# ratio below. If it is low, it means that our node is not being
# informed of many transactions.
if len(blocktxids) > 1:
incl_ratio = len(entries_inblock) / (len(blocktxids)-1)
if incl_ratio < 0.9:
logger.warning("Only {}/{} in block {}.".format(
len(entries_inblock), len(blocktxids)-1,
self.blockheight))
state.height += 1
def calc_stranding_feerate(self, bootstrap=False):
if not self:
raise ValueError("Empty memblock.")
txs = tx_preprocess(self)
if txs:
return calc_stranding_feerate(txs, bootstrap=bootstrap)
return None
def __nonzero__(self):
return self.entries is not None
def __repr__(self):
return "MemBlock(blockheight: %d, blocksize: %d, len(entries): %d)" % (
self.blockheight, self.blocksize, len(self.entries))
def __copy__(self):
raise NotImplementedError
class MemBlock(BaseMemBlock):
'''The mempool state at the time a block was discovered.'''
def write(self, dbfile, blocks_to_keep):
'''Write MemBlock to disk.
blocks_to_keep specifies how many blocks of information should be
retained. All MemBlocks older (with respect to this block) than
blocks_to_keep will be deleted.
'''
if not self:
raise ValueError("Failed write: empty memblock.")
# Temp tables
NONREMOVED = "nonremoved"
MEMBLOCKTXS = "memblocktxs"
db = None
memblocktxids = self.entries.keys()
try:
with db_lock:
db = sqlite3.connect(dbfile)
for key, val in MEMBLOCK_SCHEMA.items():
db.execute('CREATE TABLE IF NOT EXISTS %s (%s)' %
(key, ','.join(val)))
db.execute('CREATE INDEX IF NOT EXISTS heightidx '
'ON txs (heightremoved)')
db.execute('CREATE INDEX IF NOT EXISTS block_heightidx '
'ON blocktxs (blockheight)')
# Enter into blocks
db.execute(
'INSERT INTO blocks VALUES (?,?,?)',
(self.blockheight, self.blocksize, self.time))
# Temporary tables for data manipulation
db.execute(
"CREATE TEMP TABLE {} (id INTEGER, txid TEXT)".
format(NONREMOVED))
db.execute(
"CREATE TEMP TABLE {} "
"(txid TEXT, isconflict INTEGER, inblock INTEGER)".
format(MEMBLOCKTXS))
# Fetch the nonremoved txs
db.execute(
"INSERT INTO {} "
"SELECT id, txid FROM txs "
"WHERE heightremoved IS NULL".format(NONREMOVED)
)
# Table the memblocktxs
db.executemany(
"INSERT INTO {} VALUES (?,?,?)".format(MEMBLOCKTXS),
[(txid,
self.entries[txid].isconflict,
self.entries[txid].inblock)
for txid in memblocktxids])
# Update the heightremoved
db.execute(
"UPDATE txs SET heightremoved=? "
"WHERE id IN "
"(SELECT id FROM {0} LEFT JOIN {1} "
" ON {0}.txid={1}.txid WHERE "
" {1}.isconflict=1 OR "
" {1}.inblock=1 OR "
" {1}.inblock is NULL)".
format(NONREMOVED, MEMBLOCKTXS),
(self.blockheight,)
)
# Get the new txs to table
txidstoenter = db.execute(
"SELECT txid FROM {} EXCEPT SELECT txid FROM {}".
format(MEMBLOCKTXS, NONREMOVED)
)
txstoenter = [
(
txid,
self.entries[txid].size,
str(self.entries[txid].fee),
str(self.entries[txid].startingpriority),
self.entries[txid].time,
self.entries[txid].height,
','.join(self.entries[txid].depends),
self.entries[txid].feerate,
self.blockheight if (
self.entries[txid].isconflict or
self.entries[txid].inblock)
else None
)
for txid in map(itemgetter(0), txidstoenter)
]
# Enter new txs. There might be duplicate txid,
# but that's OK!
db.executemany(
"INSERT INTO txs(txid, size, fee, startingpriority, "
"time, height, depends, feerate, heightremoved) "
"VALUES (?,?,?,?,?,?,?,?,?)", txstoenter)
# Get the rowids, to enter into blocktxs
finaltxs = db.execute(
"SELECT id, txid FROM txs WHERE "
"heightremoved IS NULL OR "
"heightremoved=?",
(self.blockheight,)
).fetchall()
rowidmap = {txid: rowid for rowid, txid in finaltxs}
# Assert that there are no duplicate txids
assert len(finaltxs) == len(set(map(itemgetter(1), finaltxs)))
# Enter into blocktxs
blocktxstoenter = [(
self.blockheight,
rowidmap[txid],
str(self.entries[txid].currentpriority),
self.entries[txid].isconflict,
self.entries[txid].inblock)
for txid in memblocktxids
]
db.executemany("INSERT INTO blocktxs VALUES (?,?,?,?,?)",
blocktxstoenter)
# Remove old blocks
if blocks_to_keep > 0:
height_thresh = self.blockheight - blocks_to_keep
db.execute("DELETE FROM txs WHERE heightremoved<=?",
(height_thresh,))
db.execute("DELETE FROM blocks WHERE height<=?",
(height_thresh,))
db.execute("DELETE FROM blocktxs WHERE blockheight<=?",
(height_thresh,))
db.commit()
finally:
if db is not None:
db.close()
@classmethod
def read(cls, blockheight, dbfile=MEMBLOCK_DBFILE):
'''Read MemBlock from disk.
Returns the memblock with specified blockheight.
Returns None if no record exists for that block.
Raises one of the sqlite3 errors if there are other problems.
'''
if not os.path.exists(dbfile):
return None
db = None
try:
db = sqlite3.connect(dbfile)
with db_lock:
block = db.execute('SELECT size, time FROM blocks '
'WHERE height=?',
(blockheight,)).fetchall()
txlist = db.execute(
"SELECT "
" txid,"
" size,"
" fee,"
" startingpriority,"
" currentpriority,"
" time,"
" height,"
" depends,"
" feerate,"
" isconflict,"
" inblock "
"FROM blocktxs LEFT JOIN txs ON blocktxs.txrowid=txs.id "
"WHERE blockheight=?",
(blockheight,)).fetchall()
finally:
if db is not None:
db.close()
# Make sure there are no missing txs.
txids = map(itemgetter(0), txlist)
assert not any([txid is None for txid in txids])
if block:
blocksize, blocktime = block[0]
else:
return None
memblock = cls()
memblock.height = blockheight - 1
entries = {}
for tx in txlist:
entry = MemEntry()
entry.size = tx[1]
entry.fee = decimal.Decimal(tx[2])
entry.startingpriority = decimal.Decimal(tx[3])
entry.currentpriority = decimal.Decimal(tx[4])
entry.time = tx[5]
entry.height = tx[6]
entry.depends = tx[7].split(",") if tx[7] else []
# We need to do this because depends is recorded upon first sight
# of the tx; some deps might have confirmed in the meantime
entry.depends = filter(lambda dep: dep in txids, entry.depends)
entry.feerate = tx[8]
entry.isconflict = bool(tx[9])
entry.inblock = bool(tx[10])
entry.leadtime = blocktime - tx[5]
entries[tx[0]] = entry
memblock.entries = entries
memblock.time = blocktime
memblock.blockheight = blockheight
memblock.blocksize = blocksize
return memblock
@staticmethod
def get_heights(blockrangetuple=None, dbfile=MEMBLOCK_DBFILE):
'''Get the list of MemBlocks stored on disk.
Returns a list of heights of all MemBlocks on disk within
range(*blockrangetuple)
'''
if not os.path.exists(dbfile):
return []
if blockrangetuple is None:
blockrangetuple = (0, float("inf"))
db = None
try:
db = sqlite3.connect(dbfile)
with db_lock:
heights = db.execute(
'SELECT height FROM blocks '
'where height>=? and height <?',
blockrangetuple).fetchall()
finally:
if db is not None:
db.close()
return [r[0] for r in heights]
# TODO: Remove this when transition to new db is complete.
class OldMemBlock(BaseMemBlock):
'''The mempool state at the time a block was discovered.'''
def write(self, dbfile, blocks_to_keep):
'''Write MemBlock to disk.
blocks_to_keep specifies how many blocks of information should be
retained. All MemBlocks older (with respect to this block) than
blocks_to_keep will be deleted.
'''
if not self:
raise ValueError("Failed write: empty memblock.")
db = None
try:
db = sqlite3.connect(dbfile)
for key, val in OLD_MEMBLOCK_TABLE_SCHEMA.items():
db.execute('CREATE TABLE IF NOT EXISTS %s (%s)' %
(key, ','.join(val)))
db.execute('CREATE INDEX IF NOT EXISTS heightidx '
'ON txs (blockheight)')
db.execute('CREATE INDEX IF NOT EXISTS blocks_heightidx '
'ON blocks (height)')
with db_lock:
with db:
db.execute(
'INSERT INTO blocks VALUES (?,?,?)',
(self.blockheight, self.blocksize, self.time))
db.executemany(
'INSERT INTO txs VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?)',
[(self.blockheight, txid) + entry._get_attr_tuple()
for txid, entry in self.entries.iteritems()])
if blocks_to_keep > 0:
height_thresh = self.blockheight - blocks_to_keep
with db:
db.execute('DELETE FROM blocks WHERE height<=?',
(height_thresh,))
db.execute('DELETE FROM txs WHERE blockheight<=?',
(height_thresh,))
finally:
if db is not None:
db.close()
@classmethod
def read(cls, blockheight, dbfile=MEMBLOCK_DBFILE):
'''Read MemBlock from disk.
Returns the memblock with specified blockheight.
Returns None if no record exists for that block.
Raises one of the sqlite3 errors if there are other problems.
'''
if not os.path.exists(dbfile):
return None
db = None
try:
db = sqlite3.connect(dbfile)
with db_lock:
block = db.execute('SELECT size, time FROM blocks '
'WHERE height=?',
(blockheight,)).fetchall()
txlist = db.execute('SELECT * FROM txs WHERE blockheight=?',
(blockheight,)).fetchall()
finally:
if db is not None:
db.close()
if block:
blocksize, blocktime = block[0]
else:
return None
memblock = cls()
memblock.height = blockheight - 1
memblock.entries = {
tx[1]: MemEntry._from_attr_tuple(tx[2:]) for tx in txlist}
memblock.time = blocktime
memblock.blockheight = blockheight
memblock.blocksize = blocksize
return memblock
@staticmethod
def get_heights(blockrangetuple=None, dbfile=MEMBLOCK_DBFILE):
'''Get the list of MemBlocks stored on disk.
Returns a list of heights of all MemBlocks on disk within
range(*blockrangetuple)
'''
if not os.path.exists(dbfile):
return []
if blockrangetuple is None:
blockrangetuple = (0, float("inf"))
db = None
try:
db = sqlite3.connect(dbfile)
with db_lock:
heights = db.execute(
'SELECT height FROM blocks '
'where height>=? and height <?',
blockrangetuple).fetchall()
finally:
if db is not None:
db.close()
return [r[0] for r in heights]
class MemEntry(SimEntry):
'''Represents a mempool entry.
This is basically the data returned by getrawmempool, but with additional
attributes if it is associated with a MemBlock:
inblock - whether or not the transaction was included in the block
leadtime - difference between block discovery time and mempool entry
time
isconflict - whether or not the transaction is a conflict, meaning
that it was subsequently removed from the mempool as a
result of being invalidated by some other transaction
in the block.
In addition, for convenience we compute and store the feerate (satoshis
per kB of transaction size).
Also, care is taken not to mutate the rawmempool_entry input.
'''
def __init__(self):
super(MemEntry, self).__init__(None, None)
self.fee = None
self.startingpriority = None
self.currentpriority = None
self.time = None
self.height = None
self.leadtime = None
self.isconflict = None
self.inblock = None
def is_high_priority(self):
'''Check if entry is high priority.
Returns True if entry is considered high priority by Bitcoin Core
with regard to priority inclusion in the next block.
Ideally this should simply return
(entry.currentpriority > prioritythresh), however, currentpriority,
as obtained by RPC, uses the current height, whereas miners in forming
a new block use the current height + 1, i.e. the height of the new
block. So currentpriority underestimates the 'true' mining priority.
(There are other complications, in that currentpriority doesn't take
into account cases where the entry has mempool dependencies, but
that's a different problem, which we live with for now.)
This difference is important because, for the purposes of minfeerate
policy estimation, we need to properly exclude all high priority
transactions. Previously in v0.9 of Bitcoin Core, this wasn't such a
big problem, because low priority transactions below minrelaytxfee
are still relayed / enter the mempool; there are thus sufficient
low-fee, low-priority transactions so that the minfeerate threshold
is still estimatable in a consistent manner.
With v0.10, however, only high (miners') priority transactions are
allowed into the mempool if the tx has low fee. If one relies on the
criteria (entry.currentpriority > prioritythresh), there will be false
negatives; however because there aren't any more truly low-priority
transactions with similar feerate, the minfeerate estimation can
become inconsistent.
It's not possible, however, to adjust entry.currentpriority to become
the miners' priority, solely from the information obtained from
getrawmempool. Therefore, we resort to this hack: the entry is classed
as high priority if (entry.currentpriority > prioritythresh) or
(entry.feerate < minrelaytxfee).
'''
return (self.currentpriority > PRIORITYTHRESH or
self.feerate < MINRELAYTXFEE)
# TODO: deprecate this
def _get_attr_tuple(self):
'''Get tuple of attributes.
Used when writing MemBlock to disk.
'''
for attr in ['leadtime', 'isconflict', 'inblock']:
if getattr(self, attr) is None:
raise ValueError("MemEntry not yet processed with MemBlock.")
attr_tuple = (
self.size,
str(self.fee),
str(self.startingpriority),
str(self.currentpriority),
self.time,
self.height,
','.join(self.depends),
self.feerate,
self.leadtime,
self.isconflict,
self.inblock
)
return attr_tuple
# TODO: deprecate this
@classmethod
def _from_attr_tuple(cls, tup):
'''Return MemEntry from attribute tuple.
Used when reading MemBlock from disk.
'''
entry = cls()
(
entry.size,
entry.fee,
entry.startingpriority,
entry.currentpriority,
entry.time,
entry.height,
entry.depends,
entry.feerate,
entry.leadtime,
entry.isconflict,
entry.inblock
) = tup
entry.fee = decimal.Decimal(entry.fee)
entry.currentpriority = decimal.Decimal(entry.currentpriority)
entry.startingpriority = decimal.Decimal(entry.startingpriority)
entry.depends = entry.depends.split(',') if entry.depends else []
entry.isconflict = bool(entry.isconflict)
entry.inblock = bool(entry.inblock)
return entry
@classmethod
def from_rawentry(cls, rawentry):
'''Return MemEntry from rawmempool dict.
rawentry is a value in the dict returned by
proxy.getrawmempool(verbose=True).
'''
entry = cls()
for attr in rawentry:
setattr(entry, attr, rawentry[attr])
entry.depends = entry.depends[:]
entry.feerate = get_feerate(rawentry)
return entry
def __copy__(self):
cpy = MemEntry()
for attr in self.__dict__:
setattr(cpy, attr, getattr(self, attr))
cpy.depends = cpy.depends[:]
return cpy
def __repr__(self):
return str(self.__dict__)
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __ne__(self, other):
return self.__dict__ != other.__dict__
def get_mempool_state():
starttime = time()
state = MempoolState(*proxy.poll_mempool())
elapsedtime = time() - starttime
time_msg = "get_mempool_state took {}s.".format(elapsedtime)
logger.debug(time_msg)
if elapsedtime > 15:
logger.warning(time_msg)
return state
| mit | 5,300,208,427,413,970,000 | 35.809195 | 79 | 0.548589 | false |
lukovnikov/teafacto | teafacto/scripts/simplequestions/subjdetchar.py | 1 | 5550 | import numpy as np
from teafacto.blocks.seq.oldseqproc import SimpleVec2Idx, MemVec2Idx
from teafacto.blocks.seq.enc import SimpleSeq2Vec
from teafacto.blocks.seq.enc import Seq2Idx
from teafacto.blocks.lang.wordvec import Glove
from teafacto.blocks.memory import LinearGateMemAddr, DotMemAddr
from teafacto.scripts.simplequestions.subjdet import _readdata, ents2labels
from teafacto.util import argprun
def readdata(p):
train, valid, test, x, newdic = _readdata(p)
return train, valid, test, x["chardic"], newdic
def getmemdata(reldic, worddic, labelp="../../../data/simplequestions/labels.map"): # updates worddic with words found in relation
rels = ents2labels(labelp, reldic)
allrelwords = set()
maxlen = 0
prevc = -1
for rel, c in rels:
assert(c-1 == prevc)
prevc = c
maxlen = max(maxlen, len(rel))
for relw in rel:
allrelwords.add(relw)
relwordsnotinworddic = allrelwords.difference(set(worddic.keys()))
for rwniw in relwordsnotinworddic:
worddic[rwniw] = len(worddic)
ret = [[worddic[w] for w in rel] for (rel, _) in rels]
retmat = np.zeros((len(rels), maxlen)).astype("int32") - 1
i = 0
for r in ret:
retmat[i, :len(r)] = r
i += 1
return retmat
def toglove(wordmat, worddic, dim=50):
g = Glove(dim)
gws = set(g.D.keys())
wdws = set(worddic.keys())
diff = wdws.difference(gws)
    # gather stats about diff
diffcounts = {worddic[k]: 0 for k in diff}
total = 0
moretal = 0
for i in range(wordmat.shape[0]):
for j in range(wordmat.shape[1]):
if wordmat[i, j] >= 0:
total += 1
if wordmat[i, j] in diffcounts:
diffcounts[wordmat[i, j]] += 1
moretal += 1
diffcounts = sorted(diffcounts.items(), key=lambda (k, v): v, reverse=True)
print "%d words unknown by Glove of %d total words" % (moretal, total)
revdic = {v: k for k, v in worddic.items()}
d2g = lambda x: g * revdic[x] if x in revdic else x
newdic = {k: d2g(v) for k, v in worddic.items()}
newmat = np.vectorize(d2g)(wordmat)
revgdic = {v: k for k, v in g.D.items()}
#embed()
def getdic2glove(worddic, dim=50):
g = Glove(dim)
revdic = {v: k for k, v in worddic.items()}
d2g = lambda x: g * revdic[x] if x in revdic else x
newdic = {k: d2g(v) for k, v in worddic.items()}
return d2g, newdic
def getcharmemdata(reldic, chardic, maxchar=70, maxwords=30,
labelp="../../../data/simplequestions/labels.map"):
rels = ents2labels(labelp, reldic, maxwords=maxwords)
rels = map(lambda (x, y): (" ".join(x), y), rels)
maxlen = 0
prevc = -1
allrelchars = set()
for rel, c in rels:
assert(c-1 == prevc)
prevc = c
maxlen = max(maxlen, len(rel))
for relchar in rel:
allrelchars.add(relchar)
charsnotinchardic = allrelchars.difference(set(chardic.keys()))
nextid = 0
for cnic in charsnotinchardic:
while nextid in chardic.values():
nextid += 1
chardic[cnic] = nextid
maxlen = min(maxlen, maxchar)
retmat = np.zeros((len(rels), maxlen)).astype("int32") - 1
for rel, k in rels:
rel = [chardic[c] for c in rel[:min(len(rel), maxchar)]]
retmat[k, :len(rel)] = rel
return retmat
def evaluate(pred, gold):
return np.sum(gold == pred) * 100. / gold.shape[0]
def run(
epochs=10,
numbats=100,
numsam=10000,
lr=0.1,
datap="../../../data/simplequestions/datamat.char.pkl",
innerdim=200,
wreg=0.00005,
bidir=False,
keepmincount=5,
mem=False,
sameenc=False,
memaddr="dot",
memattdim=100,
membidir=False,
memlayers=1,
memmaxwords=5,
memmaxchars=20,
layers=1,
):
(traindata, traingold), (validdata, validgold), (testdata, testgold), chardic, entdic\
= readdata(datap)
if mem:
memdata = getcharmemdata(entdic, chardic, maxwords=memmaxwords, maxchar=memmaxchars)
print traindata.shape, testdata.shape
numchars = max(chardic.values()) + 1
numrels = max(entdic.values()) + 1
print numchars, numrels
if bidir:
encinnerdim = [innerdim/2]*layers
else:
encinnerdim = [innerdim]*layers
enc = SimpleSeq2Vec(indim=numchars, inpembdim=None, innerdim=encinnerdim, maskid=-1, bidir=bidir)
if mem:
if membidir:
innerdim = [innerdim/2]*memlayers
else:
innerdim = [innerdim]*memlayers
memindim = numchars
memenc = SimpleSeq2Vec(indim=memindim, inpembdim=None, innerdim=innerdim, maskid=-1, bidir=membidir)
if memaddr is None or memaddr == "dot":
memaddr = DotMemAddr
elif memaddr == "lin":
memaddr = LinearGateMemAddr
dec = MemVec2Idx(memenc, memdata, memdim=innerdim, memaddr=memaddr, memattdim=memattdim)
else:
dec = SimpleVec2Idx(indim=innerdim, outdim=numrels)
m = Seq2Idx(enc, dec)
m = m.train([traindata], traingold).adagrad(lr=lr).l2(wreg).grad_total_norm(1.0).cross_entropy()\
.validate_on([validdata], validgold).accuracy().cross_entropy().takebest()\
.train(numbats=numbats, epochs=epochs)
pred = m.predict(testdata)
print pred.shape
evalres = evaluate(np.argmax(pred, axis=1), testgold)
print str(evalres) + "%"
if __name__ == "__main__":
argprun(run) | mit | -4,715,588,283,602,784,000 | 31.273256 | 133 | 0.609369 | false |
mvaled/sentry | src/sentry/reprocessing.py | 1 | 2078 | from __future__ import absolute_import
import uuid
REPROCESSING_OPTION = "sentry:processing-rev"
def get_reprocessing_revision(project, cached=True):
"""Returns the current revision of the projects reprocessing config set."""
from sentry.models import ProjectOption, Project
if cached:
return ProjectOption.objects.get_value(project, REPROCESSING_OPTION)
try:
if isinstance(project, Project):
project = project.id
return ProjectOption.objects.get(project=project, key=REPROCESSING_OPTION).value
except ProjectOption.DoesNotExist:
pass
def bump_reprocessing_revision(project):
"""Bumps the reprocessing revision."""
from sentry.models import ProjectOption
rev = uuid.uuid4().hex
ProjectOption.objects.set_value(project, REPROCESSING_OPTION, rev)
return rev
def report_processing_issue(event_data, scope, object=None, type=None, data=None):
"""Reports a processing issue for a given scope and object. Per
scope/object combination only one issue can be recorded where the last
one reported wins.
"""
if object is None:
object = "*"
if type is None:
from sentry.models import EventError
type = EventError.INVALID_DATA
uid = "%s:%s" % (scope, object)
event_data.setdefault("processing_issues", {})[uid] = {
"scope": scope,
"object": object,
"type": type,
"data": data,
}
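# Illustrative call (the scope/object strings and data payload are made up;
# EventError.INVALID_DATA is the same default used above):
#
#   report_processing_issue(event_data, scope='proguard',
#                           object='mapping:abcd1234',
#                           type=EventError.INVALID_DATA,
#                           data={'mapping_uuid': 'abcd1234'})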
def resolve_processing_issue(project, scope, object=None, type=None):
"""Given a project, scope and object (and optionally a type) this marks
affected processing issues are resolved and kicks off a task to move
events back to reprocessing.
"""
if object is None:
object = "*"
from sentry.models import ProcessingIssue
ProcessingIssue.objects.resolve_processing_issue(
project=project, scope=scope, object=object, type=type
)
def trigger_reprocessing(project):
from sentry.tasks.reprocessing import reprocess_events
reprocess_events.delay(project_id=project.id)
| bsd-3-clause | -1,081,322,319,088,191,900 | 29.115942 | 88 | 0.689124 | false |
patpatpatpatpat/pycolfin | pycolfin/cli.py | 1 | 1716 | # -*- coding: utf-8 -*-
import os
from getpass import getpass
import click
from .pycolfin import COLFin
verbosity_help = """
1 = User ID, Last Login
2 = Display all info from 1 and portfolio summary
3 = Display all info in 1 & 2 and detailed portfolio
"""
use_env_vars_help = """
Use USER_ID and PASSWORD from environment variables.
Not recommended if you are using a shared computer!
(This is like storing bank credentials in a text file)
"""
@click.command()
@click.option('--use-env-vars', is_flag=True, default=False, help=use_env_vars_help)
@click.option('-v', '--verbosity', default=3, type=click.IntRange(1, 3), help=verbosity_help)
def main(verbosity, use_env_vars):
if use_env_vars:
try:
user_id = os.environ['USER_ID']
password = os.environ['PASSWORD']
except KeyError:
click.echo('USER_ID and PASSWORD not found in environment variables!')
exit()
else:
user_id = getpass(prompt='User ID:')
password = getpass(prompt='Password:')
try:
account = COLFin(user_id, password, parser='html.parser')
except Exception as e:
click.echo(e.__str__())
exit()
if verbosity >= 1:
account.fetch_account_summary()
if verbosity >= 2:
account.fetch_portfolio_summary()
account.show_portfolio_summary()
if verbosity == 3:
account.fetch_detailed_portfolio()
try:
account.show_detailed_stocks()
except Exception as e:
print(e)
try:
account.show_detailed_mutual_fund()
except Exception as e:
print(e)
account.show_account_summary()
if __name__ == "__main__":
main()
| mit | -6,550,584,323,671,788,000 | 26.677419 | 93 | 0.61655 | false |
3324fr/spinalcordtoolbox | dev/sct_register_multimodal_old/msct_register.py | 1 | 35723 | #!/usr/bin/env python
#
#
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2015 NeuroPoly, Polytechnique Montreal <www.neuro.polymtl.ca>
# Authors: Tanguy Magnan
# Modified: 2015-07-29
#
# License: see the LICENSE.TXT
#=======================================================================================================================
#
import sys, commands
# Get path of the toolbox
status, path_sct = commands.getstatusoutput('echo $SCT_DIR')
# Append path that contains scripts, to be able to load modules
sys.path.append(path_sct + '/scripts')
from sct_register_multimodal import Paramreg
def register_slicereg2d_pointwise(fname_source, fname_dest, window_length=31, paramreg=Paramreg(step='0', type='seg', algo='slicereg2d_pointwise', metric='MeanSquares', iter='10', shrink='1', smooth='0', gradStep='0.5'),
warp_forward_out='step0Warp.nii.gz', warp_inverse_out='step0InverseWarp.nii.gz', factor=2, verbose=0):
"""Slice-by-slice regularized registration by translation of two segmentations.
First we estimate for each slice the translation vector by calculating the difference of position of the two centers of
mass of the two segmentations. Then we remove outliers using Median Absolute Deviation technique (MAD) and smooth
the translation along x and y axis using moving average hanning window. Eventually, we generate two warping fields
(forward and inverse) resulting from this regularized registration technique.
The segmentations must be of same size (otherwise generate_warping_field will not work for forward or inverse
creation).
input:
fname_source: name of moving image (type: string)
fname_dest: name of fixed image (type: string)
window_length: size of window for moving average smoothing (type: int)
paramreg[optional]: parameters of antsRegistration (type: Paramreg class from sct_register_multimodal)
warp_forward_out: name of output forward warp (type: string)
warp_inverse_out: name of output inverse warp (type: string)
factor: sensibility factor for outlier detection (higher the factor, smaller the detection) (type: int or float)
verbose: display parameter (type: int, value: 0,1 or 2)
output:
creation of warping field files of name 'warp_forward_out' and 'warp_inverse_out'.
"""
if paramreg.type != 'seg':
print '\nERROR: Algorithm slicereg2d_pointwise only operates for segmentation type.'
sys.exit(2)
else:
from msct_register_regularized import register_seg, generate_warping_field
from numpy import asarray
from msct_smooth import smoothing_window, outliers_detection, outliers_completion
# Calculate displacement
x_disp, y_disp = register_seg(fname_source, fname_dest)
# Change to array
x_disp_a = asarray(x_disp)
y_disp_a = asarray(y_disp)
# Detect outliers
mask_x_a = outliers_detection(x_disp_a, type='median', factor=factor, return_filtered_signal='no', verbose=verbose)
mask_y_a = outliers_detection(y_disp_a, type='median', factor=factor, return_filtered_signal='no', verbose=verbose)
# Replace value of outliers by linear interpolation using closest non-outlier points
x_disp_a_no_outliers = outliers_completion(mask_x_a, verbose=0)
y_disp_a_no_outliers = outliers_completion(mask_y_a, verbose=0)
# Smooth results
x_disp_smooth = smoothing_window(x_disp_a_no_outliers, window_len=int(window_length), window='hanning', verbose=verbose)
y_disp_smooth = smoothing_window(y_disp_a_no_outliers, window_len=int(window_length), window='hanning', verbose=verbose)
# Generate warping field
generate_warping_field(fname_dest, x_disp_smooth, y_disp_smooth, fname=warp_forward_out) #name_warp= 'step'+str(paramreg.step)
# Inverse warping field
generate_warping_field(fname_source, -x_disp_smooth, -y_disp_smooth, fname=warp_inverse_out)
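# Illustrative helper (not called by the registration functions): the
# outlier-removal and smoothing pipeline shared by the slicereg2d_* algorithms,
# applied to a single displacement profile along z. Parameter meanings follow
# the functions below; this is a sketch, not part of the registration API.
def _example_regularize_displacement(disp, window_length=31, factor=2, verbose=0):
    from numpy import asarray
    from msct_smooth import smoothing_window, outliers_detection, outliers_completion
    disp_a = asarray(disp)
    # flag outliers with the Median Absolute Deviation (MAD) criterion
    mask = outliers_detection(disp_a, type='median', factor=factor, return_filtered_signal='no', verbose=verbose)
    # replace flagged values by linear interpolation between valid neighbours
    disp_no_outliers = outliers_completion(mask, verbose=0)
    # moving average smoothing with a hanning window
    return smoothing_window(disp_no_outliers, window_len=int(window_length), window='hanning', verbose=verbose)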
def register_slicereg2d_translation(fname_source, fname_dest, window_length=31, paramreg=Paramreg(step='0', type='im', algo='Translation', metric='MeanSquares', iter='10', shrink='1', smooth='0', gradStep='0.5'),
fname_mask='', warp_forward_out='step0Warp.nii.gz', warp_inverse_out='step0InverseWarp.nii.gz', factor=2, remove_temp_files=1, verbose=0,
ants_registration_params={'rigid': '', 'affine': '', 'compositeaffine': '', 'similarity': '', 'translation': '','bspline': ',10', 'gaussiandisplacementfield': ',3,0',
'bsplinedisplacementfield': ',5,10', 'syn': ',3,0', 'bsplinesyn': ',1,3'}):
"""Slice-by-slice regularized registration by translation of two images.
We first register slice-by-slice the two images using antsRegistration in 2D. Then we remove outliers using
Median Absolute Deviation technique (MAD) and smooth the translations along x and y axis using moving average
hanning window. Eventually, we generate two warping fields (forward and inverse) resulting from this regularized
registration technique.
The images must be of same size (otherwise generate_warping_field will not work for forward or inverse
creation).
input:
fname_source: name of moving image (type: string)
fname_dest: name of fixed image (type: string)
window_length[optional]: size of window for moving average smoothing (type: int)
paramreg[optional]: parameters of antsRegistration (type: Paramreg class from sct_register_multimodal)
fname_mask[optional]: name of mask file (type: string) (parameter -x of antsRegistration)
warp_forward_out[optional]: name of output forward warp (type: string)
warp_inverse_out[optional]: name of output inverse warp (type: string)
factor[optional]: sensibility factor for outlier detection (higher the factor, smaller the detection)
(type: int or float)
remove_temp_files[optional]: 1 to remove, 0 to keep (type: int)
verbose[optional]: display parameter (type: int, value: 0,1 or 2)
ants_registration_params[optional]: specific algorithm's parameters for antsRegistration (type: dictionary)
output:
creation of warping field files of name 'warp_forward_out' and 'warp_inverse_out'.
"""
from msct_register_regularized import register_images, generate_warping_field
from numpy import asarray
from msct_smooth import smoothing_window, outliers_detection, outliers_completion
# Calculate displacement
x_disp, y_disp = register_images(fname_source, fname_dest, mask=fname_mask, paramreg=paramreg, remove_tmp_folder=remove_temp_files, ants_registration_params=ants_registration_params)
# Change to array
x_disp_a = asarray(x_disp)
y_disp_a = asarray(y_disp)
# Detect outliers
mask_x_a = outliers_detection(x_disp_a, type='median', factor=factor, return_filtered_signal='no', verbose=verbose)
mask_y_a = outliers_detection(y_disp_a, type='median', factor=factor, return_filtered_signal='no', verbose=verbose)
# Replace value of outliers by linear interpolation using closest non-outlier points
x_disp_a_no_outliers = outliers_completion(mask_x_a, verbose=0)
y_disp_a_no_outliers = outliers_completion(mask_y_a, verbose=0)
# Smooth results
x_disp_smooth = smoothing_window(x_disp_a_no_outliers, window_len=int(window_length), window='hanning', verbose = verbose)
y_disp_smooth = smoothing_window(y_disp_a_no_outliers, window_len=int(window_length), window='hanning', verbose = verbose)
# Generate warping field
generate_warping_field(fname_dest, x_disp_smooth, y_disp_smooth, fname=warp_forward_out)
# Inverse warping field
generate_warping_field(fname_source, -x_disp_smooth, -y_disp_smooth, fname=warp_inverse_out)
def register_slicereg2d_rigid(fname_source, fname_dest, window_length=31, paramreg=Paramreg(step='0', type='im', algo='Rigid', metric='MeanSquares', iter='10', shrink='1', smooth='0', gradStep='0.5'),
fname_mask='', warp_forward_out='step0Warp.nii.gz', warp_inverse_out='step0InverseWarp.nii.gz', factor=2, remove_temp_files=1, verbose=0,
ants_registration_params={'rigid': '', 'affine': '', 'compositeaffine': '', 'similarity': '', 'translation': '','bspline': ',10', 'gaussiandisplacementfield': ',3,0',
'bsplinedisplacementfield': ',5,10', 'syn': ',3,0', 'bsplinesyn': ',1,3'}):
"""Slice-by-slice regularized registration (rigid) of two images.
We first register slice-by-slice the two images using antsRegistration in 2D. Then we remove outliers using
Median Absolute Deviation technique (MAD) and smooth the translations and angle of rotation along x and y axis using
moving average hanning window. Eventually, we generate two warping fields (forward and inverse) resulting from this
regularized registration technique.
The images must be of same size (otherwise generate_warping_field will not work for forward or inverse
creation).
input:
fname_source: name of moving image (type: string)
fname_dest: name of fixed image (type: string)
window_length[optional]: size of window for moving average smoothing (type: int)
paramreg[optional]: parameters of antsRegistration (type: Paramreg class from sct_register_multimodal)
fname_mask[optional]: name of mask file (type: string) (parameter -x of antsRegistration)
warp_forward_out[optional]: name of output forward warp (type: string)
warp_inverse_out[optional]: name of output inverse warp (type: string)
factor[optional]: sensibility factor for outlier detection (higher the factor, smaller the detection)
(type: int or float)
remove_temp_files[optional]: 1 to remove, 0 to keep (type: int)
verbose[optional]: display parameter (type: int, value: 0,1 or 2)
ants_registration_params[optional]: specific algorithm's parameters for antsRegistration (type: dictionary)
output:
creation of warping field files of name 'warp_forward_out' and 'warp_inverse_out'.
"""
from msct_register_regularized import register_images, generate_warping_field
from numpy import asarray
from msct_smooth import smoothing_window, outliers_detection, outliers_completion
# Calculate displacement
x_disp, y_disp, theta_rot = register_images(fname_source, fname_dest, mask=fname_mask, paramreg=paramreg, remove_tmp_folder=remove_temp_files, ants_registration_params=ants_registration_params)
# Change to array
x_disp_a = asarray(x_disp)
y_disp_a = asarray(y_disp)
theta_rot_a = asarray(theta_rot)
# Detect outliers
mask_x_a = outliers_detection(x_disp_a, type='median', factor=factor, return_filtered_signal='no', verbose=verbose)
mask_y_a = outliers_detection(y_disp_a, type='median', factor=factor, return_filtered_signal='no', verbose=verbose)
mask_theta_a = outliers_detection(theta_rot_a, type='median', factor=2, return_filtered_signal='no', verbose=verbose)
# Replace value of outliers by linear interpolation using closest non-outlier points
x_disp_a_no_outliers = outliers_completion(mask_x_a, verbose=0)
y_disp_a_no_outliers = outliers_completion(mask_y_a, verbose=0)
theta_rot_a_no_outliers = outliers_completion(mask_theta_a, verbose=0)
# Smooth results
x_disp_smooth = smoothing_window(x_disp_a_no_outliers, window_len=int(window_length), window='hanning', verbose = verbose)
y_disp_smooth = smoothing_window(y_disp_a_no_outliers, window_len=int(window_length), window='hanning', verbose = verbose)
theta_rot_smooth = smoothing_window(theta_rot_a_no_outliers, window_len=int(window_length), window='hanning', verbose = verbose)
# Generate warping field
generate_warping_field(fname_dest, x_disp_smooth, y_disp_smooth, theta_rot_smooth, fname=warp_forward_out)
# Inverse warping field
generate_warping_field(fname_source, -x_disp_smooth, -y_disp_smooth, -theta_rot_smooth, fname=warp_inverse_out)
def register_slicereg2d_affine(fname_source, fname_dest, window_length=31, paramreg=Paramreg(step='0', type='im', algo='Affine', metric='MeanSquares', iter='10', shrink='1', smooth='0', gradStep='0.5'),
fname_mask='', warp_forward_out='step0Warp.nii.gz', warp_inverse_out='step0InverseWarp.nii.gz', factor=2, remove_temp_files=1, verbose=0,
ants_registration_params={'rigid': '', 'affine': '', 'compositeaffine': '', 'similarity': '', 'translation': '','bspline': ',10', 'gaussiandisplacementfield': ',3,0',
'bsplinedisplacementfield': ',5,10', 'syn': ',3,0', 'bsplinesyn': ',1,3'}):
"""Slice-by-slice regularized registration (affine) of two images.
We first register slice-by-slice the two images using antsRegistration in 2D (algo: affine) and create 3D warping
fields (forward and inverse) by merging the 2D warping fields along z. Then we directly detect outliers and smooth
the 3d warping fields applying a moving average hanning window on each pixel of the plan xOy (i.e. we consider that
for a position (x,y) in the plan xOy, the variation along z of the vector of displacement (xo, yo, zo) of the
warping field should not be too abrupt). Eventually, we generate two warping fields (forward and inverse) resulting
from this regularized registration technique.
The images must be of same size (otherwise generate_warping_field will not work for forward or inverse
creation).
input:
fname_source: name of moving image (type: string)
fname_dest: name of fixed image (type: string)
window_length[optional]: size of window for moving average smoothing (type: int)
paramreg[optional]: parameters of antsRegistration (type: Paramreg class from sct_register_multimodal)
fname_mask[optional]: name of mask file (type: string) (parameter -x of antsRegistration)
warp_forward_out[optional]: name of output forward warp (type: string)
warp_inverse_out[optional]: name of output inverse warp (type: string)
factor[optional]: sensibility factor for outlier detection (higher the factor, smaller the detection)
(type: int or float)
remove_temp_files[optional]: 1 to remove, 0 to keep (type: int)
verbose[optional]: display parameter (type: int, value: 0,1 or 2)
ants_registration_params[optional]: specific algorithm's parameters for antsRegistration (type: dictionary)
output:
creation of warping field files of name 'warp_forward_out' and 'warp_inverse_out'.
"""
from nibabel import load, Nifti1Image, save
from msct_smooth import smoothing_window, outliers_detection, outliers_completion
from msct_register_regularized import register_images
from numpy import apply_along_axis, zeros
import sct_utils as sct
name_warp_syn = 'Warp_total_step_'+str(paramreg.step) # 'Warp_total'
# Calculate displacement
register_images(fname_source, fname_dest, mask=fname_mask, paramreg=paramreg, remove_tmp_folder=remove_temp_files, ants_registration_params=ants_registration_params)
print'\nRegularizing warping fields along z axis...'
print'\n\tSplitting warping fields ...'
# sct.run('isct_c3d -mcs ' + name_warp_syn + '.nii.gz -oo ' + name_warp_syn + '_x.nii.gz ' + name_warp_syn + '_y.nii.gz')
# sct.run('isct_c3d -mcs ' + name_warp_syn + '_inverse.nii.gz -oo ' + name_warp_syn + '_x_inverse.nii.gz ' + name_warp_syn + '_y_inverse.nii.gz')
sct.run('sct_maths -i ' + name_warp_syn + '.nii.gz -w -mcs -o ' + name_warp_syn + '_x.nii.gz,' + name_warp_syn + '_y.nii.gz')
sct.run('sct_maths -i ' + name_warp_syn + '_inverse.nii.gz -w -mcs -o ' + name_warp_syn + '_x_inverse.nii.gz,' + name_warp_syn + '_y_inverse.nii.gz')
data_warp_x = load(name_warp_syn + '_x.nii.gz').get_data()
data_warp_y = load(name_warp_syn + '_y.nii.gz').get_data()
hdr_warp = load(name_warp_syn + '_x.nii.gz').get_header()
data_warp_x_inverse = load(name_warp_syn + '_x_inverse.nii.gz').get_data()
data_warp_y_inverse = load(name_warp_syn + '_y_inverse.nii.gz').get_data()
hdr_warp_inverse = load(name_warp_syn + '_x_inverse.nii.gz').get_header()
#Outliers deletion
print'\n\tDeleting outliers...'
mask_x_a = apply_along_axis(lambda m: outliers_detection(m, type='median', factor=factor, return_filtered_signal='no', verbose=0), axis=-1, arr=data_warp_x)
mask_y_a = apply_along_axis(lambda m: outliers_detection(m, type='median', factor=factor, return_filtered_signal='no', verbose=0), axis=-1, arr=data_warp_y)
mask_x_inverse_a = apply_along_axis(lambda m: outliers_detection(m, type='median', factor=factor, return_filtered_signal='no', verbose=0), axis=-1, arr=data_warp_x_inverse)
mask_y_inverse_a = apply_along_axis(lambda m: outliers_detection(m, type='median', factor=factor, return_filtered_signal='no', verbose=0), axis=-1, arr=data_warp_y_inverse)
#Outliers replacement by linear interpolation using closest non-outlier points
data_warp_x_no_outliers = apply_along_axis(lambda m: outliers_completion(m, verbose=0), axis=-1, arr=mask_x_a)
data_warp_y_no_outliers = apply_along_axis(lambda m: outliers_completion(m, verbose=0), axis=-1, arr=mask_y_a)
data_warp_x_inverse_no_outliers = apply_along_axis(lambda m: outliers_completion(m, verbose=0), axis=-1, arr=mask_x_inverse_a)
data_warp_y_inverse_no_outliers = apply_along_axis(lambda m: outliers_completion(m, verbose=0), axis=-1, arr=mask_y_inverse_a)
#Smoothing of results along z
print'\n\tSmoothing results...'
data_warp_x_smooth = apply_along_axis(lambda m: smoothing_window(m, window_len=int(window_length), window='hanning', verbose=0), axis=-1, arr=data_warp_x_no_outliers)
data_warp_x_smooth_inverse = apply_along_axis(lambda m: smoothing_window(m, window_len=int(window_length), window='hanning', verbose=0), axis=-1, arr=data_warp_x_inverse_no_outliers)
data_warp_y_smooth = apply_along_axis(lambda m: smoothing_window(m, window_len=int(window_length), window='hanning', verbose=0), axis=-1, arr=data_warp_y_no_outliers)
data_warp_y_smooth_inverse = apply_along_axis(lambda m: smoothing_window(m, window_len=int(window_length), window='hanning', verbose=0), axis=-1, arr=data_warp_y_inverse_no_outliers)
print'\nSaving regularized warping fields...'
'''
from sct_maths import multicomponent_merge
from msct_image import Image
data_warp_smooth = multicomponent_merge([data_warp_x_smooth, data_warp_y_smooth])[0]
hdr_warp.set_intent('vector', (), '')
warp_smooth = Image(param=data_warp_smooth, absolutepath=warp_forward_out, hdr=hdr_warp)
warp_smooth.save()
data_warp_smooth_inverse = multicomponent_merge([data_warp_x_smooth_inverse, data_warp_y_smooth_inverse])[0]
hdr_warp_inverse.set_intent('vector', (), '')
warp_smooth_inverse = Image(param=data_warp_smooth_inverse, absolutepath=warp_inverse_out, hdr=hdr_warp_inverse)
warp_smooth_inverse.save()
'''
#Get image dimensions of destination image
from msct_image import Image
nx, ny, nz, nt, px, py, pz, pt = Image(fname_dest).dim
    data_warp_smooth = zeros((nx, ny, nz, 1, 3))
data_warp_smooth[:,:,:,0,0] = data_warp_x_smooth
data_warp_smooth[:,:,:,0,1] = data_warp_y_smooth
    data_warp_smooth_inverse = zeros((nx, ny, nz, 1, 3))
data_warp_smooth_inverse[:,:,:,0,0] = data_warp_x_smooth_inverse
data_warp_smooth_inverse[:,:,:,0,1] = data_warp_y_smooth_inverse
# Force header's parameter to intent so that the file may be recognised as a warping field by ants
hdr_warp.set_intent('vector', (), '')
hdr_warp_inverse.set_intent('vector', (), '')
img = Nifti1Image(data_warp_smooth, None, header=hdr_warp)
img_inverse = Nifti1Image(data_warp_smooth_inverse, None, header=hdr_warp_inverse)
save(img, filename=warp_forward_out)
print'\tFile ' + warp_forward_out + ' saved.'
save(img_inverse, filename=warp_inverse_out)
print'\tFile ' + warp_inverse_out + ' saved.'
def register_slicereg2d_syn(fname_source, fname_dest, window_length=31, paramreg=Paramreg(step='0', type='im', algo='SyN', metric='MeanSquares', iter='10', shrink='1', smooth='0', gradStep='0.5'),
fname_mask='', warp_forward_out='step0Warp.nii.gz', warp_inverse_out='step0InverseWarp.nii.gz', factor=2, remove_temp_files=1, verbose=0,
ants_registration_params={'rigid': '', 'affine': '', 'compositeaffine': '', 'similarity': '', 'translation': '','bspline': ',10', 'gaussiandisplacementfield': ',3,0',
'bsplinedisplacementfield': ',5,10', 'syn': ',3,0', 'bsplinesyn': ',1,3'}):
"""Slice-by-slice regularized registration (syn) of two images.
We first register slice-by-slice the two images using antsRegistration in 2D (algo: syn) and create 3D warping
fields (forward and inverse) by merging the 2D warping fields along z. Then we directly detect outliers and smooth
the 3d warping fields applying a moving average hanning window on each pixel of the plan xOy (i.e. we consider that
for a position (x,y) in the plan xOy, the variation along z of the vector of displacement (xo, yo, zo) of the
warping field should not be too abrupt). Eventually, we generate two warping fields (forward and inverse) resulting
from this regularized registration technique.
The images must be of same size (otherwise generate_warping_field will not work for forward or inverse
creation).
input:
fname_source: name of moving image (type: string)
fname_dest: name of fixed image (type: string)
window_length[optional]: size of window for moving average smoothing (type: int)
paramreg[optional]: parameters of antsRegistration (type: Paramreg class from sct_register_multimodal)
fname_mask[optional]: name of mask file (type: string) (parameter -x of antsRegistration)
warp_forward_out[optional]: name of output forward warp (type: string)
warp_inverse_out[optional]: name of output inverse warp (type: string)
factor[optional]: sensibility factor for outlier detection (higher the factor, smaller the detection)
(type: int or float)
remove_temp_files[optional]: 1 to remove, 0 to keep (type: int)
verbose[optional]: display parameter (type: int, value: 0,1 or 2)
ants_registration_params[optional]: specific algorithm's parameters for antsRegistration (type: dictionary)
output:
creation of warping field files of name 'warp_forward_out' and 'warp_inverse_out'.
"""
from nibabel import load, Nifti1Image, save
from msct_smooth import smoothing_window, outliers_detection, outliers_completion
from msct_register_regularized import register_images
from numpy import apply_along_axis, zeros
    import sct_utils as sct
    from msct_image import Image
name_warp_syn = 'Warp_total'
# Registrating images
register_images(fname_source, fname_dest, mask=fname_mask, paramreg=paramreg, remove_tmp_folder=remove_temp_files, ants_registration_params=ants_registration_params)
print'\nRegularizing warping fields along z axis...'
print'\n\tSplitting warping fields ...'
# sct.run('isct_c3d -mcs ' + name_warp_syn + '.nii.gz -oo ' + name_warp_syn + '_x.nii.gz ' + name_warp_syn + '_y.nii.gz')
# sct.run('isct_c3d -mcs ' + name_warp_syn + '_inverse.nii.gz -oo ' + name_warp_syn + '_x_inverse.nii.gz ' + name_warp_syn + '_y_inverse.nii.gz')
sct.run('sct_maths -i ' + name_warp_syn + '.nii.gz -w -mcs -o ' + name_warp_syn + '_x.nii.gz,' + name_warp_syn + '_y.nii.gz')
sct.run('sct_maths -i ' + name_warp_syn + '_inverse.nii.gz -w -mcs -o ' + name_warp_syn + '_x_inverse.nii.gz,' + name_warp_syn + '_y_inverse.nii.gz')
im_warp_x = Image(name_warp_syn + '_x.nii.gz')
data_warp_x = im_warp_x.data
im_warp_y = Image(name_warp_syn + '_y.nii.gz')
data_warp_y = im_warp_y.data
hdr_warp = im_warp_x.hdr
# data_warp_x = load(name_warp_syn + '_x.nii.gz').get_data()
# data_warp_y = load(name_warp_syn + '_y.nii.gz').get_data()
# hdr_warp = load(name_warp_syn + '_x.nii.gz').get_header()
im_warp_x_inverse = Image(name_warp_syn + '_x_inverse.nii.gz')
data_warp_x_inverse = im_warp_x_inverse.data
im_warp_y_inverse = Image(name_warp_syn + '_y_inverse.nii.gz')
data_warp_y_inverse = im_warp_y_inverse.data
hdr_warp_inverse = im_warp_x_inverse.hdr
# data_warp_x_inverse = load(name_warp_syn + '_x_inverse.nii.gz').get_data()
# data_warp_y_inverse = load(name_warp_syn + '_y_inverse.nii.gz').get_data()
# hdr_warp_inverse = load(name_warp_syn + '_x_inverse.nii.gz').get_header()
#Outliers deletion
print'\n\tDeleting outliers...'
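    # Outlier screening is done independently for each (x, y) pixel: the displacement profile along z
    # (axis=-1) is passed to outliers_detection with a median-based criterion, and 'factor' scales how
    # strict the rejection threshold is.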
mask_x_a = apply_along_axis(lambda m: outliers_detection(m, type='median', factor=factor, return_filtered_signal='no', verbose=0), axis=-1, arr=data_warp_x)
mask_y_a = apply_along_axis(lambda m: outliers_detection(m, type='median', factor=factor, return_filtered_signal='no', verbose=0), axis=-1, arr=data_warp_y)
mask_x_inverse_a = apply_along_axis(lambda m: outliers_detection(m, type='median', factor=factor, return_filtered_signal='no', verbose=0), axis=-1, arr=data_warp_x_inverse)
mask_y_inverse_a = apply_along_axis(lambda m: outliers_detection(m, type='median', factor=factor, return_filtered_signal='no', verbose=0), axis=-1, arr=data_warp_y_inverse)
#Outliers replacement by linear interpolation using closest non-outlier points
data_warp_x_no_outliers = apply_along_axis(lambda m: outliers_completion(m, verbose=0), axis=-1, arr=mask_x_a)
data_warp_y_no_outliers = apply_along_axis(lambda m: outliers_completion(m, verbose=0), axis=-1, arr=mask_y_a)
data_warp_x_inverse_no_outliers = apply_along_axis(lambda m: outliers_completion(m, verbose=0), axis=-1, arr=mask_x_inverse_a)
data_warp_y_inverse_no_outliers = apply_along_axis(lambda m: outliers_completion(m, verbose=0), axis=-1, arr=mask_y_inverse_a)
#Smoothing of results along z
print'\n\tSmoothing results...'
data_warp_x_smooth = apply_along_axis(lambda m: smoothing_window(m, window_len=int(window_length), window='hanning', verbose=0), axis=-1, arr=data_warp_x_no_outliers)
data_warp_x_smooth_inverse = apply_along_axis(lambda m: smoothing_window(m, window_len=int(window_length), window='hanning', verbose=0), axis=-1, arr=data_warp_x_inverse_no_outliers)
data_warp_y_smooth = apply_along_axis(lambda m: smoothing_window(m, window_len=int(window_length), window='hanning', verbose=0), axis=-1, arr=data_warp_y_no_outliers)
data_warp_y_smooth_inverse = apply_along_axis(lambda m: smoothing_window(m, window_len=int(window_length), window='hanning', verbose=0), axis=-1, arr=data_warp_y_inverse_no_outliers)
print'\nSaving regularized warping fields...'
    # Get image dimensions of destination image (Image is imported at the top of this function)
    nx, ny, nz, nt, px, py, pz, pt = Image(fname_dest).dim
    data_warp_smooth = zeros((nx, ny, nz, 1, 3))
    data_warp_smooth[:, :, :, 0, 0] = data_warp_x_smooth
    data_warp_smooth[:, :, :, 0, 1] = data_warp_y_smooth
    data_warp_smooth_inverse = zeros((nx, ny, nz, 1, 3))
data_warp_smooth_inverse[:,:,:,0,0] = data_warp_x_smooth_inverse
data_warp_smooth_inverse[:,:,:,0,1] = data_warp_y_smooth_inverse
# Force header's parameter to intent so that the file may be recognised as a warping field by ants
hdr_warp.set_intent('vector', (), '')
hdr_warp_inverse.set_intent('vector', (), '')
img = Nifti1Image(data_warp_smooth, None, header=hdr_warp)
img_inverse = Nifti1Image(data_warp_smooth_inverse, None, header=hdr_warp_inverse)
save(img, filename=warp_forward_out)
print'\tFile ' + warp_forward_out + ' saved.'
save(img_inverse, filename=warp_inverse_out)
print'\tFile ' + warp_inverse_out + ' saved.'
def register_slicereg2d_bsplinesyn(fname_source, fname_dest, window_length=31, paramreg=Paramreg(step='0', type='im', algo='BSplineSyN', metric='MeanSquares', iter='10', shrink='1', smooth='0', gradStep='0.5'),
fname_mask='', warp_forward_out='step0Warp.nii.gz', warp_inverse_out='step0InverseWarp.nii.gz', factor=2, remove_temp_files=1, verbose=0,
ants_registration_params={'rigid': '', 'affine': '', 'compositeaffine': '', 'similarity': '', 'translation': '','bspline': ',10', 'gaussiandisplacementfield': ',3,0',
'bsplinedisplacementfield': ',5,10', 'syn': ',3,0', 'bsplinesyn': ',1,3'}):
"""Slice-by-slice regularized registration (bsplinesyn) of two images.
    We first register the two images slice by slice using antsRegistration in 2D (algo: bsplinesyn) and create 3D warping
    fields (forward and inverse) by merging the 2D warping fields along z. We then detect outliers in the 3D warping
    fields and smooth them by applying a moving-average Hanning window to each pixel of the xOy plane (i.e. we assume
    that, for a position (x, y) in the xOy plane, the displacement vector (xo, yo, zo) of the warping field should not
    vary too abruptly along z). Finally, we generate two warping fields (forward and inverse) resulting from this
    regularized registration technique.
    The images must have the same size (otherwise generate_warping_field will not work for the forward or inverse
    creation).
input:
fname_source: name of moving image (type: string)
fname_dest: name of fixed image (type: string)
window_length[optional]: size of window for moving average smoothing (type: int)
paramreg[optional]: parameters of antsRegistration (type: Paramreg class from sct_register_multimodal)
fname_mask[optional]: name of mask file (type: string) (parameter -x of antsRegistration)
warp_forward_out[optional]: name of output forward warp (type: string)
warp_inverse_out[optional]: name of output inverse warp (type: string)
        factor[optional]: sensitivity factor for outlier detection (the higher the factor, the fewer outliers
                            are detected) (type: int or float)
remove_temp_files[optional]: 1 to remove, 0 to keep (type: int)
verbose[optional]: display parameter (type: int, value: 0,1 or 2)
ants_registration_params[optional]: specific algorithm's parameters for antsRegistration (type: dictionary)
output:
creation of warping field files of name 'warp_forward_out' and 'warp_inverse_out'.
"""
from nibabel import load, Nifti1Image, save
from msct_smooth import smoothing_window, outliers_detection, outliers_completion
from msct_register_regularized import register_images
from numpy import apply_along_axis, zeros
import sct_utils as sct
from msct_image import Image
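    # Minimal usage sketch (hypothetical file names; both images are assumed to share the same grid):
    #   register_slicereg2d_bsplinesyn('src.nii.gz', 'dest.nii.gz', window_length=31,
    #                                  warp_forward_out='warp_forward.nii.gz',
    #                                  warp_inverse_out='warp_inverse.nii.gz')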
name_warp_syn = 'Warp_total'
    # Registering images
register_images(fname_source, fname_dest, mask=fname_mask, paramreg=paramreg, remove_tmp_folder=remove_temp_files, ants_registration_params=ants_registration_params)
print'\nRegularizing warping fields along z axis...'
print'\n\tSplitting warping fields ...'
# sct.run('isct_c3d -mcs ' + name_warp_syn + '.nii.gz -oo ' + name_warp_syn + '_x.nii.gz ' + name_warp_syn + '_y.nii.gz')
# sct.run('isct_c3d -mcs ' + name_warp_syn + '_inverse.nii.gz -oo ' + name_warp_syn + '_x_inverse.nii.gz ' + name_warp_syn + '_y_inverse.nii.gz')
sct.run('sct_maths -i ' + name_warp_syn + '.nii.gz -w -mcs -o ' + name_warp_syn + '_x.nii.gz,' + name_warp_syn + '_y.nii.gz')
sct.run('sct_maths -i ' + name_warp_syn + '_inverse.nii.gz -w -mcs -o ' + name_warp_syn + '_x_inverse.nii.gz,' + name_warp_syn + '_y_inverse.nii.gz')
data_warp_x = load(name_warp_syn + '_x.nii.gz').get_data()
data_warp_y = load(name_warp_syn + '_y.nii.gz').get_data()
hdr_warp = load(name_warp_syn + '_x.nii.gz').get_header()
data_warp_x_inverse = load(name_warp_syn + '_x_inverse.nii.gz').get_data()
data_warp_y_inverse = load(name_warp_syn + '_y_inverse.nii.gz').get_data()
hdr_warp_inverse = load(name_warp_syn + '_x_inverse.nii.gz').get_header()
#Outliers deletion
print'\n\tDeleting outliers...'
mask_x_a = apply_along_axis(lambda m: outliers_detection(m, type='median', factor=factor, return_filtered_signal='no', verbose=0), axis=-1, arr=data_warp_x)
mask_y_a = apply_along_axis(lambda m: outliers_detection(m, type='median', factor=factor, return_filtered_signal='no', verbose=0), axis=-1, arr=data_warp_y)
mask_x_inverse_a = apply_along_axis(lambda m: outliers_detection(m, type='median', factor=factor, return_filtered_signal='no', verbose=0), axis=-1, arr=data_warp_x_inverse)
mask_y_inverse_a = apply_along_axis(lambda m: outliers_detection(m, type='median', factor=factor, return_filtered_signal='no', verbose=0), axis=-1, arr=data_warp_y_inverse)
#Outliers replacement by linear interpolation using closest non-outlier points
data_warp_x_no_outliers = apply_along_axis(lambda m: outliers_completion(m, verbose=0), axis=-1, arr=mask_x_a)
data_warp_y_no_outliers = apply_along_axis(lambda m: outliers_completion(m, verbose=0), axis=-1, arr=mask_y_a)
data_warp_x_inverse_no_outliers = apply_along_axis(lambda m: outliers_completion(m, verbose=0), axis=-1, arr=mask_x_inverse_a)
data_warp_y_inverse_no_outliers = apply_along_axis(lambda m: outliers_completion(m, verbose=0), axis=-1, arr=mask_y_inverse_a)
#Smoothing of results along z
print'\n\tSmoothing results...'
data_warp_x_smooth = apply_along_axis(lambda m: smoothing_window(m, window_len=int(window_length), window='hanning', verbose=0), axis=-1, arr=data_warp_x_no_outliers)
data_warp_x_smooth_inverse = apply_along_axis(lambda m: smoothing_window(m, window_len=int(window_length), window='hanning', verbose=0), axis=-1, arr=data_warp_x_inverse_no_outliers)
data_warp_y_smooth = apply_along_axis(lambda m: smoothing_window(m, window_len=int(window_length), window='hanning', verbose=0), axis=-1, arr=data_warp_y_no_outliers)
data_warp_y_smooth_inverse = apply_along_axis(lambda m: smoothing_window(m, window_len=int(window_length), window='hanning', verbose=0), axis=-1, arr=data_warp_y_inverse_no_outliers)
print'\nSaving regularized warping fields...'
#Get image dimensions of destination image
nx, ny, nz, nt, px, py, pz, pt = Image(fname_dest).dim
    data_warp_smooth = zeros((nx, ny, nz, 1, 3))
    data_warp_smooth[:, :, :, 0, 0] = data_warp_x_smooth
    data_warp_smooth[:, :, :, 0, 1] = data_warp_y_smooth
    data_warp_smooth_inverse = zeros((nx, ny, nz, 1, 3))
data_warp_smooth_inverse[:,:,:,0,0] = data_warp_x_smooth_inverse
data_warp_smooth_inverse[:,:,:,0,1] = data_warp_y_smooth_inverse
# Force header's parameter to intent so that the file may be recognised as a warping field by ants
hdr_warp.set_intent('vector', (), '')
hdr_warp_inverse.set_intent('vector', (), '')
img = Nifti1Image(data_warp_smooth, None, header=hdr_warp)
img_inverse = Nifti1Image(data_warp_smooth_inverse, None, header=hdr_warp_inverse)
save(img, filename=warp_forward_out)
print'\tFile ' + warp_forward_out + ' saved.'
save(img_inverse, filename=warp_inverse_out)
print'\tFile ' + warp_inverse_out + ' saved.'
| mit | 6,486,023,166,097,411,000 | 72.807851 | 220 | 0.683229 | false |
privb0x23/bliss-initramfs | pkg/hooks/Base.py | 1 | 2070 | # Copyright 2012-2017 Jonathan Vasquez <[email protected]>
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from pkg.hooks.Hook import Hook
from pkg.libs.Tools import Tools
class Base(Hook):
    # Returns the kmod links
    @classmethod
    def GetKmodLinks(cls):
return cls._kmod_links
_files = [
# sys-apps/busybox
"/bin/busybox",
# sys-apps/kmod
Tools.GetProgramPath("kmod"),
# app-shells/bash
"/bin/bash",
# sys-apps/grep
"/bin/egrep",
"/bin/fgrep",
"/bin/grep",
# sys-apps/kbd,
"/usr/bin/loadkeys",
# udev
Tools.GetUdevPath(),
Tools.GetProgramPath("udevadm"),
]
_kmod_links = [
"depmod",
"insmod",
"lsmod",
"modinfo",
"modprobe",
"rmmod",
]
| bsd-2-clause | -8,870,375,991,799,534,000 | 31.857143 | 82 | 0.68599 | false |
divyamamgai/UdacityProjectMovieTrailerWebsite | media.py | 1 | 3327 | import webbrowser
import re
import urllib.request
import urllib.parse
import json
# The 'as' keyword creates an alias, so xml.etree.ElementTree can be referenced as eT below.
import xml.etree.ElementTree as eT
class Movie:
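    """Container for a movie's title, plot, poster URL and trailer URL."""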
def __init__(self, title, plot, poster, trailer_url):
self.title = title
self.plot = plot
self.poster_url = poster
self.trailer_url = trailer_url
def show_trailer(self):
# Opens the Web Browser with the trailer URL of the movie.
webbrowser.open(self.trailer_url)
@staticmethod
def generate_traileraddict_id(title):
# Firstly strip the title to remove excess white spaces surrounding the text.
# Secondly remove all the non-alphabet and non-numeric characters from the title.
# Thirdly convert the result to lowercase and convert all the white spaces (even the groups) to dash.
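        # For example (hypothetical title): "  The Dark Knight!  " becomes "the-dark-knight".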
return re.sub(r"(\s|-)+", "-", (re.sub(r"[^a-zA-Z0-9\s\-]", "", title.strip())).lower())
@staticmethod
def initialize_from_title(title, traileraddict_trailer_type="trailer"):
print("Requesting information for the movie '" + title + "' from omdb...")
# Make API request to omdb to get movie information.
omdb_api_connection = urllib.request.urlopen("http://www.omdbapi.com/?t=" + urllib.parse.quote(title))
omdb_api_response = omdb_api_connection.read()
omdb_api_connection.close()
# http.client.HTTPResponse.read() returns raw bytes which is needed to be converted
# to string using UTF-8 encoding.
omdb_movie_data = json.loads(omdb_api_response.decode("utf-8"))
# Check whether the movie was found or not.
if omdb_movie_data["Response"] == "True":
print("Movie information found successfully!")
print("Requesting trailer for the movie '" + title + "' from Trailer Addict...")
# Make API request to Trailer Addict to get movie trailer.
traileraddict_api_connection = urllib.request.urlopen("http://simpleapi.traileraddict.com/" +
Movie.generate_traileraddict_id(title) +
"/" + traileraddict_trailer_type)
traileraddict_api_response = traileraddict_api_connection.read()
traileraddict_api_connection.close()
# Parse XML returned as response from the Trailer Addict API.
traileraddict_xml_root = eT.fromstring(traileraddict_api_response.decode("utf-8"))
# In the Trailer Addict Simple API first element of the root is trailer which
# contains the desired movie data. Inside that trailer element is an tag, embed_standard,
# which contains embed HTML code for the trailer. We parse the embed HTML code to get
# the movie trailer URL from the src attribute of the iframe element.
trailer_url = eT.fromstring(traileraddict_xml_root[0].find("embed_standard").text).attrib["src"]
print("Movie trailer found successfully!")
movie = Movie(title, omdb_movie_data["Plot"], omdb_movie_data["Poster"], trailer_url)
return movie
else:
print("Movie not found!")
return None
| mit | 3,126,210,087,341,528,000 | 54.45 | 115 | 0.637211 | false |
mattsmart/biomodels | transcriptome_clustering/spectrums.py | 1 | 9188 | import matplotlib.pyplot as plt
import numpy as np
import os
from inference import choose_J_from_general_form, infer_interactions, error_fn
from settings import FOLDER_OUTPUT
from visualize_matrix import plot_matrix
def get_spectrum_from_arr(J, real=True, sort=True):
# TODO deal with massive complex part if necessary
eig, V = np.linalg.eig(J)
if real:
eig = np.real(eig)
if sort:
eig = np.sort(eig)
return eig
def get_spectrums(C, D, num_spectrums=10, method='U_data', print_errors=True):
"""
Returns J's (generated from method) and their spectrums and their labels
J's returned as list of arrays
Shape is num_spectrums X dim_spectrum
"""
assert method in ['U_data', 'U_lyap', 'infer_data', 'infer_lyap']
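    # 'U_*' methods build J from the general form with an increasing noise scale, while 'infer_*'
    # methods reconstruct J by regularized inference over a logarithmic range of alpha values.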
spectrums = np.zeros((num_spectrums, D.shape[0]))
list_of_J = [0]*num_spectrums
# generate spectrum labels
if method[0] == 'U':
scales = np.linspace(0, 0.009, num_spectrums)
labels = ['scale_%.3f' % i for i in scales]
else:
alphas = np.logspace(-10, -1, num_spectrums)
labels = ['alpha_%.2e' % a for a in alphas]
for idx in xrange(num_spectrums):
if method[0] == 'U':
J = choose_J_from_general_form(C, D, scale=scales[idx])
else:
J = infer_interactions(C, D, alpha=alphas[idx])
if print_errors:
err = error_fn(C, D, J)
            # 'alphas' only exists for the 'infer_*' methods, so report the generic label instead
            print "Error in method %s, idx %d, is %.3f (%s)" % (method, idx, err, labels[idx])
list_of_J[idx] = J
spectrums[idx, :] = get_spectrum_from_arr(J, real=True)
return list_of_J, spectrums, labels
def get_J_truncated_spectrum(J, idx):
"""
Given an idx, removes row/col idx of J and computes the spectrum of the new (n-1)*(n-1) array
"""
J_reduce = J.copy()
J_reduce = np.delete(J_reduce, (idx), axis=0)
J_reduce = np.delete(J_reduce, (idx), axis=1)
return get_spectrum_from_arr(J_reduce, real=True)
def scan_J_truncations(J, verbose=False, spectrum_unperturbed=None):
"""
Given a Jacobian matrix J
(1) compute the spectrum
(2) assess if the spectrum is a suitable starting point
(3) iteratively delete all row/col pairs and compute spectrum of each
(4) for each row/col pair, report if the spectrum has been sufficiently perturbed
"""
assert J.shape[0] == J.shape[1]
n = J.shape[0]
if spectrum_unperturbed is None:
spectrum_unperturbed = get_spectrum_from_arr(J, real=True)
spectrums_perturbed = np.zeros((n, n-1))
if verbose:
print 'unperturbed', '\n', spectrum_unperturbed
for idx in xrange(n):
spectrum_idx = get_J_truncated_spectrum(J, idx)
spectrums_perturbed[idx, :] = spectrum_idx
if verbose:
print idx, '\n', spectrum_idx
return spectrum_unperturbed, spectrums_perturbed
def gene_control_scores(spectrum_unperturbed, spectrums_perturbed, fixed_denom=None, use_min=True):
    """
    Gene "control" score (see Sid 2018 draft for the idea): for each gene, take the shift of the
    extreme eigenvalue when that gene's row/col is deleted (signed so that the index stays positive),
    normalized by 'fixed_denom' if given, otherwise by the RMS of the unperturbed spectrum.
    'use_min' selects whether the smallest or the largest eigenvalue is used.
    """
if use_min:
cg = np.min(spectrums_perturbed, axis=1) # numerator left term
cg = cg - np.min(spectrum_unperturbed) # numerator full
else:
cg = np.max(spectrums_perturbed, axis=1) # numerator left term
cg = np.max(spectrum_unperturbed) - cg # numerator full (note swap for index positivity)
if fixed_denom is not None:
assert fixed_denom.shape == spectrum_unperturbed.shape
cg = np.divide(cg, fixed_denom)
else:
cg = cg / np.sqrt(np.mean((spectrum_unperturbed ** 2)))
return cg
def plot_spectrum_hists(spectrums, labels, method='U', hist='default', title_mod='', plotdir=FOLDER_OUTPUT, show=False):
# TODO fix x axis range -6 6
# TODO remove method from title since not used
def set_axis_style(ax, labels):
ax.get_xaxis().set_tick_params(direction='out')
ax.xaxis.set_ticks_position('bottom')
ax.set_xticks(np.arange(1, len(labels) + 1))
ax.set_xticklabels(labels)
ax.set_xlim(0.25, len(labels) + 0.75)
ax.set_xlabel('Sample name')
f = plt.figure(figsize=(10, 6))
if hist == 'default':
# plot first spectrum to get bins
_, bins, _ = plt.hist(spectrums[0, :], bins=10, range=[-6, 6], alpha=0.5, normed=True, label=labels[0])
for idx in xrange(1, len(labels)):
_ = plt.hist(spectrums[idx, :], bins=bins, alpha=0.5, normed=True, label=labels[idx])
plt.xlabel('Re(lambda)')
plt.ylabel('Spectrums')
elif hist == 'violin':
print 'hist type %s not yet implemented in plot_spectrum_hists(...)' % hist
plt.violinplot(spectrums.T, showmeans=False, showmedians=True)
set_axis_style(plt.gca(), labels)
plt.ylabel('Re(lambda)')
else:
print 'hist type %s not supported in plot_spectrum_hists(...)' % hist
assert 1==2
plt.title('Spectrums from %s %s' % (method, title_mod))
plt.legend()
plt.savefig(plotdir + os.sep + 'spectrum_hist_%s_%s_%s.png' % (hist, method, title_mod))
if show:
plt.show()
return
def plot_rank_order_spectrum(spectrum, method='U', title_mod='', plotdir=FOLDER_OUTPUT, show=False):
f = plt.figure(figsize=(10, 6))
sorted_spectrums_low_to_high = np.sort(spectrum)
sorted_spectrums_high_to_low = sorted_spectrums_low_to_high[::-1]
plt.bar(range(len(sorted_spectrums_high_to_low)), sorted_spectrums_high_to_low)
plt.axhline(0.0, linewidth=1.0, color='k')
plt.ylabel('Re(lambda)')
plt.xlabel('Eigenvalue ranking')
plt.title('Spectrum from %s %s' % (method, title_mod))
plt.savefig(plotdir + os.sep + 'spectrum_ranking_%s_%s.pdf' % (method, title_mod))
if show:
plt.show()
return
def plot_spectrum_extremes(spectrum_unperturbed, spectrums_perturbed, method='U', title_mod='', plotdir=FOLDER_OUTPUT, show=False, max=True):
n = len(spectrum_unperturbed)
bar_width = 0.45
plt.close('all')
f = plt.figure(figsize=(10, 6))
ax = plt.gca()
if max:
spectrum_unperturbed_max = np.max(spectrum_unperturbed)
spectrums_perturbed_maxes = np.max(spectrums_perturbed, axis=1)
plt.bar(np.arange(n), spectrums_perturbed_maxes, bar_width)
plt.axhline(spectrum_unperturbed_max, linewidth=1.0, color='g')
#plt.ylim(np.min(spectrums_perturbed_maxes) * 1.05, np.max(spectrums_perturbed_maxes) * 1.05)
plt.ylabel('Max Re(lambda)')
plt.title('Largest eigenvalue after row/col deletion (green = no deletion) from %s %s' % (method, title_mod))
figpath = plotdir + os.sep + 'spectrum_perturbed_max_%s_%s.pdf' % (method, title_mod)
else:
spectrum_unperturbed_min = np.min(spectrum_unperturbed)
spectrums_perturbed_mins = np.min(spectrums_perturbed, axis=1)
ax.bar(np.arange(n), spectrums_perturbed_mins, bar_width)
plt.axhline(spectrum_unperturbed_min, linewidth=1.0, color='g')
#plt.ylim(np.min(spectrums_perturbed_mins) * 1.05, np.max(spectrums_perturbed_mins) * 1.05)
plt.ylabel('Min Re(lambda)')
plt.title('Lowest eigenvalue after row/col deletion (green = no deletion) from %s %s' % (method, title_mod))
figpath = plotdir + os.sep + 'spectrum_perturbed_min_%s_%s.pdf' % (method, title_mod)
plt.axhline(0.0, linewidth=1.0, color='k')
ax.set_xticks(np.arange(n))
plt.xlabel('Index of deleted row/col')
plt.savefig(figpath)
if show:
plt.show()
return
def plot_sliding_tau_scores(tau_range, gene_score_arr, gene_score_label, score_type, plotdir=FOLDER_OUTPUT, show=False):
assert gene_score_arr.shape[0] == tau_range.shape[0]
plt.close('all')
f = plt.figure(figsize=(12, 7))
plt.plot(tau_range, gene_score_arr, '--ok', alpha=0.3)
# add vertical line at tau = 2.0 bifurcation
plt.axvline(2.0, linewidth=1.0, color='k', alpha=0.7)
# highlight top k curves
top_k = 2
sorted_top_curves = np.argsort(np.sum(gene_score_arr, axis=0))[::-1]
for rank, k in enumerate(sorted_top_curves[0:top_k]):
plt.plot(tau_range, gene_score_arr[:, k], '--o', alpha=0.7, label='rank%d = gene %d' % (rank, k))
for rank, k in enumerate(sorted_top_curves[-top_k:][::-1]):
plt.plot(tau_range, gene_score_arr[:, k], '--x', alpha=0.7, label='rank%d = gene %d' %
(gene_score_arr.shape[1] - 1 - rank, k))
plt.legend()
plt.xlabel('tau')
plt.ylabel('%s index' % score_type)
plt.title('%s index from %s over all genes, approaching bifurcation (tau=2.0)' % (score_type, gene_score_label))
figpath = plotdir + os.sep + 'score_%s_%s.pdf' % (gene_score_label, score_type)
plt.savefig(figpath)
if show:
plt.show()
return
if __name__ == '__main__':
num_spectrum = 10
fake_spectrums = np.random.normal(0.0, 2.0, (num_spectrum, 500))
fake_labels = [str(a) for a in range(num_spectrum)]
plot_spectrum_hists(fake_spectrums, fake_labels, hist='default', title_mod='(fake_main)', show=True)
plot_spectrum_hists(fake_spectrums, fake_labels, hist='violin', title_mod='(fake_main)', show=True)
| mit | 5,367,572,452,356,783,000 | 41.734884 | 141 | 0.626034 | false |
johnnoone/aioconsul | tests/test_common.py | 1 | 1306 | import pytest
from aioconsul.common import Address, parse_addr
from aioconsul.common import duration_to_timedelta, timedelta_to_duration
from datetime import timedelta
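# Consul-style duration strings ("10s", "2m", "2h", "2d") should round-trip with timedelta objects.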
@pytest.mark.parametrize("a, b", [
("10s", timedelta(seconds=10)),
("2m", timedelta(minutes=2)),
("2h", timedelta(hours=2)),
("2d", timedelta(days=2)),
])
def test_duration(a, b):
assert duration_to_timedelta(a) == b
assert a == timedelta_to_duration(b)
@pytest.mark.parametrize("input, expected", [
("localhost",
Address(proto=None, host="localhost", port=None)),
("http://localhost",
Address(proto="http", host="localhost", port=None)),
("udp://localhost",
Address(proto="udp", host="localhost", port=None)),
("tcp://localhost",
Address(proto="tcp", host="localhost", port=None)),
("unix://localhost",
Address(proto="unix", host="localhost", port=None)),
(("localhost", 8080),
Address(proto=None, host="localhost", port=8080)),
(8080,
Address(proto=None, host=None, port=8080)),
("127.0.0.1:8080",
Address(proto=None, host="127.0.0.1", port=8080)),
(Address(proto=None, host="localhost", port=None),
Address(proto=None, host="localhost", port=None)),
])
def test_addr(input, expected):
assert parse_addr(input) == expected
| bsd-3-clause | -8,695,781,263,740,024,000 | 32.487179 | 73 | 0.644717 | false |
nasa/39A | spaceapps/awards/models.py | 1 | 1227 | from django.db import models
from projects.models import Project
from locations.models import Location
class LocalAward(models.Model):
project = models.ForeignKey(Project)
location = models.ForeignKey(Location)
title = models.CharField(max_length=100, blank=True)
    def is_eligible(self):
        return bool(self.project.source_url)
class Nomination(models.Model):
project = models.ForeignKey(Project)
location = models.ForeignKey(Location)
class GlobalAwardClass(models.Model):
title = models.CharField(max_length=150, unique=True, blank=True)
class Meta:
verbose_name = 'Global Award Class'
verbose_name_plural = 'Global Award Classes'
def __unicode__(self):
return self.title
class GlobalAwardFinalist(models.Model):
global_award_class = models.ForeignKey('GlobalAwardClass')
project = models.ForeignKey(Project)
best_in_class = models.BooleanField(default=False)
class Meta:
verbose_name = 'Global Award Finalist'
verbose_name_plural = 'Global Award Finalists'
def __unicode__(self):
return '%s (%s)' % (self.project.title, self.global_award_class.title) | apache-2.0 | 4,471,971,229,118,569,000 | 30.487179 | 78 | 0.688672 | false |
commaai/openpilot | selfdrive/car/honda/carstate.py | 1 | 17654 | from cereal import car
from collections import defaultdict
from common.numpy_fast import interp
from opendbc.can.can_define import CANDefine
from opendbc.can.parser import CANParser
from selfdrive.config import Conversions as CV
from selfdrive.car.interfaces import CarStateBase
from selfdrive.car.honda.values import CAR, DBC, STEER_THRESHOLD, SPEED_FACTOR, HONDA_BOSCH, HONDA_BOSCH_ALT_BRAKE_SIGNAL
TransmissionType = car.CarParams.TransmissionType
def calc_cruise_offset(offset, speed):
  # heuristic formula so that speed is controlled to ~ 0.3m/s below pid_speed
# constraints to solve for _K0, _K1, _K2 are:
# - speed = 0m/s, out = -0.3
  # - speed = 34m/s, offset = 2.0, out = -0.25
# - speed = 34m/s, offset = -2.5, out = -1.8
_K0 = -0.3
_K1 = -0.01879
_K2 = 0.01013
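  # sanity check: at speed = 34 m/s, offset = 2.0 -> -0.3 - 0.01879*34 + 0.01013*34*2.0 ~= -0.25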
return min(_K0 + _K1 * speed + _K2 * speed * offset, 0.)
def get_can_signals(CP, gearbox_msg="GEARBOX"):
# this function generates lists for signal, messages and initial values
signals = [
("XMISSION_SPEED", "ENGINE_DATA", 0),
("WHEEL_SPEED_FL", "WHEEL_SPEEDS", 0),
("WHEEL_SPEED_FR", "WHEEL_SPEEDS", 0),
("WHEEL_SPEED_RL", "WHEEL_SPEEDS", 0),
("WHEEL_SPEED_RR", "WHEEL_SPEEDS", 0),
("STEER_ANGLE", "STEERING_SENSORS", 0),
("STEER_ANGLE_RATE", "STEERING_SENSORS", 0),
("MOTOR_TORQUE", "STEER_MOTOR_TORQUE", 0),
("STEER_TORQUE_SENSOR", "STEER_STATUS", 0),
("LEFT_BLINKER", "SCM_FEEDBACK", 0),
("RIGHT_BLINKER", "SCM_FEEDBACK", 0),
("GEAR", gearbox_msg, 0),
("SEATBELT_DRIVER_LAMP", "SEATBELT_STATUS", 1),
("SEATBELT_DRIVER_LATCHED", "SEATBELT_STATUS", 0),
("BRAKE_PRESSED", "POWERTRAIN_DATA", 0),
("BRAKE_SWITCH", "POWERTRAIN_DATA", 0),
("CRUISE_BUTTONS", "SCM_BUTTONS", 0),
("ESP_DISABLED", "VSA_STATUS", 1),
("USER_BRAKE", "VSA_STATUS", 0),
("BRAKE_HOLD_ACTIVE", "VSA_STATUS", 0),
("STEER_STATUS", "STEER_STATUS", 5),
("GEAR_SHIFTER", gearbox_msg, 0),
("PEDAL_GAS", "POWERTRAIN_DATA", 0),
("CRUISE_SETTING", "SCM_BUTTONS", 0),
("ACC_STATUS", "POWERTRAIN_DATA", 0),
]
checks = [
("ENGINE_DATA", 100),
("WHEEL_SPEEDS", 50),
("STEERING_SENSORS", 100),
("SEATBELT_STATUS", 10),
("CRUISE", 10),
("POWERTRAIN_DATA", 100),
("VSA_STATUS", 50),
("STEER_STATUS", 100),
("STEER_MOTOR_TORQUE", 0), # TODO: not on every car
]
if CP.carFingerprint == CAR.ODYSSEY_CHN:
checks += [
("SCM_FEEDBACK", 25),
("SCM_BUTTONS", 50),
]
else:
checks += [
("SCM_FEEDBACK", 10),
("SCM_BUTTONS", 25),
]
if CP.carFingerprint in (CAR.CRV_HYBRID, CAR.CIVIC_BOSCH_DIESEL, CAR.ACURA_RDX_3G):
checks += [
(gearbox_msg, 50),
]
else:
checks += [
(gearbox_msg, 100),
]
if CP.carFingerprint in HONDA_BOSCH_ALT_BRAKE_SIGNAL:
signals += [("BRAKE_PRESSED", "BRAKE_MODULE", 0)]
checks += [("BRAKE_MODULE", 50)]
if CP.carFingerprint in HONDA_BOSCH:
signals += [
("CAR_GAS", "GAS_PEDAL_2", 0),
("MAIN_ON", "SCM_FEEDBACK", 0),
("EPB_STATE", "EPB_STATUS", 0),
]
checks += [
("EPB_STATUS", 50),
("GAS_PEDAL_2", 100),
]
if not CP.openpilotLongitudinalControl:
signals += [
("CRUISE_CONTROL_LABEL", "ACC_HUD", 0),
("CRUISE_SPEED", "ACC_HUD", 0),
("ACCEL_COMMAND", "ACC_CONTROL", 0),
("AEB_STATUS", "ACC_CONTROL", 0),
]
checks += [
("ACC_HUD", 10),
("ACC_CONTROL", 50),
]
else: # Nidec signals
signals += [("CRUISE_SPEED_PCM", "CRUISE", 0),
("CRUISE_SPEED_OFFSET", "CRUISE_PARAMS", 0)]
if CP.carFingerprint == CAR.ODYSSEY_CHN:
checks += [("CRUISE_PARAMS", 10)]
else:
checks += [("CRUISE_PARAMS", 50)]
if CP.carFingerprint in (CAR.ACCORD, CAR.ACCORDH, CAR.CIVIC_BOSCH, CAR.CIVIC_BOSCH_DIESEL, CAR.CRV_HYBRID, CAR.INSIGHT, CAR.ACURA_RDX_3G):
signals += [("DRIVERS_DOOR_OPEN", "SCM_FEEDBACK", 1)]
elif CP.carFingerprint == CAR.ODYSSEY_CHN:
signals += [("DRIVERS_DOOR_OPEN", "SCM_BUTTONS", 1)]
elif CP.carFingerprint == CAR.HRV:
signals += [("DRIVERS_DOOR_OPEN", "SCM_BUTTONS", 1),
("WHEELS_MOVING", "STANDSTILL", 1)]
else:
signals += [("DOOR_OPEN_FL", "DOORS_STATUS", 1),
("DOOR_OPEN_FR", "DOORS_STATUS", 1),
("DOOR_OPEN_RL", "DOORS_STATUS", 1),
("DOOR_OPEN_RR", "DOORS_STATUS", 1),
("WHEELS_MOVING", "STANDSTILL", 1)]
checks += [
("DOORS_STATUS", 3),
("STANDSTILL", 50),
]
if CP.carFingerprint == CAR.CIVIC:
signals += [("CAR_GAS", "GAS_PEDAL_2", 0),
("MAIN_ON", "SCM_FEEDBACK", 0),
("IMPERIAL_UNIT", "HUD_SETTING", 0),
("EPB_STATE", "EPB_STATUS", 0)]
checks += [
("HUD_SETTING", 50),
("EPB_STATUS", 50),
("GAS_PEDAL_2", 100),
]
elif CP.carFingerprint == CAR.ACURA_ILX:
signals += [("CAR_GAS", "GAS_PEDAL_2", 0),
("MAIN_ON", "SCM_BUTTONS", 0)]
checks += [
("GAS_PEDAL_2", 100),
]
elif CP.carFingerprint in (CAR.CRV, CAR.CRV_EU, CAR.ACURA_RDX, CAR.PILOT_2019, CAR.RIDGELINE):
signals += [("MAIN_ON", "SCM_BUTTONS", 0)]
elif CP.carFingerprint == CAR.FIT:
signals += [("CAR_GAS", "GAS_PEDAL_2", 0),
("MAIN_ON", "SCM_BUTTONS", 0),
("BRAKE_HOLD_ACTIVE", "VSA_STATUS", 0)]
checks += [
("GAS_PEDAL_2", 100),
]
elif CP.carFingerprint == CAR.HRV:
signals += [("CAR_GAS", "GAS_PEDAL", 0),
("MAIN_ON", "SCM_BUTTONS", 0),
("BRAKE_HOLD_ACTIVE", "VSA_STATUS", 0)]
checks += [
("GAS_PEDAL", 100),
]
elif CP.carFingerprint == CAR.ODYSSEY:
signals += [("MAIN_ON", "SCM_FEEDBACK", 0),
("EPB_STATE", "EPB_STATUS", 0)]
checks += [("EPB_STATUS", 50)]
elif CP.carFingerprint == CAR.PILOT:
signals += [("MAIN_ON", "SCM_BUTTONS", 0),
("CAR_GAS", "GAS_PEDAL_2", 0)]
checks += [
("GAS_PEDAL_2", 0), # TODO: fix this freq, seems this signal isn't present at all on some models
]
elif CP.carFingerprint == CAR.ODYSSEY_CHN:
signals += [("MAIN_ON", "SCM_BUTTONS", 0),
("EPB_STATE", "EPB_STATUS", 0)]
checks += [("EPB_STATUS", 50)]
# add gas interceptor reading if we are using it
if CP.enableGasInterceptor:
signals.append(("INTERCEPTOR_GAS", "GAS_SENSOR", 0))
signals.append(("INTERCEPTOR_GAS2", "GAS_SENSOR", 0))
checks.append(("GAS_SENSOR", 50))
if CP.openpilotLongitudinalControl:
signals += [
("BRAKE_ERROR_1", "STANDSTILL", 1),
("BRAKE_ERROR_2", "STANDSTILL", 1)
]
checks += [("STANDSTILL", 50)]
return signals, checks
class CarState(CarStateBase):
def __init__(self, CP):
super().__init__(CP)
can_define = CANDefine(DBC[CP.carFingerprint]["pt"])
self.gearbox_msg = "GEARBOX"
if CP.carFingerprint == CAR.ACCORD and CP.transmissionType == TransmissionType.cvt:
self.gearbox_msg = "GEARBOX_15T"
self.shifter_values = can_define.dv[self.gearbox_msg]["GEAR_SHIFTER"]
self.steer_status_values = defaultdict(lambda: "UNKNOWN", can_define.dv["STEER_STATUS"]["STEER_STATUS"])
self.user_gas, self.user_gas_pressed = 0., 0
self.brake_switch_prev = 0
self.brake_switch_prev_ts = 0
self.cruise_setting = 0
self.v_cruise_pcm_prev = 0
def update(self, cp, cp_cam, cp_body):
ret = car.CarState.new_message()
# car params
v_weight_v = [0., 1.] # don't trust smooth speed at low values to avoid premature zero snapping
v_weight_bp = [1., 6.] # smooth blending, below ~0.6m/s the smooth speed snaps to zero
# update prevs, update must run once per loop
self.prev_cruise_buttons = self.cruise_buttons
self.prev_cruise_setting = self.cruise_setting
# ******************* parse out can *******************
# TODO: find wheels moving bit in dbc
if self.CP.carFingerprint in (CAR.ACCORD, CAR.ACCORDH, CAR.CIVIC_BOSCH, CAR.CIVIC_BOSCH_DIESEL, CAR.CRV_HYBRID, CAR.INSIGHT, CAR.ACURA_RDX_3G):
ret.standstill = cp.vl["ENGINE_DATA"]["XMISSION_SPEED"] < 0.1
ret.doorOpen = bool(cp.vl["SCM_FEEDBACK"]["DRIVERS_DOOR_OPEN"])
elif self.CP.carFingerprint == CAR.ODYSSEY_CHN:
ret.standstill = cp.vl["ENGINE_DATA"]["XMISSION_SPEED"] < 0.1
ret.doorOpen = bool(cp.vl["SCM_BUTTONS"]["DRIVERS_DOOR_OPEN"])
elif self.CP.carFingerprint == CAR.HRV:
ret.doorOpen = bool(cp.vl["SCM_BUTTONS"]["DRIVERS_DOOR_OPEN"])
else:
ret.standstill = not cp.vl["STANDSTILL"]["WHEELS_MOVING"]
ret.doorOpen = any([cp.vl["DOORS_STATUS"]["DOOR_OPEN_FL"], cp.vl["DOORS_STATUS"]["DOOR_OPEN_FR"],
cp.vl["DOORS_STATUS"]["DOOR_OPEN_RL"], cp.vl["DOORS_STATUS"]["DOOR_OPEN_RR"]])
ret.seatbeltUnlatched = bool(cp.vl["SEATBELT_STATUS"]["SEATBELT_DRIVER_LAMP"] or not cp.vl["SEATBELT_STATUS"]["SEATBELT_DRIVER_LATCHED"])
steer_status = self.steer_status_values[cp.vl["STEER_STATUS"]["STEER_STATUS"]]
ret.steerError = steer_status not in ["NORMAL", "NO_TORQUE_ALERT_1", "NO_TORQUE_ALERT_2", "LOW_SPEED_LOCKOUT", "TMP_FAULT"]
# NO_TORQUE_ALERT_2 can be caused by bump OR steering nudge from driver
self.steer_not_allowed = steer_status not in ["NORMAL", "NO_TORQUE_ALERT_2"]
# LOW_SPEED_LOCKOUT is not worth a warning
ret.steerWarning = steer_status not in ["NORMAL", "LOW_SPEED_LOCKOUT", "NO_TORQUE_ALERT_2"]
if not self.CP.openpilotLongitudinalControl:
self.brake_error = 0
else:
self.brake_error = cp.vl["STANDSTILL"]["BRAKE_ERROR_1"] or cp.vl["STANDSTILL"]["BRAKE_ERROR_2"]
ret.espDisabled = cp.vl["VSA_STATUS"]["ESP_DISABLED"] != 0
speed_factor = SPEED_FACTOR[self.CP.carFingerprint]
ret.wheelSpeeds.fl = cp.vl["WHEEL_SPEEDS"]["WHEEL_SPEED_FL"] * CV.KPH_TO_MS * speed_factor
ret.wheelSpeeds.fr = cp.vl["WHEEL_SPEEDS"]["WHEEL_SPEED_FR"] * CV.KPH_TO_MS * speed_factor
ret.wheelSpeeds.rl = cp.vl["WHEEL_SPEEDS"]["WHEEL_SPEED_RL"] * CV.KPH_TO_MS * speed_factor
ret.wheelSpeeds.rr = cp.vl["WHEEL_SPEEDS"]["WHEEL_SPEED_RR"] * CV.KPH_TO_MS * speed_factor
v_wheel = (ret.wheelSpeeds.fl + ret.wheelSpeeds.fr + ret.wheelSpeeds.rl + ret.wheelSpeeds.rr)/4.
# blend in transmission speed at low speed, since it has more low speed accuracy
v_weight = interp(v_wheel, v_weight_bp, v_weight_v)
ret.vEgoRaw = (1. - v_weight) * cp.vl["ENGINE_DATA"]["XMISSION_SPEED"] * CV.KPH_TO_MS * speed_factor + v_weight * v_wheel
ret.vEgo, ret.aEgo = self.update_speed_kf(ret.vEgoRaw)
ret.steeringAngleDeg = cp.vl["STEERING_SENSORS"]["STEER_ANGLE"]
ret.steeringRateDeg = cp.vl["STEERING_SENSORS"]["STEER_ANGLE_RATE"]
self.cruise_setting = cp.vl["SCM_BUTTONS"]["CRUISE_SETTING"]
self.cruise_buttons = cp.vl["SCM_BUTTONS"]["CRUISE_BUTTONS"]
ret.leftBlinker, ret.rightBlinker = self.update_blinker_from_stalk(
250, cp.vl["SCM_FEEDBACK"]["LEFT_BLINKER"], cp.vl["SCM_FEEDBACK"]["RIGHT_BLINKER"])
self.brake_hold = cp.vl["VSA_STATUS"]["BRAKE_HOLD_ACTIVE"]
if self.CP.carFingerprint in (CAR.CIVIC, CAR.ODYSSEY, CAR.CRV_5G, CAR.ACCORD, CAR.ACCORDH, CAR.CIVIC_BOSCH,
CAR.CIVIC_BOSCH_DIESEL, CAR.CRV_HYBRID, CAR.INSIGHT, CAR.ACURA_RDX_3G):
self.park_brake = cp.vl["EPB_STATUS"]["EPB_STATE"] != 0
main_on = cp.vl["SCM_FEEDBACK"]["MAIN_ON"]
elif self.CP.carFingerprint == CAR.ODYSSEY_CHN:
self.park_brake = cp.vl["EPB_STATUS"]["EPB_STATE"] != 0
main_on = cp.vl["SCM_BUTTONS"]["MAIN_ON"]
else:
self.park_brake = 0 # TODO
main_on = cp.vl["SCM_BUTTONS"]["MAIN_ON"]
gear = int(cp.vl[self.gearbox_msg]["GEAR_SHIFTER"])
ret.gearShifter = self.parse_gear_shifter(self.shifter_values.get(gear, None))
self.pedal_gas = cp.vl["POWERTRAIN_DATA"]["PEDAL_GAS"]
# crv doesn't include cruise control
if self.CP.carFingerprint in (CAR.CRV, CAR.CRV_EU, CAR.HRV, CAR.ODYSSEY, CAR.ACURA_RDX, CAR.RIDGELINE, CAR.PILOT_2019, CAR.ODYSSEY_CHN):
ret.gas = self.pedal_gas / 256.
else:
ret.gas = cp.vl["GAS_PEDAL_2"]["CAR_GAS"] / 256.
# this is a hack for the interceptor. This is now only used in the simulation
# TODO: Replace tests by toyota so this can go away
if self.CP.enableGasInterceptor:
self.user_gas = (cp.vl["GAS_SENSOR"]["INTERCEPTOR_GAS"] + cp.vl["GAS_SENSOR"]["INTERCEPTOR_GAS2"]) / 2.
self.user_gas_pressed = self.user_gas > 1e-5 # this works because interceptor read < 0 when pedal position is 0. Once calibrated, this will change
ret.gasPressed = self.user_gas_pressed
else:
ret.gasPressed = self.pedal_gas > 1e-5
ret.steeringTorque = cp.vl["STEER_STATUS"]["STEER_TORQUE_SENSOR"]
ret.steeringTorqueEps = cp.vl["STEER_MOTOR_TORQUE"]["MOTOR_TORQUE"]
ret.steeringPressed = abs(ret.steeringTorque) > STEER_THRESHOLD[self.CP.carFingerprint]
if self.CP.carFingerprint in HONDA_BOSCH:
if not self.CP.openpilotLongitudinalControl:
ret.cruiseState.nonAdaptive = cp.vl["ACC_HUD"]["CRUISE_CONTROL_LABEL"] != 0
ret.cruiseState.standstill = cp.vl["ACC_HUD"]["CRUISE_SPEED"] == 252.
# On set, cruise set speed pulses between 254~255 and the set speed prev is set to avoid this.
ret.cruiseState.speed = self.v_cruise_pcm_prev if cp.vl["ACC_HUD"]["CRUISE_SPEED"] > 160.0 else cp.vl["ACC_HUD"]["CRUISE_SPEED"] * CV.KPH_TO_MS
self.v_cruise_pcm_prev = ret.cruiseState.speed
else:
ret.cruiseState.speedOffset = calc_cruise_offset(cp.vl["CRUISE_PARAMS"]["CRUISE_SPEED_OFFSET"], ret.vEgo)
ret.cruiseState.speed = cp.vl["CRUISE"]["CRUISE_SPEED_PCM"] * CV.KPH_TO_MS
self.brake_switch = cp.vl["POWERTRAIN_DATA"]["BRAKE_SWITCH"] != 0
if self.CP.carFingerprint in HONDA_BOSCH_ALT_BRAKE_SIGNAL:
ret.brakePressed = cp.vl["BRAKE_MODULE"]["BRAKE_PRESSED"] != 0
else:
# brake switch has shown some single time step noise, so only considered when
# switch is on for at least 2 consecutive CAN samples
# panda safety only checks BRAKE_PRESSED signal
ret.brakePressed = bool(cp.vl["POWERTRAIN_DATA"]["BRAKE_PRESSED"] or
(self.brake_switch and self.brake_switch_prev and cp.ts["POWERTRAIN_DATA"]["BRAKE_SWITCH"] != self.brake_switch_prev_ts))
self.brake_switch_prev = self.brake_switch
self.brake_switch_prev_ts = cp.ts["POWERTRAIN_DATA"]["BRAKE_SWITCH"]
ret.brake = cp.vl["VSA_STATUS"]["USER_BRAKE"]
ret.cruiseState.enabled = cp.vl["POWERTRAIN_DATA"]["ACC_STATUS"] != 0
ret.cruiseState.available = bool(main_on)
# Gets rid of Pedal Grinding noise when brake is pressed at slow speeds for some models
if self.CP.carFingerprint in (CAR.PILOT, CAR.PILOT_2019, CAR.RIDGELINE):
if ret.brake > 0.05:
ret.brakePressed = True
# TODO: discover the CAN msg that has the imperial unit bit for all other cars
self.is_metric = not cp.vl["HUD_SETTING"]["IMPERIAL_UNIT"] if self.CP.carFingerprint in (CAR.CIVIC) else False
if self.CP.carFingerprint in HONDA_BOSCH:
ret.stockAeb = (not self.CP.openpilotLongitudinalControl) and bool(cp.vl["ACC_CONTROL"]["AEB_STATUS"] and cp.vl["ACC_CONTROL"]["ACCEL_COMMAND"] < -1e-5)
else:
ret.stockAeb = bool(cp_cam.vl["BRAKE_COMMAND"]["AEB_REQ_1"] and cp_cam.vl["BRAKE_COMMAND"]["COMPUTER_BRAKE"] > 1e-5)
if self.CP.carFingerprint in HONDA_BOSCH:
self.stock_hud = False
ret.stockFcw = False
else:
ret.stockFcw = cp_cam.vl["BRAKE_COMMAND"]["FCW"] != 0
self.stock_hud = cp_cam.vl["ACC_HUD"]
self.stock_brake = cp_cam.vl["BRAKE_COMMAND"]
if self.CP.enableBsm and self.CP.carFingerprint in (CAR.CRV_5G, ):
# BSM messages are on B-CAN, requires a panda forwarding B-CAN messages to CAN 0
# more info here: https://github.com/commaai/openpilot/pull/1867
ret.leftBlindspot = cp_body.vl["BSM_STATUS_LEFT"]["BSM_ALERT"] == 1
ret.rightBlindspot = cp_body.vl["BSM_STATUS_RIGHT"]["BSM_ALERT"] == 1
return ret
def get_can_parser(self, CP):
signals, checks = get_can_signals(CP, self.gearbox_msg)
bus_pt = 1 if CP.carFingerprint in HONDA_BOSCH else 0
return CANParser(DBC[CP.carFingerprint]["pt"], signals, checks, bus_pt)
@staticmethod
def get_cam_can_parser(CP):
signals = []
# all hondas except CRV, RDX and 2019 Odyssey@China use 0xe4 for steering
checks = [(0xe4, 100)]
if CP.carFingerprint in [CAR.CRV, CAR.CRV_EU, CAR.ACURA_RDX, CAR.ODYSSEY_CHN]:
checks = [(0x194, 100)]
if CP.carFingerprint not in HONDA_BOSCH:
signals += [("COMPUTER_BRAKE", "BRAKE_COMMAND", 0),
("AEB_REQ_1", "BRAKE_COMMAND", 0),
("FCW", "BRAKE_COMMAND", 0),
("CHIME", "BRAKE_COMMAND", 0),
("FCM_OFF", "ACC_HUD", 0),
("FCM_OFF_2", "ACC_HUD", 0),
("FCM_PROBLEM", "ACC_HUD", 0),
("ICONS", "ACC_HUD", 0)]
checks += [
("ACC_HUD", 10),
("BRAKE_COMMAND", 50),
]
return CANParser(DBC[CP.carFingerprint]["pt"], signals, checks, 2)
@staticmethod
def get_body_can_parser(CP):
if CP.enableBsm and CP.carFingerprint == CAR.CRV_5G:
signals = [("BSM_ALERT", "BSM_STATUS_RIGHT", 0),
("BSM_ALERT", "BSM_STATUS_LEFT", 0)]
checks = [
("BSM_STATUS_LEFT", 3),
("BSM_STATUS_RIGHT", 3),
]
bus_body = 0 # B-CAN is forwarded to ACC-CAN radar side (CAN 0 on fake ethernet port)
return CANParser(DBC[CP.carFingerprint]["body"], signals, checks, bus_body)
return None
| mit | 1,605,306,792,609,832,000 | 41.4375 | 158 | 0.614648 | false |
opinsys/aptirepo | aptirepo/setup.py | 1 | 2037 | # -*- coding: utf-8 -*-
# aptirepo - Simple APT Repository Tool
# Copyright (C) 2013,2014,2015 Opinsys
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from distutils.core import setup
import os.path
import re
import subprocess
version = re.search('Version: (.*)', subprocess.check_output(
    ['dpkg-parsechangelog', '-l../debian/changelog']).decode()).group(1)
setup(name='aptirepo',
version=version,
description='Simple APT Repository Tool.',
author='Tuomas Räsänen',
author_email='[email protected]',
url='http://github.com/opinsys/aptirepo',
scripts=['aptirepo'],
package_dir={'aptirepo': 'lib'},
packages=['aptirepo'],
license='GPLv2+',
platforms=['Linux'],
classifiers=[
"Development Status :: 1 - Planning",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: GNU General Public License (GPL)",
"Operating System :: POSIX :: Linux",
"Topic :: System :: Archiving :: Packaging",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.1",
"Programming Language :: Python :: 3.2",
],
requires=['debian'],
provides=['aptirepo'],
)
| gpl-2.0 | 7,825,566,279,488,568,000 | 36.685185 | 73 | 0.660934 | false |
evgfilim1/spin_everyday_bot | thebot.py | 1 | 20481 | # SpinEverydayBot
# Copyright © 2016-2017 Evgeniy Filimonov <https://t.me/evgfilim1>
# See full NOTICE at http://github.com/evgfilim1/spin_everyday_bot
import logging
from telegram import (Bot, Update, ParseMode, TelegramError,
InlineKeyboardMarkup, InlineKeyboardButton, ForceReply, ReplyKeyboardRemove)
from telegram.ext import (Updater, Job, CommandHandler, MessageHandler,
CallbackQueryHandler, Filters, JobQueue, ConversationHandler)
from telegram.ext.dispatcher import run_async
from telegram.utils.helpers import escape_markdown
from random import choice
import config
import core
# Set all logging time in UTC
logging.Formatter.converter = __import__("time").gmtime
updater = Updater(config.BOT_TOKEN, workers=8)
jobs = updater.job_queue
dp = updater.dispatcher
START_KEYBOARD = InlineKeyboardMarkup([
[InlineKeyboardButton(text="Написать боту", url="t.me/{}".format(updater.bot.username))]
])
ALLOWED_UPDATES = ["message", "edited_message", "callback_query"]
locks = []
tg_handler = core.TelegramHandler(updater.bot)
tg_handler.setFormatter(logging.Formatter(config.LOG_TG_FORMAT, style='{'))
log = logging.getLogger('bot')
log.addHandler(core.handler)
log.addHandler(tg_handler)
log.setLevel(logging.DEBUG)
# Just configure loggers below and don't use them
tg_log = logging.getLogger('telegram.ext')
tg_log.addHandler(core.handler)
tg_log.addHandler(tg_handler)
tg_log.setLevel(logging.INFO)
sock_log = logging.getLogger('TeleSocket')
sock_log.addHandler(core.handler)
sock_log.addHandler(tg_handler)
sock_log.setLevel(logging.INFO)
del tg_handler, tg_log, sock_log
def handle_error(bot: Bot, update: Update, error):
log.error(f"Update {update} caused error: {error}")
def reset(bot: Bot = None, job: Job = None):
core.results_today.clear()
log.debug("Reset done")
def auto_save(bot: Bot = None, job: Job = None):
core.save_all()
def auto_spin(bot: Bot, job: Job):
from telegram import Message, Chat
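    # Build a minimal fake Update/Message so that the regular spin handler can be reused by this scheduled job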
u = Update(0, message=Message(0, None, 0, Chat(job.context, '')))
if core.results_today.get(job.context) is None:
do_the_spin(bot, u)
def update_cache(bot: Bot, update: Update):
user = update.effective_user
chat_id = update.effective_message.chat_id
# Also skip first update when the bot is added
if not core.is_private(chat_id) and core.chat_users.get(chat_id) is not None:
core.chat_users[chat_id].update({user.id: user.name})
def pages_handler(bot: Bot, update: Update):
query = update.callback_query
data = query.data.split(':')[1]
msg = query.message
if msg.chat_id in locks:
query.answer("Нельзя использовать кнопки, пока идёт розыгрыш")
return
page_n = int(data.split('_')[1])
text, max_pages = core.make_top(msg.chat_id, page=page_n)
reply_keyboard = [[]]
if page_n != 1:
reply_keyboard[0].append(InlineKeyboardButton("<<", callback_data=f"top:page_{page_n - 1}"))
if page_n != max_pages:
reply_keyboard[0].append(InlineKeyboardButton(">>", callback_data=f"top:page_{page_n + 1}"))
try:
query.edit_message_text(text, reply_markup=InlineKeyboardMarkup(reply_keyboard),
parse_mode=ParseMode.MARKDOWN)
except TelegramError:
pass
def help_button_handler(bot: Bot, update: Update):
query = update.callback_query
data = query.data.split(':')[1]
keys = []
for key in config.HELP_TEXT[data][1]:
key = key.split('%')
keys.append([InlineKeyboardButton(text=key[0], callback_data=f"help:{key[1]}")])
try:
query.edit_message_text(config.HELP_TEXT[data][0], reply_markup=InlineKeyboardMarkup(keys),
parse_mode=ParseMode.MARKDOWN)
except TelegramError:
pass
def admin_shell(bot: Bot, update: Update, args: list):
msg = update.effective_message
if msg.from_user.id != config.BOT_CREATOR:
return
try:
cmd = args.pop(0)
except IndexError:
return
if cmd == "exec":
exec(" ".join(args))
elif cmd == "vardump":
bot.send_message(chat_id=msg.chat_id, text="```\n{}\n```".format(
eval(" ".join(args))
), parse_mode=ParseMode.MARKDOWN, reply_to_message_id=msg.message_id)
elif cmd == "reset":
reset(bot, None)
elif cmd == "respin":
log.info(f"Respin done in '{msg.chat.title}' ({msg.chat_id})")
core.results_today.pop(msg.chat_id)
msg.reply_text("respin ok")
elif cmd == "md_announce":
core.announce(bot, " ".join(args), md=True)
elif cmd == "announce":
core.announce(bot, " ".join(args))
elif cmd == "sendlogs":
if config.LOG_FILE is None:
msg.reply_text("Logging to file is not configured.")
return
with open(config.LOG_FILE, 'rb') as f:
msg.reply_document(f)
elif cmd == "delete":
if len(args) == 0:
msg.reply_to_message.delete()
else:
params = args.pop(0).split('_')
if params[0] == "current":
params[0] = msg.chat_id
bot.delete_message(chat_id=params[0], message_id=params[1])
elif cmd == "count":
msg.reply_text(f"Чатов у бота: {len(core.chat_users)}")
elif cmd == "send" or cmd == "edit":
params = args.pop(0)
text = " ".join(args).replace("\\n", "\n")
params = params.split("_")
chat = params[0]
if chat == "current":
chat = msg.chat_id
try:
msg_id = params[1]
assert msg_id != ""
except (KeyError, AssertionError, IndexError):
msg_id = None
try:
parse_mode = params[2]
except (KeyError, IndexError):
parse_mode = None
del params
if cmd == "send":
new_msg = bot.send_message(chat, text, parse_mode=parse_mode, reply_to_message_id=msg_id)
msg.reply_text(f"Sent message ID: {new_msg.message_id}")
elif cmd == "edit":
bot.edit_message_text(chat_id=chat, text=text, parse_mode=parse_mode, message_id=msg_id)
elif cmd == "help":
msg.reply_text("Help:\nexec — execute code\nvardump — print variable's value\n"
"delete [<chat>_<msgid>] - delete replied or specified message\n"
"send <chat>_<msgid>_<parsemode> - send message\n"
"edit <chat>_<msgid>_<parsemode> - edit message\n"
"reset — reset all spins\nrespin — reset spin in this chat\n"
"md_announce <text> — tell something to all chats (markdown is on)\n"
"announce <text> — tell something to all chats (markdown is off)\n"
"count - count known chats\nsendlogs — send latest logs as document")
def svc_handler(bot: Bot, update: Update):
chat_id = update.message.chat_id
migrate_to_id = update.message.migrate_to_chat_id
new_members = update.message.new_chat_members
left_member = update.message.left_chat_member
if update.message.group_chat_created or \
(len(new_members) != 0 and any(new_member.id == bot.id for new_member in new_members)):
# TODO: add admins to the list
log.info(f"New chat! ({chat_id})")
core.chat_users[chat_id] = {}
core.can_change_name[chat_id] = []
elif new_members:
for new_member in new_members:
if new_member.username and new_member.username[-3:].lower() == "bot":
return
core.chat_users[chat_id].update({new_member.id: new_member.name})
elif migrate_to_id:
core.migrate(chat_id, migrate_to_id)
elif left_member and left_member.id == bot.id:
core.clear_data(chat_id)
elif left_member:
try:
core.chat_users[chat_id].pop(left_member.id)
except KeyError:
# Passing this because of bots and unknown users
pass
def helper(bot: Bot, update: Update):
keys = []
for key in config.HELP_TEXT["main"][1]:
key = key.split('%')
keys.append([InlineKeyboardButton(text=key[0], callback_data=f"help:{key[1]}")])
try:
bot.send_message(chat_id=update.message.from_user.id, text=config.HELP_TEXT["main"][0],
parse_mode=ParseMode.MARKDOWN, reply_markup=InlineKeyboardMarkup(keys))
except TelegramError:
update.message.reply_text(text=config.PM_ONLY_MESSAGE, reply_markup=START_KEYBOARD)
def ping(bot: Bot, update: Update):
update.message.reply_text(text="Ping? Pong!")
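# Note: the function and command below use a Cyrillic 'р' in 'sрin' (see the matching CommandHandler
# near the bottom of the file); this appears to be a deliberate decoy for a mistyped /spin command,
# not an encoding error.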
@core.not_pm
def do_the_sрin(bot: Bot, update: Update):
chat_id = update.message.chat_id
s = core.spin_name.get(chat_id, config.DEFAULT_SPIN_NAME).replace('*', '\\*')
p = core.results_today.get(chat_id)
if p is None or chat_id in locks:
return
bot.send_message(chat_id=chat_id, text=config.TEXT_ALREADY.format(s=s, n=update.message.from_user.name),
parse_mode=ParseMode.MARKDOWN)
@run_async
@core.not_pm
def do_the_spin(bot: Bot, update: Update):
chat_id = update.message.chat_id
s = core.spin_name.get(chat_id, config.DEFAULT_SPIN_NAME).replace('*', '\\*')
p = core.results_today.get(chat_id)
if chat_id in locks:
return
if p is not None:
bot.send_message(chat_id=chat_id, text=config.TEXT_ALREADY.format(s=s, n=p),
parse_mode=ParseMode.MARKDOWN)
else:
p = escape_markdown(core.choose_random_user(chat_id, bot))
from time import sleep
curr_text = choice(config.TEXTS)
locks.append(chat_id)
for t in curr_text:
bot.send_message(chat_id=chat_id, text=t.format(s=s, n=p),
parse_mode=ParseMode.MARKDOWN)
sleep(2)
locks.pop(locks.index(chat_id))
@core.not_pm
def auto_spin_config(bot: Bot, update: Update, args: list, job_queue: JobQueue):
msg = update.effective_message
if len(args) == 0:
return
is_moder = core.can_change_spin_name(msg.chat_id, msg.from_user.id, bot)
cmd = args.pop(0)
if cmd == "set" and is_moder:
try:
time = args[0].split(':')
time = "{:0>2}:{:0>2}".format(time[0], time[1])
job = job_queue.run_daily(auto_spin, core.str_to_time(time), context=msg.chat_id)
if msg.chat_id in core.auto_spins:
core.auto_spin_jobs[msg.chat_id].schedule_removal()
except (ValueError, IndexError):
msg.reply_text(f"Ошибка! Проверьте время на правильность и отредактируйте сообщение")
return
core.auto_spins.update({msg.chat_id: time})
core.auto_spin_jobs.update({msg.chat_id: job})
msg.reply_text(f"Автоматический розыгрыш установлен на {time} GMT+0\n\n"
f"ВНИМАНИЕ! Если розыгрыш уже был проведён до того, как запустится автоматический розыгрыш, то"
f" бот не напишет ничего в чат по наступлению времени розыгрыша")
elif cmd == 'del' and is_moder:
if msg.chat_id in core.auto_spins:
core.auto_spin_jobs.pop(msg.chat_id).schedule_removal()
core.auto_spins.pop(msg.chat_id)
msg.reply_text("Теперь автоматический розыгрыш отключен в этом чате")
else:
msg.reply_text("Автоматический розыгрыш ещё не был включен в этом чате")
elif cmd == 'status':
if msg.chat_id in core.auto_spins:
msg.reply_text(f"Автоматический розыгрыш установлен в этом чате"
f" на {core.auto_spins.get(msg.chat_id)} GMT+0")
else:
msg.reply_text("Автоматический розыгрыш отключен в этом чате")
@core.not_pm
def top(bot: Bot, update: Update, args: list):
chat_id = update.message.chat_id
reply_keyboard = [[]]
if chat_id in locks:
return
if chat_id not in core.results_total:
core.results_total[chat_id] = {}
if len(args) == 1 and args[0] == "me":
user = update.message.from_user
username = user.name
stat = core.results_total[chat_id].get(user.id, 0)
text = f"Ваша статистика:\n*{username}*: {stat} раз(а)"
elif update.message.reply_to_message:
user = update.message.reply_to_message.from_user
username = user.name
stat = core.results_total[chat_id].get(user.id, 0)
text = f"Статистика пользователя *{username}*: {stat} раз(а)"
else:
text, pages = core.make_top(chat_id, page=1)
if pages > 1:
reply_keyboard = [[InlineKeyboardButton(">>", callback_data="top:page_2")]]
update.message.reply_text(text=text, parse_mode=ParseMode.MARKDOWN,
reply_markup=InlineKeyboardMarkup(reply_keyboard))
@core.not_pm
def change_spin_name(bot: Bot, update: Update, args: list):
msg = update.effective_message
if len(args) == 0:
spin = core.spin_name.get(msg.chat_id, config.DEFAULT_SPIN_NAME)
msg.reply_text(text=f"Текущее название розыгрыша: *{spin} дня*", parse_mode=ParseMode.MARKDOWN)
return
if core.can_change_spin_name(msg.chat_id, msg.from_user.id, bot):
if args[-1].lower() == "дня" and len(args) > 1:
args.pop(-1)
spin = " ".join(args)
core.spin_name[msg.chat_id] = spin
msg.reply_text(text=f"Текст розыгрыша изменён на *{spin} дня*", parse_mode=ParseMode.MARKDOWN)
@core.not_pm
def admin_ctrl(bot: Bot, update: Update, args: list):
msg = update.effective_message
reply = msg.reply_to_message
admins = core.get_admins_ids(bot, msg.chat_id)
admins.append(config.BOT_CREATOR)
is_admin = msg.from_user.id in admins
if len(args) == 0:
return
cmd = args.pop(0)
if msg.chat_id not in core.can_change_name:
core.can_change_name[msg.chat_id] = []
if cmd == "add" and reply and is_admin:
if core.can_change_spin_name(msg.chat_id, reply.from_user.id, bot):
msg.reply_text(text="Этот пользователь *уже может* изменять название розыгрыша",
parse_mode=ParseMode.MARKDOWN)
else:
core.can_change_name[msg.chat_id].append(reply.from_user.id)
msg.reply_text(text="Теперь этот пользователь *может* изменять название розыгрыша",
parse_mode=ParseMode.MARKDOWN)
elif cmd == "del" and reply and is_admin:
if not core.can_change_spin_name(msg.chat_id, reply.from_user.id, bot):
msg.reply_text(text="Этот пользователь *ещё не может* изменять название розыгрыша",
parse_mode=ParseMode.MARKDOWN)
else:
index = core.can_change_name[msg.chat_id].index(reply.from_user.id)
core.can_change_name[msg.chat_id].pop(index)
msg.reply_text(text="Теперь этот пользователь *не может* изменять название розыгрыша",
parse_mode=ParseMode.MARKDOWN)
elif cmd == "list":
text = "Пользователи, которые *могут* изменять название розыгрыша (не считая администраторов):\n```\n"
for user in core.can_change_name[msg.chat_id]:
text += core.chat_users[msg.chat_id].get(user, f"id{user}") + '\n'
text += "```"
msg.reply_text(text, parse_mode=ParseMode.MARKDOWN)
@core.not_pm
def spin_count(bot: Bot, update: Update):
update.message.reply_text(text=f"Кол-во людей, участвующих в розыгрыше: "
f"_{len(core.chat_users[update.message.chat_id])}_",
parse_mode=ParseMode.MARKDOWN)
def ask_feedback(bot: Bot, update: Update):
update.message.reply_text("Введите сообщение, которое будет отправлено создателю бота\n"
"Бот принимает текст, изображения и документы\n"
"Введите /cancel для отмены", reply_markup=ForceReply(selective=True))
return 0
def send_feedback(bot: Bot, update: Update):
if update.message.reply_to_message.from_user.id != bot.id:
return
bot.send_message(config.BOT_CREATOR, f"<b>Новое сообщение!</b>\n"
f" - <i>Чат:</i> <pre>{update.message.chat}</pre>\n"
f" - <i>Пользователь:</i> <pre>{update.message.from_user}</pre>\n"
f" - <i>ID Сообщения:</i> <pre>{update.message.message_id}</pre>",
parse_mode=ParseMode.HTML)
update.message.forward(config.BOT_CREATOR)
update.message.reply_text("Ваше сообщение отправлено!", reply_markup=ReplyKeyboardRemove(selective=True))
return ConversationHandler.END
def cancel_feedback(bot: Bot, update: Update):
update.message.reply_text("Отменено", reply_markup=ReplyKeyboardRemove(selective=True))
return ConversationHandler.END
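    # Schedule the periodic auto-save and the daily reset job (assumes `jobs`, `dp`
    # and `updater` were created earlier in this function).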
jobs.run_repeating(auto_save, 60)
jobs.run_daily(reset, core.str_to_time(config.RESET_TIME))
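    # /feedback conversation: ask the user for a message, forward the reply to the
    # bot creator, or abort on /cancel.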
feedback_handler = ConversationHandler(
entry_points=[CommandHandler('feedback', ask_feedback)],
states={
0: [MessageHandler(Filters.reply & (Filters.text | Filters.photo | Filters.document), send_feedback)]
},
fallbacks=[CommandHandler('cancel', cancel_feedback)]
)
dp.add_handler(CommandHandler(['start', 'help'], helper))
dp.add_handler(CommandHandler('admgroup', admin_ctrl, pass_args=True, allow_edited=True))
dp.add_handler(CommandHandler('sudo', admin_shell, pass_args=True, allow_edited=True))
dp.add_handler(CommandHandler('ping', ping))
dp.add_handler(CommandHandler('setname', change_spin_name, pass_args=True, allow_edited=True))
dp.add_handler(CommandHandler('count', spin_count))
dp.add_handler(CommandHandler('spin', do_the_spin))
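    # Note: the command below is spelled with a Cyrillic "р" ("sрin"), presumably to
    # catch users whose keyboard layout produces the homoglyph.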
dp.add_handler(CommandHandler('sрin', do_the_sрin))
dp.add_handler(CommandHandler('auto', auto_spin_config, pass_args=True, allow_edited=True,
pass_job_queue=True))
dp.add_handler(CommandHandler('stat', top, pass_args=True))
dp.add_handler(feedback_handler)
dp.add_handler(MessageHandler(Filters.status_update, svc_handler))
dp.add_handler(CallbackQueryHandler(pages_handler, pattern=r"^top:page_[1-9]+[0-9]*$"))
dp.add_handler(CallbackQueryHandler(help_button_handler, pattern=r"^help:.+$"))
dp.add_handler(MessageHandler(Filters.all, update_cache, edited_updates=True), group=-1)
dp.add_error_handler(handle_error)
core.init(bot=updater.bot, job_queue=updater.job_queue, callback=auto_spin)
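    # Choose how updates are delivered: a TeleSocket relay if a token is configured,
    # plain webhooks if enabled, long polling otherwise.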
if config.TELESOCKET_TOKEN:
from TeleSocketClient import TeleSocket
updater.bot.set_webhook()
sock = TeleSocket()
sock.login(config.TELESOCKET_TOKEN)
sock.add_telegram_handler(lambda update: core.read_update(updater, update))
webhook = sock.set_webhook(updater.bot.username)
updater._clean_updates()
updater.bot.set_webhook(url=webhook.url, allowed_updates=ALLOWED_UPDATES)
updater.job_queue.start()
updater._init_thread(updater.dispatcher.start, "dispatcher")
updater.running = True
elif config.USE_WEBHOOKS:
updater.start_webhook(listen='0.0.0.0', port=8443, cert=config.WEBHOOK_CERT, key=config.WEBHOOK_KEY,
clean=True, allowed_updates=ALLOWED_UPDATES, webhook_url=f'https://{config.WEBHOOK_URL}:8443')
else:
updater.start_polling(clean=True, allowed_updates=ALLOWED_UPDATES)
log.info("Bot started")
updater.idle()
core.save_all()
log.info("Bot stopped")
| agpl-3.0 | -2,395,831,886,744,955,000 | 39.894737 | 120 | 0.628932 | false |
batoure/ScienceManager | App/service/data/provider.py | 1 | 3148 | #service.data.provider
from service.data.factory.baseProvider import BaseProvider
#TODO: Add textwrap to a query post processor
from model.workflow import *
class Provider(BaseProvider):
def __init__(self, log, connectionSettings):
self._log = log
        # keep local copies; connectionSettings is also passed through to the base provider
self._connection_string = ""
self._connection_settings = connectionSettings
BaseProvider.__init__(self, self._log, connectionSettings)
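    # Inserts a new WF_BATCH row for the given workflow/user and returns the id of
    # the most recently created batch.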
def reserve_next_batch_number(self, workflow, user):
batch = None
with self._dbProviderFactory.create_connection(self._connection_settings) as conn:
conn.connection_string = self._connection_string
conn.open()
with conn.create_command() as cmd:
cmd.command_timeout = 0
cmd.command_text("""
INSERT INTO WF_BATCH
( WF_ID, WF_USER)
VALUES
({workflow} , {user})
""", {'workflow': workflow, 'user': user})
cmd.execute_non_query()
cmd.command_text('SELECT BATCH_ID FROM (SELECT MAX(DATE_DT) AS DATE_DT, BATCH_ID FROM WF_BATCH GROUP BY BATCH_ID) AS A',{})
link_list = cmd.execute_scalar()
for link in link_list:
batch = link
conn.commit()
conn.close()
return batch
def get_program_details(self, workflow, name):
with self._dbProviderFactory.create_connection(self._connection_settings) as conn:
conn.connection_string = self._connection_string
conn.open()
with conn.create_command() as cmd:
cmd.command_timeout = 0
cmd.command_text("SELECT WF_ID, WF_NAME, WF_FAILURE FROM WF_MASTER_L WHERE WF_NAME = '{workflowname}'", {'workflowname': name})
for row in cmd.execute_reader():
                    # the original assigned WF_FAILURE back onto workflow.id (an apparent typo);
                    # `failure` is an assumed attribute name on the workflow model
                    workflow.id, workflow.name, workflow.failure = row
conn.commit()
conn.close()
return workflow
def get_program_actions(self, work_flow_id):
tasks = []
with self._dbProviderFactory.create_connection(self._connection_settings) as conn:
conn.connection_string = self._connection_string
conn.open()
with conn.create_command() as cmd:
cmd.command_timeout = 0
cmd.command_text("SELECT WF_ID, WF_STEPS_L.STEP_ID, WF_STEPS_L.ACTION_NAME, WF_ACTIONS_TYPES_L.ACTION_TYPE_NAME, WF_STEPS_L.ACTION_TYPE_ID, WF_STEPS_L.ACTION_TXT FROM WF_STEPS_L JOIN WF_ACTIONS_TYPES_L ON WF_STEPS_L.ACTION_TYPE_ID = WF_ACTIONS_TYPES_L.ACTION_TYPE_ID WHERE WF_ID = '{id}'",{'id': work_flow_id})
for row in cmd.execute_reader():
task = Task()
wfid, task.number, task.action.name, task.action.type.name, task.action.type.id, task.action.text = row
tasks.append(task)
conn.commit()
conn.close()
            return tasks
| mit | 8,614,728,280,032,674,000 | 47.446154 | 326 | 0.56385 | false |
arizvisa/syringe | lib/ptypes/__init__.py | 1 | 1981 | from . import ptype, parray, pstruct, pbinary, pint, pfloat, pstr
from . import utils, dynamic, provider
dyn, prov = dynamic, provider
__all__ = 'ptype','parray','pstruct','pbinary','pint','pfloat','pstr','dynamic','dyn','prov'
from . import config
Config = config.defaults
## globally changing the ptype provider
def setsource(provider):
'''Sets the default ptype provider to the one specified'''
    provider.seek, provider.consume, provider.store  # duck-type check: raises AttributeError early if the provider interface is incomplete
ptype.source = provider
return provider
## globally changing the byte order
def setbyteorder(endianness):
'''
Sets the integer byte order to the endianness specified for all non-binary types.
Can be either config.byteorder.bigendian or config.byteorder.littleendian.
'''
[ module.setbyteorder(endianness) for module in (ptype,pint,pfloat) ]
## some things people people might find useful
#from ptype import debug, debugrecurse
from .ptype import istype, iscontainer, isinstance, undefined
from .provider import file, memory
from .utils import hexdump
if __name__ == '__main__':
import builtins, ptypes
class a(ptypes.ptype.type):
length = 4
data = b'\x41\x41\x41\x41'
import ctypes
b = ctypes.cast(ctypes.pointer(ctypes.c_buffer(data,4)), ctypes.c_void_p)
ptypes.setsource(ptypes.prov.memory())
print('ptype-static-memory', builtins.isinstance(ptypes.ptype.source, ptypes.prov.memory))
print('ptype-instance-memory', builtins.isinstance(ptypes.ptype.type().source, ptypes.prov.memory))
c = a(offset=b.value).l
print('type-instance-memory', c.serialize() == data)
ptypes.setsource(ptypes.prov.empty())
print('ptype-static-empty', builtins.isinstance(ptypes.ptype.source, ptypes.prov.empty))
print('ptype-instance-empty', builtins.isinstance(ptypes.ptype.type().source, ptypes.prov.empty))
c = a(offset=b.value).l
print('type-instance-empty', c.serialize() == b'\x00\x00\x00\x00')
ptypes.setsource(ptypes.prov.memory())
| bsd-2-clause | 1,039,058,835,156,158,200 | 36.377358 | 103 | 0.714286 | false |
YannickB/odoo-hosting | clouder_template_shinken/template.py | 1 | 11712 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Yannick Buron
# Copyright 2015, TODAY Clouder SASU
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License with Attribution
# clause as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License with
# Attribution clause along with this program. If not, see
# <http://www.gnu.org/licenses/>.
#
##############################################################################
try:
from odoo import models, api, modules
except ImportError:
from openerp import models, api, modules
class ClouderNode(models.Model):
"""
Add methods to manage the shinken specificities.
"""
_inherit = 'clouder.node'
@property
def shinken_configfile(self):
"""
Property returning the shinken config file.
"""
return '/usr/local/shinken/etc/hosts/' + self.fulldomain + '.cfg'
class ClouderContainer(models.Model):
"""
Add methods to manage the shinken specificities.
"""
_inherit = 'clouder.service'
@property
def shinken_configfile(self):
"""
Property returning the shinken config file.
"""
return '/usr/local/shinken/etc/services/' + self.fullname + '.cfg'
@api.multi
def deploy_shinken_node(self, nrpe):
"""
Deploy the configuration file to watch the node performances.
"""
node = nrpe.node_id
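        # Ship the host template, then fill in the IP/NAME/SSHPORT/NRPEPORT
        # placeholders in place with sed and reload shinken.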
self.send(
modules.get_module_path('clouder_template_shinken') +
'/res/node-shinken.config', node.shinken_configfile,
username='shinken')
self.execute([
'sed', '-i',
'"s/IP/' + node.ip + '/g"',
node.shinken_configfile], username='shinken')
self.execute([
'sed', '-i',
'"s/NAME/' + node.name + '/g"',
node.shinken_configfile], username='shinken')
self.execute([
'sed', '-i',
'"s/SSHPORT/' + str(node.ssh_port) + '/g"',
node.shinken_configfile], username='shinken')
self.execute([
'sed', '-i',
'"s/NRPEPORT/' + nrpe.ports['nrpe']['hostport'] + '/g"',
node.shinken_configfile], username='shinken')
self.execute(['/usr/local/shinken/bin/init.d/shinken', 'reload'],
username='shinken')
@api.multi
def purge_shinken_node(self, nrpe):
"""
Remove the configuration file.
"""
self.execute(['rm', nrpe.node_id.shinken_configfile],
username='shinken')
self.execute(['/usr/local/shinken/bin/init.d/shinken', 'reload'],
username='shinken')
@api.multi
def deploy_post(self):
"""
Add the general configuration files.
"""
super(ClouderContainer, self).deploy_post()
if self.application_id.type_id.name == 'shinken' \
and self.application_id.check_tags(['data']):
self.send(
modules.get_module_path('clouder_template_shinken') +
'/res/general-shinken.config',
'/usr/local/shinken/etc/services/clouder.cfg',
username='shinken')
self.execute([
'sed', '-i', '"s/SYSADMIN_MAIL/' +
self.email_sysadmin + '/g"',
'/usr/local/shinken/etc/services/clouder.cfg'],
username='shinken')
self.execute(
['rm', '/usr/local/shinken/etc/hosts/localhost.cfg'],
username='shinken')
class ClouderBase(models.Model):
"""
Add methods to manage the shinken specificities.
"""
_inherit = 'clouder.base'
@property
def shinken_configfile(self):
"""
Property returning the shinken config file.
"""
return '/usr/local/shinken/etc/services/' + self.fullname + '.cfg'
@api.multi
def deploy_post(self):
"""
Update odoo configuration.
"""
res = super(ClouderBase, self).deploy_post()
if self.application_id.type_id.name == 'shinken':
self.service_id.execute([
'sed', '-i', '"s/SHINKENDOMAIN/' +
self.fulldomain + '/g"',
'/usr/local/shinken/etc/services/clouder.cfg'],
username='shinken')
self.service_id.execute(
['/usr/local/shinken/bin/init.d/shinken', 'reload'],
username='shinken')
return res
@api.multi
def purge_post(self):
"""
Remove filestore.
"""
res = super(ClouderBase, self).purge_post()
if self.application_id.type_id.name == 'shinken':
self.service_id.execute([
'sed', '-i', '"s/' + self.fulldomain + '/SHINKENDOMAIN/g"',
'/usr/local/shinken/etc/services/clouder.cfg'],
username='shinken')
self.service_id.execute(
['/usr/local/shinken/bin/init.d/shinken', 'reload'],
username='shinken')
return res
class ClouderContainerLink(models.Model):
"""
Add methods to manage the shinken specificities.
"""
_inherit = 'clouder.service.link'
@api.multi
def deploy_link(self):
"""
Deploy the configuration file to watch the service.
"""
super(ClouderContainerLink, self).deploy_link()
if self.target \
and self.target.application_id.type_id.name == 'shinken':
if self.service_id.auto_backup:
config_file = 'service-shinken'
self.target.send(
modules.get_module_path('clouder_template_shinken') +
'/res/' + config_file + '.config',
self.service_id.shinken_configfile, username='shinken')
self.target.execute([
'sed', '-i',
'"s/BACKUPIP/' +
self.service_id.backup_ids[0].node_id.ip + '/g"',
self.service_id.shinken_configfile], username='shinken')
self.target.execute([
'sed', '-i',
'"s/PORT/' +
self.service_id.backup_ids[0].ports['nrpe']['hostport'] +
'/g"',
self.service_id.shinken_configfile], username='shinken')
self.target.execute([
'sed', '-i', '"s/METHOD/' +
self.service_id.backup_ids[0].backup_method + '/g"',
self.service_id.shinken_configfile], username='shinken')
self.target.execute([
'sed', '-i', '"s/TYPE/service/g"',
self.service_id.shinken_configfile], username='shinken')
self.target.execute([
'sed', '-i',
'"s/BACKUPIP/' +
self.service_id.backup_ids[0].node_id.ip + '/g"',
self.service_id.shinken_configfile], username='shinken')
self.target.execute([
'sed', '-i',
'"s/UNIQUE_NAME/' + self.service_id.fullname + '/g"',
self.service_id.shinken_configfile], username='shinken')
self.target.execute([
'sed', '-i',
'"s/HOST/' + self.service_id.node_id.name + '/g"',
self.service_id.shinken_configfile], username='shinken')
self.target.execute(
['/usr/local/shinken/bin/init.d/shinken', 'reload'],
username='shinken')
@api.multi
def purge_link(self):
"""
Remove the configuration file.
"""
super(ClouderContainerLink, self).purge_link()
if self.target \
and self.target.application_id.type_id.name == 'shinken':
self.target.execute(['rm', self.service_id.shinken_configfile],
username='shinken')
self.target.execute(
['/usr/local/shinken/bin/init.d/shinken', 'reload'],
username='shinken')
class ClouderBaseLink(models.Model):
"""
Add methods to manage the shinken specificities.
"""
_inherit = 'clouder.base.link'
@api.multi
def deploy_link(self):
"""
Deploy the configuration file to watch the base.
"""
super(ClouderBaseLink, self).deploy_link()
if self.target \
and self.target.application_id.type_id.name == 'shinken':
config_file = 'base-shinken'
if not self.base_id.auto_backup:
config_file = 'base-shinken-no-backup'
self.target.send(
modules.get_module_path('clouder_template_shinken') +
'/res/' + config_file + '.config',
self.base_id.shinken_configfile, username='shinken')
self.target.execute([
'sed', '-i',
'"s/BACKUPIP/' +
self.base_id.backup_ids[0].node_id.ip + '/g"',
self.base_id.shinken_configfile], username='shinken')
self.target.execute([
'sed', '-i',
'"s/PORT/' +
self.base_id.backup_ids[0].ports['nrpe']['hostport'] + '/g"',
self.base_id.shinken_configfile], username='shinken')
self.target.execute([
'sed', '-i', '"s/METHOD/' +
self.base_id.backup_ids[0].backup_method + '/g"',
self.base_id.shinken_configfile], username='shinken')
self.target.execute([
'sed', '-i', '"s/TYPE/base/g"',
self.base_id.shinken_configfile], username='shinken')
self.target.execute([
'sed', '-i',
'"s/UNIQUE_NAME/' + self.base_id.fullname + '/g"',
self.base_id.shinken_configfile], username='shinken')
self.target.execute([
'sed', '-i',
'"s/DATABASES/' + self.base_id.db_names_comma + '/g"',
self.base_id.shinken_configfile], username='shinken')
self.target.execute([
'sed', '-i', '"s/BASE/' + self.base_id.name + '/g"',
self.base_id.shinken_configfile], username='shinken')
self.target.execute([
'sed', '-i',
'"s/DOMAIN/' + self.base_id.fulldomain + '/g"',
self.base_id.shinken_configfile], username='shinken')
self.target.execute(
['/usr/local/shinken/bin/init.d/shinken', 'reload'],
username='shinken')
@api.multi
def purge_link(self):
"""
Remove the configuration file.
"""
super(ClouderBaseLink, self).purge_link()
if self.target \
and self.target.application_id.type_id.name == 'shinken':
self.target.execute(['rm', self.base_id.shinken_configfile],
username='shinken')
self.target.execute(
['/usr/local/shinken/bin/init.d/shinken', 'reload'],
username='shinken')
| agpl-3.0 | -3,848,946,444,569,847,300 | 36.538462 | 79 | 0.518272 | false |
ProfessorX/Config | .PyCharm30/system/python_stubs/-1247972723/PyKDE4/kdeui/KApplication.py | 1 | 2567 | # encoding: utf-8
# module PyKDE4.kdeui
# from /usr/lib/python2.7/dist-packages/PyKDE4/kdeui.so
# by generator 1.135
# no doc
# imports
import PyKDE4.kdecore as __PyKDE4_kdecore
import PyQt4.QtCore as __PyQt4_QtCore
import PyQt4.QtGui as __PyQt4_QtGui
import PyQt4.QtSvg as __PyQt4_QtSvg
class KApplication(__PyQt4_QtGui.QApplication):
# no doc
def checkRecoverFile(self, *args, **kwargs): # real signature unknown
pass
def clearStartupId(self, *args, **kwargs): # real signature unknown
pass
def commitData(self, *args, **kwargs): # real signature unknown
pass
def disableSessionManagement(self, *args, **kwargs): # real signature unknown
pass
def enableSessionManagement(self, *args, **kwargs): # real signature unknown
pass
def installX11EventFilter(self, *args, **kwargs): # real signature unknown
pass
def kApplication(self, *args, **kwargs): # real signature unknown
pass
def notify(self, *args, **kwargs): # real signature unknown
pass
def quit(self, *args, **kwargs): # real signature unknown
pass
def removeX11EventFilter(self, *args, **kwargs): # real signature unknown
pass
def reparseConfiguration(self, *args, **kwargs): # real signature unknown
pass
def saveState(self, *args, **kwargs): # real signature unknown
pass
def saveYourself(self, *args, **kwargs): # real signature unknown
pass
def sessionConfig(self, *args, **kwargs): # real signature unknown
pass
def sessionSaving(self, *args, **kwargs): # real signature unknown
pass
def setStartupId(self, *args, **kwargs): # real signature unknown
pass
def setSynchronizeClipboard(self, *args, **kwargs): # real signature unknown
pass
def setTopWidget(self, *args, **kwargs): # real signature unknown
pass
def startupId(self, *args, **kwargs): # real signature unknown
pass
def tempSaveName(self, *args, **kwargs): # real signature unknown
pass
def updateRemoteUserTimestamp(self, *args, **kwargs): # real signature unknown
pass
def updateUserTimestamp(self, *args, **kwargs): # real signature unknown
pass
def userTimestamp(self, *args, **kwargs): # real signature unknown
pass
def xioErrhandler(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
loadedByKdeinit = None # (!) real value is ''
| gpl-2.0 | 7,312,801,345,407,079,000 | 26.602151 | 82 | 0.653292 | false |
ajylee/gpaw-rtxs | gpaw/test/nonselfconsistent.py | 1 | 1069 | from ase import Atoms
from ase.units import Bohr
from gpaw import GPAW
from gpaw.test import equal
a = 7.5 * Bohr
n = 16
atoms = Atoms('He', [(0.0, 0.0, 0.0)], cell=(a, a, a), pbc=True)
calc = GPAW(gpts=(n, n, n), nbands=1, xc='PBE')
atoms.set_calculator(calc)
e1 = atoms.get_potential_energy()
niter1 = calc.get_number_of_iterations()
e1ref = calc.get_reference_energy()
de12 = calc.get_xc_difference('revPBE')
calc.set(xc='revPBE')
e2 = atoms.get_potential_energy()
niter2 = calc.get_number_of_iterations()
e2ref = calc.get_reference_energy()
de21 = calc.get_xc_difference('PBE')
print e1ref + e1 + de12 - (e2ref + e2)
print e1ref + e1 - (e2ref + e2 + de21)
print de12, de21
equal(e1ref + e1 + de12, e2ref + e2, 8e-4)
equal(e1ref + e1, e2ref + e2 + de21, 3e-3)
calc.write('revPBE.gpw')
de21b = GPAW('revPBE.gpw').get_xc_difference('PBE')
equal(de21, de21b, 9e-8)
energy_tolerance = 0.000005
niter_tolerance = 0
equal(e1, -0.07904951, energy_tolerance)
equal(niter1, 16, niter_tolerance)
equal(e2, -0.08147563, energy_tolerance)
equal(niter2, 11, niter_tolerance)
| gpl-3.0 | 1,066,604,541,121,826,800 | 28.694444 | 64 | 0.695042 | false |
agoose77/hivesystem | manual/tetris/tetris-8.py | 1 | 10118 | import copy
import bee
import dragonfly.pandahive
from dragonfly.grid import bgrid
from dragonfly.canvas import box2d
import dragonfly.std, dragonfly.gen, dragonfly.random, dragonfly.logic
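# The seven tetromino shapes, each encoded as a bgrid of occupied (x, y) cells.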
blocks = (
    bgrid(values=((0, 0), (1, 0), (2, 0), (3, 0))),  # I
    bgrid(values=((0, 1), (0, 0), (1, 0), (2, 0))),  # J
    bgrid(values=((0, 0), (1, 0), (2, 0), (2, 1))),  # L
    bgrid(values=((0, 1), (0, 0), (1, 1), (1, 0))),  # O
    bgrid(values=((0, 0), (1, 0), (1, 1), (2, 1))),  # S
    bgrid(values=((0, 0), (1, 0), (1, 1), (2, 0))),  # T
    bgrid(values=((0, 1), (1, 1), (1, 0), (2, 0))),  # Z
)
emptygrid = bgrid(0, 0, 0, 0)
from bee.segments import *
class tetris_init_main(bee.worker):
gridx = variable("int")
parameter(gridx)
gridy = variable("int")
parameter(gridy)
start = antenna("push", "trigger")
outp = output("push", ("object", "bgrid"))
grid = variable(("object", "bgrid"))
t_outp = transistor(("object", "bgrid"))
connect(grid, t_outp)
connect(t_outp, outp)
trig = triggerfunc(t_outp)
@modifier
def m_start(self):
self.grid = bgrid(0, self.gridx - 1, 0, self.gridy - 1)
self.trig()
trigger(start, m_start)
class tetris_control(bee.worker):
maingrid = antenna("pull", ("object", "bgrid"))
blockgrid = antenna("pull", ("object", "bgrid"))
grid1 = buffer("pull", ("object", "bgrid"))
connect(maingrid, grid1)
grid2 = buffer("pull", ("object", "bgrid"))
connect(blockgrid, grid2)
get_grids = triggerfunc(grid1, "input")
trigger(grid1, grid2, "input", "input")
lost = output("push", "trigger")
trig_lost = triggerfunc(lost)
place_init = antenna("push", "trigger")
@modifier
def m_place_init(self):
self.get_grids()
dx = int(self.grid1.maxx / 2) - self.grid2.minx
self.grid2.maxx += dx
self.grid2.minx += dx
dy = self.grid1.maxy - self.grid2.maxy
self.grid2.maxy += dy
self.grid2.miny += dy
if self.grid1.overlap(self.grid2):
self.trig_lost()
trigger(place_init, m_place_init)
dropped = output("push", "trigger")
trig_dropped = triggerfunc(dropped)
move_down = antenna("push", "trigger")
@modifier
def m_move_down(self):
self.get_grids()
block = copy.copy(self.grid2)
block.translate(0, -1)
if block.miny < 0 or self.grid1.overlap(block):
self.grid1.merge(self.grid2)
self.remove_lines()
self.trig_dropped()
else:
self.grid2.translate(0, -1)
trigger(move_down, m_move_down)
def move_sideways(self, direction):
self.get_grids()
block = copy.copy(self.grid2)
block.translate(direction, 0)
if block.minx < 0: return
if block.maxx > self.grid1.maxx: return
if self.grid1.overlap(block): return
self.grid2.translate(direction, 0)
move_left = antenna("push", "trigger")
@modifier
def m_move_left(self):
self.move_sideways(-1)
trigger(move_left, m_move_left)
move_right = antenna("push", "trigger")
@modifier
def m_move_right(self):
self.move_sideways(1)
trigger(move_right, m_move_right)
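    # Rotate the falling block; if the rotation pushes it past an edge, nudge it back
    # inside the playfield, and give up when it would overlap settled cells.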
def rotate(self, times):
self.get_grids()
block = copy.copy(self.grid2)
block.rotate(times)
if block.minx < 0:
block.translate(-block.minx, 0)
if block.maxx > self.grid1.maxx:
block.translate(self.grid1.maxx - block.maxx, 0)
if self.grid1.overlap(block): return
self.grid2.set(block)
rotate_cw = antenna("push", "trigger")
@modifier
def m_rotate_cw(self):
self.rotate(3)
trigger(rotate_cw, m_rotate_cw)
rotate_ccw = antenna("push", "trigger")
@modifier
def m_rotate_ccw(self):
self.rotate(1)
trigger(rotate_ccw, m_rotate_ccw)
drop = antenna("push", "trigger")
@modifier
def m_drop(self):
self.get_grids()
block = copy.copy(self.grid2)
while block.miny >= 0 and not self.grid1.overlap(block):
block.translate(0, -1)
block.translate(0, 1)
self.grid1.merge(block)
self.remove_lines()
self.trig_dropped()
trigger(drop, m_drop)
def remove_lines(self):
values = self.grid1.get_values()
removed = 0
y = 0
while y < self.grid1.maxy + 1:
line = [v for v in values if v[1] == y]
if len(line) == self.grid1.maxx + 1:
values = [v for v in values if v[1] != y]
values = [(v[0], v[1] - 1) if v[1] > y else v for v in values]
removed += 1
else:
y += 1
if removed: self.grid1.set_values(values)
from bee import antenna, output, connect, attribute, configure, parameter, get_parameter
class tetris_select_block(bee.frame):
blocks = parameter("object")
blocks_ = get_parameter("blocks")
w_blocks = dragonfly.gen.gentuple2(blocks_)
sel = dragonfly.random.choice()
connect(w_blocks, sel)
do_select = dragonfly.gen.transistor()
connect(sel, do_select)
chosen = dragonfly.std.variable(("object", "bgrid"))(emptygrid)
chosencontrol = dragonfly.grid.bgridcontrol()
connect(chosen, chosencontrol.grid)
connect(do_select, chosen)
do_select2 = dragonfly.gen.transistor()
connect(chosen, do_select2)
uptofour = dragonfly.std.variable(("int", "int"))((0, 4))
randint = dragonfly.random.randint()
connect(uptofour, randint)
rotate = dragonfly.std.transistor("int")()
connect(randint, rotate)
connect(rotate, chosencontrol.rotate)
trigger = dragonfly.std.pushconnector("trigger")()
connect(trigger, do_select)
connect(trigger, rotate)
connect(trigger, do_select2)
select = antenna(trigger.inp)
selected = output(do_select2.outp)
class tetris_draw(bee.frame):
mainarea_ = attribute("parent", "mainarea")
mainarea_id_ = attribute("parent", "mainarea_id")
drawgrid = dragonfly.std.variable(("object", "bgrid"))(emptygrid)
drawgridcontrol = dragonfly.grid.bgridcontrol()
connect(drawgrid, drawgridcontrol.grid)
w_draw = dragonfly.canvas.draw3(("object", "bgrid"))(mainarea_id_)
do_draw = dragonfly.std.transistor(("object", "bgrid"))()
connect(drawgrid, do_draw)
connect(do_draw, w_draw)
update = dragonfly.canvas.update3(mainarea_id_)
maingridcontrol = dragonfly.grid.bgridcontrol()
copy_maingrid = dragonfly.std.transistor(("object", "bgrid"))()
connect(maingridcontrol.copy, copy_maingrid)
connect(copy_maingrid, drawgridcontrol.set)
t_blockgrid = dragonfly.std.transistor(("object", "bgrid"))()
connect(t_blockgrid, drawgridcontrol.merge)
trigger = dragonfly.std.pushconnector("trigger")()
connect(trigger, copy_maingrid)
connect(trigger, t_blockgrid)
connect(trigger, update)
start = antenna(do_draw.trig)
maingrid = antenna(maingridcontrol.grid)
blockgrid = antenna(t_blockgrid.inp)
draw = antenna(trigger.inp)
class parameters(object):
def __init__(self, **args):
for a in args: setattr(self, a, args[a])
class main(dragonfly.pandahive.pandahive):
blocks = blocks
gridx = 10
gridy = 20
mainarea = box2d(100, 150, 225, 375)
mainarea_id = "main"
mainarea_parameters = parameters(color=(0.5, 0.5, 0.5, 0))
scorearea = box2d(170, 100, 80, 40)
scorearea_id = "score"
canvas = dragonfly.pandahive.pandacanvas()
blocks_ = attribute("blocks")
gridx_ = attribute("gridx")
gridy_ = attribute("gridy")
mainarea_ = attribute("mainarea")
mainarea_parameters_ = attribute("mainarea_parameters")
mainarea_id_ = attribute("mainarea_id")
scorearea_ = attribute("scorearea")
scorearea_id_ = attribute("scorearea_id")
c0 = configure("canvas") # must have a lower-alphabet name than "canvas"
c0.reserve(mainarea_id_, ("object", "bgrid"), box=mainarea_, parameters=mainarea_parameters_)
maingrid = dragonfly.std.variable(("object", "bgrid"))(emptygrid)
maingridcontrol = dragonfly.grid.bgridcontrol()
connect(maingrid, maingridcontrol.grid)
blockgrid = dragonfly.std.variable(("object", "bgrid"))(emptygrid)
blockgridcontrol = dragonfly.grid.bgridcontrol()
connect(blockgrid, blockgridcontrol.grid)
select_block = tetris_select_block(blocks=blocks_)
connect(select_block, blockgridcontrol.set)
init_main = tetris_init_main(gridx_, gridy_)
connect(init_main, maingridcontrol.set)
draw = tetris_draw()
connect(maingrid, draw.maingrid)
connect(blockgrid, draw.blockgrid)
control = tetris_control()
connect(maingrid, control.maingrid)
connect(blockgrid, control.blockgrid)
start = dragonfly.sys.startsensor()
connect(start, select_block)
connect(start, init_main.start)
connect(start, control.place_init)
connect(start, draw.start)
connect(start, draw.draw)
period = dragonfly.std.variable("float")(0.3)
cycle = dragonfly.time.cycle()
connect(period, cycle)
connect(cycle, control.move_down)
connect(cycle, draw.draw)
connect(control.dropped, select_block)
connect(control.dropped, control.place_init)
connect(control.lost, "exitactuator")
k_left = dragonfly.io.keyboardsensor_trigger("LEFT")
connect(k_left, control.move_left)
connect(k_left, draw.draw)
k_right = dragonfly.io.keyboardsensor_trigger("RIGHT")
connect(k_right, control.move_right)
connect(k_right, draw.draw)
k_return = dragonfly.io.keyboardsensor_trigger("RETURN")
connect(k_return, control.rotate_cw)
connect(k_return, draw.draw)
k_space = dragonfly.io.keyboardsensor_trigger("SPACE")
connect(k_space, control.rotate_ccw)
connect(k_space, draw.draw)
k_down = dragonfly.io.keyboardsensor_trigger("DOWN")
connect(k_down, control.drop)
connect(k_down, draw.draw)
raiser = bee.raiser()
connect("evexc", raiser)
m = main().getinstance()
m.build("main")
m.place()
m.close()
m.init()
m.run()
| bsd-2-clause | 6,362,046,269,218,194,000 | 29.113095 | 97 | 0.627496 | false |
jmartinezchaine/OpenERP | openerp/addons/nan_account_extension/partner.py | 1 | 6259 | # -*- encoding: latin-1 -*-
##############################################################################
#
# Copyright (c) 2010 NaN Projectes de Programari Lliure, S.L. All Rights Reserved.
# http://www.NaN-tic.com
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from osv import osv
from osv import fields
from tools.translate import _
class res_partner(osv.osv):
_inherit = 'res.partner'
def update_account(self, cr, uid, partner_id, account_type, context, force_checked=None):
if account_type not in ('receivable', 'payable'):
return
company = self.pool.get('res.users').browse(cr, uid, uid, context).company_id
parent_account = getattr(company, 'parent_%s_account_id' % account_type )
if not parent_account:
return
partner = self.browse(cr, uid, partner_id, context)
if account_type == 'receivable':
checked = partner.customer
else:
checked = partner.supplier
partner_account = getattr(partner, 'property_account_%s' % account_type )
if not force_checked is None:
checked = force_checked
if partner_account:
if checked:
# If account already exists, just check if we need to update account name.
if partner_account.name != partner.name:
# We will only update account name if no other partner is using the same account.
value = 'account.account,%d' % partner_account.id
partners = self.pool.get('ir.property').search(cr, uid, [('value_reference','=',value)], context=context)
if len(partners) == 1:
self.pool.get('account.account').write(cr, uid, [partner_account.id], {
'name': partner.name,
}, context)
return
# If it's not possible to unlink the account we will rollback this change
# so the property remains the same. Note that we cannot try to unlink first,
# because in this case it would always fail because of the fact that it's set
# as the account in the partner.
cr.execute('SAVEPOINT remove_account')
self.write(cr, uid, [partner_id], {
'property_account_%s' % account_type : False,
}, context)
try:
# Unlink may raise an exception if the account is already set in another partner
# or if it has account moves.
self.pool.get('account.account').unlink(cr, uid, [partner_account.id], context)
except osv.except_osv:
cr.execute('ROLLBACK TO SAVEPOINT remove_account')
return
cr.execute('RELEASE SAVEPOINT remove_account')
return
if not checked:
return
partner_ref = partner.ref or ''
digits = company.account_digits or 0
        # If no reference was entered, the next code is obtained from a sequence
code = parent_account.code + '0'*(digits - len(parent_account.code + partner_ref)) + partner_ref
account_id = self.pool.get('account.account').search(cr, uid, [('code','=',code)], context=context)
if account_id:
account_id = account_id[0]
else:
            tipo_cuenta = 15  # 15 = "Terceros a Cobrar" (receivable third parties)
if account_type != 'receivable':
tipo_cuenta = 16
account_id = self.pool.get('account.account').create(cr, uid, {
'name': partner.name,
'code': code,
'parent_id': parent_account.id,
'user_type': tipo_cuenta,
'reconcile': True,
'type': account_type,
'currency_id' : 2,
}, context)
self.write(cr, uid, [partner_id], {
'property_account_%s' % account_type : account_id,
}, context)
def create(self, cr, uid, vals, context=None):
id = super(res_partner, self).create(cr, uid, vals, context)
self.update_account(cr, uid, id, 'receivable', context)
self.update_account(cr, uid, id, 'payable', context)
return id
def write(self, cr, uid, ids, vals, context=None):
result = super(res_partner, self).write(cr, uid, ids, vals, context)
if 'customer' in vals or 'name' in vals:
for id in ids:
self.update_account(cr, uid, id, 'receivable', context)
if 'supplier' in vals or 'name' in vals:
for id in ids:
self.update_account(cr, uid, id, 'payable', context)
return result
def unlink(self, cr, uid, ids, context=None):
for id in ids:
self.update_account(cr, uid, id, 'receivable', context, force_checked = False)
self.update_account(cr, uid, id, 'payable', context, force_checked = False)
return super(res_partner, self).unlink(cr, uid, ids, context)
res_partner()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -3,422,568,010,201,253,000 | 42.465278 | 125 | 0.588752 | false |
wavefrontHQ/python-client | test/test_user_group.py | 1 | 1242 | # coding: utf-8
"""
Wavefront REST API
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import wavefront_api_client
from wavefront_api_client.models.user_group import UserGroup # noqa: E501
from wavefront_api_client.rest import ApiException
class TestUserGroup(unittest.TestCase):
"""UserGroup unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testUserGroup(self):
"""Test UserGroup"""
# FIXME: construct object with mandatory attributes with example values
# model = wavefront_api_client.models.user_group.UserGroup() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 6,258,667,494,551,933,000 | 30.05 | 409 | 0.708535 | false |
googleapis/python-bigquery-datatransfer | samples/snippets/noxfile_config.py | 1 | 1607 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Default TEST_CONFIG_OVERRIDE for python repos.
# You can copy this file into your directory, then it will be imported from
# the noxfile.py.
# The source of truth:
# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/noxfile_config.py
TEST_CONFIG_OVERRIDE = {
# You can opt out from the test for specific Python versions.
"ignored_versions": ["2.7"],
# Old samples are opted out of enforcing Python type hints
# All new samples should feature them
"enforce_type_hints": False,
# An envvar key for determining the project id to use. Change it
# to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a
# build specific Cloud project. You can also use your own string
# to use your own Cloud project.
"gcloud_project_env": "GOOGLE_CLOUD_PROJECT",
# "gcloud_project_env": "BUILD_SPECIFIC_GCLOUD_PROJECT",
# A dictionary you want to inject into your test. Don't put any
# secrets here. These values will override predefined values.
"envs": {},
}
| apache-2.0 | -1,555,643,134,543,847,400 | 41.289474 | 90 | 0.729932 | false |
nicolaiarocci/eve-swagger | eve_swagger/definitions.py | 1 | 7881 | # -*- coding: utf-8 -*-
"""
eve-swagger.definitions
~~~~~~~~~~~~~~~~~~~~~~~
swagger.io extension for Eve-powered REST APIs.
:copyright: (c) 2016 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
"""
from flask import current_app as app
from eve_swagger import OrderedDict
def definitions():
definitions = OrderedDict()
dr_sources = {}
for rd in app.config['DOMAIN'].values():
dr_sources.update(_get_dr_sources(rd['schema']))
for rd in app.config['DOMAIN'].values():
if rd.get('disable_documentation'):
continue
title = rd['item_title']
definitions[title] = _object(rd, dr_sources)
if 'description' in rd:
definitions[title]['description'] = rd['description']
if 'example' in rd:
definitions[title]['example'] = rd['example']
# add data_relation source fields to #/definitions/
definitions.update(dr_sources)
return definitions
def _object(rd, dr_sources):
props = {}
required = []
for field, rules in rd['schema'].items():
if rules.get('required') is True:
required.append(field)
def_name = rd['item_title'] + '_' + field
props[field] = _field_props(rules, dr_sources, def_name)
if def_name in dr_sources:
# the current field is a source of a data_relation
# replace None in dr_sources with the field properties
dr_sources[def_name] = OrderedDict(props[field])
props[field] = {'$ref': '#/definitions/{0}'.format(def_name)}
if 'data_relation' in rules:
# the current field is a copy of another field
dr = rules['data_relation']
if dr['resource'] not in app.config['DOMAIN']:
# source of data_relation does not exist
continue
title = app.config['DOMAIN'][dr['resource']]['item_title']
source_def_name = title + '_' + dr['field']
props[field] = {
'$ref': '#/definitions/{0}'.format(source_def_name)
}
field_def = {}
field_def['type'] = 'object'
field_def['properties'] = props
if len(required):
field_def['required'] = required
return field_def
def _field_props(rules, dr_sources, prefix):
resp = {}
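    # Map Eve/Cerberus types to Swagger (type[, format]) pairs; geometry types are
    # expanded into GeoJSON-like pseudo objects further below.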
map = {
'dict': ('object',),
'list': ('array',),
'objectid': ('string', 'objectid'),
'datetime': ('string', 'date-time'),
'float': ('number', 'float'),
'point': ('geometry', 'Point'),
'multipoint': ('geometry', 'MultiPoint'),
'linestring': ('geometry', 'LineString'),
'multilinestring': ('geometry', 'MultiLineString'),
'polygon': ('geometry', 'Polygon'),
'multipolygon': ('geometry', 'MultiPolygon'),
'geometrycollection': ('geometry', 'GeometryCollection')
}
eve_type = rules.get('type')
if eve_type is None:
return resp
if 'description' in rules:
resp['description'] = rules['description']
if 'example' in rules:
resp['example'] = rules['example']
if 'allowed' in rules:
resp['enum'] = rules['allowed']
if 'default' in rules:
resp['default'] = rules['default']
if 'minlength' in rules:
if eve_type == 'string':
resp['minLength'] = rules['minlength']
elif eve_type == 'list':
resp['minItems'] = rules['minlength']
if 'maxlength' in rules:
if eve_type == 'string':
resp['maxLength'] = rules['maxlength']
elif eve_type == 'list':
resp['maxItems'] = rules['maxlength']
if 'min' in rules:
if eve_type in ['number', 'integer', 'float']:
resp['minimum'] = rules['min']
if 'max' in rules:
if eve_type in ['number', 'integer', 'float']:
resp['maximum'] = rules['max']
if 'readonly' in rules:
resp['readOnly'] = rules['readonly']
if 'regex' in rules:
resp['pattern'] = rules['regex']
if 'nullable' in rules:
resp['nullable'] = rules['nullable']
type = map.get(eve_type, (eve_type,))
resp['type'] = type[0]
if type[0] == 'object':
# we don't support 'valueschema' rule
if 'schema' in rules:
# set prefix as item_title to avoid name collisions of nested
# fields with higher up fields
pseudo_rd = {'item_title': prefix, 'schema': rules['schema']}
resp.update(_object(pseudo_rd, dr_sources))
elif type[0] == 'array':
type = 'array'
if 'schema' in rules:
resp['items'] = _field_props(rules['schema'], dr_sources, prefix)
else:
# 'items' is mandatory for swagger, we assume it's a list of
# strings
resp['items'] = {'type': 'string'}
elif type[0] == 'geometry':
# create a pseudo object
pseudo_rd = {
'item_title': prefix, 'schema': {
'type': {
'type': 'string',
'allowed': [type[1]],
'required': True
},
'coordinates': {
'type': 'list',
'required': True
}
}
}
point2D_rd = {
'schema': {
'type': 'integer'
},
'minlength': 2,
'maxlength': 2
}
array_rd = {
'schema': {
'type': 'list'
}
}
s = 'schema'
c = 'coordinates'
if type[1] == 'Point':
# 1st level array of point 2D
pseudo_rd[s][c].update(point2D_rd)
elif type[1] in ['LineString', 'MultiPoint']:
# 2nd level array of point 2D
pseudo_rd[s][c].update(array_rd)
pseudo_rd[s][c][s].update(point2D_rd)
elif type[1] in ['MultiLineString', 'Polygon']:
# 3rd level array of point 2D
pseudo_rd[s][c].update(array_rd)
pseudo_rd[s][c][s].update(array_rd)
pseudo_rd[s][c][s][s].update(point2D_rd)
elif type[1] in ['MultiPolygon']:
# 4th level array of point 2D
pseudo_rd[s][c].update(array_rd)
pseudo_rd[s][c][s].update(array_rd)
pseudo_rd[s][c][s][s].update(array_rd)
pseudo_rd[s][c][s][s][s].update(point2D_rd)
elif type[1] in ['GeometryCollection']:
# GeometryCollection require external defs
pass
resp.update(_object(pseudo_rd, dr_sources))
else:
try:
resp['format'] = type[1]
except IndexError:
pass
return resp
def _get_dr_sources(schema):
'''
Returns a dict of names for sources of data_relations:
e.g. the name field in the user resource relates to (is a copy of) the
name field of the person resource
=> return {'person_name': None}
'''
dr_sources = {}
for rules in schema.values():
if 'data_relation' in rules:
dr = rules['data_relation']
if dr['resource'] not in app.config['DOMAIN']:
# source of data_relation does not exist
continue
title = app.config['DOMAIN'][dr['resource']]['item_title']
def_name = title + '_' + dr['field']
dr_sources[def_name] = None
elif 'schema' in rules:
if rules.get('type') in 'dict':
# recursively handle data_relations in subdicts
dr_sources.update(_get_dr_sources(rules['schema']))
elif rules.get('type') == 'list' and 'schema' in rules['schema']:
# recursively handle data_relations in lists
dr_sources.update(_get_dr_sources(rules['schema']['schema']))
return dr_sources
| bsd-3-clause | 2,573,368,983,479,785,500 | 31.974895 | 77 | 0.523411 | false |
algorhythms/LeetCode | 623 Add One Row to Tree.py | 1 | 2451 | #!/usr/bin/python3
"""
Given the root of a binary tree, then value v and depth d, you need to add a row
of nodes with value v at the given depth d. The root node is at depth 1.
The adding rule is: given a positive integer depth d, for each NOT null tree
nodes N in depth d-1, create two tree nodes with value v as N's left subtree
root and right subtree root. And N's original left subtree should be the left
subtree of the new left subtree root, its original right subtree should be the
right subtree of the new right subtree root. If depth d is 1 that means there is
no depth d-1 at all, then create a tree node with value v as the new root of the
whole original tree, and the original tree is the new root's left subtree.
Example 1:
Input:
A binary tree as following:
4
/ \
2 6
/ \ /
3 1 5
v = 1
d = 2
Output:
4
/ \
1 1
/ \
2 6
/ \ /
3 1 5
Example 2:
Input:
A binary tree as following:
4
/
2
/ \
3 1
v = 1
d = 3
Output:
4
/
2
/ \
1 1
/ \
3 1
"""
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def addOneRow(self, root: TreeNode, v: int, d: int) -> TreeNode:
return self.add(root, v, d, 1, "left")
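    # Walk down the tree; once depth d is reached, splice in a new node with value v
    # and hang the old subtree on the given side ("left" or "right").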
def add(self, node, v, d, cur_d, child) -> TreeNode:
# use the return value for parent's reference
if cur_d == d:
new = TreeNode(v)
setattr(new, child, node)
return new
if node:
node.left = self.add(node.left, v, d, cur_d + 1, "left")
node.right = self.add(node.right, v, d, cur_d + 1, "right")
return node
class Solution2:
def addOneRow(self, root: TreeNode, v: int, d: int) -> TreeNode:
if d == 1:
node = TreeNode(v)
node.left = root
return node
        self.add(root, v, d, 1)  # fixed: the original passed `self` twice, which raises a TypeError
return root
def add(self, node, v, d, cur_d) -> None:
if not node:
return
if cur_d + 1 == d:
left = node.left
right = node.right
node.left = TreeNode(v)
node.left.left = left
node.right = TreeNode(v)
node.right.right = right
self.add(node.left, v, d, cur_d + 1)
self.add(node.right, v, d, cur_d + 1)
| mit | 1,620,680,028,994,446,600 | 21.694444 | 80 | 0.550796 | false |
janusnic/ecommerce | ecommerce/extensions/analytics/utils.py | 1 | 2992 | from functools import wraps
import logging
from django.conf import settings
logger = logging.getLogger(__name__)
def is_segment_configured():
"""Returns a Boolean indicating if Segment has been configured for use."""
return bool(settings.SEGMENT_KEY)
def parse_tracking_context(user):
"""Extract user and client IDs from a user's tracking context.
Arguments:
user (User): An instance of the User model.
Returns:
Tuple of strings, user_tracking_id and lms_client_id
"""
tracking_context = user.tracking_context or {}
user_tracking_id = tracking_context.get('lms_user_id')
if user_tracking_id is None:
# Even if we cannot extract a good platform user ID from the context, we can still track the
# event with an arbitrary local user ID. However, we need to disambiguate the ID we choose
# since there's no guarantee it won't collide with a platform user ID that may be tracked
# at some point.
user_tracking_id = 'ecommerce-{}'.format(user.id)
lms_client_id = tracking_context.get('lms_client_id')
return user_tracking_id, lms_client_id
def log_exceptions(msg):
"""Log exceptions (avoiding clutter/indentation).
Exceptions are still raised. This module assumes that signal receivers are
being invoked with `send_robust`, or that callers will otherwise mute
exceptions as needed.
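    Example (illustrative):
        @log_exceptions("Failed to emit tracking event")
        def handle_signal(sender, **kwargs):
            ...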
"""
def decorator(func): # pylint: disable=missing-docstring
@wraps(func)
def wrapper(*args, **kwargs): # pylint: disable=missing-docstring
try:
return func(*args, **kwargs)
except: # pylint: disable=bare-except
logger.exception(msg)
raise
return wrapper
return decorator
def audit_log(name, **kwargs):
"""DRY helper used to emit an INFO-level log message.
Messages logged with this function are used to construct an audit trail. Log messages
should be emitted immediately after the event they correspond to has occurred and, if
applicable, after the database has been updated. These log messages use a verbose
key-value pair syntax to make it easier to extract fields when parsing the application's
logs.
This function is variadic, accepting a variable number of keyword arguments.
Arguments:
name (str): The name of the message to log. For example, 'payment_received'.
Keyword Arguments:
Indefinite. Keyword arguments are strung together as comma-separated key-value
pairs ordered alphabetically by key in the resulting log message.
Returns:
None
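    Example (illustrative values):
        audit_log('payment_received', amount='25.00', basket_id=42)
        logs 'payment_received: amount="25.00", basket_id="42"'.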
"""
# Joins sorted keyword argument keys and values with an "=", wraps each value
# in quotes, and separates each pair with a comma and a space.
payload = u', '.join([u'{k}="{v}"'.format(k=k, v=v) for k, v in sorted(kwargs.items())])
message = u'{name}: {payload}'.format(name=name, payload=payload)
logger.info(message)
| agpl-3.0 | -5,083,399,345,460,941,000 | 34.619048 | 100 | 0.680816 | false |
partofthething/home-assistant | tests/components/homeassistant/triggers/test_numeric_state.py | 1 | 57730 | """The tests for numeric state automation."""
from datetime import timedelta
import logging
from unittest.mock import patch
import pytest
import voluptuous as vol
import homeassistant.components.automation as automation
from homeassistant.components.homeassistant.triggers import (
numeric_state as numeric_state_trigger,
)
from homeassistant.const import ATTR_ENTITY_ID, ENTITY_MATCH_ALL, SERVICE_TURN_OFF
from homeassistant.core import Context
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import (
assert_setup_component,
async_fire_time_changed,
async_mock_service,
mock_component,
)
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
@pytest.fixture(autouse=True)
async def setup_comp(hass):
"""Initialize components."""
mock_component(hass, "group")
await async_setup_component(
hass,
"input_number",
{
"input_number": {
"value_3": {"min": 0, "max": 255, "initial": 3},
"value_5": {"min": 0, "max": 255, "initial": 5},
"value_8": {"min": 0, "max": 255, "initial": 8},
"value_10": {"min": 0, "max": 255, "initial": 10},
"value_12": {"min": 0, "max": 255, "initial": 12},
"value_100": {"min": 0, "max": 255, "initial": 100},
}
},
)
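# The threshold tests below run twice: once with a literal number and once with an
# input_number entity id, since numeric_state accepts either form for above/below.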
@pytest.mark.parametrize("below", (10, "input_number.value_10"))
async def test_if_not_fires_on_entity_removal(hass, calls, below):
"""Test the firing with removed entity."""
hass.states.async_set("test.entity", 11)
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
# Entity disappears
hass.states.async_remove("test.entity")
await hass.async_block_till_done()
assert len(calls) == 0
@pytest.mark.parametrize("below", (10, "input_number.value_10"))
async def test_if_fires_on_entity_change_below(hass, calls, below):
"""Test the firing with changed entity."""
hass.states.async_set("test.entity", 11)
await hass.async_block_till_done()
context = Context()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
# 9 is below 10
hass.states.async_set("test.entity", 9, context=context)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].context.parent_id == context.id
# Set above 12 so the automation will fire again
hass.states.async_set("test.entity", 12)
await hass.services.async_call(
automation.DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: ENTITY_MATCH_ALL},
blocking=True,
)
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize("below", (10, "input_number.value_10"))
async def test_if_fires_on_entity_change_over_to_below(hass, calls, below):
"""Test the firing with changed entity."""
hass.states.async_set("test.entity", 11)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
# 9 is below 10
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize("below", (10, "input_number.value_10"))
async def test_if_fires_on_entities_change_over_to_below(hass, calls, below):
"""Test the firing with changed entities."""
hass.states.async_set("test.entity_1", 11)
hass.states.async_set("test.entity_2", 11)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": ["test.entity_1", "test.entity_2"],
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
# 9 is below 10
hass.states.async_set("test.entity_1", 9)
await hass.async_block_till_done()
assert len(calls) == 1
hass.states.async_set("test.entity_2", 9)
await hass.async_block_till_done()
assert len(calls) == 2
@pytest.mark.parametrize("below", (10, "input_number.value_10"))
async def test_if_not_fires_on_entity_change_below_to_below(hass, calls, below):
"""Test the firing with changed entity."""
context = Context()
hass.states.async_set("test.entity", 11)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
# 9 is below 10 so this should fire
hass.states.async_set("test.entity", 9, context=context)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].context.parent_id == context.id
# already below so should not fire again
hass.states.async_set("test.entity", 5)
await hass.async_block_till_done()
assert len(calls) == 1
# still below so should not fire again
hass.states.async_set("test.entity", 3)
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize("below", (10, "input_number.value_10"))
async def test_if_not_below_fires_on_entity_change_to_equal(hass, calls, below):
"""Test the firing with changed entity."""
hass.states.async_set("test.entity", 11)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
# 10 is not below 10 so this should not fire again
hass.states.async_set("test.entity", 10)
await hass.async_block_till_done()
assert len(calls) == 0
@pytest.mark.parametrize("below", (10, "input_number.value_10"))
async def test_if_not_fires_on_initial_entity_below(hass, calls, below):
"""Test the firing when starting with a match."""
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
# Do not fire on first update when initial state was already below
hass.states.async_set("test.entity", 8)
await hass.async_block_till_done()
assert len(calls) == 0
@pytest.mark.parametrize("above", (10, "input_number.value_10"))
async def test_if_not_fires_on_initial_entity_above(hass, calls, above):
"""Test the firing when starting with a match."""
hass.states.async_set("test.entity", 11)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
},
"action": {"service": "test.automation"},
}
},
)
# Do not fire on first update when initial state was already above
hass.states.async_set("test.entity", 12)
await hass.async_block_till_done()
assert len(calls) == 0
@pytest.mark.parametrize("above", (10, "input_number.value_10"))
async def test_if_fires_on_entity_change_above(hass, calls, above):
"""Test the firing with changed entity."""
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
},
"action": {"service": "test.automation"},
}
},
)
# 11 is above 10
hass.states.async_set("test.entity", 11)
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_fires_on_entity_unavailable_at_startup(hass, calls):
"""Test the firing with changed entity at startup."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": 10,
},
"action": {"service": "test.automation"},
}
},
)
# 11 is above 10
hass.states.async_set("test.entity", 11)
await hass.async_block_till_done()
assert len(calls) == 0
@pytest.mark.parametrize("above", (10, "input_number.value_10"))
async def test_if_fires_on_entity_change_below_to_above(hass, calls, above):
"""Test the firing with changed entity."""
# set initial state
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
},
"action": {"service": "test.automation"},
}
},
)
# 11 is above 10 and 9 is below
hass.states.async_set("test.entity", 11)
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize("above", (10, "input_number.value_10"))
async def test_if_not_fires_on_entity_change_above_to_above(hass, calls, above):
"""Test the firing with changed entity."""
# set initial state
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
},
"action": {"service": "test.automation"},
}
},
)
# 12 is above 10 so this should fire
hass.states.async_set("test.entity", 12)
await hass.async_block_till_done()
assert len(calls) == 1
# already above, should not fire again
hass.states.async_set("test.entity", 15)
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize("above", (10, "input_number.value_10"))
async def test_if_not_above_fires_on_entity_change_to_equal(hass, calls, above):
"""Test the firing with changed entity."""
# set initial state
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
},
"action": {"service": "test.automation"},
}
},
)
    # 10 is not above 10, so this should not fire
hass.states.async_set("test.entity", 10)
await hass.async_block_till_done()
assert len(calls) == 0
@pytest.mark.parametrize(
"above, below",
(
(5, 10),
(5, "input_number.value_10"),
("input_number.value_5", 10),
("input_number.value_5", "input_number.value_10"),
),
)
async def test_if_fires_on_entity_change_below_range(hass, calls, above, below):
"""Test the firing with changed entity."""
hass.states.async_set("test.entity", 11)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"below": below,
"above": above,
},
"action": {"service": "test.automation"},
}
},
)
    # 9 is below 10 and above 5, inside the range, so this should fire
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize(
"above, below",
(
(5, 10),
(5, "input_number.value_10"),
("input_number.value_5", 10),
("input_number.value_5", "input_number.value_10"),
),
)
async def test_if_fires_on_entity_change_below_above_range(hass, calls, above, below):
"""Test the firing with changed entity."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"below": below,
"above": above,
},
"action": {"service": "test.automation"},
}
},
)
    # 4 is below the lower bound of 5, outside the range, so this should not fire
hass.states.async_set("test.entity", 4)
await hass.async_block_till_done()
assert len(calls) == 0
@pytest.mark.parametrize(
"above, below",
(
(5, 10),
(5, "input_number.value_10"),
("input_number.value_5", 10),
("input_number.value_5", "input_number.value_10"),
),
)
async def test_if_fires_on_entity_change_over_to_below_range(hass, calls, above, below):
"""Test the firing with changed entity."""
hass.states.async_set("test.entity", 11)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"below": below,
"above": above,
},
"action": {"service": "test.automation"},
}
},
)
    # 9 is below 10 and above 5, back inside the range, so this should fire
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize(
"above, below",
(
(5, 10),
(5, "input_number.value_10"),
("input_number.value_5", 10),
("input_number.value_5", "input_number.value_10"),
),
)
async def test_if_fires_on_entity_change_over_to_below_above_range(
hass, calls, above, below
):
"""Test the firing with changed entity."""
hass.states.async_set("test.entity", 11)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"below": above,
"above": below,
},
"action": {"service": "test.automation"},
}
},
)
# 4 is below 5 so it should not fire
hass.states.async_set("test.entity", 4)
await hass.async_block_till_done()
assert len(calls) == 0
@pytest.mark.parametrize("below", (100, "input_number.value_100"))
async def test_if_not_fires_if_entity_not_match(hass, calls, below):
"""Test if not fired with non matching entity."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.another_entity",
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set("test.entity", 11)
await hass.async_block_till_done()
assert len(calls) == 0
async def test_if_not_fires_and_warns_if_below_entity_unknown(hass, caplog, calls):
"""Test if warns with unknown below entity."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"below": "input_number.unknown",
},
"action": {"service": "test.automation"},
}
},
)
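    # Clear setup-time records so only the warning from resolving the unknown threshold entity is counted.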
caplog.clear()
caplog.set_level(logging.WARNING)
hass.states.async_set("test.entity", 1)
await hass.async_block_till_done()
assert len(calls) == 0
assert len(caplog.record_tuples) == 1
assert caplog.record_tuples[0][1] == logging.WARNING
@pytest.mark.parametrize("below", (10, "input_number.value_10"))
async def test_if_fires_on_entity_change_below_with_attribute(hass, calls, below):
"""Test attributes change."""
hass.states.async_set("test.entity", 11, {"test_attribute": 11})
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
# 9 is below 10
hass.states.async_set("test.entity", 9, {"test_attribute": 11})
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize("below", (10, "input_number.value_10"))
async def test_if_not_fires_on_entity_change_not_below_with_attribute(
hass, calls, below
):
"""Test attributes."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
# 11 is not below 10
hass.states.async_set("test.entity", 11, {"test_attribute": 9})
await hass.async_block_till_done()
assert len(calls) == 0
@pytest.mark.parametrize("below", (10, "input_number.value_10"))
async def test_if_fires_on_attribute_change_with_attribute_below(hass, calls, below):
"""Test attributes change."""
hass.states.async_set("test.entity", "entity", {"test_attribute": 11})
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"value_template": "{{ state.attributes.test_attribute }}",
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
# 9 is below 10
hass.states.async_set("test.entity", "entity", {"test_attribute": 9})
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize("below", (10, "input_number.value_10"))
async def test_if_not_fires_on_attribute_change_with_attribute_not_below(
hass, calls, below
):
"""Test attributes change."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"value_template": "{{ state.attributes.test_attribute }}",
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
# 11 is not below 10
hass.states.async_set("test.entity", "entity", {"test_attribute": 11})
await hass.async_block_till_done()
assert len(calls) == 0
@pytest.mark.parametrize("below", (10, "input_number.value_10"))
async def test_if_not_fires_on_entity_change_with_attribute_below(hass, calls, below):
"""Test attributes change."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"value_template": "{{ state.attributes.test_attribute }}",
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
# 11 is not below 10, entity state value should not be tested
hass.states.async_set("test.entity", "9", {"test_attribute": 11})
await hass.async_block_till_done()
assert len(calls) == 0
@pytest.mark.parametrize("below", (10, "input_number.value_10"))
async def test_if_not_fires_on_entity_change_with_not_attribute_below(
hass, calls, below
):
"""Test attributes change."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"value_template": "{{ state.attributes.test_attribute }}",
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
    # test_attribute is missing, so the value template cannot render and this should not fire
hass.states.async_set("test.entity", "entity")
await hass.async_block_till_done()
assert len(calls) == 0
@pytest.mark.parametrize("below", (10, "input_number.value_10"))
async def test_fires_on_attr_change_with_attribute_below_and_multiple_attr(
hass, calls, below
):
"""Test attributes change."""
hass.states.async_set(
"test.entity", "entity", {"test_attribute": 11, "not_test_attribute": 11}
)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"value_template": "{{ state.attributes.test_attribute }}",
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
    # 9 is below 10, so this should fire
hass.states.async_set(
"test.entity", "entity", {"test_attribute": 9, "not_test_attribute": 11}
)
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize("below", (10, "input_number.value_10"))
async def test_template_list(hass, calls, below):
"""Test template list."""
hass.states.async_set("test.entity", "entity", {"test_attribute": [11, 15, 11]})
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"value_template": "{{ state.attributes.test_attribute[2] }}",
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
# 3 is below 10
hass.states.async_set("test.entity", "entity", {"test_attribute": [11, 15, 3]})
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize("below", (10.0, "input_number.value_10"))
async def test_template_string(hass, calls, below):
"""Test template string."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"value_template": "{{ state.attributes.test_attribute | multiply(10) }}",
"below": below,
},
"action": {
"service": "test.automation",
"data_template": {
"some": "{{ trigger.%s }}"
% "}} - {{ trigger.".join(
(
"platform",
"entity_id",
"below",
"above",
"from_state.state",
"to_state.state",
)
)
},
},
}
},
)
hass.states.async_set("test.entity", "test state 1", {"test_attribute": "1.2"})
await hass.async_block_till_done()
hass.states.async_set("test.entity", "test state 2", {"test_attribute": "0.9"})
await hass.async_block_till_done()
assert len(calls) == 1
assert (
calls[0].data["some"]
== f"numeric_state - test.entity - {below} - None - test state 1 - test state 2"
)
async def test_not_fires_on_attr_change_with_attr_not_below_multiple_attr(hass, calls):
"""Test if not fired changed attributes."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"value_template": "{{ state.attributes.test_attribute }}",
"below": 10,
},
"action": {"service": "test.automation"},
}
},
)
# 11 is not below 10
hass.states.async_set(
"test.entity", "entity", {"test_attribute": 11, "not_test_attribute": 9}
)
await hass.async_block_till_done()
assert len(calls) == 0
@pytest.mark.parametrize(
"above, below",
(
(8, 12),
(8, "input_number.value_12"),
("input_number.value_8", 12),
("input_number.value_8", "input_number.value_12"),
),
)
async def test_if_action(hass, calls, above, below):
"""Test if action."""
entity_id = "domain.test_entity"
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "event", "event_type": "test_event"},
"condition": {
"condition": "numeric_state",
"entity_id": entity_id,
"above": above,
"below": below,
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set(entity_id, 10)
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
hass.states.async_set(entity_id, 8)
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
hass.states.async_set(entity_id, 9)
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 2
@pytest.mark.parametrize(
"above, below",
(
(8, 12),
(8, "input_number.value_12"),
("input_number.value_8", 12),
("input_number.value_8", "input_number.value_12"),
),
)
async def test_if_fails_setup_bad_for(hass, calls, above, below):
"""Test for setup failure for bad for."""
hass.states.async_set("test.entity", 5)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
"below": below,
"for": {"invalid": 5},
},
"action": {"service": "homeassistant.turn_on"},
}
},
)
with patch.object(numeric_state_trigger, "_LOGGER") as mock_logger:
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
assert mock_logger.error.called
async def test_if_fails_setup_for_without_above_below(hass, calls):
"""Test for setup failures for missing above or below."""
with assert_setup_component(0, automation.DOMAIN):
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"for": {"seconds": 5},
},
"action": {"service": "homeassistant.turn_on"},
}
},
)
@pytest.mark.parametrize(
"above, below",
(
(8, 12),
(8, "input_number.value_12"),
("input_number.value_8", 12),
("input_number.value_8", "input_number.value_12"),
),
)
async def test_if_not_fires_on_entity_change_with_for(hass, calls, above, below):
"""Test for not firing on entity change with for."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
"below": below,
"for": {"seconds": 5},
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
hass.states.async_set("test.entity", 15)
await hass.async_block_till_done()
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 0
@pytest.mark.parametrize(
"above, below",
(
(8, 12),
(8, "input_number.value_12"),
("input_number.value_8", 12),
("input_number.value_8", "input_number.value_12"),
),
)
async def test_if_not_fires_on_entities_change_with_for_after_stop(
hass, calls, above, below
):
"""Test for not firing on entities change with for after stop."""
hass.states.async_set("test.entity_1", 0)
hass.states.async_set("test.entity_2", 0)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": ["test.entity_1", "test.entity_2"],
"above": above,
"below": below,
"for": {"seconds": 5},
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set("test.entity_1", 9)
hass.states.async_set("test.entity_2", 9)
await hass.async_block_till_done()
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 1
hass.states.async_set("test.entity_1", 15)
hass.states.async_set("test.entity_2", 15)
await hass.async_block_till_done()
hass.states.async_set("test.entity_1", 9)
hass.states.async_set("test.entity_2", 9)
await hass.async_block_till_done()
await hass.services.async_call(
automation.DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: ENTITY_MATCH_ALL},
blocking=True,
)
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize(
"above, below",
(
(8, 12),
(8, "input_number.value_12"),
("input_number.value_8", 12),
("input_number.value_8", "input_number.value_12"),
),
)
async def test_if_fires_on_entity_change_with_for_attribute_change(
hass, calls, above, below
):
"""Test for firing on entity change with for and attribute change."""
hass.states.async_set("test.entity", 0)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
"below": below,
"for": {"seconds": 5},
},
"action": {"service": "test.automation"},
}
},
)
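    # Time is frozen and advanced manually; the attribute-only change at +4s must not restart the 5-second "for" timer.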
utcnow = dt_util.utcnow()
with patch("homeassistant.core.dt_util.utcnow") as mock_utcnow:
mock_utcnow.return_value = utcnow
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=4)
async_fire_time_changed(hass, mock_utcnow.return_value)
hass.states.async_set("test.entity", 9, attributes={"mock_attr": "attr_change"})
await hass.async_block_till_done()
assert len(calls) == 0
mock_utcnow.return_value += timedelta(seconds=4)
async_fire_time_changed(hass, mock_utcnow.return_value)
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize(
"above, below",
(
(8, 12),
(8, "input_number.value_12"),
("input_number.value_8", 12),
("input_number.value_8", "input_number.value_12"),
),
)
async def test_if_fires_on_entity_change_with_for(hass, calls, above, below):
"""Test for firing on entity change with for."""
hass.states.async_set("test.entity", 0)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
"below": below,
"for": {"seconds": 5},
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize("above", (10, "input_number.value_10"))
async def test_wait_template_with_trigger(hass, calls, above):
"""Test using wait template with 'trigger.entity_id'."""
hass.states.async_set("test.entity", "0")
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
},
"action": [
{"wait_template": "{{ states(trigger.entity_id) | int < 10 }}"},
{
"service": "test.automation",
"data_template": {
"some": "{{ trigger.%s }}"
% "}} - {{ trigger.".join(
("platform", "entity_id", "to_state.state")
)
},
},
],
}
},
)
await hass.async_block_till_done()
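    # The trigger fires when the entity rises above the threshold; the wait_template then blocks the action until the same entity drops back below 10.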
hass.states.async_set("test.entity", "12")
hass.states.async_set("test.entity", "8")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "numeric_state - test.entity - 12"
@pytest.mark.parametrize(
"above, below",
(
(8, 12),
(8, "input_number.value_12"),
("input_number.value_8", 12),
("input_number.value_8", "input_number.value_12"),
),
)
async def test_if_fires_on_entities_change_no_overlap(hass, calls, above, below):
"""Test for firing on entities change with no overlap."""
hass.states.async_set("test.entity_1", 0)
hass.states.async_set("test.entity_2", 0)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": ["test.entity_1", "test.entity_2"],
"above": above,
"below": below,
"for": {"seconds": 5},
},
"action": {
"service": "test.automation",
"data_template": {"some": "{{ trigger.entity_id }}"},
},
}
},
)
await hass.async_block_till_done()
utcnow = dt_util.utcnow()
with patch("homeassistant.core.dt_util.utcnow") as mock_utcnow:
mock_utcnow.return_value = utcnow
hass.states.async_set("test.entity_1", 9)
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=10)
async_fire_time_changed(hass, mock_utcnow.return_value)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "test.entity_1"
hass.states.async_set("test.entity_2", 9)
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=10)
async_fire_time_changed(hass, mock_utcnow.return_value)
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == "test.entity_2"
@pytest.mark.parametrize(
"above, below",
(
(8, 12),
(8, "input_number.value_12"),
("input_number.value_8", 12),
("input_number.value_8", "input_number.value_12"),
),
)
async def test_if_fires_on_entities_change_overlap(hass, calls, above, below):
"""Test for firing on entities change with overlap."""
hass.states.async_set("test.entity_1", 0)
hass.states.async_set("test.entity_2", 0)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": ["test.entity_1", "test.entity_2"],
"above": above,
"below": below,
"for": {"seconds": 5},
},
"action": {
"service": "test.automation",
"data_template": {"some": "{{ trigger.entity_id }}"},
},
}
},
)
await hass.async_block_till_done()
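    # entity_2 briefly rises above the range, which restarts its "for" timer, so entity_1 fires first and entity_2 fires later.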
utcnow = dt_util.utcnow()
with patch("homeassistant.core.dt_util.utcnow") as mock_utcnow:
mock_utcnow.return_value = utcnow
hass.states.async_set("test.entity_1", 9)
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=1)
async_fire_time_changed(hass, mock_utcnow.return_value)
hass.states.async_set("test.entity_2", 9)
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=1)
async_fire_time_changed(hass, mock_utcnow.return_value)
hass.states.async_set("test.entity_2", 15)
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=1)
async_fire_time_changed(hass, mock_utcnow.return_value)
hass.states.async_set("test.entity_2", 9)
await hass.async_block_till_done()
assert len(calls) == 0
mock_utcnow.return_value += timedelta(seconds=3)
async_fire_time_changed(hass, mock_utcnow.return_value)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "test.entity_1"
mock_utcnow.return_value += timedelta(seconds=3)
async_fire_time_changed(hass, mock_utcnow.return_value)
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == "test.entity_2"
@pytest.mark.parametrize(
"above, below",
(
(8, 12),
(8, "input_number.value_12"),
("input_number.value_8", 12),
("input_number.value_8", "input_number.value_12"),
),
)
async def test_if_fires_on_change_with_for_template_1(hass, calls, above, below):
"""Test for firing on change with for template."""
hass.states.async_set("test.entity", 0)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
"below": below,
"for": {"seconds": "{{ 5 }}"},
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
assert len(calls) == 0
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize(
"above, below",
(
(8, 12),
(8, "input_number.value_12"),
("input_number.value_8", 12),
("input_number.value_8", "input_number.value_12"),
),
)
async def test_if_fires_on_change_with_for_template_2(hass, calls, above, below):
"""Test for firing on change with for template."""
hass.states.async_set("test.entity", 0)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
"below": below,
"for": "{{ 5 }}",
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
assert len(calls) == 0
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize(
"above, below",
(
(8, 12),
(8, "input_number.value_12"),
("input_number.value_8", 12),
("input_number.value_8", "input_number.value_12"),
),
)
async def test_if_fires_on_change_with_for_template_3(hass, calls, above, below):
"""Test for firing on change with for template."""
hass.states.async_set("test.entity", 0)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
"below": below,
"for": "00:00:{{ 5 }}",
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
assert len(calls) == 0
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_not_fires_on_error_with_for_template(hass, calls):
"""Test for not firing on error with for template."""
hass.states.async_set("test.entity", 0)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": 100,
"for": "00:00:05",
},
"action": {"service": "test.automation"},
}
},
)
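    # Becoming unavailable during the "for" window cancels the pending trigger; re-entering afterwards starts a new window that has not elapsed yet.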
hass.states.async_set("test.entity", 101)
await hass.async_block_till_done()
assert len(calls) == 0
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=3))
hass.states.async_set("test.entity", "unavailable")
await hass.async_block_till_done()
assert len(calls) == 0
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=3))
hass.states.async_set("test.entity", 101)
await hass.async_block_till_done()
assert len(calls) == 0
@pytest.mark.parametrize(
"above, below",
(
(8, 12),
(8, "input_number.value_12"),
("input_number.value_8", 12),
("input_number.value_8", "input_number.value_12"),
),
)
async def test_invalid_for_template(hass, calls, above, below):
"""Test for invalid for template."""
hass.states.async_set("test.entity", 0)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
"below": below,
"for": "{{ five }}",
},
"action": {"service": "test.automation"},
}
},
)
with patch.object(numeric_state_trigger, "_LOGGER") as mock_logger:
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
assert mock_logger.error.called
@pytest.mark.parametrize(
"above, below",
(
(8, 12),
(8, "input_number.value_12"),
("input_number.value_8", 12),
("input_number.value_8", "input_number.value_12"),
),
)
async def test_if_fires_on_entities_change_overlap_for_template(
hass, calls, above, below
):
"""Test for firing on entities change with overlap and for template."""
hass.states.async_set("test.entity_1", 0)
hass.states.async_set("test.entity_2", 0)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": ["test.entity_1", "test.entity_2"],
"above": above,
"below": below,
"for": '{{ 5 if trigger.entity_id == "test.entity_1"'
" else 10 }}",
},
"action": {
"service": "test.automation",
"data_template": {
"some": "{{ trigger.entity_id }} - {{ trigger.for }}"
},
},
}
},
)
await hass.async_block_till_done()
utcnow = dt_util.utcnow()
with patch("homeassistant.util.dt.utcnow") as mock_utcnow:
mock_utcnow.return_value = utcnow
hass.states.async_set("test.entity_1", 9)
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=1)
async_fire_time_changed(hass, mock_utcnow.return_value)
hass.states.async_set("test.entity_2", 9)
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=1)
async_fire_time_changed(hass, mock_utcnow.return_value)
hass.states.async_set("test.entity_2", 15)
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=1)
async_fire_time_changed(hass, mock_utcnow.return_value)
hass.states.async_set("test.entity_2", 9)
await hass.async_block_till_done()
assert len(calls) == 0
mock_utcnow.return_value += timedelta(seconds=3)
async_fire_time_changed(hass, mock_utcnow.return_value)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "test.entity_1 - 0:00:05"
mock_utcnow.return_value += timedelta(seconds=3)
async_fire_time_changed(hass, mock_utcnow.return_value)
await hass.async_block_till_done()
assert len(calls) == 1
mock_utcnow.return_value += timedelta(seconds=5)
async_fire_time_changed(hass, mock_utcnow.return_value)
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == "test.entity_2 - 0:00:10"
def test_below_above():
"""Test above cannot be above below."""
with pytest.raises(vol.Invalid):
numeric_state_trigger.TRIGGER_SCHEMA(
{"platform": "numeric_state", "above": 1200, "below": 1000}
)
def test_schema_input_number():
"""Test input_number only is accepted for above/below."""
with pytest.raises(vol.Invalid):
numeric_state_trigger.TRIGGER_SCHEMA(
{
"platform": "numeric_state",
"above": "input_datetime.some_input",
"below": 1000,
}
)
with pytest.raises(vol.Invalid):
numeric_state_trigger.TRIGGER_SCHEMA(
{
"platform": "numeric_state",
"below": "input_datetime.some_input",
"above": 1200,
}
)
@pytest.mark.parametrize("above", (3, "input_number.value_3"))
async def test_attribute_if_fires_on_entity_change_with_both_filters(
hass, calls, above
):
"""Test for firing if both filters are match attribute."""
hass.states.async_set("test.entity", "bla", {"test-measurement": 1})
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
"attribute": "test-measurement",
},
"action": {"service": "test.automation"},
}
},
)
await hass.async_block_till_done()
hass.states.async_set("test.entity", "bla", {"test-measurement": 4})
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize("above", (3, "input_number.value_3"))
async def test_attribute_if_not_fires_on_entities_change_with_for_after_stop(
hass, calls, above
):
"""Test for not firing on entity change with for after stop trigger."""
hass.states.async_set("test.entity", "bla", {"test-measurement": 1})
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": above,
"attribute": "test-measurement",
"for": 5,
},
"action": {"service": "test.automation"},
}
},
)
await hass.async_block_till_done()
hass.states.async_set("test.entity", "bla", {"test-measurement": 4})
await hass.async_block_till_done()
assert len(calls) == 0
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
await hass.async_block_till_done()
assert len(calls) == 1
@pytest.mark.parametrize(
"above, below",
((8, 12),),
)
async def test_variables_priority(hass, calls, above, below):
"""Test an externally defined trigger variable is overridden."""
hass.states.async_set("test.entity_1", 0)
hass.states.async_set("test.entity_2", 0)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger_variables": {"trigger": "illegal"},
"trigger": {
"platform": "numeric_state",
"entity_id": ["test.entity_1", "test.entity_2"],
"above": above,
"below": below,
"for": '{{ 5 if trigger.entity_id == "test.entity_1"'
" else 10 }}",
},
"action": {
"service": "test.automation",
"data_template": {
"some": "{{ trigger.entity_id }} - {{ trigger.for }}"
},
},
}
},
)
await hass.async_block_till_done()
utcnow = dt_util.utcnow()
with patch("homeassistant.util.dt.utcnow") as mock_utcnow:
mock_utcnow.return_value = utcnow
hass.states.async_set("test.entity_1", 9)
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=1)
async_fire_time_changed(hass, mock_utcnow.return_value)
hass.states.async_set("test.entity_2", 9)
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=1)
async_fire_time_changed(hass, mock_utcnow.return_value)
hass.states.async_set("test.entity_2", 15)
await hass.async_block_till_done()
mock_utcnow.return_value += timedelta(seconds=1)
async_fire_time_changed(hass, mock_utcnow.return_value)
hass.states.async_set("test.entity_2", 9)
await hass.async_block_till_done()
assert len(calls) == 0
mock_utcnow.return_value += timedelta(seconds=3)
async_fire_time_changed(hass, mock_utcnow.return_value)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "test.entity_1 - 0:00:05"
@pytest.mark.parametrize("multiplier", (1, 5))
async def test_template_variable(hass, calls, multiplier):
"""Test template variable."""
hass.states.async_set("test.entity", "entity", {"test_attribute": [11, 15, 11]})
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger_variables": {"multiplier": multiplier},
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"value_template": "{{ state.attributes.test_attribute[2] * multiplier}}",
"below": 10,
},
"action": {"service": "test.automation"},
}
},
)
    # the templated value is 3 * multiplier, which is below 10 only for multiplier == 1
hass.states.async_set("test.entity", "entity", {"test_attribute": [11, 15, 3]})
await hass.async_block_till_done()
if multiplier * 3 < 10:
assert len(calls) == 1
else:
assert len(calls) == 0
| mit | -3,673,311,556,926,365,000 | 31.072222 | 93 | 0.527403 | false |
ministryofjustice/cla_backend | cla_backend/apps/legalaid/management/commands/load_contactforresearchmethods.py | 1 | 1083 | from django.core.management import BaseCommand
from legalaid.models import ContactResearchMethod, PersonalDetails
import uuid
from cla_common.constants import RESEARCH_CONTACT_VIA
class Command(BaseCommand):
help = "Creates the contact for research methods default entities AND migrates data from contact_for_research_via field"
def handle(self, *args, **options):
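        # For each research-contact choice, make sure a ContactResearchMethod row exists, then copy the
        # legacy contact_for_research_via value onto the new many-to-many field for unmigrated records.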
for (value, label) in RESEARCH_CONTACT_VIA:
(method, created) = ContactResearchMethod.objects.get_or_create(
method=value, defaults={"reference": uuid.uuid4()}
)
details_qs = PersonalDetails.objects.filter(
contact_for_research_via=value, contact_for_research_methods__isnull=True
)
self.stdout.write(
"Processing {method}...migrating {count} records from contact_for_research_via field".format(
method=value, count=details_qs.count()
)
)
for details in details_qs:
details.contact_for_research_methods.add(method)
| mit | -8,174,029,900,856,867,000 | 44.125 | 124 | 0.649123 | false |
natgry/python_training | test/test_del_group.py | 1 | 1265 | from model.group import Group
import random
def test_delete_group_by_id_with_db_check(app, db, check_ui):
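    # Seed a group if the database is empty, delete a random one by id, then verify the DB (and optionally the UI) reflects the removal.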
if len(db.get_group_list()) == 0:
app.group.create(Group(name="test"))
old_groups = db.get_group_list()
group = random.choice(old_groups)
app.group.delete_group_by_id(group.id)
new_groups = db.get_group_list()
assert len(old_groups) - 1 == len(new_groups)
old_groups.remove(group)
assert old_groups == new_groups
if check_ui:
assert sorted(new_groups, key=Group.id_or_max) == sorted(app.group.get_group_list(), key=Group.id_or_max)
def test_delete_all_groups(app, db):
if len(db.get_group_list()) == 0:
app.group.create(Group(name="test"))
app.group.delete_all_groups()
all_groups = db.get_group_list()
assert len(all_groups) == 0
def test_delete_some_group_by_index(app):
if app.group.count() == 0:
app.group.create(Group(name="test"))
old_groups = app.group.get_group_list()
    index = random.randrange(len(old_groups))
app.group.delete_group_by_index(index)
assert len(old_groups) - 1 == app.group.count()
new_groups = app.group.get_group_list()
old_groups[index:index+1] = []
assert old_groups == new_groups
| apache-2.0 | -1,836,625,386,796,996,400 | 31.435897 | 113 | 0.649802 | false |