repo_name (string, 5 to 92 chars) | path (string, 4 to 232 chars) | copies (string, 19 classes) | size (string, 4 to 7 chars) | content (string, 721 to 1.04M chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51 to 99.9) | line_max (int64, 15 to 997) | alpha_frac (float64, 0.25 to 0.97) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
reunition/reunition | reunition/apps/alumni/test_models.py | 1 | 1192 | from django.test import TestCase
from model_mommy import mommy
class PersonTests(TestCase):
def test_display_name(self):
person = mommy.make(
'alumni.Person',
graduation_first_name='Bobbie',
graduation_last_name='Smith',
)
self.assertEqual(person.display_name, 'Bobbie Smith')
person = mommy.make(
'alumni.Person',
graduation_first_name='Bobbie',
graduation_last_name='Smith',
current_first_name='Roberta',
)
self.assertEqual(person.display_name, 'Roberta (Bobbie) Smith')
person = mommy.make(
'alumni.Person',
graduation_first_name='Bobbie',
graduation_last_name='Smith',
current_last_name='Jones',
)
self.assertEqual(person.display_name, 'Bobbie Jones (Smith)')
person = mommy.make(
'alumni.Person',
graduation_first_name='Bobbie',
graduation_last_name='Smith',
current_first_name='Roberta',
current_last_name='Jones',
)
self.assertEqual(person.display_name, 'Roberta Jones (Bobbie Smith)')
| mit | -1,216,111,322,849,305,900 | 29.564103 | 77 | 0.571309 | false |
rulz/django-registration | registration/models.py | 1 | 11801 | from __future__ import unicode_literals
import datetime
import hashlib
import random
import re
from django.conf import settings
from django.core.mail import EmailMultiAlternatives
from django.db import models
from django.template import RequestContext, TemplateDoesNotExist
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import python_2_unicode_compatible
from django.utils import six
from registration.users import UserModel, UserModelString
try:
from django.utils.timezone import now as datetime_now
except ImportError:
datetime_now = datetime.datetime.now
SHA1_RE = re.compile('^[a-f0-9]{40}$')
class RegistrationManager(models.Manager):
"""
Custom manager for the ``RegistrationProfile`` model.
The methods defined here provide shortcuts for account creation
and activation (including generation and emailing of activation
keys), and for cleaning out expired inactive accounts.
"""
def activate_user(self, activation_key):
"""
Validate an activation key and activate the corresponding
``User`` if valid.
If the key is valid and has not expired, return the ``User``
after activating.
If the key is not valid or has expired, return ``False``.
If the key is valid but the ``User`` is already active,
return ``False``.
To prevent reactivation of an account which has been
deactivated by site administrators, the activation key is
reset to the string constant ``RegistrationProfile.ACTIVATED``
after successful activation.
"""
# Make sure the key we're trying conforms to the pattern of a
# SHA1 hash; if it doesn't, no point trying to look it up in
# the database.
if SHA1_RE.search(activation_key):
try:
profile = self.get(activation_key=activation_key)
except self.model.DoesNotExist:
return False
if not profile.activation_key_expired():
user = profile.user
user.is_active = True
user.save()
profile.activation_key = self.model.ACTIVATED
profile.save()
return user
return False
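    # A hedged usage sketch of the activation flow described above (the view
    # and variable names here are illustrative, not part of this module):
    #
    #     user = RegistrationProfile.objects.activate_user(key_from_email)
    #     if not user:
    #         ...  # key invalid, expired, or already used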
def create_inactive_user(self, username, email, password,
site, send_email=True, request=None):
"""
Create a new, inactive ``User``, generate a
``RegistrationProfile`` and email its activation key to the
``User``, returning the new ``User``.
By default, an activation email will be sent to the new
user. To disable this, pass ``send_email=False``.
Additionally, if email is sent and ``request`` is supplied,
it will be passed to the email template.
"""
new_user = UserModel().objects.create_user(username, email, password)
new_user.is_active = False
new_user.save()
registration_profile = self.create_profile(new_user)
if send_email:
registration_profile.send_activation_email(site, request)
return new_user
def create_profile(self, user):
"""
Create a ``RegistrationProfile`` for a given
``User``, and return the ``RegistrationProfile``.
The activation key for the ``RegistrationProfile`` will be a
SHA1 hash, generated from a combination of the ``User``'s
username and a random salt.
"""
salt = hashlib.sha1(six.text_type(random.random()).encode('ascii')).hexdigest()[:5]
salt = salt.encode('ascii')
username = user.username
if isinstance(username, six.text_type):
username = username.encode('utf-8')
activation_key = hashlib.sha1(salt+username).hexdigest()
return self.create(user=user,
activation_key=activation_key)
def delete_expired_users(self):
"""
Remove expired instances of ``RegistrationProfile`` and their
associated ``User``s.
Accounts to be deleted are identified by searching for
instances of ``RegistrationProfile`` with expired activation
keys, and then checking to see if their associated ``User``
instances have the field ``is_active`` set to ``False``; any
``User`` who is both inactive and has an expired activation
key will be deleted.
It is recommended that this method be executed regularly as
part of your routine site maintenance; this application
provides a custom management command which will call this
method, accessible as ``manage.py cleanupregistration``.
Regularly clearing out accounts which have never been
activated serves two useful purposes:
        1. It alleviates the occasional need to reset a
``RegistrationProfile`` and/or re-send an activation email
when a user does not receive or does not act upon the
initial activation email; since the account will be
deleted, the user will be able to simply re-register and
receive a new activation key.
2. It prevents the possibility of a malicious user registering
one or more accounts and never activating them (thus
denying the use of those usernames to anyone else); since
those accounts will be deleted, the usernames will become
available for use again.
If you have a troublesome ``User`` and wish to disable their
account while keeping it in the database, simply delete the
associated ``RegistrationProfile``; an inactive ``User`` which
does not have an associated ``RegistrationProfile`` will not
be deleted.
"""
for profile in self.all():
try:
if profile.activation_key_expired():
user = profile.user
if not user.is_active:
user.delete()
profile.delete()
except UserModel().DoesNotExist:
profile.delete()
@python_2_unicode_compatible
class RegistrationProfile(models.Model):
"""
A simple profile which stores an activation key for use during
user account registration.
Generally, you will not want to interact directly with instances
of this model; the provided manager includes methods
for creating and activating new accounts, as well as for cleaning
out accounts which have never been activated.
While it is possible to use this model as the value of the
``AUTH_PROFILE_MODULE`` setting, it's not recommended that you do
so. This model's sole purpose is to store data temporarily during
account registration and activation.
"""
ACTIVATED = "ALREADY_ACTIVATED"
#user = models.ForeignKey(UserModelString(), unique=True, verbose_name=_('user'))
user = models.OneToOneField(UserModelString(), verbose_name=_('user'))
activation_key = models.CharField(_('activation key'), max_length=40)
objects = RegistrationManager()
class Meta:
verbose_name = _('registration profile')
verbose_name_plural = _('registration profiles')
def __str__(self):
return "Registration information for %s" % self.user
def activation_key_expired(self):
"""
Determine whether this ``RegistrationProfile``'s activation
key has expired, returning a boolean -- ``True`` if the key
has expired.
Key expiration is determined by a two-step process:
1. If the user has already activated, the key will have been
reset to the string constant ``ACTIVATED``. Re-activating
is not permitted, and so this method returns ``True`` in
this case.
2. Otherwise, the date the user signed up is incremented by
the number of days specified in the setting
``ACCOUNT_ACTIVATION_DAYS`` (which should be the number of
days after signup during which a user is allowed to
activate their account); if the result is less than or
equal to the current date, the key has expired and this
method returns ``True``.
"""
expiration_date = datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS)
return (self.activation_key == self.ACTIVATED or
(self.user.date_joined + expiration_date <= datetime_now()))
activation_key_expired.boolean = True
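    # Worked example of the expiry rule above (ACCOUNT_ACTIVATION_DAYS = 7 is
    # an assumed setting): a user who joined on 2014-01-01 can activate until
    # now() reaches 2014-01-08; once activated the key equals ``ACTIVATED``,
    # so this method returns ``True`` regardless of dates.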
def send_activation_email(self, site, request=None):
"""
Send an activation email to the user associated with this
``RegistrationProfile``.
The activation email will make use of two templates:
``registration/activation_email_subject.txt``
This template will be used for the subject line of the
email. Because it is used as the subject line of an email,
this template's output **must** be only a single line of
text; output longer than one line will be forcibly joined
into only a single line.
``registration/activation_email.txt``
This template will be used for the text body of the email.
``registration/activation_email.html``
This template will be used for the html body of the email.
These templates will each receive the following context
variables:
``user``
The new user account
``activation_key``
The activation key for the new account.
``expiration_days``
The number of days remaining during which the account may
be activated.
``site``
An object representing the site on which the user
registered; depending on whether ``django.contrib.sites``
is installed, this may be an instance of either
``django.contrib.sites.models.Site`` (if the sites
application is installed) or
``django.contrib.sites.models.RequestSite`` (if
not). Consult the documentation for the Django sites
framework for details regarding these objects' interfaces.
``request``
Optional Django's ``HttpRequest`` object from view.
If supplied will be passed to the template for better
flexibility via ``RequestContext``.
"""
ctx_dict = {}
if request is not None:
ctx_dict = RequestContext(request, ctx_dict)
# update ctx_dict after RequestContext is created
# because template context processors
# can overwrite some of the values like user
# if django.contrib.auth.context_processors.auth is used
ctx_dict.update({
'user': self.user,
'activation_key': self.activation_key,
'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS,
'site': site,
})
subject = getattr(settings, 'REGISTRATION_EMAIL_SUBJECT_PREFIX', '') + \
render_to_string('registration/activation_email_subject.txt', ctx_dict)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
message_txt = render_to_string('registration/activation_email.txt', ctx_dict)
email_message = EmailMultiAlternatives(subject, message_txt, settings.DEFAULT_FROM_EMAIL, [self.user.email])
try:
message_html = render_to_string('registration/activation_email.html', ctx_dict)
except TemplateDoesNotExist:
message_html = None
if message_html:
email_message.attach_alternative(message_html, 'text/html')
email_message.send()
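    # Minimal example templates for the email above (contents are assumed and
    # shown only for illustration):
    #
    #   registration/activation_email_subject.txt:
    #       Account activation on {{ site.name }}
    #   registration/activation_email.txt:
    #       Activate within {{ expiration_days }} days:
    #       http://{{ site.domain }}/accounts/activate/{{ activation_key }}/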
| bsd-3-clause | -2,108,707,373,669,058,000 | 37.819079 | 116 | 0.641132 | false |
open-synergy/opnsynid-stock-logistics-warehouse | stock_production_operation/models/stock_warehouse.py | 1 | 6557 | # -*- coding: utf-8 -*-
# Copyright 2018 OpenSynergy Indonesia
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from openerp import models, fields, api, _
from openerp.exceptions import Warning as UserError
class StockWarehouse(models.Model):
_inherit = "stock.warehouse"
production_rm_type_id = fields.Many2one(
string="Raw Material Consumption Type",
comodel_name="stock.picking.type"
)
production_rm_loc_id = fields.Many2one(
string="Raw Material Consumption Location",
comodel_name="stock.location"
)
production_fg_type_id = fields.Many2one(
string="Production Result Type",
comodel_name="stock.picking.type"
)
production_fg_loc_id = fields.Many2one(
string="Production Result Location",
comodel_name="stock.location"
)
@api.multi
def _prepare_production_rm_location(self):
self.ensure_one()
parent_location = self.view_location_id
data = {
"name": _("RM Consumption"),
"location_id": parent_location.id,
"usage": "production",
"active": True
}
return data
@api.multi
def _prepare_production_fg_location(self):
self.ensure_one()
parent_location = self.view_location_id
data = {
"name": _("Production Result"),
"location_id": parent_location.id,
"usage": "production",
"active": True
}
return data
@api.multi
def _prepare_production_rm_sequence(self):
self.ensure_one()
data = {
"name": self.code + " - RM Consumption",
"prefix": self.code + "/RM/",
"padding": 6
}
return data
@api.multi
def _prepare_production_fg_sequence(self):
self.ensure_one()
data = {
"name": self.code + " - Production Result",
"prefix": self.code + "/FG/",
"padding": 6
}
return data
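    # Example of the sequences prepared above (warehouse code "WH0" is
    # illustrative): with standard ir.sequence behaviour, raw-material
    # consumption pickings would be numbered WH0/RM/000001, WH0/RM/000002, ...
    # and production results WH0/FG/000001, ... given the padding of 6.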
@api.multi
def _prepare_production_rm_type(self):
self.ensure_one()
obj_sequence = self.env['ir.sequence']
src_location = self.lot_stock_id
dest_location = self._get_production_rm_location()
sequence = obj_sequence.create(
self._prepare_production_rm_sequence())
data = {
"name": _("RM Consumption"),
"warehouse_id": self.id,
"sequence_id": sequence.id,
"code": "outgoing",
"default_location_src_id": src_location.id,
"allowed_location_ids": [(6, 0, [src_location.id])],
"default_location_dest_id": dest_location.id,
"allowed_dest_location_ids": [(6, 0, [dest_location.id])],
}
return data
@api.multi
def _prepare_production_fg_type(self):
self.ensure_one()
obj_sequence = self.env['ir.sequence']
dest_location = self.lot_stock_id
src_location = self._get_production_fg_location()
sequence = obj_sequence.create(
self._prepare_production_fg_sequence())
data = {
"name": _("Production Result"),
"warehouse_id": self.id,
"sequence_id": sequence.id,
"code": "incoming",
"default_location_src_id": src_location.id,
"allowed_location_ids": [(6, 0, [src_location.id])],
"default_location_dest_id": dest_location.id,
"allowed_dest_location_ids": [(6, 0, [dest_location.id])],
}
return data
@api.multi
def _get_production_rm_location(self):
self.ensure_one()
if not self.production_rm_loc_id:
raise UserError(_("No RM Consumption location"))
return self.production_rm_loc_id
@api.multi
def _get_production_fg_location(self):
self.ensure_one()
if not self.production_fg_loc_id:
raise UserError(_("No production result location"))
return self.production_fg_loc_id
@api.multi
def _create_production_rm_loc(self):
self.ensure_one()
obj_loc = self.env["stock.location"]
production_rm_loc = obj_loc.create(
self._prepare_production_rm_location())
return production_rm_loc
@api.multi
def _create_production_fg_loc(self):
self.ensure_one()
obj_loc = self.env["stock.location"]
production_fg_loc = obj_loc.create(
self._prepare_production_fg_location())
return production_fg_loc
@api.multi
def _create_production_rm_type(self):
self.ensure_one()
obj_type = self.env["stock.picking.type"]
production_rm_type = obj_type.create(
self._prepare_production_rm_type())
return production_rm_type
@api.multi
def _create_production_fg_type(self):
self.ensure_one()
obj_type = self.env["stock.picking.type"]
production_fg_type = obj_type.create(
self._prepare_production_fg_type())
return production_fg_type
@api.multi
def button_create_production_rm_loc(self):
for wh in self:
production_rm_loc = wh._create_production_rm_loc()
self.production_rm_loc_id = production_rm_loc.id
@api.multi
def button_create_production_fg_loc(self):
for wh in self:
production_fg_loc = wh._create_production_fg_loc()
self.production_fg_loc_id = production_fg_loc.id
@api.multi
def button_create_production_rm_type(self):
for wh in self:
production_rm_type = wh._create_production_rm_type()
self.production_rm_type_id = production_rm_type.id
@api.multi
def button_create_production_fg_type(self):
for wh in self:
production_fg_type = wh._create_production_fg_type()
self.production_fg_type_id = production_fg_type.id
@api.model
def create(self, values):
new_wh = super(StockWarehouse, self).create(values)
production_rm_loc = new_wh._create_production_rm_loc()
production_fg_loc = new_wh._create_production_fg_loc()
new_wh.production_rm_loc_id = production_rm_loc.id
new_wh.production_fg_loc_id = production_fg_loc.id
production_rm_type = new_wh._create_production_rm_type()
production_fg_type = new_wh._create_production_fg_type()
new_wh.production_rm_type_id = production_rm_type.id
new_wh.production_fg_type_id = production_fg_type.id
return new_wh
| agpl-3.0 | 7,212,818,813,044,937,000 | 31.785 | 70 | 0.583804 | false |
puruckertom/ubertool | ubertool/earthworm/earthworm_exe.py | 1 | 2201 | from __future__ import division
import pandas as pd
import os.path
import sys
# parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
# sys.path.append(parentddir)
from base.uber_model import UberModel, ModelSharedInputs
from .earthworm_functions import EarthwormFunctions
class EarthwormInputs(ModelSharedInputs):
"""
Input class for Earthworm.
"""
def __init__(self):
"""Class representing the inputs for Earthworm"""
super(EarthwormInputs, self).__init__()
self.k_ow = pd.Series([], dtype="float")
self.l_f_e = pd.Series([], dtype="float")
self.c_s = pd.Series([], dtype="float")
self.k_d = pd.Series([], dtype="float")
self.p_s = pd.Series([], dtype="float")
class EarthwormOutputs(object):
"""
Output class for Earthworm.
"""
def __init__(self):
"""Class representing the outputs for Earthworm"""
super(EarthwormOutputs, self).__init__()
self.out_earthworm_fugacity = pd.Series(name="out_earthworm_fugacity")
class Earthworm(UberModel, EarthwormInputs, EarthwormOutputs, EarthwormFunctions):
"""
Earthworm model for annelid soil ingestion.
"""
def __init__(self, pd_obj, pd_obj_exp):
"""Class representing the Earthworm model and containing all its methods"""
super(Earthworm, self).__init__()
self.pd_obj = pd_obj
self.pd_obj_exp = pd_obj_exp
self.pd_obj_out = None
def execute_model(self):
"""
Callable to execute the running of the model:
1) Populate input parameters
2) Create output DataFrame to hold the model outputs
3) Run the model's methods to generate outputs
4) Fill the output DataFrame with the generated model outputs
"""
self.populate_inputs(self.pd_obj)
self.pd_obj_out = self.populate_outputs()
self.run_methods()
self.fill_output_dataframe()
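    # Hedged usage sketch (column values are illustrative, and additional
    # shared-input columns from ModelSharedInputs may also be required):
    #
    #     inputs = pd.DataFrame({"k_ow": [4.2], "l_f_e": [0.01], "c_s": [1.0],
    #                            "k_d": [0.5], "p_s": [2.5]})
    #     model = Earthworm(inputs, inputs.copy())
    #     model.execute_model()
    #     model.pd_obj_out  # holds out_earthworm_fugacity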
# Begin model methods
def run_methods(self):
""" Execute all algorithm methods for model logic """
try:
self.earthworm_fugacity()
except Exception as e:
pass
| unlicense | 1,824,656,894,426,855,400 | 30.442857 | 87 | 0.624262 | false |
defm03/toraeru | test/loli_gelbooru.py | 1 | 3832 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
*booru general file.
For now, there's working Gelbooru downloader for loli content,
but soon I'll add danbooru, etc.
"""
import loli_spam
import os
import datetime
import urllib.request
import http.cookiejar
import xml.etree.ElementTree as eltree
import json
#loli_spam.execute_spam()
cache_dir = "cache/"
class Gelbooru(object):
"""docstring for Gelbooru"""
def __init__(self, url="http://gelbooru.com/"):
# gets gelbooru homepage by default
super(Gelbooru, self).__init__()
self.url = url
gelbooru_loli = urllib.request.urlopen(url,timeout=5)
read_gel_loli = gelbooru_loli.read()
# save to gel.html file
name_gel_loli = "gel.html"
file_gel_loli = open(cache_dir+name_gel_loli,"wb")
file_gel_loli.write(read_gel_loli)
def gel_rssatom(url="http://gelbooru.com/index.php?page=atom",
by_tag_loli = False,limit = 100,download = True):
"""gel_rssatom:
by_tag_loli:
If you want to get feed for tag 'loli', you need to switch
by_tag_loli to True.
limit:
limit is variable that stores maximum number of loli entries.
maximum number of entries that can be loaded is 100 (limited
by gelbooru API). When I was testing it, there was some problem
with loading less than 5-10 urls.
"""
if by_tag_loli == True:
url = "http://gelbooru.com/index.php?page=dapi&s=post&q=index&limit={0}&tags=loli".format(str(limit))
# gets gelbooru atom rss feed
gelbooru_atom = urllib.request.urlopen(url,timeout=5)
read_gel_atom = gelbooru_atom.read()
# save to atom.xml file
if by_tag_loli == True:
name_gel_atom = "atom_loli.xml"
else: name_gel_atom = "atom.xml"
file_gel_atom = open(cache_dir+name_gel_atom,"wb")
file_gel_atom.write(read_gel_atom)
# XML parsing
tree = eltree.parse(cache_dir+name_gel_atom)
root = tree.getroot()
# gets urls to images from post form
for imgurl in root.iter('post'):
url = imgurl.attrib.get('file_url')
print(url)
# gets picture file name
f_url = url.replace(url[0:37],"")
if download == True and os.path.exists(cache_dir+f_url) == False:
# if file is already downloaded, it will skip it
urllib.request.urlretrieve(url,cache_dir+f_url)
print(f_url)
class Danbooru(object):
"""docstring for Danbooru"""
def __init__(self, url="http://gelbooru.com/"):
super(Danbooru, self).__init__()
self.url = url
def get_time():
# datetime.datetime.now() method
now = datetime.datetime.now()
hour = datetime.time(now.hour)
minute = datetime.time(now.minute)
second = datetime.time(now.second)
# isoformat() >> str method
isotime = datetime.datetime.now().isoformat()
s_iso = str(isotime)
s_iso[0:9] = date
def dan_jsonGET(url="http://gelbooru.com/",tag="loli",limit=100):
# sends request to json API on danbooru and saves in variable 'json_r'
json_g = urllib.request.urlopen(url+"posts.json?limit={0}?search[tags]={1}".format(str(limit), tag))
json_r = json_g.read()
# opens file following new filename format, and writes json data to it
file_dan = open(cache_dir+"danbooru-"+date+"-T-"+str(hour)+"-"+str(minute)+"-"+str(second)+".json", "wb")
file_dan.write(json_r)
"""Filename new format:
example: danbooru-2013-10-08-T-19-11-12.json
1st place: Object name
2nd place: Date in iso format
3rd place: (starting with "-T-") Time: hour - minute - second
"""
def execute_gel(take_limit=100):
# auto get a page, and put into "gel.html" file
Gelbooru("http://gelbooru.com/index.php?page=post&s=list&tags=loli")
maigah = Gelbooru.gel_rssatom(by_tag_loli=True,limit=take_limit)
def execute_dan(take_limit=100):
# calls dan_jsonGET -> saving 100 entries with tag "loli"
# to file following format in Danbooru init()
omgomg = Danbooru.dan_jsonGET(tag="loli",limit=take_limit) | gpl-3.0 | -7,906,757,162,575,998,000 | 29.420635 | 108 | 0.679541 | false |
mitsuhiko/sentry | src/sentry/models/environment.py | 1 | 1364 | """
sentry.models.environment
~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
from django.db import models
from django.utils import timezone
from sentry.db.models import (
BoundedPositiveIntegerField, Model, sane_repr
)
from sentry.utils.cache import cache
from sentry.utils.hashlib import md5
class Environment(Model):
__core__ = False
project_id = BoundedPositiveIntegerField()
name = models.CharField(max_length=64)
date_added = models.DateTimeField(default=timezone.now)
class Meta:
app_label = 'sentry'
db_table = 'sentry_environment'
unique_together = (('project_id', 'name'),)
__repr__ = sane_repr('project_id', 'name')
@classmethod
def get_cache_key(cls, project_id, name):
return 'env:1:%s:%s' % (project_id, md5(name).hexdigest())
@classmethod
def get_or_create(cls, project, name):
name = name or ''
cache_key = cls.get_cache_key(project.id, name)
env = cache.get(cache_key)
if env is None:
env = cls.objects.get_or_create(
project_id=project.id,
name=name,
)[0]
cache.set(cache_key, env, 3600)
return env
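    # Example cache-key shape for the lookup above (values illustrative):
    # get_cache_key(42, "production") returns "env:1:42:" followed by the md5
    # hex digest of "production"; a fetched or created Environment is then
    # cached for one hour (3600 s).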
| bsd-3-clause | 918,329,941,305,556,000 | 25.230769 | 75 | 0.621701 | false |
MangoMangoDevelopment/neptune | lib/ros_comm-1.12.0/utilities/roswtf/src/roswtf/network.py | 1 | 4458 | # Software License Agreement (BSD License)
#
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Revision $Id: environment.py 4428 2009-05-05 05:48:36Z jfaustwg $
import os
import socket
import stat
import string
import sys
import rosgraph
import rosgraph.network
from roswtf.rules import warning_rule, error_rule
# #1220
def ip_check(ctx):
# best we can do is compare roslib's routine against socket resolution and make sure they agree
local_addrs = rosgraph.network.get_local_addresses()
resolved_ips = [host[4][0] for host in socket.getaddrinfo(socket.gethostname(), 0, 0, 0, socket.SOL_TCP)]
global_ips = [ ip for ip in resolved_ips if not ip.startswith('127.') and not ip == '::1']
remote_ips = list(set(global_ips) - set(local_addrs))
if remote_ips:
retval = "Local hostname [%s] resolves to [%s], which does not appear to be a local IP address %s." % (socket.gethostname(), ','.join(remote_ips), str(local_addrs))
# IPv6 support % to denote zone/scope ids. The value is expanded
# in other functions, this is why we are using replace command in
# the return. For more info https://github.com/ros/ros_comm/pull/598
return retval.replace('%', '%%')
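# Example of the mismatch this rule catches (addresses are hypothetical): a
# machine whose /etc/hosts still maps its hostname to a stale DHCP lease such
# as 10.0.0.5 while the live interfaces report 192.168.1.23; ip_check would
# then flag the local network configuration as invalid.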
# suggestion by mquigley based on laptop dhcp issues
def ros_hostname_check(ctx):
"""Make sure that ROS_HOSTNAME resolves to a local IP address"""
if not rosgraph.ROS_HOSTNAME in ctx.env:
return
hostname = ctx.env[rosgraph.ROS_HOSTNAME]
try:
resolved_ips = [host[4][0] for host in socket.getaddrinfo(hostname, 0, 0, 0, socket.SOL_TCP)]
except socket.gaierror:
return "ROS_HOSTNAME [%s] cannot be resolved to an IP address"%(hostname)
# best we can do is compare roslib's routine against socket resolution and make sure they agree
local_addrs = rosgraph.network.get_local_addresses()
remote_ips = list(set(resolved_ips) - set(local_addrs))
if remote_ips:
return "ROS_HOSTNAME [%s] resolves to [%s], which does not appear to be a local IP address %s."%(hostname, ','.join(remote_ips), str(local_addrs))
def ros_ip_check(ctx):
"""Make sure that ROS_IP is a local IP address"""
if not rosgraph.ROS_IP in ctx.env:
return
ip = ctx.env[rosgraph.ROS_IP]
# best we can do is compare roslib's routine against socket resolution and make sure they agree
addrs = rosgraph.network.get_local_addresses()
if ip not in addrs:
return "ROS_IP [%s] does not appear to be a local IP address %s."%(ip, str(addrs))
# Error/Warning Rules
warnings = [
(ros_hostname_check,
"ROS_HOSTNAME may be incorrect: "),
(ros_ip_check,
"ROS_IP may be incorrect: "),
]
errors = [
(ip_check,
"Local network configuration is invalid: "),
]
def wtf_check(ctx):
for r in warnings:
warning_rule(r, r[0](ctx), ctx)
for r in errors:
error_rule(r, r[0](ctx), ctx)
| bsd-3-clause | 3,243,990,153,839,998,500 | 38.105263 | 172 | 0.700987 | false |
yro/openveda | openveda/reporting.py | 1 | 2431 |
import os
import sys
"""
Quick and dirty error handling & logging
"""
from global_vars import *
class ErrorObject(object):
"""
Unspecified errors with a message
"""
@staticmethod
def print_error(message):
decorator = "***************E*R*R*O*R*******************"
outgoing = '\n%s \n\n%s \n\n%s\n' % (
NODE_COLORS_BLUE + decorator + NODE_COLORS_END,
message,
NODE_COLORS_BLUE + decorator + NODE_COLORS_END,
)
print outgoing
class Output(object):
"""
Various reporting methods
"""
@staticmethod
def _seconds_from_string(duration):
hours = float(duration.split(':')[0])
minutes = float(duration.split(':')[1])
seconds = float(duration.split(':')[2])
duration_seconds = (((hours * 60) + minutes) * 60) + seconds
return duration_seconds
@staticmethod
def status_bar(process):
"""
A terminal status bar thing
"""
fps = None
duration = None
while True:
line = process.stdout.readline().strip()
if line == '' and process.poll() is not None:
break
if fps == None or duration == None:
if "Stream #" in line and " Video: " in line:
fps = [s for s in line.split(',') if "fps" in s][0].strip(' fps')
if "Duration: " in line:
dur = line.split('Duration: ')[1].split(',')[0].strip()
duration = Output()._seconds_from_string(duration=dur)
else:
if 'frame=' in line:
cur_frame = line.split('frame=')[1].split('fps=')[0].strip()
end_frame = float(duration) * float(fps.strip())
pctg = (float(cur_frame) / float(end_frame))
sys.stdout.write('\r')
i = int(pctg * 20.0)
sys.stdout.write("%s : [%-20s] %d%%" % ('Transcode', '='*i, int(pctg * 100)))
sys.stdout.flush()
"""
Just for politeness
"""
sys.stdout.write('\r')
sys.stdout.write("%s : [%-20s] %d%%" % ('Transcode', '='*20, 100))
sys.stdout.flush()
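    # Worked example of the progress math above (ffmpeg output values are
    # illustrative): "Duration: 00:02:30.00" parses to 150 s and "25 fps"
    # gives end_frame = 3750, so a "frame= 1875" line maps to 50% and fills
    # 10 of the 20 bar segments.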
def main():
test_error = "This is a test"
ErrorObject.print_error(
message = test_error,
)
if __name__ == '__main__':
sys.exit(main())
| gpl-3.0 | 3,754,043,802,549,835,000 | 27.267442 | 97 | 0.48334 | false |
migumar2/uiHRDC | uiHRDC/benchmark/report/summaryTables/utils-py/generateSelfIdx.py | 1 | 3853 |
from sys import argv
from mytiming import *
from mycheckfiles import *
##--main --------------------------------------------------------------##
variants=[]
variants.append( ["WCSA" ,"../../../self-indexes/collectResults/wcsa" ,"B-LOG.dat" , "S-LOG.dat" , "N.Wa_swcsa.dat" , "N.Wb_swcsa.dat" , "N.P2_swcsa.dat", "N.P5_swcsa.dat" ,"e.Words20_swcsa.dat" , "e.Words3000_swcsa.dat" ] )
variants.append( ["RLCSA" ,"../../../self-indexes/collectResults/rlcsa" ,"B-LOG.dat" , "S-LOG.dat" , "Wa_rlcsa" , "Wb_rlcsa" , "P2_rlcsa" , "P5_rlcsa" ,"e80_rlcsa" , "e13000_rlcsa" ] )
variants.append( ["SLP" ,"../../../self-indexes/collectResults/slp" ,"B-LOG.dat" , "S-LOG.dat" , "slp.f1_1000" , "slp.f1001_100k" , "slp.2_2" , "slp.5_5" ,"slp.snippets80" , "slp.snippets13000" ] )
variants.append( ["WSLP" ,"../../../self-indexes/collectResults/wslp" ,"B-LOG.dat" , "S-LOG.dat" , "wslp.f1_1000" , "wslp.f1001_100k" , "wslp.2_2" , "wslp.5_5" ,"wslp.snippets80" , "wslp.snippets13000" ] )
variants.append( ["LZ77-Index" ,"../../../self-indexes/collectResults/lz77" ,"B-LOG.dat" , "S-LOG.dat" , "lz77.f1_1000" , "lz77.f1001_100k" , "lz77.2_2" , "lz77.5_5" ,"lz77.snippets80" , "lz77.snippets13000" ] )
variants.append( ["LZEnd-Index" ,"../../../self-indexes/collectResults/lzend" ,"B-LOG.dat" , "S-LOG.dat" , "lzend.f1_1000" , "lzend.f1001_100k" , "lzend.2_2" , "lzend.5_5" ,"lzend.snippets80" , "lzend.snippets13000" ] )
#src=vbytePos[0:10] #src is a COPY of the list (which remains un-modified)
#src=rice[0:8] #src is a COPY of the list (which remains un-modified)
#src=riceB[0:8] #src is a COPY of the list (which remains un-modified)
header= r"""
%%%% STATIC HEADER %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{table}[htbp]
\scriptsize
\centering
\begin{tabular}{|l|r|r|c|c|c|c|c|c|}
\cline{2-9} \multicolumn{1}{r|}{} & \multicolumn{2}{c|}{Overall Time} & \multicolumn{4}{c|}{Locate} &\multicolumn{2}{c|}{Extract} \\
\cline{2-9} \multicolumn{1}{r|}{} & \multicolumn{2}{c|}{ } & \multicolumn{2}{c|}{Words} & \multicolumn{2}{c|}{Phrases} & 80 & 13,000 \\
\cline{2-9} \multicolumn{1}{r|}{} & Building & Querying & {Low freq} & {High freq} & {2-words} & {5-words} & chars & chars \\
\hline
\hline
%%%% CONTENTS GENERATED BY SCRIPT %%%%%%%%%%%%%%%%%%%%%%%%%%"""
footer= r"""%%%% STATIC FOOTER %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\cline{1-3}
\end{tabular}%
\caption{Summary and state of the experiments run on the test machine: self-indexes.}
\label{ap1:self}%
\end{table}%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
if len(argv) !=2:
print "Incorrect syntax! You must provide an output .tex filename"
print " use: python %s <filename.tex>" % argv[0]
exit(0)
filename= argv[1]
deleteFileIfExists(filename)
#print header
addDataToLog(filename, header)
#processes all the techniques in "variants"
for t in variants:
src=t[0:len(t)]
if len(src)==8:
src.append("none") #no extract80-file
src.append("none") #no extract13000-file
strresult= getStrIIResultIdx(src)
#print strresult
addDataToLog(filename, strresult)
str=getElapsedTime("../../../self-indexes/SELF-LOG.dat")
str=str.rjust(13)+" "
overall=r""" \hline
\textbf{OVERALL TIME } & \multicolumn{2}{|c|}{""" + str + r""" } &\multicolumn{4}{|r}{} \\"""
#print overall
addDataToLog(filename, overall)
#print footer
addDataToLog(filename, footer)
| lgpl-2.1 | -2,661,403,691,565,364,000 | 35.695238 | 234 | 0.523748 | false |
USGS-EROS/lcmap-changes | bin/pyccd_inputs.py | 1 | 2416 | #!/usr/bin/env python3
import argparse
import requests
import logging
import sys
__format = '%(asctime)s %(module)-10s::%(funcName)-20s - [%(lineno)-3d]%(message)s'
logging.basicConfig(stream=sys.stdout,
level=logging.INFO,
format=__format,
datefmt='%Y-%m-%d %H:%M:%S')
logger = logging.getLogger(__name__)
def get_chip_specs(host, port):
""" Returns all chip specs from the named host and port for pyccd"""
query = ''.join(['(((red OR blue OR green OR swir1 OR swir2 OR nir) AND sr)', ' ',
'OR (toa AND thermal AND NOT tirs2)', ' ',
'OR (cfmask AND NOT conf))', ' ',
#'AND NOT LANDSAT_8'])
])
chip_specs=''.join(['http://', host, ':', port, '/landsat/chip-specs?q=', query])
logger.debug("chip_specs url: {}".format(chip_specs))
return requests.get(chip_specs).json()
def get_ubids(chip_specs):
""" Return all ubids from supplied chip-specs """
return [ts['ubid'] for ts in chip_specs]
def url_template(ubids, start_date, end_date='{{now}}', host='localhost', port='80'):
""" Returns the inputs url template to be fed into algorithms configuration """
# TODO: gonna have to deal with the context path being different for local vs deployed
# /landsat here, probably / locally
base = ''.join(['http://', host, ':', port,
'/landsat/chips?x={{x}}&y={{y}}',
'&acquired=', start_date, '/', end_date])
ubids = ''.join(['&ubid={}'.format(u) for u in ubids])
return ''.join([base, ubids])
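# Example of the template produced above (the ubid value is illustrative):
# url_template(['LANDSAT_5/TM/sr_band3'], '1982-01-01', '2015-12-31',
#              'localhost', '80')
# -> 'http://localhost:80/landsat/chips?x={{x}}&y={{y}}'
#    '&acquired=1982-01-01/2015-12-31&ubid=LANDSAT_5/TM/sr_band3'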
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--host", action="store", help="host for lcmap-landsat api")
parser.add_argument("--port", action="store", help="port for lcmap-landsat api", default="80")
parser.add_argument("--start", action="store", help="start date for data query YYYY-MM-DD")
parser.add_argument("--end", action="store", help="end date for data query YYYY-MM-DD", default="{{now}}")
args = parser.parse_args()
if len(sys.argv) < 2 or not (args.host and args.start):
parser.print_usage()
sys.exit(1)
else:
print(url_template(sorted(list(set(get_ubids(get_chip_specs(args.host, args.port))))),
args.start, args.end, args.host, args.port))
| unlicense | -243,910,291,986,523,200 | 44.584906 | 114 | 0.57947 | false |
freakboy3742/pyxero | tests/auth.py | 1 | 19520 | import json
import time
import unittest
from datetime import datetime, timedelta
from mock import Mock, patch
from six.moves.urllib.parse import parse_qs, urlparse
from xero.api import Xero
from xero.auth import (
OAuth2Credentials,
PartnerCredentials,
PrivateCredentials,
PublicCredentials,
)
from xero.constants import XERO_OAUTH2_AUTHORIZE_URL
from xero.exceptions import (
XeroAccessDenied,
XeroException,
XeroNotVerified,
XeroTenantIdNotSet,
XeroUnauthorized,
)
class PublicCredentialsTest(unittest.TestCase):
@patch("requests.post")
def test_initial_constructor(self, r_post):
"Initial construction causes a requst to get a request token"
r_post.return_value = Mock(
status_code=200, text="oauth_token=token&oauth_token_secret=token_secret"
)
credentials = PublicCredentials(
consumer_key="key", consumer_secret="secret", scope="payroll.endpoint"
)
# A HTTP request was made
self.assertTrue(r_post.called)
state = credentials.state
# Expiry times should be calculated
self.assertIsNotNone(state.pop("oauth_authorization_expires_at"))
self.assertIsNotNone(state.pop("oauth_expires_at"))
self.assertEqual(
state,
{
"consumer_key": "key",
"consumer_secret": "secret",
"oauth_token": "token",
"oauth_token_secret": "token_secret",
"verified": False,
"scope": "payroll.endpoint",
},
)
@patch("requests.post")
def test_bad_credentials(self, r_post):
"Initial construction with bad credentials raises an exception"
r_post.return_value = Mock(
status_code=401,
text="oauth_problem=consumer_key_unknown&oauth_problem_advice=Consumer%20key%20was%20not%20recognised",
)
with self.assertRaises(XeroUnauthorized):
PublicCredentials(consumer_key="unknown", consumer_secret="unknown")
@patch("requests.post")
def test_unvalidated_constructor(self, r_post):
"Credentials with an unverified request token can be constructed"
credentials = PublicCredentials(
consumer_key="key",
consumer_secret="secret",
oauth_token="token",
oauth_token_secret="token_secret",
)
self.assertEqual(
credentials.state,
{
"consumer_key": "key",
"consumer_secret": "secret",
"oauth_token": "token",
"oauth_token_secret": "token_secret",
"verified": False,
},
)
# No HTTP requests were made
self.assertFalse(r_post.called)
@patch("requests.post")
def test_validated_constructor(self, r_post):
"A validated set of credentials can be reconstructed"
credentials = PublicCredentials(
consumer_key="key",
consumer_secret="secret",
oauth_token="validated_token",
oauth_token_secret="validated_token_secret",
verified=True,
)
self.assertEqual(
credentials.state,
{
"consumer_key": "key",
"consumer_secret": "secret",
"oauth_token": "validated_token",
"oauth_token_secret": "validated_token_secret",
"verified": True,
},
)
try:
credentials.oauth
except XeroNotVerified:
self.fail("Credentials should have been verified")
# No HTTP requests were made
self.assertFalse(r_post.called)
@patch("requests.post")
def test_url(self, r_post):
"The request token URL can be obtained"
r_post.return_value = Mock(
status_code=200, text="oauth_token=token&oauth_token_secret=token_secret"
)
credentials = PublicCredentials(consumer_key="key", consumer_secret="secret")
self.assertEqual(
credentials.url, "https://api.xero.com/oauth/Authorize?oauth_token=token"
)
@patch("requests.post")
def test_url_with_scope(self, r_post):
"The request token URL includes the scope parameter"
r_post.return_value = Mock(
status_code=200, text="oauth_token=token&oauth_token_secret=token_secret"
)
credentials = PublicCredentials(
consumer_key="key", consumer_secret="secret", scope="payroll.endpoint"
)
self.assertIn("scope=payroll.endpoint", credentials.url)
@patch("requests.post")
def test_configurable_url(self, r_post):
"Test configurable API url"
r_post.return_value = Mock(
status_code=200, text="oauth_token=token&oauth_token_secret=token_secret"
)
url = "https//api-tls.xero.com"
credentials = PublicCredentials(
consumer_key="key", consumer_secret="secret", api_url=url
)
self.assertEqual(
credentials.url, "{url}/oauth/Authorize?oauth_token=token".format(url=url)
)
@patch("requests.post")
def test_verify(self, r_post):
"Unverfied credentials can be verified"
r_post.return_value = Mock(
status_code=200,
text="oauth_token=verified_token&oauth_token_secret=verified_token_secret",
)
credentials = PublicCredentials(
consumer_key="key",
consumer_secret="secret",
oauth_token="token",
oauth_token_secret="token_secret",
)
credentials.verify("verifier")
# A HTTP request was made
self.assertTrue(r_post.called)
state = credentials.state
# Expiry times should be calculated
self.assertIsNotNone(state.pop("oauth_authorization_expires_at"))
self.assertIsNotNone(state.pop("oauth_expires_at"))
self.assertEqual(
state,
{
"consumer_key": "key",
"consumer_secret": "secret",
"oauth_token": "verified_token",
"oauth_token_secret": "verified_token_secret",
"verified": True,
},
)
try:
credentials.oauth
except XeroNotVerified:
self.fail("Credentials should have been verified")
@patch("requests.post")
def test_verify_failure(self, r_post):
"If verification credentials are bad, an error is raised"
r_post.return_value = Mock(
status_code=401,
text="oauth_problem=bad_verifier&oauth_problem_advice=The consumer was denied access to this resource.",
)
credentials = PublicCredentials(
consumer_key="key",
consumer_secret="secret",
oauth_token="token",
oauth_token_secret="token_secret",
)
with self.assertRaises(XeroUnauthorized):
credentials.verify("badverifier")
with self.assertRaises(XeroNotVerified):
credentials.oauth
def test_expired(self):
"Expired credentials are correctly detected"
now = datetime(2014, 1, 1, 12, 0, 0)
soon = now + timedelta(minutes=30)
credentials = PublicCredentials(
consumer_key="key",
consumer_secret="secret",
oauth_token="token",
oauth_token_secret="token_secret",
)
# At this point, oauth_expires_at isn't set
with self.assertRaises(XeroException):
credentials.expired(now)
# Not yet expired
credentials.oauth_expires_at = soon
self.assertFalse(credentials.expired(now=now))
# Expired
self.assertTrue(credentials.expired(now=soon))
class PartnerCredentialsTest(unittest.TestCase):
@patch("requests.post")
def test_initial_constructor(self, r_post):
"Initial construction causes a request to get a request token"
r_post.return_value = Mock(
status_code=200, text="oauth_token=token&oauth_token_secret=token_secret"
)
credentials = PartnerCredentials(
consumer_key="key",
consumer_secret="secret",
rsa_key="abc",
scope="payroll.endpoint",
)
# A HTTP request was made
self.assertTrue(r_post.called)
state = credentials.state
# Expiry times should be calculated
self.assertIsNotNone(state.pop("oauth_authorization_expires_at"))
self.assertIsNotNone(state.pop("oauth_expires_at"))
self.assertEqual(
state,
{
"consumer_key": "key",
"consumer_secret": "secret",
"oauth_token": "token",
"oauth_token_secret": "token_secret",
"verified": False,
"scope": "payroll.endpoint",
},
)
@patch("requests.post")
def test_refresh(self, r_post):
"Refresh function gets a new token"
r_post.return_value = Mock(
status_code=200,
text="oauth_token=token2&oauth_token_secret=token_secret2&oauth_session_handle=session",
)
credentials = PartnerCredentials(
consumer_key="key",
consumer_secret="secret",
rsa_key="key",
oauth_token="token",
oauth_token_secret="token_secret",
verified=True,
)
credentials.refresh()
# Expiry times should be calculated
state = credentials.state
self.assertIsNotNone(state.pop("oauth_authorization_expires_at"))
self.assertIsNotNone(state.pop("oauth_expires_at"))
self.assertEqual(
state,
{
"consumer_key": "key",
"consumer_secret": "secret",
"oauth_token": "token2",
"oauth_token_secret": "token_secret2",
"oauth_session_handle": "session",
"verified": True,
},
)
@patch("requests.post")
def test_configurable_url(self, r_post):
"Test configurable API url"
r_post.return_value = Mock(
status_code=200,
text="oauth_token=token&oauth_token_secret=token_secret&oauth_session_handle=session",
)
url = "https//api-tls.xero.com"
credentials = PartnerCredentials(
consumer_key="key",
consumer_secret="secret",
rsa_key="key",
oauth_token="token",
oauth_token_secret="token_secret",
verified=True,
api_url=url,
)
credentials.refresh()
self.assertEqual(
credentials.url, "{url}/oauth/Authorize?oauth_token=token".format(url=url)
)
class PrivateCredentialsTest(unittest.TestCase):
def test_default_url(self):
"Test default API url"
credentials = PrivateCredentials(consumer_key="key", rsa_key="rsa_key")
self.assertEqual(credentials.base_url, "https://api.xero.com")
def test_configurable_url(self):
"Test configurable API url"
url = "https//api-tls.xero.com"
credentials = PrivateCredentials(
consumer_key="key", rsa_key="rsa_key", api_url=url
)
self.assertEqual(credentials.base_url, url)
class OAuth2CredentialsTest(unittest.TestCase):
callback_uri = "https://myapp.example.com/xero/auth/callback/"
def setUp(self):
super(OAuth2CredentialsTest, self).setUp()
# Create an expired token to be used by tests
self.expired_token = {
"access_token": "1234567890",
"expires_in": 1800,
"token_type": "Bearer",
"refresh_token": "0987654321",
# 'expires_at': datetime.utcnow().timestamp()}
"expires_at": time.time(),
}
def test_authorisation_url_and_random_state(self):
credentials = OAuth2Credentials(
"client_id", "client_secret", callback_uri=self.callback_uri
)
url = credentials.generate_url()
self.assertTrue(url.startswith(XERO_OAUTH2_AUTHORIZE_URL))
qs = parse_qs(urlparse(url).query)
# Test that the credentials object can be dumped by state
cred_state = credentials.state
# Then test that the relevant attributes are in the querystring
self.assertEqual(qs["client_id"][0], cred_state["client_id"])
self.assertEqual(qs["redirect_uri"][0], cred_state["callback_uri"])
self.assertEqual(qs["response_type"][0], "code")
self.assertEqual(qs["scope"][0], " ".join(cred_state["scope"]))
self.assertEqual(qs["state"][0], cred_state["auth_state"])
def test_authorisation_url_using_initial_state(self):
credentials = OAuth2Credentials(
"client_id",
"client_secret",
callback_uri=self.callback_uri,
auth_state="test_state",
)
url = urlparse(credentials.generate_url())
self.assertEqual(credentials.auth_state, "test_state")
qs = parse_qs(url.query)
self.assertEqual(qs["state"][0], "test_state")
@patch("requests_oauthlib.OAuth2Session.request")
def test_verification_using_bad_auth_uri(self, r_request):
credentials = OAuth2Credentials(
"client_id", "client_secret", auth_state="test_state"
)
bad_auth_uri = "{}?error=access_denied&state={}".format(
self.callback_uri, credentials.auth_state
)
with self.assertRaises(XeroAccessDenied):
credentials.verify(bad_auth_uri)
with self.assertRaises(XeroAccessDenied):
OAuth2Credentials(
"client_id",
"client_secret",
auth_state="test_state",
auth_secret=bad_auth_uri,
)
self.assertFalse(r_request.called)
@patch("requests_oauthlib.OAuth2Session.request")
def test_verification_success(self, r_request):
credentials = OAuth2Credentials(
"client_id", "client_secret", auth_state="test_state"
)
auth_uri = "{}?code=0123456789&scope={}&state={}".format(
self.callback_uri, "%20".join(credentials.scope), credentials.auth_state
)
r_request.return_value = Mock(
status_code=200,
request=Mock(headers={}, body=""),
headers={},
text='{"access_token":"1234567890","expires_in":1800,'
'"token_type":"Bearer","refresh_token":"0987654321"}',
)
credentials.verify(auth_uri)
self.assertTrue(r_request.called)
self.assertTrue(credentials.token)
self.assertTrue(credentials.oauth)
self.assertFalse(credentials.expired())
# Finally test the state
self.assertEqual(
credentials.state,
{
"client_id": credentials.client_id,
"client_secret": credentials.client_secret,
"auth_state": credentials.auth_state,
"scope": credentials.scope,
"user_agent": credentials.user_agent,
"token": credentials.token,
},
)
@patch("requests_oauthlib.OAuth2Session.request")
def test_verification_failure(self, r_request):
credentials = OAuth2Credentials(
"client_id", "client_secret", auth_state="test_state"
)
auth_uri = "{}?code=0123456789&scope={}&state={}".format(
self.callback_uri, "%20".join(credentials.scope), credentials.auth_state
)
r_request.return_value = Mock(
status_code=400,
request=Mock(headers={}, body=""),
headers={},
text='{"error":"invalid_grant"}',
)
with self.assertRaises(XeroAccessDenied):
credentials.verify(auth_uri)
with self.assertRaises(XeroAccessDenied):
OAuth2Credentials(
"client_id",
"client_secret",
auth_state="test_state",
auth_secret=auth_uri,
)
@patch("requests_oauthlib.OAuth2Session.post")
def test_token_refresh(self, r_post):
credentials = OAuth2Credentials(
"client_id", "client_secret", token=self.expired_token
)
self.assertTrue(credentials.oauth)
self.assertTrue(credentials.expired())
r_post.return_value = Mock(
status_code=200,
headers={},
text='{"access_token":"5555555555","expires_in":1800,'
'"token_type":"Bearer","refresh_token":"44444444444"}',
)
credentials.refresh()
self.assertTrue(r_post.called)
self.assertFalse(credentials.expired())
# Test that the headers were set correctly
auth = r_post.call_args[1]["auth"]
self.assertEqual(auth.username, "client_id")
self.assertEqual(auth.password, "client_secret")
@patch("requests.get")
def test_get_tenants(self, r_get):
credentials = OAuth2Credentials(
"client_id", "client_secret", token=self.expired_token
)
content = '[{"id":"1","tenantId":"12345","tenantType":"ORGANISATION"}]'
def json_fct():
return json.loads(content)
r_get.return_value = Mock(status_code=200, json=json_fct)
tenants = credentials.get_tenants()
self.assertTrue(r_get.called)
self.assertEqual(
tenants, [{"id": "1", "tenantId": "12345", "tenantType": "ORGANISATION"}]
)
tenants = credentials.get_tenants(auth_event_id="b71db552-68ff-4d80-a824-7544e5ccad28")
self.assertEqual(r_get.mock_calls[-1].args[0].split('?authEventId=')[1], "b71db552-68ff-4d80-a824-7544e5ccad28")
@patch("xero.auth.OAuth2Credentials.get_tenants")
def test_set_default_tenant(self, get_tenants):
get_tenants.return_value = [
{"id": "1", "tenantId": "12345", "tenantType": "ORGANISATION"}
]
credentials = OAuth2Credentials(
"client_id", "client_secret", token=self.expired_token
)
credentials.set_default_tenant()
self.assertEqual(credentials.tenant_id, "12345")
@patch("requests.get")
def test_tenant_is_used_in_xero_request(self, r_get):
credentials = OAuth2Credentials(
"client_id", "client_secret", token=self.expired_token, tenant_id="12345"
)
xero = Xero(credentials)
# Just return any old response
r_get.return_value = None
try:
xero.contacts.all()
except: # NOQA: E722
pass
self.assertEqual(r_get.call_args[1]["headers"]["Xero-tenant-id"], "12345")
def test_tenant_id_not_set_raises_error(self):
credentials = OAuth2Credentials(
"client_id", "client_secret", token=self.expired_token
)
xero = Xero(credentials)
with self.assertRaises(XeroTenantIdNotSet):
xero.contacts.all()
@patch.object(OAuth2Credentials, "get_tenants", Mock(return_value=[]))
def test_set_default_tenant_raises_exception(self):
credentials = OAuth2Credentials(
"client_id", "client_secret", token=self.expired_token
)
with self.assertRaises(XeroException):
credentials.set_default_tenant()
| bsd-3-clause | -1,077,953,491,466,018,800 | 33.125874 | 120 | 0.58335 | false |
crichardson17/starburst_atlas | HighResSims/Baseline_DustFree_Hires_cut17/Baseline_dustfree_plotter.py | 1 | 12507 | ############################################################
############# Plotting File for Contour Plots ##############
################## Data read from Cloudy ###################
################ Helen Meskhidze, Fall 2015 ################
#################### Elon University #######################
#------------------------------------------------------------------------------------------------------
'''
The inputs this code takes are .grd and .txt files from Cloudy.
It can take in as many input files as you have (in case you ran a grid and haven't concatenated all the files); just change the numFiles value below.
This code outputs a set of contour plots, saved to the working directory
'''
#------------------------------------------------------------------------------------------------------
#Packages importing
import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
import time
# ------------------------------------------------------------------------------------------------------
# keep track of how long the code takes to run
t0 = time.clock()
headerloc = "/Users/helen/Documents/Thesis_Research/github_repo/starburst_atlas/headers_dir/headers.txt"
# ------------------------------------------------------------------------------------------------------
#data files' names from source directory constructed here. default source directory is working directory
numFiles = 6 #change this if you have more/less files
gridFiles = [None]*numFiles
emissionFiles = [None]*numFiles
for i in range(numFiles):
for file in os.listdir('.'):
if file.endswith("{:d}.grd".format(i+1)):
gridFiles[i] = file
#keep track of all the files you'll be importing by printing
#print file
if file.endswith("{:d}.txt".format(i+1)):
emissionFiles[i] = file
#keep track of all the files you'll be importing by printing
#print file
print("Files names constructed")
# ------------------------------------------------------------------------------------------------------
#Patches data
#this section adds the rectangles on the plots of the three other studies
#for the Kewley and Levesque data
verts = [
(1., 7.97712125471966000000), # left, bottom
(1., 9.57712125471966000000), # left, top
(2., 10.57712125471970000000), # right, top
(2., 8.97712125471966000000), # right, bottom
(0., 0.)] # ignored
codes = [Path.MOVETO,Path.LINETO,Path.LINETO,Path.LINETO,Path.CLOSEPOLY]
path = Path(verts, codes)
#for the Kewley 01 data
verts2 = [
(2.4, 9.243038049), # left, bottom
(2.4, 11.0211893), # left, top
(2.6, 11.0211893), # right, top
(2.6, 9.243038049), # right, bottom
(0, 0.)] # ignored
path = Path(verts, codes)
path2 = Path(verts2, codes)
#for the Moy et al data
verts3 = [
(1., 6.86712125471966000000), # left, bottom
(1., 10.18712125471970000000), # left, top
(3., 12.18712125471970000000), # right, top
(3., 8.86712125471966000000), # right, bottom
(0., 0.)] # ignored
path = Path(verts, codes)
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the patches routine: to add patches for others peoples' data onto our plots.
#Adds patches to the first subplot
def add_patches(ax):
patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
patch2 = patches.PathPatch(path2, facecolor='blue', lw=0)
patch = patches.PathPatch(path, facecolor='grey', lw=0)
ax1.add_patch(patch3)
ax1.add_patch(patch2)
ax1.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine
plt.figure(figsize=(13,10))
def add_sub_plot(sub_num, elinesplot):
numplots = 16
plt.subplot(numplots/4.,4,sub_num) #row, column
#choose which z array, then which subplot
z_subnum = z_total[elinesplot]
z_line = z_subnum[:,:,sub_num-1]
contour1 = plt.contour(x_axis, y_axis, z_line, levels, colors='k', origin='lower', extent=extent) #teal contours, dashed
contourmap = plt.imshow(z_line, cmap='Reds', extent= extent, aspect = "auto",origin='lower', vmin=0, vmax =4)
plt.scatter(max_values[line[elinesplot][sub_num-1],2], max_values[line[elinesplot][sub_num-1],3], c ='k',marker = '*')
plt.annotate(headers[line[elinesplot][sub_num-1]], xy=(8,11), xytext=(4,8.5), fontsize = 10)
plt.annotate(max_values[line[elinesplot][sub_num-1],0], xy = (max_values[line[elinesplot][sub_num-1],2], max_values[line[elinesplot][sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
if sub_num == 4:
cb = plt.colorbar(contourmap, pad = 0.05, ticks=np.arange(0,4.5,0.5))
cb.ax.tick_params(labelsize=10)
if sub_num == 8:
cb = plt.colorbar(contourmap, pad = 0.05, ticks=np.arange(0,4.0,0.5))
cb.ax.tick_params(labelsize=10)
if sub_num == 12:
cb = plt.colorbar(contourmap, pad = 0.05, ticks=np.arange(0,4.0,0.5))
cb.ax.tick_params(labelsize=10)
if sub_num == 0:
cb = plt.colorbar(contourmap, pad = 0.05, ticks=np.arange(0,4.0,0.5))
cb.ax.tick_params(labelsize=10)
#axis limits
yt_min = 8 ; yt_max = 17; xt_min = 0; xt_max = 10
plt.ylim(yt_min,yt_max); plt.xlim(xt_min,xt_max)
#ticks
plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
if sub_num == 0:
plt.tick_params(labelbottom = 'on')
plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
plt.xlabel('Log($n _{\mathrm{H}} $)')
if sub_num == 12:
plt.tick_params(labelbottom = 'off')
if sub_num%(numplots/4) == 1:
plt.tick_params(labelleft = 'on')
plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
else:
plt.tick_params(labelleft = 'off')
if sub_num > 12:
plt.tick_params(labelbottom = 'on')
plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
plt.xlabel('Log($n _{\mathrm{H}} $)')
if sub_num == 1:
plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
if sub_num == 13:
plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
if sub_num == 16 :
plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
#to print progress to the terminal
if sub_num == numplots/2:
print("half the sub-plots of plot{:d} are complete".format(elinesplot+1))
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
print("Beginning file import")
for i in range(numFiles):
gridI = [];
with open(gridFiles[i], 'rb') as f:
csvReader = csv.reader(f, delimiter='\t')
for row in csvReader:
gridI.append(row)
gridI = asarray(gridI)
gridI = gridI[1:,6:8]
if ( i == 0 ):
grid = gridI
else :
grid = concatenate((grid,gridI))
for i in range(numFiles):
emissionLineI = [];
with open(emissionFiles[i], 'rb') as f:
csvReader = csv.reader(f, delimiter='\t')
headers = csvReader.next()
for row in csvReader:
emissionLineI.append(row)
emissionLineI = asarray(emissionLineI)
emissionLineI = emissionLineI[:,1:]
if ( i == 0 ):
Emissionlines = emissionLineI
else :
Emissionlines = concatenate((Emissionlines,emissionLineI))
hdens_values = grid[:,1]
phi_values = grid[:,0]
print("Import files complete")
# ---------------------------------------------------
#To fix when hdens > 10
#many of my grids were run off with hdens up to 12 so we needed to cut off part of the data
#first create temporary arrays
print("modifications begun")
hdens_values_2 = empty(shape=[0, 1])
phi_values_2 = empty(shape=[0, 1])
Emissionlines_2 = empty(shape=[0, len(Emissionlines[0,:])])
#save data in range desired to temp arrays
for i in range(len(hdens_values)):
if (float(hdens_values[i]) < 10.100) & (float(phi_values[i]) < 17.100) :
hdens_values_2 = append(hdens_values_2, hdens_values[i])
phi_values_2 = append(phi_values_2, phi_values[i])
Emissionlines_2 = vstack([Emissionlines_2, Emissionlines[i,:]])
#overwrite old arrays
hdens_values = hdens_values_2
phi_values = phi_values_2
Emissionlines = Emissionlines_2
print("modifications complete")
# ---------------------------------------------------
#these are the emission line names, properly formatted
print("Importing headers from header file")
headersFile = open(headerloc,'r')
headers = headersFile.read().splitlines()
headersFile.close()
# ---------------------------------------------------
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(concatenated_data[0]),4))
#select the scaling factor
#for 4860
incidentnum = 58 #reference index of 4860
incidentline = 4860. #wavelength
incident = Emissionlines[:,58]
print("Scaling data")
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(incidentline*(float(Emissionlines[i,j])/float(Emissionlines[i,incidentnum])), 10) > 0:
concatenated_data[i,j] = math.log(incidentline*(float(Emissionlines[i,j])/float(Emissionlines[i,incidentnum])), 10)
else:
            concatenated_data[i,j] = 0
print("Finding peaks")
#find the maxima (having cut the arrays already) to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print("Data arranged")
# ---------------------------------------------------
gridarray = zeros((len(Emissionlines),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
# ---------------------------------------------------
#change desired lines to plot here! indexes of desired lines
line = [
#UV1Lines
[0, 1, 2, 3, 5, 165, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
#977, 991, 1026, 1216, 1218, 1239, 1240, 1243, 1263, 1304, 1308, 1397, 1402, 1406, 1486, 1531
#UV2line
[16, 17, 18, 19, 20, 21, 23, 24, 25, 27, 29, 30,31, 32, 33, 34],
#1549, 1640, 1665, 1671, 1750, 1860, 1888, 1907, 2297, 2321, 2471, 2326, 2335, 2665, 2798
#Optical Lines
[36, 37, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52],
#NE 3 3343A, NE 5 3426, 3646, 3726, 3727, 3729, 3869, 3889, 3933, 4026, 4070, 4074, 4078, 4102, 4340, 4363
#Optical Lines 2
[53, 55, 56, 57, 59, 60, 61, 64, 65, 66, 67, 68, 69, 70, 71, 73],
#NE 4 4720A, AR 4 4740, 4861, O III 4959, O 3 5007, O 1 5577, N 2 5755, HE 1 5876, O 1 6300;
#S 3 6312, O 1 6363, H 1 6563, N 2 6584, S II 6716, S 2 6720, S II 6731
#IR Lines
[75, 76, 77, 78, 79, 80, 81, 82, 84, 83, 85, 86, 87, 88, 89, 90],
#AR 5 7005A, AR 3 7135A, TOTL 7325A, AR 3 7751, 6LEV 8446, CA2X 8498, CA2Y 8542, CA2Z 8662;
#CA 2 8579A, S 3 9069, H 1 9229, S 3 9532... H 1 9546
#More Lines
[97,112, 107, 110, 108, 111, 106, 109, 104, 101, 102, 105, 99, 103, 98, 100],
[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
]
# ---------------------------------------------------
Nx = len(np.where(y == y[0])[0])
Ny = len(np.where(x == x[0])[0])
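#Nx = points sharing the first phi value (i.e. the number of hdens samples),
#Ny = points sharing the first hdens value (the number of phi samples)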
x_axis = x[0:Nx]
y_axis = np.unique(y)
extent = [min(x_axis),max(x_axis),min(y_axis),max(y_axis)]
# ---------------------------------------------------
z_total = [None] * (len(line)-1)
#create z array for this plot
for i in range(len(z_total)):
zi1 = [concatenated_data[:,line[i]]]
zi2 = np.reshape(zi1,(Ny,Nx,16))
z_total[i] = zi2
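#z_total[i] is now a (Ny, Nx, 16) cube: one 2D map per emission line in the
#i-th group of the "line" index list above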
# ---------------------------------------------------
#plotting features (and contour levels)
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
#levels = arange(10**-1,10, .2) #teal levels
levels = arange(10**-2,10**2, 1) #black levels
# ---------------------------------------------------
#loop through desired plots and desired subplots
print("Beginning plotting")
plt.clf()
for j in range (len(z_total)):
for i in range(16):
add_sub_plot(i,j)
ax1 = plt.subplot(4,4,1)
add_patches(ax1)
plt.savefig(("Full_lines_%d.pdf")%j)
print("plot {:d} complete".format(j+1))
plt.clf()
if (time.clock() - t0) > 120:
print((time.clock() - t0)/60., "minutes process time")
else:
print(time.clock() - t0, "seconds process time")
| gpl-2.0 | 782,418,356,838,361,500 | 38.206897 | 240 | 0.595986 | false |
suutari/shoop | shuup/front/checkout/_services.py | 1 | 2897 | # This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
import abc
import six
from shuup.apps.provides import get_provide_objects
from shuup.core.models import ServiceProvider
from ._view_mixin import CheckoutPhaseViewMixin
class ServiceCheckoutPhaseProvider(six.with_metaclass(abc.ABCMeta)):
"""
Interface for providing checkout phase for a service.
Items specified in ``front_service_checkout_phase_provider`` provide
category should implement this interface.
"""
@abc.abstractmethod
def get_checkout_phase(self, checkout_process, service):
"""
Get checkout phase for given service.
If this provider is for another service, then the return value
will be None.
:type checkout_process: shuup.front.checkout.CheckoutProcess
:type service: shuup.core.models.Service
:rtype: shuup.front.checkout.CheckoutPhaseViewMixin|None
"""
pass
class BasicServiceCheckoutPhaseProvider(ServiceCheckoutPhaseProvider):
"""
Helper for implementing basic ServiceCheckoutPhaseProvider.
This helper should be useful for most cases, where one only has to
provide a checkout phase for certain service provider type just by
initializing some predefined class.
"""
phase_class = None # override in subclass
service_provider_class = None # override in subclass
def get_checkout_phase(self, checkout_process, service):
"""
Get checkout phase for given service.
:type checkout_process: shuup.front.checkout.CheckoutProcess
:type service: shuup.core.models.Service
:rtype: shuup.front.checkout.CheckoutPhaseViewMixin|None
"""
assert issubclass(self.phase_class, CheckoutPhaseViewMixin)
assert issubclass(self.service_provider_class, ServiceProvider)
if isinstance(service.provider, self.service_provider_class):
return checkout_process.instantiate_phase_class(
self.phase_class, service=service)
return None
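# Typical usage sketch (the class names below are hypothetical examples, not
# part of Shuup):
#
#     class MyPaymentPhaseProvider(BasicServiceCheckoutPhaseProvider):
#         phase_class = MyPaymentCheckoutPhase
#         service_provider_class = MyPaymentProcessor
#
# Such a subclass is then registered under the
# "front_service_checkout_phase_provider" provide category so that
# get_checkout_phases_for_service() below can discover it.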
def get_checkout_phases_for_service(checkout_process, service):
"""
Get checkout phases for given service.
:type checkout_process: shuup.front.checkout.CheckoutProcess
:type service: shuup.core.models.Service
:rtype: Iterable[shuup.front.checkout.CheckoutPhaseViewMixin]
"""
classes = get_provide_objects("front_service_checkout_phase_provider")
for provider_cls in classes:
provider = provider_cls()
assert isinstance(provider, ServiceCheckoutPhaseProvider)
phase = provider.get_checkout_phase(checkout_process, service)
if phase:
assert isinstance(phase, CheckoutPhaseViewMixin)
yield phase
| agpl-3.0 | 6,591,927,049,728,485,000 | 33.903614 | 74 | 0.714187 | false |
pmfournier/netqual | bin/net.py | 1 | 6291 | import subprocess
import tempfile
import time
import sys
import os
def have_interface(ifname):
try:
bash_cmd("ip addr show dev \"{0}\"".format(ifname))
except:
return False
return True
def shell_cmd(arr, ignore_fail=False):
if subprocess.call(arr) != 0 and ignore_fail == False:
raise RuntimeError("Command failed")
def bash_cmd(s):
ret = subprocess.call(s, shell=True)
if ret != 0:
raise RuntimeError("Command failed with code {0}".format(ret))
def enable_ipv4_routing():
open("/proc/sys/net/ipv4/ip_forward", "w").write("1")
def write_new_tempfile(content):
f = tempfile.NamedTemporaryFile(delete=False)
f.write(content)
f.flush()
name = f.name
return name
def setup_wifi_wpa2_psk(iface, ssid, psk):
wpa_file="""network={{
key_mgmt=WPA-PSK
proto=WPA2
ssid="{ssid}"
psk="{psk}"
}}
""".format(ssid=ssid, psk=psk)
filename = write_new_tempfile(wpa_file)
shell_cmd(["wpa_supplicant", "-B", "-Dwext", "-i{0}".format(iface), "-c{0}".format(filename)])
def start_ntp():
shell_cmd(["ntpdate", "tick.utoronto.ca"])
def wait_for_connectivity():
while True:
try:
bash_cmd("curl www.google.com")
except:
time.sleep(1)
continue
return
# System structure
WAN_IF="ethwan"
PASSTHROUGH_IF="ethpass"
LANMON_IF="ethlanmon"
WLANMON_IF="ethwlanmon"
OOB_IF="usb0"
def clear_net():
# First disable all interfaces to avoid security breaches
# as we bring down the firewall
shell_cmd(["ifconfig", WAN_IF, "down"], ignore_fail=True)
shell_cmd(["ifconfig", PASSTHROUGH_IF, "down"], ignore_fail=True)
shell_cmd(["ifconfig", WLANMON_IF, "down"], ignore_fail=True)
shell_cmd(["ifconfig", LANMON_IF, "down"], ignore_fail=True)
shell_cmd(["iptables", "-P", "INPUT", "ACCEPT"])
shell_cmd(["iptables", "-P", "OUTPUT", "ACCEPT"])
shell_cmd(["iptables", "-F", "INPUT"])
shell_cmd(["iptables", "-F", "OUTPUT"])
shell_cmd(["iptables", "-F", "FORWARD"])
shell_cmd(["iptables", "-t", "nat", "-F", "POSTROUTING"])
bash_cmd("killall dhclient || true")
bash_cmd("killall wpa_supplicant || true")
bash_cmd('pkill -f "^udhcpd -S /tmp" || true')
def udhcpd(iface, lower, upper, dns, router):
config_file="""start {lower}
end {upper}
interface {iface}
option subnet 255.255.255.0
option dns {dns}
option router {router}
""".format(lower=lower, upper=upper, iface=iface, dns=dns, router=router)
f = tempfile.NamedTemporaryFile(delete=False)
f.write(config_file)
f.flush()
name = f.name
shell_cmd(["udhcpd", "-S", name])
def main():
# Obtain config directory as first argument
if len(sys.argv) < 2:
raise RuntimeError("Missing config directory on the command line")
config_dir = sys.argv[1]
wlan_config_file = os.path.join(config_dir, "wlan.conf")
wlan_config = open(wlan_config_file)
wlan_config_text = wlan_config.readlines()[0].rstrip()
wlan_fields = wlan_config_text.split(",")
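    # wlan.conf is expected to hold a single comma-separated line:
    # <interface>,<ssid>,<psk> (unpacked further down before wpa_supplicant is started)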
bash_cmd("echo none >/sys/class/leds/beaglebone\:green\:heartbeat/trigger")
bash_cmd("echo 1 >/sys/class/leds/beaglebone\:green\:heartbeat/brightness")
clear_net()
# Setup firewall
shell_cmd(["iptables", "-P", "INPUT", "DROP"])
shell_cmd(["iptables", "-P", "OUTPUT", "DROP"])
shell_cmd(["iptables", "-P", "FORWARD", "DROP"])
# Just allow everything for lo
shell_cmd(["iptables", "-A", "INPUT", "-i", "lo", "-j", "ACCEPT"])
shell_cmd(["iptables", "-A", "OUTPUT", "-o", "lo", "-j", "ACCEPT"])
# Allow anything on oob interface
shell_cmd(["iptables", "-A", "INPUT", "-i", OOB_IF, "-j", "ACCEPT"])
shell_cmd(["iptables", "-A", "OUTPUT", "-o", OOB_IF, "-j", "ACCEPT"])
# Allow all traffic on wan interface
shell_cmd(["iptables", "-A", "OUTPUT", "-o", WAN_IF, "-j", "ACCEPT"])
shell_cmd(["iptables", "-A", "INPUT", "-i", WAN_IF, "-m", "state", "--state", "ESTABLISHED,RELATED", "-j", "ACCEPT"])
# Allow all traffic on passthrough interface
shell_cmd(["iptables", "-A", "OUTPUT", "-o", PASSTHROUGH_IF, "-j", "ACCEPT"])
shell_cmd(["iptables", "-A", "INPUT", "-i", PASSTHROUGH_IF, "-m", "state", "--state", "ESTABLISHED,RELATED", "-j", "ACCEPT"])
shell_cmd(["iptables", "-A", "INPUT", "-i", PASSTHROUGH_IF, "-p", "udp", "--dport", "bootps", "-j", "ACCEPT"])
shell_cmd(["iptables", "-A", "INPUT", "-i", PASSTHROUGH_IF, "-p", "tcp", "--dport", "ssh", "-j", "ACCEPT"])
shell_cmd(["iptables", "-t", "nat", "-A", "POSTROUTING", "-o", WAN_IF, "-j", "MASQUERADE"])
shell_cmd(["iptables", "-A", "FORWARD", "-i", PASSTHROUGH_IF, "-o", WAN_IF, "-j", "ACCEPT"])
shell_cmd(["iptables", "-A", "FORWARD", "-i", WAN_IF, "-o", PASSTHROUGH_IF, "-m", "state", "--state", "RELATED,ESTABLISHED", "-j", "ACCEPT"])
# lanmon is just for our outbound traffic
shell_cmd(["iptables", "-A", "OUTPUT", "-o", LANMON_IF, "-j", "ACCEPT"])
shell_cmd(["iptables", "-A", "INPUT", "-i", LANMON_IF, "-m", "state", "--state", "ESTABLISHED,RELATED", "-j", "ACCEPT"])
shell_cmd(["iptables", "-A", "INPUT", "-i", LANMON_IF, "-p", "tcp", "--dport", "ssh", "-j", "ACCEPT"])
# wlanmon is just for our outbound traffic
shell_cmd(["iptables", "-A", "OUTPUT", "-o", WLANMON_IF, "-j", "ACCEPT"])
shell_cmd(["iptables", "-A", "INPUT", "-i", WLANMON_IF, "-m", "state", "--state", "ESTABLISHED,RELATED", "-j", "ACCEPT"])
# DMZ
#shell_cmd(["iptables", "-t", "nat", "-A", "PREROUTING", "-i", "ethwan", "-j", "DNAT", "--to-destination", "10.254.0.2"])
enable_ipv4_routing()
shell_cmd(["dhclient", "-nw", WAN_IF])
if have_interface(PASSTHROUGH_IF):
shell_cmd([
"ifconfig",
PASSTHROUGH_IF,
"up",
"10.254.0.1",
"netmask",
"255.255.255.0"])
# FIXME: use dns from dhcp
udhcpd(
PASSTHROUGH_IF,
"10.254.0.2",
"10.254.0.2",
"8.8.8.8",
"10.254.0.1")
if have_interface(WLANMON_IF):
wlan_iface = wlan_fields[0]
wlan_ssid = wlan_fields[1]
wlan_psk = wlan_fields[2]
setup_wifi_wpa2_psk(wlan_iface, wlan_ssid, wlan_psk)
# FIXME: should wait for association instead
# The idea here is that setting the default route will fail if we are
# not associated to the ap by the time we invoke dhclient.
time.sleep(5)
shell_cmd([
"dhclient",
"-nw",
"-e", "IF_METRIC=50",
WLANMON_IF])
if have_interface(LANMON_IF):
shell_cmd([
"dhclient",
"-nw",
"-e", "IF_METRIC=60",
LANMON_IF])
wait_for_connectivity()
start_ntp()
bash_cmd("echo 0 >/sys/class/leds/beaglebone\:green\:heartbeat/brightness")
main()
| gpl-2.0 | -2,804,229,674,267,572,700 | 29.100478 | 142 | 0.626292 | false |
saullocastro/pyNastran | pyNastran/converters/dev/obj/obj_reader.py | 1 | 3015 | from __future__ import print_function
from numpy import array, unique, hstack, zeros
class OBJ(object):
def __init__(self):
pass
def read_obj(self, obj_filename):
"""
v -0.0817245 0.000635 0.00421862
v -0.0817245 0.000580371 0.00421862
v -0.0817245 -0.000635 0.00421862
l 1 2
l 2 3
"""
nodes = []
lines = []
#faces = []
with open(obj_filename, 'r') as obj_file:
            for line in obj_file.readlines():
sline = line.strip().split()
#print(sline)
Type = sline[0]
if Type == 'v': # vertex
nodes.append(sline[1:])
elif Type == 'l': # line
lines.append(sline[1:])
#elif Type == 'vt': # texture coordinate
#lines.append(sline[1:])
#elif Type == 'vn': # normal vector (not unit vector)
#lines.append(sline[1:])
#elif Type == 'vp': # parameter space vertex
#lines.append(sline[1:])
else:
raise NotImplementedError(sline)
self.nodes = array(nodes, dtype='float64')
# make it 0-based instead of 1 based
self.lines = array(lines, dtype='int32') - 1
self.make_elements()
def make_elements(self):
#print(self.nodes.shape)
unodes, indicies = unique_rows(self.nodes, return_inverse=True)
#print(unodes)
#print(list(indicies))
#print(unodes.shape)
#print(indicies.shape)
n1 = self.lines[:, 0]
n2 = self.lines[:, 1]
i1 = indicies[n1]
i2 = indicies[n2]
nrows = len(i1)
#self.lines = hstack([i1, i2], dtype='int32')
self.lines = hstack([i1, i2])
lines2 = zeros((nrows, 2), dtype='int32')
lines2[:, 0] = i1
lines2[:, 1] = i2
self.lines = lines2
#print(self.lines.shape)
self.nodes = unodes
def write_obj(self, obj_filename):
float_fmt = '8.6f'
int_fmt = 'i'
node_fmt = 'v %%%s %%%s %%%s\n' % (float_fmt, float_fmt, float_fmt)
line_fmt = 'l %%%s %%%s\n' % (int_fmt, int_fmt)
#print(node_fmt)
with open(obj_filename, 'wb') as obj_file:
for node in self.nodes:
obj_file.write(node_fmt % tuple(node))
for line in self.lines + 1:
obj_file.write(line_fmt % tuple(line))
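# unique_rows views each row as one structured element so numpy's unique() can
# deduplicate whole rows; the inverse indices are used above (make_elements) to
# renumber the line connectivity onto the deduplicated nodes.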
def unique_rows(data, return_inverse=False):
ncols = data.shape[1]
dtype = data.dtype.descr * ncols
struct = data.view(dtype)
    if return_inverse:
        uniq, indicies = unique(struct, return_inverse=True)
    else:
        uniq = unique(struct)
        indicies = None
uniq = uniq.view(data.dtype).reshape(-1, ncols)
return uniq, indicies
def main(): # pragma: no cover
obj_filename = '6.5e-06_edges.txt'
obj = OBJ()
obj.read_obj(obj_filename)
obj.write_obj('b.txt')
if __name__ == '__main__': # pragma: no cover
main()
| lgpl-3.0 | -4,943,026,172,035,429,000 | 30.082474 | 76 | 0.511443 | false |
pombredanne/invenio | modules/bibmerge/lib/bibmerge_engine.py | 1 | 17312 | ## This file is part of Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
# pylint: disable=C0103
"""Invenio BibMerge Engine."""
import os
from invenio.bibmerge_merger import merge_field_group, replace_field, \
add_field, delete_field, merge_field, \
add_subfield, replace_subfield, \
delete_subfield, copy_R2_to_R1, merge_record
from invenio.search_engine import print_record, perform_request_search, \
get_fieldvalues
from invenio.bibedit_utils import cache_exists, cache_expired, \
create_cache_file, delete_cache_file, get_cache_file_contents, \
get_cache_mtime, latest_record_revision, record_locked_by_other_user, \
record_locked_by_queue, save_xml_record, touch_cache_file, \
update_cache_file_contents, _get_file_path, \
get_record_revision_ids, revision_format_valid_p, split_revid, \
get_marcxml_of_revision_id
from invenio.htmlutils import remove_html_markup
from invenio.search_engine import record_exists
from invenio.bibrecord import create_record, record_xml_output, record_add_field
from invenio.bibedit_config import CFG_BIBEDIT_TO_MERGE_SUFFIX
import invenio.template
bibmerge_templates = invenio.template.load('bibmerge')
def perform_request_init():
"""Handle the initial request.
"""
errors = []
warnings = []
body = ''
# Build page structure and control panel.
body += bibmerge_templates.controlpanel()
body += """
<div id="bibMergeContent">
</div>"""
return body, errors, warnings
def perform_request_ajax(req, uid, data):
"""Ajax request dispatcher.\
"""
requestType = data['requestType']
if requestType in ('getRecordCompare', 'submit', 'cancel', 'recCopy', \
'recMerge', 'recMergeNC'):
return perform_request_record(requestType, uid, data)
elif requestType in ('getFieldGroup', 'getFieldGroupDiff', \
'mergeFieldGroup', 'mergeNCFieldGroup', 'replaceField', 'addField', \
'deleteField', 'mergeField'):
return perform_request_update_record(requestType, uid, data)
elif requestType in ('deleteSubfield', 'addSubfield', 'replaceSubfield', \
'diffSubfield'):
return perform_small_request_update_record(requestType, uid, data)
elif requestType == "searchCandidates" or requestType == "searchRevisions":
return perform_candidate_record_search(requestType, data)
else:
return { 'resultCode': 1, 'resultText': 'Error unknown' }
def perform_candidate_record_search(requestType, data):
"""Handle search requests.
"""
max_results = 999
too_many = False
result = {
'resultCode': 0,
'resultText': ''
}
if requestType == "searchCandidates":
recids = perform_request_search( p=data['query'] )
if len(recids) > max_results:
too_many = True
else:
captions = [ search_result_info(x) for x in recids ]
alternative_titles = [ remove_html_markup(print_record(x, "hs")) for x in recids ]
search_results = [recids, captions, alternative_titles]
elif requestType == "searchRevisions":
revisions = get_record_revision_ids( data['recID1'] )
captions = [ split_revid(x, 'datetext')[1] for x in revisions ]
search_results = [revisions, captions]
if too_many == True:
result['resultCode'] = 1
result['resultText'] = 'Too many results'
else:
result['results'] = search_results
result['resultText'] = '%s results' % len(search_results[0])
return result
def search_result_info(recid):
"""Return report number of a record or if it doen't exist return the recid
itself.
"""
report_numbers = get_fieldvalues(recid, '037__a')
if len(report_numbers) == 0:
return "#"+str(recid)
else:
return report_numbers[0]
def perform_request_record(requestType, uid, data):
"""Handle 'major' record related requests.
Handle retrieving, submitting or cancelling the merging session.
"""
#TODO add checks before submission and cancel, replace get_bibrecord call
result = {
'resultCode': 0,
'resultText': ''
}
recid1 = data["recID1"]
record1 = _get_record(recid1, uid, result)
if result['resultCode'] != 0: #if record not accessible return error information
return result
if requestType == 'submit':
if data.has_key('duplicate'):
recid2 = data['duplicate']
record2 = _get_record_slave(recid2, result, 'recid', uid)
if result['resultCode'] != 0: #return in case of error
return result
# mark record2 as deleted
record_add_field(record2, '980', ' ', ' ', '', [('c', 'DELETED')])
# mark record2 as duplicate of record1
record_add_field(record2, '970', ' ', ' ', '', [('d', str(recid1))])
#submit record2
xml_record = record_xml_output(record2)
save_xml_record(recid2, uid, xml_record)
#submit record1
save_xml_record(recid1, uid)
result['resultText'] = 'Record submitted'
return result
elif requestType == 'cancel':
delete_cache_file(recid1, uid)
result['resultText'] = 'Cancelled'
return result
recid2 = data["recID2"]
mode = data['record2Mode']
record2 = _get_record_slave(recid2, result, mode, uid)
if result['resultCode'] != 0: #if record not accessible return error information
return result
if requestType == 'getRecordCompare':
result['resultHtml'] = bibmerge_templates.BM_html_all_diff(record1, record2)
result['resultText'] = 'Records compared'
elif requestType == 'recCopy':
copy_R2_to_R1(record1, record2)
result['resultHtml'] = bibmerge_templates.BM_html_all_diff(record1, record2)
result['resultText'] = 'Record copied'
elif requestType == 'recMerge':
merge_record(record1, record2, merge_conflicting_fields=True)
result['resultHtml'] = bibmerge_templates.BM_html_all_diff(record1, record2)
result['resultText'] = 'Records merged'
elif requestType == 'recMergeNC':
merge_record(record1, record2, merge_conflicting_fields=False)
result['resultHtml'] = bibmerge_templates.BM_html_all_diff(record1, record2)
result['resultText'] = 'Records merged'
else:
result['resultCode'], result['resultText'] = 1, 'Wrong request type'
return result
def perform_request_update_record(requestType, uid, data):
"""Handle record update requests for actions on a field level.
Handle merging, adding, or replacing of fields.
"""
result = {
'resultCode': 0,
'resultText': ''
}
recid1 = data["recID1"]
recid2 = data["recID2"]
record_content = get_cache_file_contents(recid1, uid)
cache_dirty = record_content[0]
rec_revision = record_content[1]
record1 = record_content[2]
pending_changes = record_content[3]
disabled_hp_changes = record_content[4]
# We will not be able to Undo/Redo correctly after any modifications
# from the level of bibmerge are performed ! We clear all the undo/redo
# lists
undo_list = []
redo_list = []
mode = data['record2Mode']
record2 = _get_record_slave(recid2, result, mode, uid)
if result['resultCode'] != 0: #if record not accessible return error information
return result
if requestType == 'getFieldGroup':
result['resultHtml'] = bibmerge_templates.BM_html_field_group(record1, record2, data['fieldTag'])
result['resultText'] = 'Field group retrieved'
return result
elif requestType == 'getFieldGroupDiff':
result['resultHtml'] = bibmerge_templates.BM_html_field_group(record1, record2, data['fieldTag'], True)
result['resultText'] = 'Fields compared'
return result
elif requestType == 'mergeFieldGroup' or requestType == 'mergeNCFieldGroup':
fnum, ind1, ind2 = _fieldtagNum_and_indicators(data['fieldTag'])
if requestType == 'mergeNCFieldGroup':
merge_field_group(record1, record2, fnum, ind1, ind2, False)
else:
merge_field_group(record1, record2, fnum, ind1, ind2, True)
resultText = 'Field group merged'
elif requestType == 'replaceField' or requestType == 'addField':
fnum, ind1, ind2 = _fieldtagNum_and_indicators(data['fieldTag'])
findex1 = _field_info( data['fieldCode1'] )[1]
findex2 = _field_info( data['fieldCode2'] )[1]
if findex2 == None:
result['resultCode'], result['resultText'] = 1, 'No value in the selected field'
return result
if requestType == 'replaceField':
replace_field(record1, record2, fnum, findex1, findex2)
resultText = 'Field replaced'
else: # requestType == 'addField'
add_field(record1, record2, fnum, findex1, findex2)
resultText = 'Field added'
elif requestType == 'deleteField':
fnum, ind1, ind2 = _fieldtagNum_and_indicators(data['fieldTag'])
findex1 = _field_info( data['fieldCode1'] )[1]
if findex1 == None:
result['resultCode'], result['resultText'] = 1, 'No value in the selected field'
return result
delete_field(record1, fnum, findex1)
resultText = 'Field deleted'
elif requestType == 'mergeField':
fnum, ind1, ind2 = _fieldtagNum_and_indicators(data['fieldTag'])
findex1 = _field_info( data['fieldCode1'] )[1]
findex2 = _field_info( data['fieldCode2'] )[1]
if findex2 == None:
result['resultCode'], result['resultText'] = 1, 'No value in the selected field'
return result
merge_field(record1, record2, fnum, findex1, findex2)
resultText = 'Field merged'
else:
result['resultCode'], result['resultText'] = 1, 'Wrong request type'
return result
result['resultHtml'] = bibmerge_templates.BM_html_field_group(record1, record2, data['fieldTag'])
result['resultText'] = resultText
update_cache_file_contents(recid1, uid, rec_revision, record1, pending_changes, disabled_hp_changes, undo_list, redo_list)
return result
def perform_small_request_update_record(requestType, uid, data):
"""Handle record update requests for actions on a subfield level.
Handle adding, replacing or deleting of subfields.
"""
result = {
'resultCode': 0,
'resultText': '',
'resultHtml': ''
}
recid1 = data["recID1"]
recid2 = data["recID2"]
cache_content = get_cache_file_contents(recid1, uid) #TODO: check mtime, existence
cache_dirty = cache_content[0]
rec_revision = cache_content[1]
record1 = cache_content[2]
pending_changes = cache_content[3]
disabled_hp_changes = cache_content[4]
mode = data['record2Mode']
record2 = _get_record_slave(recid2, result, mode, uid)
if result['resultCode'] != 0: #if record not accessible return error information
return result
ftag, findex1 = _field_info(data['fieldCode1'])
fnum = ftag[:3]
findex2 = _field_info(data['fieldCode2'])[1]
sfindex1 = data['sfindex1']
sfindex2 = data['sfindex2']
if requestType == 'deleteSubfield':
delete_subfield(record1, fnum, findex1, sfindex1)
result['resultText'] = 'Subfield deleted'
elif requestType == 'addSubfield':
add_subfield(record1, record2, fnum, findex1, findex2, sfindex1, sfindex2)
result['resultText'] = 'Subfield added'
elif requestType == 'replaceSubfield':
replace_subfield(record1, record2, fnum, findex1, findex2, sfindex1, sfindex2)
result['resultText'] = 'Subfield replaced'
elif requestType == 'diffSubfield':
result['resultHtml'] = bibmerge_templates.BM_html_subfield_row_diffed(record1, record2, fnum, findex1, findex2, sfindex1, sfindex2)
result['resultText'] = 'Subfields diffed'
update_cache_file_contents(recid1, uid, rec_revision, record1, pending_changes, disabled_hp_changes, [], [])
return result
def _get_record(recid, uid, result, fresh_record=False):
"""Retrieve record structure.
"""
record = None
mtime = None
cache_dirty = None
record_status = record_exists(recid)
existing_cache = cache_exists(recid, uid)
if record_status == 0:
result['resultCode'], result['resultText'] = 1, 'Non-existent record: %s' % recid
elif record_status == -1:
result['resultCode'], result['resultText'] = 1, 'Deleted record: %s' % recid
elif not existing_cache and record_locked_by_other_user(recid, uid):
result['resultCode'], result['resultText'] = 1, 'Record %s locked by user' % recid
elif existing_cache and cache_expired(recid, uid) and \
record_locked_by_other_user(recid, uid):
result['resultCode'], result['resultText'] = 1, 'Record %s locked by user' % recid
elif record_locked_by_queue(recid):
result['resultCode'], result['resultText'] = 1, 'Record %s locked by queue' % recid
else:
if fresh_record:
delete_cache_file(recid, uid)
existing_cache = False
if not existing_cache:
record_revision, record = create_cache_file(recid, uid)
mtime = get_cache_mtime(recid, uid)
cache_dirty = False
else:
tmpRes = get_cache_file_contents(recid, uid)
cache_dirty, record_revision, record = tmpRes[0], tmpRes[1], tmpRes[2]
touch_cache_file(recid, uid)
mtime = get_cache_mtime(recid, uid)
if not latest_record_revision(recid, record_revision):
result['cacheOutdated'] = True
result['resultCode'], result['resultText'], result['cacheDirty'], result['cacheMTime'] = 0, 'Record OK', cache_dirty, mtime
return record
def _get_record_slave(recid, result, mode=None, uid=None):
"""Check if record exists and return it in dictionary format.
If any kind of error occurs returns None.
If mode=='revision' then recid parameter is considered as revid."""
record = None
if recid == 'none':
mode = 'none'
if mode == 'recid':
record_status = record_exists(recid)
#check for errors
if record_status == 0:
result['resultCode'], result['resultText'] = 1, 'Non-existent record: %s' % recid
elif record_status == -1:
result['resultCode'], result['resultText'] = 1, 'Deleted record: %s' % recid
elif record_locked_by_queue(recid):
result['resultCode'], result['resultText'] = 1, 'Record %s locked by queue' % recid
else:
record = create_record( print_record(recid, 'xm') )[0]
elif mode == 'tmpfile':
file_path = '%s_%s.xml' % (_get_file_path(recid, uid),
CFG_BIBEDIT_TO_MERGE_SUFFIX)
if not os.path.isfile(file_path): #check if file doesn't exist
            result['resultCode'], result['resultText'] = 1, "Temporary file doesn't exist"
else: #open file
tmpfile = open(file_path, 'r')
record = create_record( tmpfile.read() )[0]
tmpfile.close()
elif mode == 'revision':
if revision_format_valid_p(recid):
marcxml = get_marcxml_of_revision_id(recid)
if marcxml:
record = create_record(marcxml)[0]
else:
result['resultCode'], result['resultText'] = 1, 'The specified revision does not exist'
else:
result['resultCode'], result['resultText'] = 1, 'Invalid revision id'
elif mode == 'none':
return {}
else:
result['resultCode'], result['resultText'] = 1, 'Invalid record mode for record2'
return record
def _field_info(fieldIdCode):
"""Returns a tuple: (field-tag, field-index)
eg.: _field_info('R1-8560_-2') --> ('8560_', 2) """
info = fieldIdCode.split('-')
if info[2] == 'None':
info[2] = None
else:
info[2] = int(info[2])
return tuple( info[1:] )
def _fieldtagNum_and_indicators(fieldTag):
"""Separate a 5-char field tag to a 3-character field-tag number and two
indicators"""
fnum, ind1, ind2 = fieldTag[:3], fieldTag[3], fieldTag[4]
if ind1 == '_':
ind1 = ' '
if ind2 == '_':
ind2 = ' '
return (fnum, ind1, ind2)
| gpl-2.0 | -5,580,802,279,621,333,000 | 39.734118 | 139 | 0.628524 | false |
mbohlool/client-python | kubernetes/client/models/v1beta2_deployment_strategy.py | 1 | 4295 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta2DeploymentStrategy(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'rolling_update': 'V1beta2RollingUpdateDeployment',
'type': 'str'
}
attribute_map = {
'rolling_update': 'rollingUpdate',
'type': 'type'
}
def __init__(self, rolling_update=None, type=None):
"""
V1beta2DeploymentStrategy - a model defined in Swagger
"""
self._rolling_update = None
self._type = None
self.discriminator = None
if rolling_update is not None:
self.rolling_update = rolling_update
if type is not None:
self.type = type
@property
def rolling_update(self):
"""
Gets the rolling_update of this V1beta2DeploymentStrategy.
Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate.
:return: The rolling_update of this V1beta2DeploymentStrategy.
:rtype: V1beta2RollingUpdateDeployment
"""
return self._rolling_update
@rolling_update.setter
def rolling_update(self, rolling_update):
"""
Sets the rolling_update of this V1beta2DeploymentStrategy.
Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate.
:param rolling_update: The rolling_update of this V1beta2DeploymentStrategy.
:type: V1beta2RollingUpdateDeployment
"""
self._rolling_update = rolling_update
@property
def type(self):
"""
Gets the type of this V1beta2DeploymentStrategy.
Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". Default is RollingUpdate.
:return: The type of this V1beta2DeploymentStrategy.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this V1beta2DeploymentStrategy.
Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". Default is RollingUpdate.
:param type: The type of this V1beta2DeploymentStrategy.
:type: str
"""
self._type = type
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1beta2DeploymentStrategy):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
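# Minimal usage sketch (values are examples only):
#     strategy = V1beta2DeploymentStrategy(type="RollingUpdate")
#     strategy.rolling_update = V1beta2RollingUpdateDeployment(...)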
| apache-2.0 | -6,132,782,709,722,817,000 | 26.88961 | 105 | 0.570664 | false |
gedhe/sidesa2.0 | kejadian_lain.py | 1 | 28823 | #Boa:Frame:kejadian_lain
import os
import wx
import wx.lib.buttons
import data_penduduk
import sqlite3
import string
import gettext
import peringatan
db = sqlite3.connect('/opt/sidesa/sidesa')
cur = db.cursor()
def create(parent):
return kejadian_lain(parent)
[wxID_KEJADIAN_LAIN, wxID_KEJADIAN_LAINCARI_KK, wxID_KEJADIAN_LAINDOKUMEN,
wxID_KEJADIAN_LAININPUT_ALAMAT, wxID_KEJADIAN_LAININPUT_AYAH,
wxID_KEJADIAN_LAININPUT_DUSUN, wxID_KEJADIAN_LAININPUT_IBU,
wxID_KEJADIAN_LAININPUT_NAMA, wxID_KEJADIAN_LAININPUT_NIK,
wxID_KEJADIAN_LAININPUT_NO, wxID_KEJADIAN_LAININPUT_NO_KK,
wxID_KEJADIAN_LAININPUT_RT, wxID_KEJADIAN_LAININPUT_RW,
wxID_KEJADIAN_LAININPUT_TEMPAT_LAHIR, wxID_KEJADIAN_LAINISIPENDUDUK,
wxID_KEJADIAN_LAINKEMBALI, wxID_KEJADIAN_LAINKETERANGAN,
wxID_KEJADIAN_LAINLABEL_AGAMA, wxID_KEJADIAN_LAINLABEL_ALAMAT,
wxID_KEJADIAN_LAINLABEL_DATA_PENDUDUK, wxID_KEJADIAN_LAINLABEL_DIFABELITAS,
wxID_KEJADIAN_LAINLABEL_DUSUN, wxID_KEJADIAN_LAINLABEL_GOLONGAN_DARAH,
wxID_KEJADIAN_LAINLABEL_JENIS_KELAMIN,
wxID_KEJADIAN_LAINLABEL_KEWARGANEGARAAN, wxID_KEJADIAN_LAINLABEL_KONTRASEPSI,
wxID_KEJADIAN_LAINLABEL_NAMA_AYAH, wxID_KEJADIAN_LAINLABEL_NAMA_IBU,
wxID_KEJADIAN_LAINLABEL_NAMA_LENGKAP, wxID_KEJADIAN_LAINLABEL_NOMOR_KK,
wxID_KEJADIAN_LAINLABEL_PEKERJAAN, wxID_KEJADIAN_LAINLABEL_PEKERJAAN_LAINNYA,
wxID_KEJADIAN_LAINLABEL_PENDIDIKAN_TEMPUH,
wxID_KEJADIAN_LAINLABEL_PENDIDIKAN_TERAKHIR,
wxID_KEJADIAN_LAINLABEL_RESIKO_KEHAMILAN, wxID_KEJADIAN_LAINLABEL_SHDK,
wxID_KEJADIAN_LAINLABEL_STATUS_KEPENDUDUKAN,
wxID_KEJADIAN_LAINLABEL_STATUS_PERKAWINAN,
wxID_KEJADIAN_LAINLABEL_STATUS_TINGGAL,
wxID_KEJADIAN_LAINLABEL_TANGGAL_LAHIR, wxID_KEJADIAN_LAINLABEL_TEMPAT_LAHIR,
wxID_KEJADIAN_LAINLAPORAN, wxID_KEJADIAN_LAINLEBEL_NIK,
wxID_KEJADIAN_LAINNAMA_KK, wxID_KEJADIAN_LAINPILIHAN_AGAMA,
wxID_KEJADIAN_LAINPILIHAN_DIFABELITAS,
wxID_KEJADIAN_LAINPILIHAN_GOLONGAN_DARAH,
wxID_KEJADIAN_LAINPILIHAN_JENIS_KELAMIN, wxID_KEJADIAN_LAINPILIHAN_KEHAMILAN,
wxID_KEJADIAN_LAINPILIHAN_KONTRASEPSI, wxID_KEJADIAN_LAINPILIHAN_PEKERJAAN,
wxID_KEJADIAN_LAINPILIHAN_PEKERJAAN_LAINNYA,
wxID_KEJADIAN_LAINPILIHAN_PENDIDIKAN_DITEMPUH,
wxID_KEJADIAN_LAINPILIHAN_PENDIDIKAN_TERAKHIR,
wxID_KEJADIAN_LAINPILIHAN_SHDK, wxID_KEJADIAN_LAINPILIHAN_STATUS,
wxID_KEJADIAN_LAINPILIHAN_STATUS_KEPENDUDUKAN,
wxID_KEJADIAN_LAINPILIHAN_STATUS_TINGGAL,
wxID_KEJADIAN_LAINPILIHAN_WARGANEGARA, wxID_KEJADIAN_LAINSIMPANGAMBAR,
wxID_KEJADIAN_LAINSTATICTEXT1, wxID_KEJADIAN_LAINSTATICTEXT2,
wxID_KEJADIAN_LAINSTATICTEXT3, wxID_KEJADIAN_LAINSTATICTEXT4,
wxID_KEJADIAN_LAINSTATICTEXT5, wxID_KEJADIAN_LAINSTATICTEXT6,
wxID_KEJADIAN_LAINSTATICTEXT7, wxID_KEJADIAN_LAINTANGGALKEJADIAN,
wxID_KEJADIAN_LAINTANGGAL_LAHIR, wxID_KEJADIAN_LAINTGLKEJADIAN,
wxID_KEJADIAN_LAINTOMBOL_CARI, wxID_KEJADIAN_LAINTOMBOL_TAMBAH_DATA,
] = [wx.NewId() for _init_ctrls in range(72)]
class kejadian_lain(wx.Dialog):
def _init_coll_isipenduduk_Columns(self, parent):
# generated method, don't edit
parent.InsertColumn(col=0, format=wx.LIST_FORMAT_LEFT,
heading='Nama Penduduk', width=150)
parent.InsertColumn(col=1, format=wx.LIST_FORMAT_LEFT,
heading='Nomor KK', width=250)
parent.InsertColumn(col=2, format=wx.LIST_FORMAT_LEFT, heading='Alamat',
width=260)
parent.InsertColumn(col=3, format=wx.LIST_FORMAT_LEFT, heading='Dusun',
width=100)
parent.InsertColumn(col=4, format=wx.LIST_FORMAT_LEFT, heading='RT',
width=40)
parent.InsertColumn(col=5, format=wx.LIST_FORMAT_LEFT, heading='RW',
width=40)
def _init_ctrls(self, prnt):
# generated method, don't edit
wx.Dialog.__init__(self, id=wxID_KEJADIAN_LAIN,
name=u'edit_kejadian_lain', parent=prnt, pos=wx.Point(406, 79),
size=wx.Size(888, 639), style=wx.FRAME_NO_TASKBAR,
title=u'Kejadian Lain')
self.SetClientSize(wx.Size(888, 639))
self.Center(wx.BOTH)
self.label_nomor_kk = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_NOMOR_KK,
label=u'Nomor KK', name=u'label_nomor_kk', parent=self,
pos=wx.Point(8, 152), size=wx.Size(168, 17),
style=wx.TE_READONLY)
self.label_alamat = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_ALAMAT,
label=u'Alamat', name=u'label_alamat', parent=self,
pos=wx.Point(256, 152), size=wx.Size(47, 17), style=0)
self.label_dusun = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_DUSUN,
label=u'Dusun', name=u'label_dusun', parent=self,
pos=wx.Point(552, 152), size=wx.Size(144, 17), style=0)
self.lebel_nik = wx.StaticText(id=wxID_KEJADIAN_LAINLEBEL_NIK,
label=u'N I K *', name=u'lebel_nik', parent=self,
pos=wx.Point(192, 192), size=wx.Size(40, 17), style=0)
self.label_tempat_lahir = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_TEMPAT_LAHIR,
label=u'Tempat Lahir', name=u'label_tempat_lahir', parent=self,
pos=wx.Point(192, 312), size=wx.Size(176, 17), style=0)
self.label_tanggal_lahir = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_TANGGAL_LAHIR,
label=u'Tanggal Lahir', name=u'label_tanggal_lahir', parent=self,
pos=wx.Point(192, 352), size=wx.Size(152, 17), style=0)
self.label_golongan_darah = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_GOLONGAN_DARAH,
label=u'Golongan Darah', name=u'label_golongan_darah',
parent=self, pos=wx.Point(192, 392), size=wx.Size(200, 17),
style=0)
self.label_nama_lengkap = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_NAMA_LENGKAP,
label=u'Nama Lengkap', name=u'label_nama_lengkap', parent=self,
pos=wx.Point(192, 232), size=wx.Size(98, 17), style=0)
self.label_jenis_kelamin = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_JENIS_KELAMIN,
label=u'Jenis Kelamin', name=u'label_jenis_kelamin', parent=self,
pos=wx.Point(192, 272), size=wx.Size(152, 17), style=0)
self.label_agama = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_AGAMA,
label=u'Agama', name=u'label_agama', parent=self,
pos=wx.Point(400, 192), size=wx.Size(120, 17), style=0)
self.input_no_kk = wx.TextCtrl(id=wxID_KEJADIAN_LAININPUT_NO_KK,
name=u'input_no_kk', parent=self, pos=wx.Point(8, 168),
size=wx.Size(240, 25), style=wx.TE_READONLY, value=u'')
self.input_alamat = wx.TextCtrl(id=wxID_KEJADIAN_LAININPUT_ALAMAT,
name=u'input_alamat', parent=self, pos=wx.Point(256, 168),
size=wx.Size(288, 25), style=wx.TE_READONLY, value=u'')
self.input_dusun = wx.TextCtrl(id=wxID_KEJADIAN_LAININPUT_DUSUN,
name=u'input_dusun', parent=self, pos=wx.Point(552, 168),
size=wx.Size(192, 25), style=wx.TE_READONLY, value=u'')
self.input_rt = wx.TextCtrl(id=wxID_KEJADIAN_LAININPUT_RT,
name=u'input_rt', parent=self, pos=wx.Point(752, 168),
size=wx.Size(56, 27), style=wx.TE_READONLY, value=u'')
self.input_rw = wx.TextCtrl(id=wxID_KEJADIAN_LAININPUT_RW,
name=u'input_rw', parent=self, pos=wx.Point(816, 168),
size=wx.Size(56, 27), style=wx.TE_READONLY, value=u'')
self.input_nik = wx.TextCtrl(id=wxID_KEJADIAN_LAININPUT_NIK,
name=u'input_nik', parent=self, pos=wx.Point(192, 208),
size=wx.Size(200, 25), style=wx.TE_READONLY, value=u'')
self.input_nama = wx.TextCtrl(id=wxID_KEJADIAN_LAININPUT_NAMA,
name=u'input_nama', parent=self, pos=wx.Point(192, 248),
size=wx.Size(200, 25), style=wx.TE_READONLY, value=u'')
self.pilihan_jenis_kelamin = wx.TextCtrl(id=wxID_KEJADIAN_LAINPILIHAN_JENIS_KELAMIN,
name=u'pilihan_jenis_kelamin', parent=self, pos=wx.Point(192,
288), size=wx.Size(200, 27), style=wx.TE_READONLY, value=u'')
self.input_tempat_lahir = wx.TextCtrl(id=wxID_KEJADIAN_LAININPUT_TEMPAT_LAHIR,
name=u'input_tempat_lahir', parent=self, pos=wx.Point(192, 328),
size=wx.Size(200, 25), style=wx.TE_READONLY, value=u'')
self.tanggalkejadian = wx.TextCtrl(id=wxID_KEJADIAN_LAINTANGGALKEJADIAN,
name=u'tanggalkejadian', parent=self, pos=wx.Point(-100, -100),
size=wx.Size(176, 24), style=wx.TE_READONLY, value=u'')
self.pilihan_golongan_darah = wx.TextCtrl(id=wxID_KEJADIAN_LAINPILIHAN_GOLONGAN_DARAH,
name=u'pilihan_golongan_darah', parent=self, pos=wx.Point(192,
408), size=wx.Size(80, 25), style=wx.TE_READONLY, value=u'')
self.pilihan_agama = wx.TextCtrl(id=wxID_KEJADIAN_LAINPILIHAN_AGAMA,
name=u'pilihan_agama', parent=self, pos=wx.Point(400, 208),
size=wx.Size(216, 25), style=wx.TE_READONLY, value=u'')
self.label_kewarganegaraan = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_KEWARGANEGARAAN,
label=u'Kewarganegaraan', name=u'label_kewarganegaraan',
parent=self, pos=wx.Point(400, 232), size=wx.Size(168, 17),
style=0)
self.pilihan_warganegara = wx.TextCtrl(name=u'pilihan_warganegara',
parent=self, pos=wx.Point(400, 248), size=wx.Size(216, 25),
style=wx.TE_READONLY, value=u'')
self.label_pendidikan_terakhir = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_PENDIDIKAN_TERAKHIR,
label=u'Pendidikan Terakhir', name=u'label_pendidikan_terakhir',
parent=self, pos=wx.Point(400, 272), size=wx.Size(184, 17),
style=0)
self.pilihan_pendidikan_terakhir = wx.TextCtrl(id=wxID_KEJADIAN_LAINPILIHAN_PENDIDIKAN_TERAKHIR,
name=u'pilihan_pendidikan_terakhir', parent=self,
pos=wx.Point(400, 288), size=wx.Size(216, 25),
style=wx.TE_READONLY, value=u'')
self.label_pendidikan_tempuh = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_PENDIDIKAN_TEMPUH,
label=u'Pendidikan Saat Ini Ditempuh',
name=u'label_pendidikan_tempuh', parent=self, pos=wx.Point(400,
312), size=wx.Size(264, 17), style=0)
self.pilihan_pendidikan_ditempuh = wx.TextCtrl(id=wxID_KEJADIAN_LAINPILIHAN_PENDIDIKAN_DITEMPUH,
name=u'pilihan_pendidikan_ditempuh', parent=self,
pos=wx.Point(400, 328), size=wx.Size(216, 25),
style=wx.TE_READONLY, value=u'')
self.label_pekerjaan = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_PEKERJAAN,
label=u'Pekerjaan Utama', name=u'label_pekerjaan', parent=self,
pos=wx.Point(400, 352), size=wx.Size(200, 17), style=0)
self.pilihan_pekerjaan = wx.TextCtrl(id=wxID_KEJADIAN_LAINPILIHAN_PEKERJAAN,
name=u'pilihan_pekerjaan', parent=self, pos=wx.Point(400, 370),
size=wx.Size(216, 25), style=wx.TE_READONLY, value=u'')
self.label_pekerjaan_lainnya = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_PEKERJAAN_LAINNYA,
label=u'Pekerjaan Lainnya', name=u'label_pekerjaan_lainnya',
parent=self, pos=wx.Point(400, 392), size=wx.Size(168, 17),
style=0)
self.pilihan_pekerjaan_lainnya = wx.TextCtrl(id=wxID_KEJADIAN_LAINPILIHAN_PEKERJAAN_LAINNYA,
name=u'pilihan_pekerjaan_lainnya', parent=self, pos=wx.Point(400,
408), size=wx.Size(216, 25), style=wx.TE_READONLY, value=u'')
self.label_status_perkawinan = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_STATUS_PERKAWINAN,
label=u'Status Perkawinan', name=u'label_status_perkawinan',
parent=self, pos=wx.Point(624, 192), size=wx.Size(176, 17),
style=0)
self.pilihan_status = wx.TextCtrl(id=wxID_KEJADIAN_LAINPILIHAN_STATUS,
name=u'pilihan_status', parent=self, pos=wx.Point(624, 208),
size=wx.Size(248, 25), style=wx.TE_READONLY, value=u'')
self.label_status_kependudukan = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_STATUS_KEPENDUDUKAN,
label=u'Status Kependudukan', name=u'label_status_kependudukan',
parent=self, pos=wx.Point(624, 232), size=wx.Size(184, 17),
style=0)
self.pilihan_status_kependudukan = wx.TextCtrl(id=wxID_KEJADIAN_LAINPILIHAN_STATUS_KEPENDUDUKAN,
name=u'pilihan_status_kependudukan', parent=self,
pos=wx.Point(624, 248), size=wx.Size(248, 25),
style=wx.TE_READONLY, value=u'')
self.label_status_tinggal = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_STATUS_TINGGAL,
label=u'Status Tinggal', name=u'label_status_tinggal',
parent=self, pos=wx.Point(624, 272), size=wx.Size(152, 17),
style=0)
self.pilihan_status_tinggal = wx.TextCtrl(id=wxID_KEJADIAN_LAINPILIHAN_STATUS_TINGGAL,
name=u'pilihan_status_tinggal', parent=self, pos=wx.Point(624,
288), size=wx.Size(248, 25), style=wx.TE_READONLY, value=u'')
self.label_difabelitas = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_DIFABELITAS,
label=u'Penyandang Difabelitas', name=u'label_difabelitas',
parent=self, pos=wx.Point(624, 312), size=wx.Size(184, 17),
style=0)
self.pilihan_difabelitas = wx.TextCtrl(id=wxID_KEJADIAN_LAINPILIHAN_DIFABELITAS,
name=u'pilihan_difabelitas', parent=self, pos=wx.Point(624, 328),
size=wx.Size(248, 25), style=wx.TE_READONLY, value=u'')
self.label_kontrasepsi = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_KONTRASEPSI,
label=u'Penggunaan Kontrasepsi', name=u'label_kontrasepsi',
parent=self, pos=wx.Point(624, 352), size=wx.Size(192, 17),
style=0)
self.pilihan_kontrasepsi = wx.TextCtrl(id=wxID_KEJADIAN_LAINPILIHAN_KONTRASEPSI,
name=u'pilihan_kontrasepsi', parent=self, pos=wx.Point(624, 368),
size=wx.Size(248, 25), style=wx.TE_READONLY, value=u'')
self.pilihan_kehamilan = wx.TextCtrl(id=wxID_KEJADIAN_LAINPILIHAN_KEHAMILAN,
name=u'pilihan_kehamilan', parent=self, pos=wx.Point(624, 408),
size=wx.Size(248, 25), style=wx.TE_READONLY, value=u'')
self.laporan = wx.TextCtrl(id=wxID_KEJADIAN_LAINLAPORAN,
name=u'laporan', parent=self, pos=wx.Point(136, 496),
size=wx.Size(192, 27), style=0, value=u'')
self.keterangan = wx.TextCtrl(id=wxID_KEJADIAN_LAINKETERANGAN,
name=u'keterangan', parent=self, pos=wx.Point(416, 496),
size=wx.Size(448, 27), style=0, value=u'')
self.label_shdk = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_SHDK,
label=u'Status Hubungan Dalam Keluarga', name=u'label_shdk',
parent=self, pos=wx.Point(24, 536), size=wx.Size(320, 17),
style=0)
self.pilihan_shdk = wx.TextCtrl(id=wxID_KEJADIAN_LAINPILIHAN_SHDK,
name=u'pilihan_shdk', parent=self, pos=wx.Point(24, 560),
size=wx.Size(304, 25), style=wx.TE_READONLY, value=u'')
self.label_nama_ayah = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_NAMA_AYAH,
label=u'Nama Ayah', name=u'label_nama_ayah', parent=self,
pos=wx.Point(344, 536), size=wx.Size(152, 17), style=0)
self.input_ayah = wx.TextCtrl(id=wxID_KEJADIAN_LAININPUT_AYAH,
name=u'input_ayah', parent=self, pos=wx.Point(344, 560),
size=wx.Size(280, 25), style=wx.TE_READONLY, value=u'')
self.label_nama_ibu = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_NAMA_IBU,
label=u'Nama Ibu', name=u'label_nama_ibu', parent=self,
pos=wx.Point(632, 536), size=wx.Size(160, 17), style=0)
self.input_ibu = wx.TextCtrl(id=wxID_KEJADIAN_LAININPUT_IBU,
name=u'input_ibu', parent=self, pos=wx.Point(632, 560),
size=wx.Size(240, 25), style=wx.TE_READONLY, value=u'')
self.label_resiko_kehamilan = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_RESIKO_KEHAMILAN,
label=u'Resiko Kehamilan', name=u'label_resiko_kehamilan',
parent=self, pos=wx.Point(624, 392), size=wx.Size(176, 17),
style=0)
self.tombol_tambah_data = wx.Button(id=wxID_KEJADIAN_LAINTOMBOL_TAMBAH_DATA,
label=u'Tambah Data', name=u'tombol_tambah_data', parent=self,
pos=wx.Point(240, 600), size=wx.Size(200, 32), style=0)
self.tombol_tambah_data.Bind(wx.EVT_BUTTON,
self.OnTombol_tambah_dataButton,
id=wxID_KEJADIAN_LAINTOMBOL_TAMBAH_DATA)
self.kembali = wx.Button(id=wxID_KEJADIAN_LAINKEMBALI,
label=u'Kembali Ke Menu', name=u'kembali', parent=self,
pos=wx.Point(456, 600), size=wx.Size(208, 32), style=0)
self.kembali.Bind(wx.EVT_BUTTON, self.OnKembaliButton,
id=wxID_KEJADIAN_LAINKEMBALI)
self.dokumen = wx.StaticText(id=wxID_KEJADIAN_LAINDOKUMEN,
label=u'Catatan Kejadian Penduduk Lainnya', name=u'dokumen',
parent=self, pos=wx.Point(16, 440), size=wx.Size(304, 17),
style=0)
self.label_data_penduduk = wx.StaticText(id=wxID_KEJADIAN_LAINLABEL_DATA_PENDUDUK,
label=u'FORM DATA PENDUDUK', name=u'label_data_penduduk',
parent=self, pos=wx.Point(336, 0), size=wx.Size(216, 17),
style=0)
self.isipenduduk = wx.ListCtrl(id=wxID_KEJADIAN_LAINISIPENDUDUK,
name=u'isipenduduk', parent=self, pos=wx.Point(16, 16),
size=wx.Size(856, 104), style=wx.LC_REPORT)
self._init_coll_isipenduduk_Columns(self.isipenduduk)
self.isipenduduk.Bind(wx.EVT_LIST_ITEM_SELECTED,
self.OnIsipendudukListItemSelected,
id=wxID_KEJADIAN_LAINISIPENDUDUK)
self.staticText1 = wx.StaticText(id=wxID_KEJADIAN_LAINSTATICTEXT1,
label=u'Nama Lengkap', name='staticText1', parent=self,
pos=wx.Point(400, 128), size=wx.Size(145, 17), style=0)
self.cari_kk = wx.TextCtrl(id=wxID_KEJADIAN_LAINCARI_KK,
name=u'cari_kk', parent=self, pos=wx.Point(552, 128),
size=wx.Size(224, 24), style=0, value='')
self.tombol_cari = wx.Button(id=wxID_KEJADIAN_LAINTOMBOL_CARI,
label=u'Cari', name=u'tombol_cari', parent=self, pos=wx.Point(784,
128), size=wx.Size(85, 24), style=0)
self.tombol_cari.Bind(wx.EVT_BUTTON, self.OnTombol_cariButton,
id=wxID_KEJADIAN_LAINTOMBOL_CARI)
self.input_no = wx.TextCtrl(id=wxID_KEJADIAN_LAININPUT_NO,
name=u'input_no', parent=self, pos=wx.Point(-100, -100),
size=wx.Size(56, 27), style=wx.TE_READONLY, value=u'')
self.staticText2 = wx.StaticText(id=wxID_KEJADIAN_LAINSTATICTEXT2,
label=u'RT', name='staticText2', parent=self, pos=wx.Point(760,
152), size=wx.Size(24, 16), style=0)
self.staticText3 = wx.StaticText(id=wxID_KEJADIAN_LAINSTATICTEXT3,
label=u'RW', name='staticText3', parent=self, pos=wx.Point(824,
152), size=wx.Size(19, 17), style=0)
self.staticText4 = wx.StaticText(id=wxID_KEJADIAN_LAINSTATICTEXT4,
label=u'Pemberi Laporan', name='staticText4', parent=self,
pos=wx.Point(16, 504), size=wx.Size(118, 17), style=0)
self.staticText5 = wx.StaticText(id=wxID_KEJADIAN_LAINSTATICTEXT5,
label=u'Keterangan', name='staticText5', parent=self,
pos=wx.Point(336, 504), size=wx.Size(74, 17), style=0)
self.staticText6 = wx.StaticText(id=wxID_KEJADIAN_LAINSTATICTEXT6,
label=u'Tanggal Kejadian', name='staticText6', parent=self,
pos=wx.Point(16, 464), size=wx.Size(106, 17), style=0)
self.tglkejadian = wx.DatePickerCtrl(id=wxID_KEJADIAN_LAINTGLKEJADIAN,
name='tglkejadian', parent=self, pos=wx.Point(136, 464),
size=wx.Size(192, 26), style=wx.DP_DROPDOWN|wx.DP_SHOWCENTURY)
self.tglkejadian.Bind(wx.EVT_DATE_CHANGED, self.OnGetDate)
self.nama_kk = wx.TextCtrl(id=wxID_KEJADIAN_LAINNAMA_KK,
name=u'nama_kk', parent=self, pos=wx.Point(8, 208),
size=wx.Size(176, 24), style=wx.TE_READONLY, value=u'')
self.staticText7 = wx.StaticText(id=wxID_KEJADIAN_LAINSTATICTEXT7,
label=u'Nama Kepala Keluarga', name='staticText7', parent=self,
pos=wx.Point(8, 192), size=wx.Size(135, 17), style=0)
self.tanggalkejadian = wx.TextCtrl(id=wxID_KEJADIAN_LAINTANGGALKEJADIAN,
name=u'tanggalkejadian', parent=self, pos=wx.Point(-100, -100),
size=wx.Size(176, 24), style=wx.TE_READONLY, value=u'')
self.simpangambar = wx.TextCtrl(id=wxID_KEJADIAN_LAINSIMPANGAMBAR,
name=u'simpangambar', parent=self, pos=wx.Point(-100, -100),
size=wx.Size(152, 24), style=0, value=u'')
self.tanggal_lahir = wx.TextCtrl(id=wxID_KEJADIAN_LAINTANGGAL_LAHIR,
name=u'tanggal_lahir', parent=self, pos=wx.Point(192, 368),
size=wx.Size(200, 27), style=0, value=u'')
def __init__(self, parent):
self._init_ctrls(parent)
self.awal()
def awal(self):
self.loadgambar()
self.IsiList()
self.input_no_kk.SetValue('')
self.input_alamat.SetValue('')
self.input_dusun.SetValue('')
self.input_rt.SetValue('')
self.input_rw.SetValue('')
self.input_nik.SetValue('')
self.input_nama.SetValue('')
self.pilihan_jenis_kelamin.SetValue('')
self.input_tempat_lahir.SetValue('')
self.tanggal_lahir.SetValue('')
self.pilihan_golongan_darah.SetValue('')
self.pilihan_agama.SetValue('')
self.pilihan_warganegara.SetValue('')
self.pilihan_pendidikan_terakhir.SetValue('')
self.pilihan_pendidikan_ditempuh.SetValue('')
self.pilihan_pekerjaan.SetValue('')
self.pilihan_pekerjaan_lainnya.SetValue('')
self.pilihan_status.SetValue('')
self.pilihan_status_kependudukan.SetValue('')
self.pilihan_status_tinggal.SetValue('')
self.pilihan_difabelitas.SetValue('')
self.pilihan_kontrasepsi.SetValue('')
self.pilihan_kehamilan.SetValue('')
self.pilihan_shdk.SetValue('')
self.input_ayah.SetValue('')
self.input_ibu.SetValue('')
self.laporan.SetValue('')
self.keterangan.SetValue('')
self.cari_kk.SetValue('')
self.nama_kk.SetValue('')
self.input_no.SetValue('')
def OnGetDate(self, event):
selected = self.tglkejadian.GetValue()
month = selected.Month + 1
day = selected.Day
year = selected.Year
date_str = "%02d/%02d/%4d" % (month, day, year)
self.tanggalkejadian.SetValue("{}".format(date_str))
def loadgambar(self):
self.PhotoMaxSize = 130
img = wx.EmptyImage(120,130)
self.imageCtrl = wx.StaticBitmap(self, wx.ID_ANY, wx.BitmapFromImage(img),wx.Point(52, 251))
def IsiList(self):
self.isipenduduk.DeleteAllItems()
sql = "SELECT * FROM penduduk WHERE kematian='Tidak'"
cur.execute(sql)
hasil = cur.fetchall()
nokk = self.isipenduduk.GetItemCount()
for i in hasil :
self.isipenduduk.InsertStringItem(nokk, "%s"%i[1])
self.isipenduduk.SetStringItem(nokk,1,"%s"%i[2])
self.isipenduduk.SetStringItem(nokk,2,"%s"%i[21])
self.isipenduduk.SetStringItem(nokk,3,"%s"%i[29])
self.isipenduduk.SetStringItem(nokk,4,"%s"%i[26])
self.isipenduduk.SetStringItem(nokk,5,"%s"%i[27])
nokk = nokk + 1
def Isi_Object(self) :
carikk=str(self.cari_kk.GetValue())
sql="SELECT * FROM penduduk WHERE nik='%s'"%(carikk)
cur.execute(sql)
hasil = cur.fetchone()
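        # hasil is a single row of the penduduk table; the numeric indexes below
        # follow that table's column order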
if hasil :
self.input_no_kk.SetValue(str(hasil[16]))
self.nama_kk.SetValue(str(hasil[17]))
self.input_alamat.SetValue(str(hasil[21]))
self.input_dusun.SetValue(str(hasil[29]))
self.input_rt.SetValue(str(hasil[26]))
self.input_rw.SetValue(str(hasil[27]))
self.input_nik.SetValue(str(hasil[1]))
self.input_nama.SetValue(str(hasil[2]))
self.pilihan_jenis_kelamin.SetValue(str(hasil[3]))
self.input_tempat_lahir.SetValue(str(hasil[4]))
self.tanggal_lahir.SetValue(str(hasil[5]))
self.pilihan_golongan_darah.SetValue(str(hasil[7]))
self.pilihan_agama.SetValue(str(hasil[8]))
self.pilihan_warganegara.SetValue(str(hasil[28]))
self.pilihan_pendidikan_terakhir.SetValue(str(hasil[12]))
self.pilihan_pendidikan_ditempuh.SetValue(str(hasil[31]))
self.pilihan_pekerjaan.SetValue(str(hasil[13]))
self.pilihan_pekerjaan_lainnya.SetValue(str(hasil[19]))
self.pilihan_status.SetValue(str(hasil[9]))
self.pilihan_status_kependudukan.SetValue(str(hasil[32]))
self.pilihan_status_tinggal.SetValue(str(hasil[33]))
self.pilihan_difabelitas.SetValue(str(hasil[34]))
self.pilihan_kontrasepsi.SetValue(str(hasil[35]))
self.pilihan_kehamilan.SetValue(str(hasil[36]))
self.pilihan_shdk.SetValue(str(hasil[10]))
self.input_ayah.SetValue(str(hasil[15]))
self.input_ibu.SetValue(str(hasil[14]))
self.simpangambar.SetValue(str(hasil[57]))
self.input_no.SetValue(str(hasil[0]))
self.viewgambar()
else :
self.pesan = wx.MessageDialog(self,"Data Tidak Ada","Konfirmasi",wx.OK)
self.pesan.ShowModal()
self.cari_kk.Clear()
self.cari_kk.SetFocus()
def viewgambar(self):
filepath=self.simpangambar.GetValue()
img = wx.Image(filepath, wx.BITMAP_TYPE_ANY)
# scale the image, preserving the aspect ratio
W = img.GetWidth()
H = img.GetHeight()
if W > H:
NewW = self.PhotoMaxSize
NewH = self.PhotoMaxSize * H / W
else:
NewH = self.PhotoMaxSize
NewW = self.PhotoMaxSize * W / H
img = img.Scale(NewW,NewH)
self.imageCtrl.SetBitmap(wx.BitmapFromImage(img))
def OnTombol_kembali_kemenuButton(self, event):
self.main=data_penduduk.create(None)
self.main.Show()
self.Close()
self.Destroy()
def OnTombol_tambah_dataButton(self, event):
nokk = str(self.input_no_kk.GetValue())
nik = str(self.input_nik.GetValue())
nama = str(self.input_nama.GetValue())
kejadian = str(self.tanggalkejadian.GetValue())
laporan = str(self.laporan.GetValue())
keterangan = str(self.keterangan.GetValue())
inputno = str(self.input_no.GetValue())
if laporan == '':
self.pesan = wx.MessageDialog(self,"Nama Pelapor Jangan Kosong","Peringatan",wx.OK)
self.pesan.ShowModal()
elif keterangan == '':
self.pesan = wx.MessageDialog(self,"Keterangan Kematian Jangan Kosong","Peringatan",wx.OK)
self.pesan.ShowModal()
else:
add_keluarga="UPDATE penduduk SET kejadianlain='Ya' WHERE no='"+inputno+"'"
cur.execute(add_keluarga)
db.commit()
add_kejadian="INSERT INTO peristiwalain (nomornik,tanggalperistiwa, peristiwa, pemberilaporan, namalengkap, nomorkk) VALUES('"+(nik)+"','"+(kejadian)+"','"+(keterangan)+"','"+(laporan)+"','"+(nama)+"','"+(nokk)+"') "
cur.execute(add_kejadian)
db.commit()
self.pesan = wx.MessageDialog(self,"Data Sudah Tersimpan","Konfirmasi",wx.OK)
self.pesan.ShowModal()
self.awal()
def OnKembaliButton(self, event):
self.main=data_penduduk.create(None)
self.main.Show()
self.Close()
self.Destroy()
def OnTombol_cariButton(self, event):
self.Isi_Object()
def OnIsipendudukListItemSelected(self, event):
        self.currentItem = event.m_itemIndex # index of the selected row
        b=self.isipenduduk.GetItem(self.currentItem).GetText() # read the selected row back as text/string
self.cari_kk.SetValue(b)
self.Isi_Object()
event.Skip()
| gpl-2.0 | 5,543,628,404,911,905,000 | 48.270085 | 229 | 0.630608 | false |
hjanime/VisTrails | vistrails/packages/URL/http_directory.py | 1 | 8252 | ###############################################################################
##
## Copyright (C) 2014-2015, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
# https://gist.github.com/remram44/6540454
from __future__ import division
from HTMLParser import HTMLParser
import os
import re
from .https_if_available import build_opener
re_url = re.compile(r'^(([a-zA-Z_-]+)://([^/]+))(/.*)?$')
def resolve_link(link, url):
m = re_url.match(link)
if m is not None:
if not m.group(4):
# http://domain -> http://domain/
return link + '/'
else:
return link
elif link[0] == '/':
# /some/path
murl = re_url.match(url)
return murl.group(1) + link
else:
# relative/path
if url[-1] == '/':
return url + link
else:
return url + '/' + link
class ListingParser(HTMLParser):
"""Parses an HTML file and build a list of links.
Links are stored into the 'links' set. They are resolved into absolute
links.
"""
def __init__(self, url):
HTMLParser.__init__(self)
if url[-1] != '/':
url += '/'
self.__url = url
self.links = set()
def handle_starttag(self, tag, attrs):
if tag == 'a':
for key, value in attrs:
if key == 'href':
if not value:
continue
value = resolve_link(value, self.__url)
self.links.add(value)
break
def download_directory(url, target, insecure=False):
def mkdir():
if not mkdir.done:
try:
os.mkdir(target)
except OSError:
pass
mkdir.done = True
mkdir.done = False
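    # mkdir() above creates the target directory lazily: it is only called once
    # something worth saving is actually found under this URL.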
opener = build_opener(insecure=insecure)
response = opener.open(url)
if response.info().type == 'text/html':
contents = response.read()
parser = ListingParser(url)
parser.feed(contents)
for link in parser.links:
link = resolve_link(link, url)
if link[-1] == '/':
link = link[:-1]
if not link.startswith(url):
continue
name = link.rsplit('/', 1)[1]
if '?' in name:
continue
mkdir()
download_directory(link, os.path.join(target, name), insecure)
if not mkdir.done:
# We didn't find anything to write inside this directory
# Maybe it's a HTML file?
if url[-1] != '/':
end = target[-5:].lower()
if not (end.endswith('.htm') or end.endswith('.html')):
target = target + '.html'
with open(target, 'wb') as fp:
fp.write(contents)
else:
buffer_size = 4096
with open(target, 'wb') as fp:
chunk = response.read(buffer_size)
while chunk:
fp.write(chunk)
chunk = response.read(buffer_size)
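# Illustrative sketch (not part of the original module): mirroring a remote
# directory listing into a local folder. The URL and target below are
# hypothetical.
def _example_mirror():  # pragma: no cover
    download_directory('http://example.org/pub/', 'mirror', insecure=False)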
###############################################################################
import unittest
class TestLinkResolution(unittest.TestCase):
def test_absolute_link(self):
self.assertEqual(
resolve_link('http://website.org/p/test.txt',
'http://some/other/url'),
'http://website.org/p/test.txt')
self.assertEqual(
resolve_link('http://website.org',
'http://some/other/url'),
'http://website.org/')
def test_absolute_path(self):
self.assertEqual(
resolve_link('/p/test.txt', 'http://some/url'),
'http://some/p/test.txt')
self.assertEqual(
resolve_link('/p/test.txt', 'http://some/url/'),
'http://some/p/test.txt')
self.assertEqual(
resolve_link('/p/test.txt', 'http://site'),
'http://site/p/test.txt')
self.assertEqual(
resolve_link('/p/test.txt', 'http://site/'),
'http://site/p/test.txt')
def test_relative_path(self):
self.assertEqual(
resolve_link('some/file', 'http://site/folder'),
'http://site/folder/some/file')
self.assertEqual(
resolve_link('some/file', 'http://site/folder/'),
'http://site/folder/some/file')
self.assertEqual(
resolve_link('some/dir/', 'http://site/folder'),
'http://site/folder/some/dir/')
class TestParser(unittest.TestCase):
def test_parse(self):
parser = ListingParser('http://a.remram.fr/test')
parser.feed("""
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN"><html><head><title>
Index of /test</title></head><body><h1>Index of /test</h1><table><tr><th>
<img src="/icons/blank.gif" alt="[ICO]"></th><th><a href="?C=N;O=D">Name</a>
</th><th><a href="?C=M;O=A">Last modified</a></th><th><a href="?C=S;O=A">Size
</a></th><th><a href="?C=D;O=A">Description</a></th></tr><tr><th colspan="5">
<hr></th></tr><tr><td valign="top"><img src="/icons/back.gif" alt="[DIR]"></td>
<td><a href="/">Parent Directory</a></td><td> </td><td align="right"> -
</td><td> </td></tr><tr><td valign="top">
<img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="a">a</a></td>
<td align="right">11-Sep-2013 15:46 </td><td align="right"> 3 </td><td>
</td></tr><tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td>
<td><a href="/bb">bb</a></td><td align="right">11-Sep-2013 15:46 </td>
<td align="right"> 3 </td><td> </td></tr><tr><td valign="top">
<img src="/icons/folder.gif" alt="[DIR]"></td><td><a href="/cc/">cc/</a></td>
<td align="right">11-Sep-2013 15:46 </td><td align="right"> - </td><td>
</td></tr><tr><td valign="top"><img src="/icons/folder.gif" alt="[DIR]"></td>
<td><a href="http://a.remram.fr/dd">dd/</a></td><td align="right">
11-Sep-2013 15:46 </td><td align="right"> - </td><td> </td></tr><tr>
<th colspan="5"><hr></th></tr></table></body></html>
""")
links = set(l for l in parser.links if '?' not in l)
self.assertEqual(links, set([
'http://a.remram.fr/',
'http://a.remram.fr/test/a',
'http://a.remram.fr/bb',
'http://a.remram.fr/cc/',
'http://a.remram.fr/dd',
]))
| bsd-3-clause | -6,873,373,719,552,639,000 | 37.381395 | 79 | 0.544716 | false |
Bajoo/client-pc | bajoo/gui/base_view.py | 1 | 4844 | # -*- coding: utf-8 -*-
import wx
from ..common.path import resource_filename
from .translator import Translator
from ..common.i18n import N_
class BaseView(Translator):
"""Base class for all views.
This class come with helper functions to configure the view.
Class Attributes:
LIGHT_GRAY (wx.Colour): Predefined background color.
Attributes:
window (wx.Window): the window element the view is in charge.
"""
LIGHT_GRAY = wx.Colour(0xf2, 0xf2, 0xf2)
def __init__(self, window):
Translator.__init__(self)
# wx.Window instance.
self.window = window
def set_frame_title(self, title):
"""Set the title of the wx.Frame containing this Window.
Args:
title (str): new frame title. The title will be translated.
"""
frame = self.window.GetTopLevelParent()
self.register_i18n(frame, frame.SetTitle, title)
def make_sizer(self, direction, items, outside_border=True, flag=0,
proportion=0, sizer=None, border=15):
"""Recursively make sizers with border for simple cases.
Each element given will be added to the sizer, with appropriate
borders. Border between elements (even sub-sizer) will be merged.
Args:
direction: the direction of the first sizer. Can be wx.HORIZONTAL
or wx.VERTICAL.
items (list of wx.Window): a list of all elements to add to the
sizer. If an item is None, a stretchable spacer is added. If
it's another list, this function is called recursively with the
opposite direction.
outside_border (boolean, optional): If set to False, no outside
border are added: Only borders between elements will be
created.
flag (optional): if set, additional flags who will be passed to
each ``sizer.Add()`` call.
proportion (optional): If set, the parameter will be passed to each
``sizer.Add()`` call.
sizer (wx.Sizer, optional): If set, this empty sizer will be used,
instead of creating a new one.
border (integer, optional): size of the border to use
returns:
wx.Sizer: the top-level sizer created.
"""
swap_direction = {
wx.VERTICAL: wx.HORIZONTAL,
wx.HORIZONTAL: wx.VERTICAL
}
if not sizer:
sizer = wx.BoxSizer(direction)
# the first border is implemented as a Spacer,
        # because borders of hidden elements don't appear.
if outside_border:
sizer.AddSpacer(border)
for (index, item) in enumerate(items):
if item is None:
sizer.AddStretchSpacer()
continue
flags = 0
if isinstance(item, list):
item = self.make_sizer(swap_direction[direction], item,
outside_border=False)
if isinstance(item, wx.Sizer):
flags |= wx.EXPAND
# Compute flag for merging common border.
if outside_border:
if direction is wx.VERTICAL:
flags |= wx.LEFT | wx.RIGHT
else:
flags |= wx.TOP | wx.BOTTOM
            if len(items) - 1 != index:
if direction is wx.VERTICAL:
flags |= wx.BOTTOM
else:
flags |= wx.RIGHT
flags |= flag
sizer.Add(item, border=border, flag=flags, proportion=proportion)
# last border
if outside_border:
sizer.AddSpacer(border)
return sizer
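    # Illustrative sketch (not part of the original class): a vertical layout
    # whose last row holds two buttons side by side; "ok_btn" and "cancel_btn"
    # are hypothetical widgets supplied by the caller.
    def _example_layout(self, ok_btn, cancel_btn):  # pragma: no cover
        return self.make_sizer(wx.VERTICAL, [None, [ok_btn, cancel_btn]])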
def create_settings_button_box(self, parent):
"""Create a common box with 3 buttons: ok, cancel, apply"""
btn_ok = wx.Button(parent, wx.ID_OK, name='btn_ok')
btn_cancel = wx.Button(parent, wx.ID_CANCEL, name='btn_cancel')
btn_apply = wx.Button(parent, wx.ID_APPLY, name='btn_apply')
self.register_many_i18n('SetLabel', {
btn_cancel: N_('Cancel'),
btn_ok: N_('OK'),
btn_apply: N_('Apply')
})
# Buttons box
button_box = wx.StdDialogButtonSizer()
button_box.SetAffirmativeButton(btn_ok)
button_box.SetCancelButton(btn_cancel)
button_box.AddButton(btn_apply)
# Layout the button box
button_box.Realize()
return button_box
def set_icon(self):
"""Set the standard Bajoo favicon to the window.
Note that the window must be an instance of wx.Frame.
"""
icon_path = resource_filename('assets/window_icon.png')
icon = wx.Icon(icon_path)
self.window.SetIcon(icon)
| gpl-3.0 | 7,601,464,904,003,091,000 | 32.638889 | 79 | 0.569777 | false |
jim-easterbrook/pywws | src/pywws/device_pyusb1.py | 1 | 4757 | # pywws - Python software for USB Wireless Weather Stations
# http://github.com/jim-easterbrook/pywws
# Copyright (C) 2008-20 pywws contributors
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Low level USB interface to weather station, using PyUSB v1.0.
Introduction
============
This module handles low level communication with the weather station
via the `PyUSB <http://sourceforge.net/apps/trac/pyusb/>`_ library
(version 1.0). It is one of several USB device modules, each of which
uses a different USB library interface. See :ref:`Installation - USB
library<dependencies-usb>` for details.
Testing
=======
Run :py:mod:`pywws.testweatherstation` with increased verbosity so it
reports which USB device access module is being used::
python -m pywws.testweatherstation -vv
18:28:09:pywws.weatherstation.CUSBDrive:using pywws.device_pyusb1
0000 55 aa ff ff ff ff ff ff ff ff ff ff ff ff ff ff 05 20 01 41 11 00 00 00 81 00 00 0f 05 00 e0 51
0020 03 27 ce 27 00 00 00 00 00 00 00 12 02 14 18 27 41 23 c8 00 00 00 46 2d 2c 01 64 80 c8 00 00 00
0040 64 00 64 80 a0 28 80 25 a0 28 80 25 03 36 00 05 6b 00 00 0a 00 f4 01 12 00 00 00 00 00 00 00 00
0060 00 00 49 0a 63 12 05 01 7f 00 36 01 60 80 36 01 60 80 bc 00 7b 80 95 28 12 26 6c 28 25 26 c8 01
0080 1d 02 d8 00 de 00 ff 00 ff 00 ff 00 00 11 10 06 01 29 12 02 01 19 32 11 09 09 05 18 12 01 22 13
00a0 14 11 11 04 15 04 11 12 17 05 12 11 09 02 15 26 12 02 11 07 05 11 09 02 15 26 12 02 11 07 05 11
00c0 09 10 09 12 12 02 02 12 38 12 02 07 19 00 11 12 16 03 27 12 02 03 11 00 11 12 16 03 27 11 12 26
00e0 21 32 11 12 26 21 32 12 02 06 19 57 12 02 06 19 57 12 02 06 19 57 12 02 06 19 57 12 02 06 19 57
API
===
"""
__docformat__ = "restructuredtext en"
import sys
import usb.core
import usb.util
class USBDevice(object):
"""Low level USB device access via PyUSB 1.0 library.
:param idVendor: the USB "vendor ID" number, for example 0x1941.
:type idVendor: int
:param idProduct: the USB "product ID" number, for example 0x8021.
:type idProduct: int
"""
def __init__(self, idVendor, idProduct):
self.dev = usb.core.find(idVendor=idVendor, idProduct=idProduct)
if not self.dev:
raise IOError("Weather station device not found")
if sys.platform.startswith('linux'):
try:
detach = self.dev.is_kernel_driver_active(0)
except NotImplementedError:
detach = True
if detach:
try:
self.dev.detach_kernel_driver(0)
except usb.core.USBError:
pass
self.dev.reset()
self.dev.set_configuration()
usb.util.claim_interface(self.dev, 0)
def read_data(self, size):
"""Receive data from the device.
If the read fails for any reason, an :obj:`IOError` exception
is raised.
:param size: the number of bytes to read.
:type size: int
:return: the data received.
:rtype: list(int)
"""
result = self.dev.read(0x81, size, timeout=1200)
if not result or len(result) < size:
raise IOError('pywws.device_pyusb1.USBDevice.read_data failed')
return list(result)
def write_data(self, buf):
"""Send data to the device.
If the write fails for any reason, an :obj:`IOError` exception
is raised.
:param buf: the data to send.
:type buf: list(int)
:return: success status.
:rtype: bool
"""
bmRequestType = usb.util.build_request_type(
usb.util.ENDPOINT_OUT,
usb.util.CTRL_TYPE_CLASS,
usb.util.CTRL_RECIPIENT_INTERFACE
)
result = self.dev.ctrl_transfer(
bmRequestType=bmRequestType,
bRequest=usb.REQ_SET_CONFIGURATION,
data_or_wLength=buf,
wValue=0x200,
timeout=50)
if result != len(buf):
raise IOError('pywws.device_pyusb1.USBDevice.write_data failed')
return True
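# Illustrative sketch (not part of the original module): opening the station
# with the usual 0x1941:0x8021 vendor/product IDs and reading a 32-byte block.
def _example_read():  # pragma: no cover
    dev = USBDevice(0x1941, 0x8021)
    return dev.read_data(0x20)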
| gpl-2.0 | 4,071,569,313,834,991,000 | 33.722628 | 104 | 0.646206 | false |
zverevalexei/trex-http-proxy | error_messages.py | 1 | 1559 | error = {
'not_implemented': {
'err_code': 'not_implemented',
'err_description': 'Method is not implemented',
'err_resolution': 'Check your request.'
},
'not_json': {
'err_code': 'not_json',
        'err_description': 'Request contains data in a format other than JSON.',
'err_resolution': 'Check your request, or contact the developer.'
},
'no_request_data': {
'err_code': 'no_request_data',
'err_description': 'Request data is empty.',
'err_resolution': 'Check your request, or contact the developer.'
},
'trex_not_start': {
'err_code': 'trex_not_start',
'err_description': 'TRex could not start to generate traffic.',
'err_resolution': 'Check with developer team.'
},
'ascii_error': {
'err_code': 'ascii_error',
'err_description': 'TRex supports ASCII characters only.',
'err_resolution': 'Please verify input data and make sure it contains ASCII-compatible symbols only.'
},
'trex_already_running': {
'err_code': 'trex_already_running',
'err_description': 'TRex is running already.',
'err_resolution': 'Stop traffic, then try to start it again.'
},
'pps_must_be_positive': {
'err_code': 'pps_must_be_positive',
'err_description': 'PPS must have a positive value (>0). Traffic stopped.',
'err_resolution': 'Choose a positive value to start server.'
}
}
# Get an error's details by its code
def get_error_message(code):
return error[code]
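# Illustrative sketch (not part of the original module): looking up an error
# definition, e.g. to build an API response body.
def _example_lookup():
    details = get_error_message('not_json')
    return {'error': details['err_code'], 'message': details['err_description']}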
| mit | 9,159,863,534,013,109,000 | 36.119048 | 109 | 0.601668 | false |
juanka1331/VAN-applied-to-Nifti-images | final_scripts/benchmark/benchmark_cvae_swap_over_kernel.py | 1 | 19454 | import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.getcwd())))
import tarfile
import time
from datetime import datetime
import lib.neural_net.kfrans_ops as ops
import settings
from lib import session_helper as session, timing_helper
from lib.data_loader.mri_loader import load_mri_data_3d
from lib.data_loader.pet_loader import load_pet_data_3d
from lib.over_regions_lib import cvae_over_regions
from lib.utils import cv_utils
from lib.utils import evaluation_utils
from lib.utils import output_utils
from lib.utils import svm_utils
from lib.utils.auc_output_handler import stringfy_auc_information
from lib.utils.evaluation_logger_helper import evaluation_container_to_log_file
from lib.utils.evaluation_utils import get_average_over_metrics
from lib.utils.os_aux import create_directories
from settings import explicit_iter_per_region
from final_scripts.benchmark import benchmark_helper as helper
session_datetime = datetime.now().isoformat()
print("Time session init: {}".format(session_datetime))
# META SETTINGS
images_used = "PET"
#images_used = "MRI"
# Session settings
session_name = "CVAE_session_swap_kernel_{0}".format(images_used)
session_path = os.path.join(settings.path_to_general_out_folder, session_name)
historial_path = os.path.join(session_path, "historical")
create_directories([session_path, historial_path])
# SWAAP SETTINGS
n_folds = 10
bool_test = False
swap_over = "kernel_size"
regions_used = "most_important"
list_regions = session.select_regions_to_evaluate(regions_used)
# list_regions = [85, 6, 7]
# Vae settings
# Net Configuration
kernel_list = [10, 9, 8, 7, 6, 5, 4, 3, 2]
# SVM_over_regions_threshold = None
SVM_over_regions_threshold = 0  # Middle value
# SMV_over_regions_threshold = None
SMV_over_regions_threshold = 0.5  # Middle value
# CMV_over_regions_threshold = None
CMV_over_regions_threshold = 0  # Middle value
hyperparams = {
"latent_layer_dim": 100,
'activation_layer': ops.lrelu,
'features_depth': [1, 16, 32],
'decay_rate': 0.002,
'learning_rate': 0.001,
# "cvae_model": "3layers",
"cvae_model": "2layers",
'stride': 2,
'lambda_l2_regularization': 0.0001}
# VAE session configuration
cvae_session_conf = {
"batch_size": 32,
"bool_normalized": False,
"n_iters": 100,
"save_meta_bool": False,
"show_error_iter": 10,
}
# Each *_over_regions_threshold above may be None or a value between 0 and 1
# OUTPUT: Files initialization
loop_output_file_simple_majority_vote = os.path.join(
session_path, "loop_output_simple_majority_vote.csv")
loop_output_file_complex_majority_vote = os.path.join(
session_path, "loop_output_complex_majority_vote.csv")
loop_output_file_weighted_svm = os.path.join(
session_path, "loop_output_weighted_svm.csv")
loop_output_file_timing = os.path.join(
session_path, "loop_output_timing.csv")
evaluations_per_sample_log_file = os.path.join(
session_path, "test_scores_evaluation_per_sample.log")
full_evaluations_per_sample_log_file = os.path.join(
session_path, "full_scores_evaluation_per_sample.log")
loop_output_path_session_description = os.path.join(
session_path, "session_description.csv")
tar_file_main_output_path = os.path.join(
session_path, "{0}_{1}.tar.gz".format(historial_path, session_datetime))
roc_logs_file_path = os.path.join(session_path, "roc.logs")
list_paths_files_to_store = [loop_output_file_simple_majority_vote,
loop_output_file_complex_majority_vote,
loop_output_file_weighted_svm,
roc_logs_file_path,
loop_output_path_session_description,
loop_output_file_timing,
evaluations_per_sample_log_file]
roc_logs_file = open(roc_logs_file_path, "w")
# SESSION DESCRIPTOR ELABORATION
session_descriptor = {}
session_descriptor['meta settings'] = {
"n_folds": n_folds,
"bool_test": bool_test,
"regions_used": regions_used,
"loop_over_kernel": str(kernel_list),
"Support_Vector_Machine over regions threshold": SVM_over_regions_threshold,
"Simple_Majority_Vote over regions threshold": SMV_over_regions_threshold,
"Complex_Majority_Vote over regions threshold": CMV_over_regions_threshold
}
# Session Description Handling
session_descriptor['VAE'] = {}
session_descriptor['VAE']["net configuration"] = hyperparams
session_descriptor['VAE']["session configuration"] = cvae_session_conf
file_session_descriptor = open(loop_output_path_session_description, "w")
output_utils.print_recursive_dict(session_descriptor,
file=file_session_descriptor)
file_session_descriptor.close()
# LOADING DATA // Initialize
n_samples = 0
patient_labels = None
region_to_3dimg_dict_mri_gm = None
region_to_3dimg_dict_mri_wm = None
region_to_3dimg_dict_pet = None
if images_used == "PET":
region_to_3dimg_dict_pet, patient_labels, n_samples = \
load_pet_data_3d(list_regions)
elif images_used == "MRI":
region_to_3dimg_dict_mri_gm, region_to_3dimg_dict_mri_wm, \
patient_labels, n_samples = load_mri_data_3d(list_regions)
# RESULTS CONTAINER // Initialize
list_averages_svm_weighted = []
list_averages_simple_majority_vote = []
list_averages_decision_net = []
list_averages_complex_majority_vote = []
list_averages_timing = []
auc_header = "{0}; fold; evaluation; test|train; " \
"false_positive_rate; true_positive_rate;" \
"threshold ".format(swap_over)
roc_logs_file.write("{}\n".format(auc_header))
dic_container_evaluations = {
"SVM": {},
"SMV": {},
"CMV": {},
}
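# Filled in below as: evaluation type -> swap value (kernel size) -> fold index
# -> per-sample scores/activations.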
# Structure to store the kfold sample distribution in each swap value
k_fold_container = {}
for swap_variable_index in kernel_list:
print("Evaluating the system with a kernel size of {} ".format(
swap_variable_index))
    # The same kernel size is replicated along all three kernel dimensions
hyperparams["kernel_size"] = [swap_variable_index] * 3
# OUTPUT SETTINGS
# OUTPUT: List of dictionaries
complex_majority_vote_k_folds_results_train = []
complex_majority_vote_k_folds_results_test = []
simple_majority_vote_k_folds_results_train = []
simple_majority_vote_k_folds_results_test = []
decision_net_k_folds_results_train = []
decision_net_vote_k_folds_results_test = []
svm_weighted_regions_k_folds_results_train = []
svm_weighted_regions_k_folds_results_test = []
svm_weighted_regions_k_folds_coefs = []
# initializing evaluation container
dic_container_evaluations["SVM"][swap_variable_index] = {}
dic_container_evaluations["SMV"][swap_variable_index] = {}
dic_container_evaluations["CMV"][swap_variable_index] = {}
available_regions = None
# Different timing dict per class NOR/AD
if images_used == "MRI":
timing = {
"MRI_GM_neuralnet": [],
"MRI_WM_neuralnet": [],
}
elif images_used == "PET":
timing = {
"PET":[]
}
k_fold_dict = cv_utils.generate_k_folder_in_dict(n_samples, n_folds)
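    # k_fold_dict maps each fold index to the train/test sample-index split used below.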
k_fold_container[swap_variable_index] = k_fold_dict
for k_fold_index in range(0, n_folds, 1):
print("Kfold {} Selected".format(k_fold_index))
vae_output = {}
# Structure the data Dic["test|"train"] -> Samples (Known the kfold)
if images_used == "MRI":
reg_to_group_to_images_dict_mri_gm = \
cv_utils.restructure_dictionary_based_on_cv(
dict_train_test_index=k_fold_dict[k_fold_index],
region_to_img_dict=region_to_3dimg_dict_mri_gm)
reg_to_group_to_images_dict_mri_wm = \
cv_utils.restructure_dictionary_based_on_cv(
dict_train_test_index=k_fold_dict[k_fold_index],
region_to_img_dict=region_to_3dimg_dict_mri_wm)
if images_used == "PET":
reg_to_group_to_images_dict_pet = \
cv_utils.restructure_dictionary_based_on_cv(
dict_train_test_index=k_fold_dict[k_fold_index],
region_to_img_dict=region_to_3dimg_dict_pet)
Y_train, Y_test = \
cv_utils.get_test_and_train_labels_from_kfold_dict_entry(
k_fold_entry=k_fold_dict[k_fold_index],
patient_labels=patient_labels)
if bool_test:
print("Number test samples {}".format(len(Y_test)))
print("Number train samples {}".format(len(Y_train)))
# MRI Auto-encoder Extract of features
if images_used == "MRI":
print("Training MRI regions over GM")
time_reference = time.time()
vae_output["gm"], regions_whose_net_not_converge_gm = \
cvae_over_regions.execute_without_any_logs(
region_train_cubes_dict=reg_to_group_to_images_dict_mri_gm["train"],
hyperparams=hyperparams,
session_conf=cvae_session_conf,
list_regions=list_regions,
path_to_root=None,
region_test_cubes_dict=reg_to_group_to_images_dict_mri_gm["test"],
explicit_iter_per_region=explicit_iter_per_region
)
timing["MRI_GM_neuralnet"].append(time.time() - time_reference)
print("Not converging regions GM {}".format(str(regions_whose_net_not_converge_gm)))
print("Training MRI regions over WM")
time_reference = time.time()
vae_output["wm"], regions_whose_net_not_converge_wm, \
= cvae_over_regions.execute_without_any_logs(
region_train_cubes_dict=reg_to_group_to_images_dict_mri_wm[
"train"],
hyperparams=hyperparams,
session_conf=cvae_session_conf,
list_regions=list_regions,
path_to_root=None,
region_test_cubes_dict=reg_to_group_to_images_dict_mri_wm[
"test"],
explicit_iter_per_region=explicit_iter_per_region
)
timing["MRI_WM_neuralnet"].append(time.time() - time_reference)
print("Not converging regions GM {}".format(
str(regions_whose_net_not_converge_wm)))
regions_whose_net_not_converge = \
regions_whose_net_not_converge_gm + \
[x for x in regions_whose_net_not_converge_wm
if x not in regions_whose_net_not_converge_gm]
print("Not converging total regions {}".format(
str(regions_whose_net_not_converge)))
available_regions = [region for region in list_regions
if region not in regions_whose_net_not_converge]
if len(available_regions) == 0:
print("No one region neural net converges successfully,"
"The parameters used should be changed. Exiting")
sys.exit(0)
# [patient x region]
train_score_matriz, test_score_matriz = svm_utils.svm_mri_over_vae_output(
vae_output, Y_train, Y_test, available_regions,
bool_test=bool_test)
if images_used == "PET":
print("Train PET over regions")
time_reference = time.time()
vae_output, regions_whose_net_not_converge = \
cvae_over_regions.execute_without_any_logs(
region_train_cubes_dict=reg_to_group_to_images_dict_pet[
"train"],
hyperparams=hyperparams,
session_conf=cvae_session_conf,
list_regions=list_regions,
path_to_root=None,
region_test_cubes_dict=reg_to_group_to_images_dict_pet[
"test"],
explicit_iter_per_region=explicit_iter_per_region
)
timing["PET"].append(time.time() - time_reference)
print("Not converging total regions {}".format(
str(regions_whose_net_not_converge)))
available_regions = [region for region in list_regions
if region not in regions_whose_net_not_converge]
if len(available_regions) == 0:
print("No one region neural net converges successfully,"
"The parameters used should be changed. Exiting")
sys.exit(0)
train_score_matriz, test_score_matriz = svm_utils.svm_pet_over_vae_output(
vae_output, Y_train, Y_test, available_regions,
bool_test=bool_test)
# End Auto-encoder Process. Extraction of Feature
data = helper.organize_data(
test_score_matriz, Y_test, train_score_matriz, Y_train)
if bool_test:
print("\nMatriz svm scores -> shapes, before complex majority vote")
print("train matriz [patients x region]: " + str(
train_score_matriz.shape))
print("test matriz scores [patient x region]: " + str(
test_score_matriz.shape))
print("RESULTS: Output kfolds nº {}".format(k_fold_index))
# COMPLEX MAJORITY VOTE
complex_output_dic_test, complex_output_dic_train, roc_dic, \
CMV_means_activation_dic = \
evaluation_utils.complex_majority_vote_evaluation(
data, bool_test=bool_test,
threshold_fixed=CMV_over_regions_threshold)
# Adding logs about means activation:
dic_container_evaluations["CMV"][swap_variable_index][k_fold_index] = \
CMV_means_activation_dic
# Adding roc results to log file
roc_test_string, roc_train_string = stringfy_auc_information(
swap_over=swap_variable_index,
k_fold_index=k_fold_index,
evaluation="Complex_Majority_Vote",
roc_dic=roc_dic)
roc_logs_file.write("{}\n".format(roc_train_string))
roc_logs_file.write("{}\n".format(roc_test_string))
# Adding results to kfolds output
complex_majority_vote_k_folds_results_train.append(
complex_output_dic_train)
complex_majority_vote_k_folds_results_test.append(
complex_output_dic_test)
if bool_test:
print("\nMatriz svm scores -> shapes, after complex majority vote")
print("train matriz [patients x regions]: " + str(
train_score_matriz.shape))
print("test matriz scores [patients x regions]: " + str(
test_score_matriz.shape))
print("Complex Majority Vote Test: " + str(complex_output_dic_test))
print("Complex Majority Vote Train: " + str(complex_output_dic_train))
# SIMPLE MAJORITY VOTE
simple_output_dic_train, simple_output_dic_test, roc_dic, \
SMV_means_activation_dic = \
evaluation_utils.simple_majority_vote(data,
bool_test=False, threshold_fixed=SMV_over_regions_threshold)
# Adding logs about means activation:
dic_container_evaluations["SMV"][swap_variable_index][k_fold_index] = \
SMV_means_activation_dic
roc_test_string, roc_train_string = stringfy_auc_information(
swap_over=swap_variable_index,
k_fold_index=k_fold_index,
evaluation="Simple_Majority_Vote",
roc_dic=roc_dic)
roc_logs_file.write("{}\n".format(roc_train_string))
roc_logs_file.write("{}\n".format(roc_test_string))
print("Simple Majority Vote Test: " + str(simple_output_dic_test))
print("Simple Majority Vote Train: " + str(simple_output_dic_train))
simple_majority_vote_k_folds_results_train.append(
simple_output_dic_train)
simple_majority_vote_k_folds_results_test.append(
simple_output_dic_test)
# SVM weighted REGIONS RESULTS
print("DECISION WEIGHTING SVM OUTPUTS")
weighted_output_dic_test, weighted_output_dic_train, \
aux_dic_regions_weight_coefs, roc_dic, \
evaluation_sample_scores = \
evaluation_utils.weighted_svm_decision_evaluation(
data, available_regions, bool_test=bool_test,
threshold_fixed=SVM_over_regions_threshold)
# Evaluations Loggins
dic_container_evaluations["SVM"][swap_variable_index][k_fold_index] = \
evaluation_sample_scores
# Roc loggings
roc_test_string, roc_train_string = stringfy_auc_information(
swap_over=swap_variable_index,
k_fold_index=k_fold_index,
evaluation="SVM_weighted",
roc_dic=roc_dic)
roc_logs_file.write("{}\n".format(roc_train_string))
roc_logs_file.write("{}\n".format(roc_test_string))
svm_weighted_regions_k_folds_results_train.append(
weighted_output_dic_train)
svm_weighted_regions_k_folds_results_test.append(
weighted_output_dic_test)
svm_weighted_regions_k_folds_coefs.append(aux_dic_regions_weight_coefs)
print("SVM classification Test: " + str(weighted_output_dic_test))
print("SVM classification Train: " + str(weighted_output_dic_train))
# KFOLD LOOP ENDED
# Extra field, swap over property
extra_field = {swap_over: str(swap_variable_index)}
# Timing scripts, mean over kfolds results
average_timing = timing_helper.get_averages_timing_dict_per_images_used(
timing_dict=timing,
images_used=images_used
)
average_timing.update(extra_field)
# GET AVERAGE RESULTS OVER METRICS
averages_simple_majority_vote = get_average_over_metrics(
simple_majority_vote_k_folds_results_test)
averages_simple_majority_vote.update(extra_field)
averages_complex_majority_vote = get_average_over_metrics(
complex_majority_vote_k_folds_results_test)
averages_complex_majority_vote.update(extra_field)
averages_svm_weighted = get_average_over_metrics(
svm_weighted_regions_k_folds_results_test)
averages_svm_weighted.update(extra_field)
list_averages_svm_weighted.append(averages_svm_weighted)
list_averages_simple_majority_vote.append(averages_simple_majority_vote)
list_averages_complex_majority_vote.append(averages_complex_majority_vote)
list_averages_timing.append(average_timing)
# Outputs files
# simple majority
output_utils.print_dictionary_with_header(
loop_output_file_simple_majority_vote,
list_averages_simple_majority_vote)
# complex majority
output_utils.print_dictionary_with_header(
loop_output_file_complex_majority_vote,
list_averages_complex_majority_vote)
output_utils.print_dictionary_with_header(
loop_output_file_weighted_svm,
list_averages_svm_weighted)
output_utils.print_dictionary_with_header(
loop_output_file_timing,
list_averages_timing)
roc_logs_file.close()
evaluation_container_to_log_file(
path_file_test_out=evaluations_per_sample_log_file ,
path_file_full_out = full_evaluations_per_sample_log_file,
evaluation_container=dic_container_evaluations,
k_fold_container = k_fold_container,
swap_variable_list=kernel_list,
n_samples=n_samples)
# Tarfile to group the results
tar = tarfile.open(tar_file_main_output_path, "w:gz")
for file in list_paths_files_to_store:
tar.add(file)
tar.close()
| gpl-2.0 | -8,908,820,016,381,429,000 | 36.994141 | 96 | 0.634915 | false |
SShrike/bluepill | tests/test_server.py | 1 | 2087 | # -*- coding: utf-8 -*-
# Copyright (c) 2016 Severen Redwood <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from flask import Flask
from flask_restful import Api
from bluepill import BluePillStateError
from bluepill.server import BackgroundServer, get_application, get_api
@pytest.fixture
def server():
"""Create an instance of BackgroundServer for testing."""
return BackgroundServer()
def test_get_application():
# TODO: Check that the routes are all correct.
# Check that it returns a Flask application instance.
assert(isinstance(get_application(), Flask))
def test_get_api():
# Check that it returns a Flask-RESTful API instance.
assert(isinstance(get_api(), Api))
def test_backgroundserver(server):
# TODO: Terminate server if it hangs and fail test.
# Test starting and immediately stopping.
server.start()
server.stop()
def test_backgroundserver_already_started(server):
# Test the already started error.
with pytest.raises(BluePillStateError) as e:
server.start()
server.start()
assert e.value.message == 'Server already running.'
# Test that the state is correct.
assert server.get_state() == BackgroundServer.STARTED
def test_backgroundserver_already_stopped(server):
# Test the already stopped error.
with pytest.raises(BluePillStateError) as e:
server.stop()
server.stop()
assert e.value.message == 'Server already stopped.'
# Test that the state is correct.
assert server.get_state() == BackgroundServer.STOPPED
| apache-2.0 | -2,015,381,984,920,242,200 | 32.126984 | 74 | 0.725922 | false |
w2naf/pythonPropWeb | pyprop/voacapgui.py | 1 | 68775 | #!/usr/bin/env python
#
# File: voacapgui
#
# Copyright (c) 2009 J.Watson
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
from __future__ import with_statement
import sys
import os
import datetime
import subprocess
import time
import re
from dateutil.relativedelta import relativedelta
from ConfigParser import *
try:
import pygtk
pygtk.require("2.0")
import gobject
except:
pass
try:
import gtk
import gtk.glade
except:
sys.exit(1)
import gettext
import locale
GETTEXT_DOMAIN = 'voacapgui'
LOCALE_PATH = os.path.join(os.path.realpath(os.path.dirname(sys.argv[0])), 'po')
langs = []
lc, enc = locale.getdefaultlocale()
if lc:
langs = [lc]
language = os.environ.get('LANGUAGE', None)
if language:
langs += language.split(':')
gettext.bindtextdomain(GETTEXT_DOMAIN, LOCALE_PATH)
gettext.textdomain(GETTEXT_DOMAIN)
lang = gettext.translation(GETTEXT_DOMAIN, LOCALE_PATH, languages=langs, fallback=True)
lang.install()
# glade file
# see http://bugzilla.gnome.org/show_bug.cgi?id=344926 for why the
# next two commands look repeated.
gtk.glade.bindtextdomain(GETTEXT_DOMAIN, LOCALE_PATH)
gtk.glade.textdomain(GETTEXT_DOMAIN)
gettext.bindtextdomain(GETTEXT_DOMAIN, LOCALE_PATH)
gettext.textdomain(GETTEXT_DOMAIN)
from voaTextFileViewDialog import *
from voaDatFile import *
from voaDefaults import *
from voaSiteChooser import *
from voaP2PPlot import *
from voaP2PPlotgui import *
from voaAreaPlotgui import *
from ssnFetch import *
from voaSSNThumb import *
from voaFile import *
from voaAreaChooser import *
from voaAntennaChooser import *
class VOACAP_GUI:
"""GUI to create VOAArea Input Files"""
# Determine where the itshfbc and prefs files are, based on OS
# The windows paths are guesses and need checking....
if os.name == 'nt':
itshfbc_path = 'C:\itshfbc'
prefs_dir = 'C:\itshfbc\database\\'
else:
itshfbc_path = os.path.expanduser("~")+os.sep+'itshfbc'
prefs_dir = os.path.expanduser("~")+os.sep+'.voacapgui'+os.sep
prefs_path = prefs_dir + 'voacapgui.prefs'
ssn_path = prefs_dir + 'sunspot.predict'
    # Check if the prefs directory exists, create one if it doesn't.
    # (This is probably not required, as the installer will probably end up
    # creating and populating this directory.)
if not os.path.isdir(prefs_dir):
os.makedirs(prefs_dir)
#ant_list = []
firstCornerX = 0
firstCornerY = 0
area_rect = VOAAreaRect()
model_list = ('CCIR', 'URSI88')
path_list = (_('Short'), _('Long'))
# These need to be lists later on to support multiple antennas
tx_antenna_path = ''
rx_antenna_path = ''
main_window_size = (560, 410)
site_chooser_map_size = area_chooser_map_size = (384,192)
antenna_chooser_size = (500,400)
def __init__(self):
self.area_templates_file = None
#Set the GUI file
self.uifile = os.path.join(os.path.realpath(os.path.dirname(sys.argv[0])), "voacapgui.glade")
self.wTree = gtk.Builder()
self.wTree.add_from_file(self.uifile)
self.get_objects("main_window", "statusbar", "notebook",
"tx_site_button", "tx_site_entry", "tx_lat_spinbutton",
"tx_lon_spinbutton", "tx_antenna_button", "tx_antenna_entry",
"tx_bearing_button", "tx_bearing_spinbutton",
"tx_power_spinbutton", "rx_site_button", "rx_site_entry",
"rx_lat_spinbutton", "rx_lon_spinbutton", "rx_antenna_button",
"rx_antenna_entry", "rx_bearing_button",
"rx_bearing_spinbutton", "ssn_tv", "ssn_plot_box",
"ssn_file_data_label", "ssn_web_update_button",
"foe_spinbutton", "fof1_spinbutton", "fof2_spinbutton",
"foes_spinbutton", "model_combo", "path_combo",
"mm_noise_spinbutton", "min_toa_spinbutton",
"reliability_spinbutton", "snr_spinbutton", "mpath_spinbutton",
"delay_spinbutton", "area_tv", "delbt", "savebt",
"templatescb", "gridsizespinbutton", "addtemplbt", "areayearspinbutton",
"freqspinbutton", "monthspinbutton", "utcspinbutton", "addbt",
"rstbt", "areabt", "area_label", "arearunbt", "p2pmy_tv",
"p2pfreq_tv", "p2pmydelbt", "p2pmyrstbt", "p2pfreqdelbt",
"p2pfreqrstbt", "p2psavebt", "p2padd_mybt", "p2padd_freqbt",
"p2pfreqspinbutton", "p2pdayspinbutton", "p2pmonthspinbutton",
"p2pyearspinbutton", "p2pcircuitcb", "p2pgraphcb", "p2prunbt",
"p2pcalbt", "p2pusedayck", "p2pmacrocb", "p2pmacroaddbt",
)
self.p2pcalbt.set_label(_('_Cal'))
self.p2pcalbt.set_image(gtk.image_new_from_stock(gtk.STOCK_INDEX, gtk.ICON_SIZE_BUTTON))
self.p2p_useday = False
self.p2pdayspinbutton.set_sensitive(self.p2p_useday)
self.p2puseday_handler_id = self.p2pusedayck.connect('toggled', self.p2p_useday_tog)
today = datetime.today()
self.p2pyearspinbutton.set_value(today.year)
self.p2pmonthspinbutton.set_value(today.month)
self.p2pdayspinbutton.set_value(today.day)
self.p2pfreqspinbutton.set_value(14.2)
self.main_window.resize(self.main_window_size[0], self.main_window_size[1])
_model = gtk.ListStore(gobject.TYPE_STRING)
for item in self.model_list:
_model.append([item])
self.populate_combo(self.model_combo, _model)
_model = gtk.ListStore(gobject.TYPE_STRING)
for item in self.path_list:
_model.append([item])
self.populate_combo(self.path_combo, _model)
self.max_vg_files_warn = False
self.max_frequencies_warn = False
if os.name == 'posix':
self.max_vg_files = 25 #This was originally set to 12 in earlier versions of voacapl.
else:
self.max_vg_files = 9 # DOS 8.3 filenames
self.gridsizespinbutton.set_value(125)
self.areayearspinbutton.set_value(today.year)
self.monthspinbutton.set_value(today.month)
self.freqspinbutton.set_value(14.1)
self.ssn_repo = SSNFetch(save_location = self.ssn_path, s_bar=self.statusbar)
_min, _max = self.ssn_repo.get_data_range()
self.p2pyearspinbutton.set_range(float(_min), float(_max))
self.areayearspinbutton.set_range(float(_min), float(_max))
#self.write_ssns(self.ssn_repo.get_ssn_list())
self.build_area_tv()
self.ssn_build_tv()
self.build_p2p_tvs()
self.build_circuitcb()
self.build_graphcb()
self.build_macrocb()
self.read_user_prefs()
if not self.area_templates_file:
self.build_new_template_file()
self.area_label.set_text(self.area_rect.get_formatted_string())
self.build_area_template_ts()
        #Create event dictionary and connect it
event_dic = { "on_main_window_destroy" : self.quit_application,
"on_tx_site_button_clicked" : (self.choose_site, 'tx'),
"on_rx_site_button_clicked" : (self.choose_site, 'rx'),
"on_tx_antenna_button_clicked" : (self.choose_antenna, 'tx'),
"on_rx_antenna_button_clicked" : (self.choose_antenna, 'rx'),
"on_tx_bearing_button_clicked" : (self.calculate_antenna_bearing, 'tx'),
"on_rx_bearing_button_clicked" : (self.calculate_antenna_bearing, 'rx'),
"on_mi_circuit_activate" : self.verify_input_data,
"on_mi_graph_activate" : self.verify_input_data,
"on_mi_run_activate": self.run_prediction,
"on_mi_about_activate" : self.show_about_dialog,
"on_mi_quit_activate" : self.quit_application,
"on_main_window_destroy" : self.quit_application,
"on_ssn_web_update_button_clicked" : self.update_ssn_table,
# notebook area page widgets event dict
'on_notebook_switch_page' : self.nb_switch_page,
'on_addbt_clicked' : self.area_add_tv_row_from_user,
'on_addtemplbt_clicked' : self.area_add_template,
'on_templatescb_changed' : self.area_templatescb_change,
'on_delbt_clicked' : self.area_del_tv_row,
'on_savebt_clicked' : self.area_save_as_template,
'on_rstbt_clicked' : self.area_clean_tv,
'on_areabt_clicked' : self.show_area_chooser,
'on_arearunbt_clicked' : self.run_prediction,
# notebook p2p page widgets event dict
'on_p2padd_mybt_clicked' : self.p2pmy_add_tv_row_from_user,
'on_p2padd_freqbt_clicked' : self.p2pfreq_add_tv_row_from_user,
'on_p2pmydelbt_clicked' : self.p2p_del_my_tv_row,
'on_p2pfreqdelbt_clicked' : self.p2p_del_freq_tv_row,
'on_p2psavebt_clicked' : self.p2p_save_as_template,
'on_p2pmyrstbt_clicked' : self.p2p_clean_my_tv,
'on_p2pfreqrstbt_clicked' : self.p2p_clean_freq_tv,
'on_p2prunbt_clicked' : self.run_prediction,
'on_p2pcalbt_clicked' : self.p2p_calendar,
# 'on_p2pusedayck_toggled' : self.p2p_useday_tog,
'on_p2pmacroaddbt_clicked' : self.p2p_add_macro,
}
self.wTree.connect_signals(event_dic)
# area plot accelgrp
self.area_accelgrp = None
# test for ~/itshfbc tree
if not os.path.exists(self.itshfbc_path):
e = _("ITSHFBC directory not found")
if os.name == 'posix':
e_os = _("Please install voacap for Linux and run 'makeitshfbc'.\n")
e_os += _("A 'itshfbc' directory cannot be found at: %s.\n") % (self.itshfbc_path)
e_os += _("Please install voacap before running voacapgui.")
dialog = gtk.MessageDialog(self.main_window,
gtk.DIALOG_MODAL|gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_ERROR, gtk.BUTTONS_CLOSE, e )
dialog.format_secondary_text(e_os)
dialog.run()
dialog.destroy()
return -1
def populate_combo(self, cb, model):
cb.set_model(model)
cell = gtk.CellRendererText()
cb.pack_start(cell, True)
cb.add_attribute(cell, 'text', 0)
#cb.set_wrap_width(20)
cb.set_active(0)
def get_objects(self, *names):
for name in names:
widget = self.wTree.get_object(name)
if widget is None:
raise ValueError, "Widget '%s' not found" % name
setattr(self, name, widget)
def choose_antenna(self, widget, site):
#print self.antenna_chooser_size
dialog = VOAAntennaChooser(self.itshfbc_path, size=self.antenna_chooser_size, parent=self.main_window)
return_code, return_antenna, antenna_description, self.antenna_chooser_size = dialog.run()
#print self.antenna_chooser_size
if ((return_code == 0) and (return_antenna)): # response_id: 0=OK, 1=Cancel
if site == 'tx':
self.tx_antenna_entry.set_text(return_antenna + ' : ' + antenna_description)
self.tx_antenna_path = return_antenna
else:
self.rx_antenna_entry.set_text(return_antenna + ' : ' + antenna_description)
self.rx_antenna_path = return_antenna
def choose_site(self, widget, site):
if site == 'tx':
lat = self.tx_lat_spinbutton.get_value()
lon = self.tx_lon_spinbutton.get_value()
name = self.tx_site_entry.get_text()
elif site == 'rx':
lat = self.rx_lat_spinbutton.get_value()
lon = self.rx_lon_spinbutton.get_value()
name = self.rx_site_entry.get_text()
else:
lat = 0
lon = 0
name = ''
dialog = VOASiteChooser(HamLocation(lat, lon, name), \
self.site_chooser_map_size, \
itshfbc_path=self.itshfbc_path, \
parent=self.main_window)
return_code, location, self.site_chooser_map_size = dialog.run()
if (return_code == 0): # response_id: 0=OK, 1=Cancel
if site == 'tx':
self.tx_site_entry.set_text(location.get_name())
self.tx_lat_spinbutton.set_value(location.get_latitude())
self.tx_lon_spinbutton.set_value(location.get_longitude())
else:
self.rx_site_entry.set_text(location.get_name())
self.rx_lat_spinbutton.set_value(location.get_latitude())
self.rx_lon_spinbutton.set_value(location.get_longitude())
def calculate_antenna_bearing(self, widget, site):
try:
tx_loc = HamLocation(self.tx_lat_spinbutton.get_value(),
lon = self.tx_lon_spinbutton.get_value())
rx_loc = HamLocation(float(self.rx_lat_spinbutton.get_value()),
lon = self.rx_lon_spinbutton.get_value())
except Exception:
#todo add a note to the status bar explaining the reason
#for the failure to actually do anything
return
if site == 'tx':
bearing, distance = tx_loc.path_to(rx_loc)
self.tx_bearing_spinbutton.set_value(bearing)
else:
bearing, distance = rx_loc.path_to(tx_loc)
self.rx_bearing_spinbutton.set_value(bearing)
def read_user_prefs(self) :
config = ConfigParser(VOADefaultDictionary())
config.read(self.prefs_path)
#set some defaults here for the system variables
try:
self.foe_spinbutton.set_value(float(config.get('DEFAULT','foe')))
self.fof1_spinbutton.set_value(float(config.get('DEFAULT','fof1')))
self.fof2_spinbutton.set_value(float(config.get('DEFAULT','fof2')))
self.foes_spinbutton.set_value(float(config.get('DEFAULT','foes')))
self.model_combo.set_active(int(config.get('DEFAULT', 'model')))
self.path_combo.set_active(int(config.get('DEFAULT', 'path')))
self.mm_noise_spinbutton.set_value(float(config.get('DEFAULT','mm_noise')))
self.min_toa_spinbutton.set_value(float(config.get('DEFAULT','min_toa')))
self.reliability_spinbutton.set_value(float(config.get('DEFAULT','required_reliability')))
self.snr_spinbutton.set_value(float(config.get('DEFAULT','required_snr')))
self.mpath_spinbutton.set_value(float(config.get('DEFAULT','mpath')))
self.delay_spinbutton.set_value(float(config.get('DEFAULT','delay')))
self.tx_bearing_spinbutton.set_value(float(config.get('DEFAULT', 'tx_bearing')))
self.tx_power_spinbutton.set_value(float(config.get('DEFAULT', 'tx_power')))
self.rx_bearing_spinbutton.set_value(float(config.get('DEFAULT', 'rx_bearing')))
self.tx_site_entry.set_text(config.get('tx site','name'))
self.tx_lat_spinbutton.set_value(float(config.get('tx site','lat')))
self.tx_lon_spinbutton.set_value(float(config.get('tx site','lon')))
self.tx_antenna_entry.set_text(config.get('tx site', 'antenna' ))
self.tx_antenna_path, sep, suffix = (config.get('tx site', 'antenna' )).partition(' :')
self.tx_bearing_spinbutton.set_value(float(config.get('tx site', 'bearing')))
self.tx_power_spinbutton.set_value(float(config.get('tx site', 'power')))
self.rx_site_entry.set_text(config.get('rx site','name'))
self.rx_lat_spinbutton.set_value(float(config.get('rx site','lat')))
self.rx_lon_spinbutton.set_value(float(config.get('rx site','lon')))
self.rx_antenna_entry.set_text(config.get('rx site', 'antenna' ))
self.rx_antenna_path, sep, suffix = (config.get('rx site', 'antenna' )).partition(' :')
self.rx_bearing_spinbutton.set_value(float(config.get('rx site', 'bearing')))
self.site_chooser_map_size = (config.getint('site chooser','map_width'),
config.getint('site chooser','map_height'))
self.area_chooser_map_size = (config.getint('area chooser','map_width'),
config.getint('area chooser','map_height'))
self.antenna_chooser_size = (config.getint('antenna chooser','width'),
config.getint('antenna chooser','height'))
self.gridsizespinbutton.set_value(config.getint('area', 'gridsize'))
self.areayearspinbutton.set_value(config.getint('area','year'))
self.monthspinbutton.set_value(config.getint('area','month'))
self.utcspinbutton.set_value(config.getint('area','utc'))
self.freqspinbutton.set_value(config.getfloat('area', 'frequency'))
self.area_templates_file = config.get('area', 'templates_file')
self.area_rect=VOAAreaRect(config.getfloat('area','sw_lat'),
config.getfloat('area','sw_lon'),
config.getfloat('area','ne_lat'),
config.getfloat('area','ne_lon'))
self.area_label.set_text(self.area_rect.get_formatted_string())
except Exception, X:
print 'Error reading the user prefs: %s - %s' % (Exception, X)
def save_user_prefs(self):
config = ConfigParser()
# voaSiteChooser map size
config.add_section('site chooser')
config.set('site chooser', 'map_width', self.site_chooser_map_size[0])
config.set('site chooser', 'map_height', self.site_chooser_map_size[1])
# voaAreaChooser map size
config.add_section('area chooser')
config.set('area chooser', 'map_width', self.area_chooser_map_size[0])
config.set('area chooser', 'map_height', self.area_chooser_map_size[1])
# voaAreaChooser map size
if self.antenna_chooser_size:
config.add_section('antenna chooser')
config.set('antenna chooser', 'width', self.antenna_chooser_size[0])
config.set('antenna chooser', 'height', self.antenna_chooser_size[1])
# Tx Site Parameters
config.add_section('tx site')
config.set('tx site', 'name', self.tx_site_entry.get_text())
config.set('tx site', 'lat', self.tx_lat_spinbutton.get_value())
config.set('tx site', 'lon', self.tx_lon_spinbutton.get_value())
config.set('tx site', 'antenna', self.tx_antenna_entry.get_text())
config.set('tx site', 'bearing', self.tx_bearing_spinbutton.get_value())
config.set('tx site', 'power', self.tx_power_spinbutton.get_value())
# Rx Site Parameters
config.add_section('rx site')
config.set('rx site', 'name', self.rx_site_entry.get_text())
config.set('rx site', 'lat', self.rx_lat_spinbutton.get_value())
config.set('rx site', 'lon', self.rx_lon_spinbutton.get_value())
config.set('rx site', 'antenna', self.rx_antenna_entry.get_text())
config.set('rx site', 'bearing', self.rx_bearing_spinbutton.get_value())
# Ionospheric Parameters
config.set('DEFAULT', 'foe', self.foe_spinbutton.get_value())
config.set('DEFAULT', 'fof1', self.fof1_spinbutton.get_value())
config.set('DEFAULT', 'fof2', self.fof2_spinbutton.get_value())
config.set('DEFAULT', 'foes', self.foes_spinbutton.get_value())
config.set('DEFAULT', 'model', self.model_combo.get_active())
config.set('DEFAULT', 'path', self.path_combo.get_active())
# System parameters
config.set('DEFAULT','mm_noise', self.mm_noise_spinbutton.get_value())
config.set('DEFAULT','min_toa', self.min_toa_spinbutton.get_value())
config.set('DEFAULT','required_reliability', self.reliability_spinbutton.get_value())
config.set('DEFAULT','required_snr', self.snr_spinbutton.get_value())
config.set('DEFAULT','mpath', self.mpath_spinbutton.get_value())
config.set('DEFAULT','delay', self.delay_spinbutton.get_value())
# area parameters
config.add_section('area')
config.set('area','gridsize', self.gridsizespinbutton.get_value_as_int())
config.set('area','year', self.areayearspinbutton.get_value_as_int())
config.set('area','month', self.monthspinbutton.get_value_as_int())
config.set('area','utc', self.utcspinbutton.get_value_as_int())
config.set('area','frequency', self.freqspinbutton.get_value())
config.set('area','sw_lat', self.area_rect.sw_lat)
config.set('area','sw_lon', self.area_rect.sw_lon)
config.set('area','ne_lat', self.area_rect.ne_lat)
config.set('area','ne_lon', self.area_rect.ne_lon)
config.set('area','templates_file', self.area_templates_file if self.area_templates_file else '')
with open(self.prefs_path, 'w') as configfile:
config.write(configfile)
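        # For reference, this produces a standard INI-style file: a DEFAULT
        # section for the ionospheric/system values plus sections such as
        # "[tx site]" with name/lat/lon/antenna/bearing/power keys.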
    # This function is called every time a run submenu is activated
# It enables/disables further submenus until input data is valid
# todo use the status bar to indicate the reason for any failure
def verify_input_data(self, widget):
valid = (self.is_ssn_valid() and self.is_tx_site_data_valid())
self.arearunbt.set_sensitive(self.savebt.props.sensitive & valid)
def is_ssn_valid(self):
_has_entry = False
# jwtodo check some ssn values exist
#
# This obviously needs fixing.
_has_entry = True
#
#
if _has_entry != True:
context_id = self.statusbar.get_context_id("nossns")
self.statusbar.push(context_id, _("No SSNs defined"))
return _has_entry
def is_tx_site_data_valid(self):
_is_valid = True
        if self.tx_power_spinbutton.get_value() == 0: _is_valid = False
if self.tx_antenna_path == '': _is_valid = False
return _is_valid
    def is_rx_site_data_valid(self):
        _is_valid = True
        if self.rx_antenna_path == '': _is_valid = False
        return _is_valid
#gettext here
#This function is used to force an update
def update_ssn_table(self, widget):
self.ssn_repo.update_ssn_file() #Force an update
# self.update_ssn_data_label()
self.ssn_file_data_label.set_text(self.ssn_repo.get_file_data())
#self.write_ssns(self.ssn_repo.get_ssn_list())
def update_ssn_data_label(self):
_text = _("SSN Data Last Updated:\n")
_text += self.ssn_repo.get_file_mtime_str()
self.ssn_file_data_label.set_text(_text)
#jwtodo figure out what this function does...
#def write_ssns(self, ssns):
# print 'Normally around this time I like to write the SSNs...'
def build_p2p_tvs(self):
# grey out delete and save buttons, since there are no entries in the model
self.p2pmydelbt.set_sensitive(False)
self.p2pmyrstbt.set_sensitive(False)
self.p2pfreqdelbt.set_sensitive(False)
self.p2pfreqrstbt.set_sensitive(False)
#?
self.p2psavebt.set_sensitive(False)
self.p2prunbt.set_sensitive(False)
# model: day, month name, month_ordinal, year
col_t = [ gobject.TYPE_UINT,
gobject.TYPE_STRING, gobject.TYPE_UINT, gobject.TYPE_UINT]
model_my = gtk.ListStore(*col_t)
col_t = [gobject.TYPE_STRING]
model_freq = gtk.ListStore(*col_t)
self.p2pmy_tv.set_model(model_my)
self.p2pfreq_tv.set_model(model_freq)
self.p2pmy_tv.set_property("rules_hint", True)
self.p2pmy_tv.set_property("enable_search", False)
self.p2pmy_tv.set_headers_visible(True)
self.p2pmy_tv.get_selection().set_mode(gtk.SELECTION_MULTIPLE)
self.p2pfreq_tv.set_property("rules_hint", True)
self.p2pfreq_tv.set_property("enable_search", False)
self.p2pfreq_tv.set_headers_visible(True)
self.p2pfreq_tv.get_selection().set_mode(gtk.SELECTION_MULTIPLE)
# col idx
self.p2pmy_tv_idx_day = 0
self.p2pmy_tv_idx_month_n = 1
self.p2pmy_tv_idx_month_i = 2
self.p2pmy_tv_idx_year = 3
self.p2pfreq_tv_idx_freq = 0
def dow_celldatafunction(column, cell, model, iter, user_data=None):
t = ''
d = model.get_value(iter, self.p2pmy_tv_idx_day)
m = model.get_value(iter, self.p2pmy_tv_idx_month_i)
y = model.get_value(iter, self.p2pmy_tv_idx_year)
if d: t = datetime(y,m,d).strftime('%a %d')
cell.set_property('text', t)
title = _("Day")
cell = gtk.CellRendererText()
tvcol = gtk.TreeViewColumn(title, cell)
# tvcol.add_attribute(cell, 'text' , self.p2pmy_tv_idx_month_n)
# tvcol.set_sort_column_id(self.p2pmy_tv_idx_month_n)
tvcol.set_cell_data_func(cell, dow_celldatafunction)
tvcol.set_resizable(True)
tvcol.set_reorderable(True)
self.p2pmy_tv.append_column(tvcol)
title = _("Month")
cell = gtk.CellRendererText()
tvcol = gtk.TreeViewColumn(title, cell)
tvcol.add_attribute(cell, 'text' , self.p2pmy_tv_idx_month_n)
tvcol.set_sort_column_id(self.p2pmy_tv_idx_month_n)
tvcol.set_resizable(True)
tvcol.set_reorderable(True)
self.p2pmy_tv.append_column(tvcol)
title = _("Year")
cell = gtk.CellRendererText()
cell.set_property('xalign', 1.0)
tvcol = gtk.TreeViewColumn(title, cell)
tvcol.add_attribute(cell, 'text' , self.p2pmy_tv_idx_year)
tvcol.set_resizable(True)
tvcol.set_reorderable(True)
tvcol.set_sort_column_id(self.p2pmy_tv_idx_year)
self.p2pmy_tv.append_column(tvcol)
title = _("Frequency (MHz)")
cell = gtk.CellRendererText()
cell.set_property('xalign', 1.0)
tvcol = gtk.TreeViewColumn(title, cell)
tvcol.add_attribute(cell, 'text' , self.p2pfreq_tv_idx_freq)
tvcol.set_resizable(True)
tvcol.set_reorderable(True)
tvcol.set_sort_column_id(self.p2pfreq_tv_idx_freq)
self.p2pfreq_tv.append_column(tvcol)
def build_circuitcb(self):
col_t = [gobject.TYPE_UINT, gobject.TYPE_STRING]
model = gtk.ListStore(*col_t)
[ model.append( [i, label]) for i, label in [
(0, _("Select method to run")),
(30, _("Method 30 (Smoothed LP/SP Model)")),
(25, _("Method 25 (All Modes SP Model)")),
(22, _("Method 22 (Forced SP Model)")),
(21, _("Method 21 (Forced LP Model)")),
(20, _("Method 20 (Complete System Performance)")),
(15, _("Method 15 (Tx. & Rx. Antenna Pattern)")),
(14, _("Method 14 (Rx. Antenna Pattern)")),
(13, _("Method 13 (Tx. Antenna Pattern)")),
(9, _("Method 9 (HPF-MUF-FOT Text Graph)"))
]]
self.p2pcircuitcb.set_model(model)
cell = gtk.CellRendererText()
self.p2pcircuitcb.pack_start(cell, True)
self.p2pcircuitcb.add_attribute(cell, 'text', 1)
self.p2pcircuitcb.set_active(0)
def build_graphcb(self):
col_t = [gobject.TYPE_UINT, gobject.TYPE_STRING]
model = gtk.ListStore(*col_t)
[ model.append([i, label]) for i, label in [
(0, _("Select method to run")),
(30, _("Method 30 (Smoothed LP/SP Model)")),
(22, _("Method 22 (Forced SP Model)")),
(21, _("Method 21 (Forced LP Model)")),
(20, _("Method 20 (Complete System Performance)")) ]]
self.p2pgraphcb.set_model(model)
cell = gtk.CellRendererText()
self.p2pgraphcb.pack_start(cell, True)
self.p2pgraphcb.add_attribute(cell, 'text', 1)
self.p2pgraphcb.set_active(0)
def build_macrocb(self):
col_t = [gobject.TYPE_STRING, gobject.TYPE_PYOBJECT, gobject.TYPE_PYOBJECT]
model = gtk.ListStore(*col_t)
[ model.append([l,f,a]) for l,f,a in [
(_("Select set to load"), None, None),
(_("Next 3 months"), self.p2p_macro_next_months, [3]),
(_("Next 6 months"), self.p2p_macro_next_months, [6]),
(_("Next 12 months"), self.p2p_macro_next_months, [12]),
(_("Next 24 months"), self.p2p_macro_next_months, [24]),
(_("Next 30 days"), self.p2p_macro_next_days, [30]),
(_("Annual (Quarters)"), self.p2p_macro_annual, [4]),
(_("Annual (bi-month)"), self.p2p_macro_annual, [6]),
]]
self.p2pmacrocb.set_model(model)
cell = gtk.CellRendererText()
self.p2pmacrocb.pack_start(cell, True)
self.p2pmacrocb.add_attribute(cell, 'text', 0)
self.p2pmacrocb.set_active(0)
def p2p_macro_next_months(self, vals):
day = 0
# if the tv has any entries, use the last one as our
# start in the sequence.
tv_model = self.p2pmy_tv.get_model()
tv_iter = tv_model.get_iter_first()
if tv_iter == None:
#empty model
#so let's add this month to the model
today = date.today()
self.p2pmy_add_tv_rows([(day, today.month, today.year)])
else:
# the table has entries. find the last entry and use that
# as our starting point for the 'next' months
last_iter = None
while tv_iter:
last_iter = tv_iter
tv_iter = tv_model.iter_next(tv_iter)
month = tv_model.get_value(last_iter, self.p2pmy_tv_idx_month_i)
year = tv_model.get_value(last_iter, self.p2pmy_tv_idx_year)
today = date(year, month, 1)
#get the last entry
#build the value for today
mr = relativedelta(months=+1)
if len(vals) == 1:
next = today + mr
for n in range(vals[0]):
self.p2pmy_add_tv_rows([(day, next.month, next.year)])
next = next + mr
elif len(vals) >1:
pass
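    # Illustrative note: the month stepping above relies on dateutil's
    # relativedelta, which rolls the year over automatically, e.g.
    # (assuming python-dateutil is installed):
    #   >>> from datetime import date
    #   >>> from dateutil.relativedelta import relativedelta
    #   >>> date(2010, 11, 1) + relativedelta(months=+3)
    #   datetime.date(2011, 2, 1)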
def p2p_macro_next_days(self, vals):
if not self.p2p_useday:
self.p2pusedayck.set_active(True)
today = date.today()
dr = relativedelta(days=+1)
if len(vals) == 1:
next = today + dr
for n in range(vals[0]):
self.p2pmy_add_tv_rows([(next.day, next.month, next.year)])
next = next + dr
elif len(vals) >1:
pass
def p2p_macro_annual(self, vals):
day = 0
# start the count from Jan of the current year
year = self.p2pyearspinbutton.get_value_as_int()
today = date(year, 1, 1)
self.p2pmy_add_tv_rows([(day, today.month, today.year)])
mr = relativedelta(months=+(12/vals[0]))
if len(vals) == 1:
next = today + mr
for n in range(vals[0]-1):
self.p2pmy_add_tv_rows([(day, next.month, next.year)])
next = next + mr
elif len(vals) >1:
pass
def p2p_add_macro(self, *args):
model = self.p2pmacrocb.get_model()
f, args = model.get(self.p2pmacrocb.get_active_iter(),1,2)
if not f: return
f(args)
def build_area_tv(self):
# grey out delete and save buttons, since there are no entries in the model
self.delbt.set_sensitive(False)
self.rstbt.set_sensitive(False)
self.savebt.set_sensitive(False)
self.arearunbt.set_sensitive(False)
# model: year, month name, month_ordinal, utc time hour, freq in Hz
col_t = [gobject.TYPE_UINT, gobject.TYPE_STRING, gobject.TYPE_UINT, gobject.TYPE_UINT, gobject.TYPE_STRING]
model = gtk.ListStore(*col_t)
self.area_tv.set_model(model)
self.area_tv.set_property("rules_hint", True)
self.area_tv.set_property("enable_search", False)
self.area_tv.set_headers_visible(True)
self.area_tv.get_selection().set_mode(gtk.SELECTION_MULTIPLE)
# col idx
self.area_tv_idx_year = 0
self.area_tv_idx_month_n = 1
self.area_tv_idx_month_i = 2
self.area_tv_idx_utc = 3
self.area_tv_idx_freq = 4
title = _("Year")
cell = gtk.CellRendererText()
cell.set_property('xalign', 1.0)
tvcol = gtk.TreeViewColumn(title, cell)
tvcol.add_attribute(cell, 'text' , self.area_tv_idx_year)
tvcol.set_resizable(True)
tvcol.set_reorderable(True)
tvcol.set_sort_column_id(self.area_tv_idx_year)
self.area_tv.append_column(tvcol)
title = _("Month")
cell = gtk.CellRendererText()
tvcol = gtk.TreeViewColumn(title, cell)
tvcol.add_attribute(cell, 'text' , self.area_tv_idx_month_n)
tvcol.set_sort_column_id(self.area_tv_idx_month_n)
tvcol.set_resizable(True)
tvcol.set_reorderable(True)
self.area_tv.append_column(tvcol)
title = _("Time (UTC)")
cell = gtk.CellRendererText()
cell.set_property('xalign', 1.0)
tvcol = gtk.TreeViewColumn(title, cell)
tvcol.add_attribute(cell, 'text' , self.area_tv_idx_utc)
tvcol.set_resizable(True)
tvcol.set_reorderable(True)
tvcol.set_sort_column_id(self.area_tv_idx_utc)
self.area_tv.append_column(tvcol)
title = _("Frequency (MHz)")
cell = gtk.CellRendererText()
cell.set_property('xalign', 1.0)
tvcol = gtk.TreeViewColumn(title, cell)
tvcol.add_attribute(cell, 'text' , self.area_tv_idx_freq)
tvcol.set_resizable(True)
tvcol.set_reorderable(True)
tvcol.set_sort_column_id(self.area_tv_idx_freq)
self.area_tv.append_column(tvcol)
def build_area_template_ts(self):
# loads templates from a file and populates the combobox
model = self.templatescb.get_model()
if not model:
col_t = [gobject.TYPE_STRING, gobject.TYPE_PYOBJECT] # name, (year,month,utc,freq)
model = gtk.ListStore(*col_t)
self.templatescb.set_model(model)
cell = gtk.CellRendererText()
self.templatescb.pack_start(cell, True)
self.templatescb.add_attribute(cell, 'text', 0)
model.clear()
        # this hack lets the templates subdir be part of the path
        # so the templates/*.py modules can import between themselves
current_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(current_dir, 'templates'))
##
# scan the dir for scripts
import glob
import imp
tmplt_fs = []
pattern = os.path.join(current_dir, 'templates', "*.py")
for f in glob.glob(pattern):
if os.path.isfile(f):
tmplt_fs.append(f)
for f in tmplt_fs:
name, ext = os.path.splitext(f)
mod = imp.load_source(name, f)
try:
t_o = mod.templates(self.main_window)
except Exception, X:
print _("Can't import template module %s: %s") % (f, X)
continue
# set module parameters
ps = t_o.get_params()
for p in ps:
try:
t_o.__dict__[p] = self.__dict__[p]
except Exception, X:
print _("Fail to set property %s in template %s: %s") % (p, f, X)
# make the module get ready for use later
ret = t_o.load()
if ret:
print _("Can't load() template module %s") % f
continue
for tname in t_o.get_names():
model.append([tname, t_o])
if not len(model):
# put an informative entry in the model
model.append([_('There are no templates available'), None])
else:
model.prepend([_('Select a template to load'), None])
self.templatescb.set_active(0)
self.addtemplbt.set_sensitive(False)
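    # A hypothetical sketch of the template-module protocol assumed by
    # build_area_template_ts()/area_add_template() above (the real modules
    # live in the templates/ directory; names and values below are
    # illustrative only):
    #
    #   class templates:
    #       def __init__(self, parent_window):
    #           self.ret_templates = {}
    #       def get_params(self):
    #           return []                 # gui attribute names to copy in
    #       def load(self):
    #           return 0                  # 0 = loaded ok
    #       def get_names(self):
    #           return ['Example set']
    #       def set_ini(self, area_tv_model):
    #           return 0
    #       def run(self):
    #           # tuples are (year, month, utc_hour, freq_MHz)
    #           self.ret_templates['Example set'] = [(2010, 1, 0, 14.1)]
    #           return 0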
def area_templatescb_change(self, *args):
active = self.templatescb.get_active()
if not active:# 0 is the indicative default, not a real template
self.addtemplbt.set_sensitive(False)
else:
self.addtemplbt.set_sensitive(True)
def p2p_useday_tog(self, *args):
change_to = None
e = ee = ''
#we only need to display a warning if the coeffs change.
if self.p2pusedayck.get_active():
e = _("URSI88 coefficients")
ee = _("Specifying days forces the use of URSI88 coefficients. ")
if len(self.p2pmy_tv.get_model()):
ee += _("Values of 'day' in existing entries will be set to '1'.")
change_to = 1
else:
e = _("Not specifing days reverts the forced use of URSI88 coefficients. \
The current setting is %s.") % ('CCIR' if (self.model_combo.get_active()==0) else 'URSI88')
if len(self.p2pmy_tv.get_model()):
ee = _("All existing day values will be deleted.")
change_to = 0
dialog = gtk.MessageDialog(self.main_window,
gtk.DIALOG_MODAL|gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_WARNING, gtk.BUTTONS_OK_CANCEL, e)
dialog.set_title(_('Warning'))
dialog.format_secondary_text(ee)
ret = dialog.run()
dialog.destroy()
if ret != -5:
self.p2pusedayck.handler_block(self.p2puseday_handler_id)
if self.p2pusedayck.get_active():
self.p2pusedayck.set_active(False)
else:
self.p2pusedayck.set_active(True)
self.p2pusedayck.handler_unblock(self.p2puseday_handler_id)
return
self.p2p_useday = self.p2pusedayck.get_active()
if self.p2p_useday:
self.model_combo.set_active(1)
self.model_combo.set_sensitive(False)
else:
self.model_combo.set_sensitive(True)
self.p2pdayspinbutton.set_sensitive(self.p2p_useday)
model = self.p2pmy_tv.get_model()
iter = model.get_iter_first()
while iter:
model.set_value(iter, self.p2pmy_tv_idx_day, change_to)
iter = model.iter_next(iter)
def p2p_calendar(self, *args):
def calendar_retval(cal, dialog):
dialog.response(gtk.RESPONSE_ACCEPT)
dialog = gtk.Dialog(_("Select date"), self.main_window,
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT | gtk.WIN_POS_CENTER_ON_PARENT,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OK, gtk.RESPONSE_ACCEPT))
cal = gtk.Calendar()
cal.connect('day-selected-double-click', calendar_retval, dialog)
dialog.vbox.pack_start(cal)
dialog.show_all()
# set def date as the last date used, else let it default to today
try:
cal.select_month(self.p2pcal_last[1], self.p2pcal_last[0])
cal.select_day(self.p2pcal_last[2])
except:
pass
ret = dialog.run()
dialog.destroy()
if ret != -3: #ok
return
self.p2pcal_last = cal.get_date()
self.p2pmy_add_tv_rows([(self.p2pcal_last[2], self.p2pcal_last[1]+1, self.p2pcal_last[0])])
def p2pmy_add_tv_row_from_user(self, *args):
day = self.p2pdayspinbutton.get_value_as_int()
month_i = self.p2pmonthspinbutton.get_value_as_int()
year = self.p2pyearspinbutton.get_value_as_int()
self.p2pmy_add_tv_rows([(day, month_i, year)])
def p2pfreq_add_tv_row_from_user(self, *args):
freq = self.p2pfreqspinbutton.get_value()
self.p2pfreq_add_tv_rows([(freq)])
def p2pmy_add_tv_rows(self, rows):
# rows: a list of (day, month_i, year) tuples
tv_model = self.p2pmy_tv.get_model()
had_rows = len(tv_model)
for (day, month_i, year) in rows:
day = day if self.p2p_useday else 0
month_n = time.strftime('%B', time.strptime(str(month_i), '%m'))
row = []
row.insert(self.p2pmy_tv_idx_day, day)
row.insert(self.p2pmy_tv_idx_month_n, month_n)
row.insert(self.p2pmy_tv_idx_month_i, month_i)
row.insert(self.p2pmy_tv_idx_year, year)
iter = tv_model.append(row)
self.p2pmydelbt.set_sensitive(True)
self.p2pmyrstbt.set_sensitive(True)
self.p2prunbt.set_sensitive(True)
# if self.area_templates_file:
# self.p2psavebt.set_sensitive(True)
# self.verify_input_data(None)
        # focus first row if the tv was previously empty
if not had_rows:
self.p2pmy_tv.set_cursor(0)
def p2pfreq_add_tv_rows(self, rows):
# rows: a list of (freq) tuples
tv_model = self.p2pfreq_tv.get_model()
had_rows = len(tv_model)
for (freq) in rows:
row = []
row.insert(self.p2pfreq_tv_idx_freq, '%.3f' % freq)
iter = tv_model.append(row)
self.p2pfreqdelbt.set_sensitive(True)
self.p2pfreqrstbt.set_sensitive(True)
# if self.area_templates_file:
# self.p2psavebt.set_sensitive(True)
# self.verify_input_data(None)
        # focus first row if the tv was previously empty
if not had_rows:
self.p2pfreq_tv.set_cursor(0)
if len(tv_model) > 11 and not self.max_frequencies_warn:
e = _("VOACAP can only process 11 frequencies")
dialog = gtk.MessageDialog(self.main_window,
gtk.DIALOG_MODAL|gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_WARNING, gtk.BUTTONS_CLOSE, e)
dialog.format_secondary_text(_('Only the first 11 entries will \
be processed, all other entries will be ignored. Please delete some entries \
from the frequency table.'))
dialog.run()
dialog.destroy()
self.max_frequencies_warn = True
def area_add_tv_row_from_user(self, *args):
year = self.areayearspinbutton.get_value_as_int()
month_i = self.monthspinbutton.get_value_as_int()
utc = self.utcspinbutton.get_value_as_int()
freq = self.freqspinbutton.get_value()
self.area_add_tv_rows([(year, month_i, utc, freq)])
def area_add_tv_rows(self, rows):#month_i, utc, freq, model=self.area_tv.get_model()):
# rows: a list of (month_i, utc, freq) tuples
tv_model = self.area_tv.get_model()
had_rows = len(tv_model)
for (year, month_i, utc, freq) in rows:
month_n = time.strftime('%B', time.strptime(str(month_i), '%m'))
row = []
row.insert(self.area_tv_idx_year, year)
row.insert(self.area_tv_idx_month_n, month_n)
row.insert(self.area_tv_idx_month_i, month_i)
row.insert(self.area_tv_idx_utc, utc)
row.insert(self.area_tv_idx_freq, '%.3f' % freq)
iter = tv_model.append(row)
self.delbt.set_sensitive(True)
self.rstbt.set_sensitive(True)
if self.area_templates_file:
self.savebt.set_sensitive(True)
self.verify_input_data(None)
        # focus first row if the tv was previously empty
if not had_rows:
self.area_tv.set_cursor(0)
#let the user know we did not run all their data
if len(tv_model) > self.max_vg_files and not self.max_vg_files_warn:
e = _("VOACAP can only process %d area entries") % self.max_vg_files
dialog = gtk.MessageDialog(self.main_window,
gtk.DIALOG_MODAL|gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_WARNING, gtk.BUTTONS_CLOSE, e)
dialog.format_secondary_text(_('Only the first 12 entries will \
be processed, all other entries will be ignored. Please delete some entries.'))
dialog.run()
dialog.destroy()
self.max_vg_files_warn = True
def p2p_clean_my_tv(self, *args):
self.p2pmy_tv.get_model().clear()
self.p2pmydelbt.set_sensitive(False)
self.p2pmyrstbt.set_sensitive(False)
#?
self.p2psavebt.set_sensitive(False)
self.p2prunbt.set_sensitive(False)
def p2p_clean_freq_tv(self, *args):
self.p2pfreq_tv.get_model().clear()
self.p2pfreqdelbt.set_sensitive(False)
self.p2pfreqrstbt.set_sensitive(False)
#?
self.p2psavebt.set_sensitive(False)
# self.p2prunbt.set_sensitive(False)
def area_clean_tv(self, *args):
self.area_tv.get_model().clear()
self.delbt.set_sensitive(False)
self.rstbt.set_sensitive(False)
self.savebt.set_sensitive(False)
self.arearunbt.set_sensitive(False)
def p2p_del_my_tv_row(self, *args):
selection = self.p2pmy_tv.get_selection()
if not selection.count_selected_rows(): return
model, paths = selection.get_selected_rows()
self.p2pmy_tv.freeze_child_notify()
self.p2pmy_tv.set_model(None)
iters = []
for path in paths:
iters.append(model.get_iter(path))
for iter in iters:
model.remove(iter)
if not len(model):
self.p2pmydelbt.set_sensitive(False)
self.p2pmyrstbt.set_sensitive(False)
#?
self.savebt.set_sensitive(False)
self.p2prunbt.set_sensitive(False)
self.p2pmy_tv.set_model(model)
self.p2pmy_tv.thaw_child_notify()
# select next row if it's there, or the previous instead
last_path = paths[-1][0]+1
for i in range(len(model) +1):
last_path -= 1
try:
model.get_iter(last_path)
except:
pass
else:
self.p2pmy_tv.set_cursor((last_path,))
return
def p2p_del_freq_tv_row(self, *args):
selection = self.p2pfreq_tv.get_selection()
if not selection.count_selected_rows(): return
model, paths = selection.get_selected_rows()
self.p2pfreq_tv.freeze_child_notify()
self.p2pfreq_tv.set_model(None)
iters = []
for path in paths:
iters.append(model.get_iter(path))
for iter in iters:
model.remove(iter)
if not len(model):
self.p2pfreqdelbt.set_sensitive(False)
self.p2pfreqrstbt.set_sensitive(False)
#?
# self.savebt.set_sensitive(False)
# self.p2prunbt.set_sensitive(False)
self.p2pfreq_tv.set_model(model)
self.p2pfreq_tv.thaw_child_notify()
# select next row if it's there, or the previous instead
last_path = paths[-1][0]+1
for i in range(len(model) +1):
last_path -= 1
try:
model.get_iter(last_path)
except:
pass
else:
self.p2pfreq_tv.set_cursor((last_path,))
return
def area_del_tv_row(self, *args):
selection = self.area_tv.get_selection()
if not selection.count_selected_rows(): return
model, paths = selection.get_selected_rows()
self.area_tv.freeze_child_notify()
self.area_tv.set_model(None)
iters = []
for path in paths:
iters.append(model.get_iter(path))
for iter in iters:
model.remove(iter)
if not len(model):
self.delbt.set_sensitive(False)
self.rstbt.set_sensitive(False)
self.savebt.set_sensitive(False)
self.arearunbt.set_sensitive(False)
self.area_tv.set_model(model)
self.area_tv.thaw_child_notify()
# select next row if it's there, or the previous instead
last_path = paths[-1][0]+1
for i in range(len(model) +1):
last_path -= 1
try:
model.get_iter(last_path)
except:
pass
else:
self.area_tv.set_cursor((last_path,))
return
def p2p_save_as_template(self, *args):
pass
def area_save_as_template(self, *args):
''' saves area_tv model content as a template '''
global ok_bt
global nentry
def text_change(self, *args):
global ok_bt
global nentry
if len(nentry.get_text()):
ok_bt.set_sensitive(True)
else:
ok_bt.set_sensitive(False)
dialog = gtk.Dialog(_("Creating new area template"),
self.main_window,
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
(gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT))
hb = gtk.HBox(2)
label = gtk.Label(_("Template name"))
hb.pack_start(label)
nentry = gtk.Entry(max=50)
nentry.connect("changed", text_change)
hb.pack_start(nentry)
hb.show_all()
dialog.vbox.pack_start(hb)
ok_bt = gtk.Button(None, gtk.STOCK_OK)
ok_bt.set_sensitive(False)
ok_bt.show()
dialog.add_action_widget(ok_bt, gtk.RESPONSE_ACCEPT)
response = dialog.run()
if response == -3: # accept
# save it
fd = open(os.path.expandvars(self.area_templates_file), 'a')
fd.write(_('\n#template created by voacap GUI'))
title = nentry.get_text()
fd.write('\n[%s]' % title)
fd.write(_('\n#month utchour freq'))
model = self.area_tv.get_model()
iter = model.get_iter_first()
while iter:
                m,u,f = model.get(iter,2,3,4)  # month_i, utc, freq columns
fd.write('\n%02d %02d %.3f' % (m,u,float(f)))
iter = model.iter_next(iter)
fd.write(_('\n#End of %s') % title)
fd.close()
# reload templates_file to repopulate templatescb, then
# select this recently saved as the active one
self.build_area_template_ts()
model = self.templatescb.get_model()
iter = model.get_iter_first()
while iter:
if model.get_value(iter, 0) == title:
self.templatescb.set_active_iter(iter)
break
iter = model.iter_next(iter)
dialog.destroy()
def area_add_template(self, *args):
active = self.templatescb.get_active()
if not active:# 0 is the indicative default, not a real template
return
model = self.templatescb.get_model()
t_n = model.get_value(model.get_iter(active), 0)
t_o = model.get_value(model.get_iter(active), 1)
model = self.area_tv.get_model()
if t_o.set_ini(model):
print "Can't initialize module %s" % t_n
return
if t_o.run(): return
        templ_tups = None
        try:
            templ_tups = t_o.ret_templates[t_n]
        except:
            pass
if templ_tups:
self.area_add_tv_rows(templ_tups)
#####################SSN Tab functions follow
def ssn_build_tv(self):
self.ssn_tv.set_model(self.ssn_repo)
self.ssn_file_data_label.set_text(self.ssn_repo.get_file_data())
self.ssn_tv.set_property("rules_hint", True)
self.ssn_tv.set_property("enable_search", False)
self.ssn_tv.set_headers_visible(True)
# col idx
self.ssn_tv_idx_year = 0
title = _("Year")
cell = gtk.CellRendererText()
font = pango.FontDescription('bold')
cell.set_property('font-desc', font)
tvcol = gtk.TreeViewColumn(title, cell)
tvcol.add_attribute(cell, 'text', self.ssn_tv_idx_year)
        tvcol.set_sort_column_id(self.ssn_tv_idx_year)
tvcol.set_resizable(True)
tvcol.set_sizing(gtk.TREE_VIEW_COLUMN_FIXED)
tvcol.set_expand(True)
self.ssn_tv.append_column(tvcol)
for i in range (1,13):
cell = gtk.CellRendererText()
cell.set_property('xalign', 0.5)
tvcol = gtk.TreeViewColumn(calendar.month_abbr[i], cell)
tvcol.set_alignment(0.5)
tvcol.add_attribute(cell, 'text', i)
tvcol.add_attribute(cell, 'font', i+13)
tvcol.set_resizable(True)
tvcol.set_sizing(gtk.TREE_VIEW_COLUMN_FIXED)
tvcol.set_expand(True)
self.ssn_tv.append_column(tvcol)
ssn_thumb = VOASSNThumb(self.ssn_repo)
_th = ssn_thumb.get_thumb()
_th.show()
self.ssn_plot_box.pack_start(_th, True, True)
# scroll to the current year
iter = self.ssn_repo.get_iter_first()
while iter:
if self.ssn_repo.get_value(iter, self.ssn_tv_idx_year) == str(datetime.today().year):
path = self.ssn_repo.get_path(iter)
self.ssn_tv.set_cursor(path)
self.ssn_tv.scroll_to_cell(path, None)
break
iter = self.ssn_repo.iter_next(iter)
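    # Note (inferred from the loop above): the ssn_repo model is assumed to
    # hold the year in column 0, the twelve monthly SSN values in columns
    # 1-12, and per-cell font strings in columns 14-25 (attribute index i+13),
    # which the treeview uses to style individual cells.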
def nb_switch_page(self, *args):
# area is the last page in the nb
if self.notebook.get_n_pages() == args[2] +1:
if not self.area_accelgrp:
self.area_accelgrp = gtk.AccelGroup()
self.area_accelgrp.connect_group(0xffff, 0, 0, self.area_del_tv_row)
self.main_window.add_accel_group(self.area_accelgrp)
else:
if self.area_accelgrp:
self.main_window.remove_accel_group(self.area_accelgrp)
self.area_accelgrp = None
def show_area_chooser(self, widget):
dialog = VOAAreaChooser(self.area_rect, self.area_chooser_map_size, parent=self.main_window)
return_code, return_rect, return_size = dialog.run()
if (return_code == 0): # 0=ok, 1=cancel
self.area_rect = return_rect
self.area_chooser_map_size = return_size
self.area_label.set_text(self.area_rect.get_formatted_string())
def run_prediction(self, button):
voacapl_args = ''
if button == self.arearunbt:
voacapl_args = self.itshfbc_path
###################################################################
vf = VOAFile(os.path.join(os.path.expanduser("~"),'itshfbc','areadata','pyArea.voa'))
vf.set_gridsize(self.gridsizespinbutton.get_value())
vf.set_location(vf.TX_SITE,
self.tx_site_entry.get_text(),
self.tx_lon_spinbutton.get_value(),
self.tx_lat_spinbutton.get_value())
vf.P_CENTRE = vf.TX_SITE
vf.set_xnoise(abs(self.mm_noise_spinbutton.get_value()))
vf.set_amind(self.min_toa_spinbutton.get_value())
vf.set_xlufp(self.reliability_spinbutton.get_value())
vf.set_rsn(self.snr_spinbutton.get_value())
vf.set_pmp(self.mpath_spinbutton.get_value())
vf.set_dmpx(self.delay_spinbutton.get_value())
vf.set_psc1(self.foe_spinbutton.get_value())
vf.set_psc2(self.fof1_spinbutton.get_value())
vf.set_psc3(self.fof2_spinbutton.get_value())
vf.set_psc4(self.foes_spinbutton.get_value())
vf.set_area(self.area_rect)
# Antennas, gain, tx power, bearing
#def set_rx_antenna(self, data_file, gain=0.0, bearing=0.0):
#rel_dir, file, description = self.ant_list[self.rx_ant_combobox.get_active()]
vf.set_rx_antenna(self.rx_antenna_path.ljust(21), 0.0,
self.rx_bearing_spinbutton.get_value())
#def set_tx_antenna(self, data_file, design_freq=0.0, bearing=0.0, power=0.125):
#rel_dir, file, description = self.ant_list[self.tx_ant_combobox.get_active()]
vf.set_tx_antenna(self.tx_antenna_path.ljust(21), 0.0,
self.tx_bearing_spinbutton.get_value(),
self.tx_power_spinbutton.get_value()/1000.0)
vf.clear_plot_data()
# treeview params
model = self.area_tv.get_model()
iter = model.get_iter_first()
# we're limited to 12 entries here
i = 0
while iter and i < self.max_vg_files:
year = int(model.get_value(iter, self.area_tv_idx_year))
month_i = float(model.get_value(iter, self.area_tv_idx_month_i))
utc = model.get_value(iter, self.area_tv_idx_utc)
freq = model.get_value(iter, self.area_tv_idx_freq)
# ssn entries are named as months (jan_ssn_entry) so to be sure
# we're getting the correct one, we need to map them
ssn = self.ssn_repo.get_ssn(month_i, year)
vf.add_plot((freq, utc, month_i, ssn))
iter = model.iter_next(iter)
i = i+1
vf.write_file()
#let the user know we did not run all their data
if iter:
e = _("VOACAP can only process %d area entries") % self.max_vg_files
dialog = gtk.MessageDialog(self.main_window,
gtk.DIALOG_MODAL|gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_WARNING, gtk.BUTTONS_CLOSE, e)
dialog.format_secondary_text(_('Only the first 12 entries will be processed,\
all other entries will be ignored.'))
dialog.run()
dialog.destroy()
print "executing vocapl..."
# os.system('voacapl ~/itshfbc area calc pyArea.voa')
# print os.path.join(os.path.expanduser("~"), 'itshfbc')
ret = os.spawnlp(os.P_WAIT, 'voacapl', 'voacapl', os.path.join(os.path.expanduser("~"), 'itshfbc'), "area", "calc", "pyArea.voa")
if ret:
e = "voacapl returned %s. Can't continue." % ret
dialog = gtk.MessageDialog(self.main_window, gtk.DIALOG_MODAL|gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_ERROR, gtk.BUTTONS_CLOSE, e )
dialog.run()
dialog.destroy()
return -1
print "done voacapl"
s = os.path.join(os.path.expanduser("~"), 'itshfbc','areadata','pyArea.voa')
graph = VOAAreaPlotGUI(s, parent=self.main_window, exit_on_close=False)
graph.quit_application()
#P2P Predictions follow
if button == self.p2prunbt:
runs = []
iter = self.p2pcircuitcb.get_active_iter()
c_method = self.p2pcircuitcb.get_model().get_value(iter, 0)
if c_method:
runs.append('c')
iter = self.p2pgraphcb.get_active_iter()
g_method = self.p2pgraphcb.get_model().get_value(iter, 0)
if g_method:
runs.append('g')
_coeff = 'CCIR' if (self.model_combo.get_active()==0) else 'URSI88'
_path = VOADatFile.SHORT_PATH if (self.path_combo.get_active()==0) else VOADatFile.LONG_PATH
for rt in runs:
input_filename = 'voacapg.dat' if rt == 'g' else 'voacapx.dat'
output_filename = 'voacapg.out' if rt == 'g' else 'voacapx.out'
data_file_format = VOADatFile.GRAPHICAL_FORMAT if rt == 'g' else VOADatFile.CIRCUIT_FORMAT
df = VOADatFile(self.itshfbc_path + os.sep + 'run' + os.sep + input_filename)
voacapl_args = self.itshfbc_path + ' ' + input_filename + ' ' + output_filename
df.set_title([_('File generated by voacap-gui (www.qsl.net/hz1jw)'), _('File created: ')+datetime.now().strftime('%X %a %d %b %y')])
df.set_linemax(55)
method = g_method if rt == 'g' else c_method
df.set_method(method)
df.set_coeffs(_coeff)
df.set_sites(HamLocation(self.tx_lat_spinbutton.get_value(),
self.tx_lon_spinbutton.get_value(),
self.tx_site_entry.get_text()),
HamLocation(self.rx_lat_spinbutton.get_value(),
self.rx_lon_spinbutton.get_value(),
self.rx_site_entry.get_text()), _path)
df.set_system(self.tx_power_spinbutton.get_value()/1000.0,\
abs(self.mm_noise_spinbutton.get_value()),\
self.min_toa_spinbutton.get_value(),\
self.reliability_spinbutton.get_value(),\
self.snr_spinbutton.get_value(),\
self.mpath_spinbutton.get_value(),\
self.delay_spinbutton.get_value())
if rt == 'c':
# The frequencies are only applicable when performing text based predictions.
                    # voacap can accept up to 11 entries in the list.
                    # entries may be specified to up to 3 decimal places.
                    # longer lists and additional precision will be truncated
                    # by the set_frequency_list method.
# (The example freqs below are PSK31 calling freqs...)
# df.set_frequency_list((3.580, 7.035, 10.140, 14.070, 18.1, 21.08, 28.12))
freqs = []
model = self.p2pfreq_tv.get_model()
iter = model.get_iter_first()
while iter:
try:
freqs.append(float(model.get_value(iter, self.p2pfreq_tv_idx_freq)))
except:
pass
iter = model.iter_next(iter)
df.set_frequency_list(tuple(freqs))
df.set_antenna(VOADatFile.TX_ANTENNA, self.tx_antenna_path.ljust(21),
self.tx_bearing_spinbutton.get_value(),
self.tx_power_spinbutton.get_value()/1000.0)
df.set_antenna(VOADatFile.RX_ANTENNA, self.rx_antenna_path.ljust(21),
self.rx_bearing_spinbutton.get_value())
df.set_fprob(self.foe_spinbutton.get_value(),
self.fof1_spinbutton.get_value(), self.fof2_spinbutton.get_value(),
self.foes_spinbutton.get_value())
# ssn_list is a list of tuples (day, month, year, ssn)
ssn_list = []
model = self.p2pmy_tv.get_model()
iter = model.get_iter_first()
day = 0
while iter:
if rt == 'c':
day = model.get_value(iter, self.p2pmy_tv_idx_day)
if day:
df.set_coeffs('URSI88')
month = model.get_value(iter, self.p2pmy_tv_idx_month_i)
year = model.get_value(iter, self.p2pmy_tv_idx_year)
ssn = self.ssn_repo.get_ssn(month, year)
if not ssn:
e = _("Can't find SSN number for <%(m)s>-<%(y)s>. Can't continue without all SSNs.") % {'m':month, 'y':year}
dialog = gtk.MessageDialog(self.main_window,
gtk.DIALOG_MODAL|gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_ERROR, gtk.BUTTONS_CLOSE, e )
dialog.run()
dialog.destroy()
return -1
ssn_list.append((day, month, year, ssn))
iter = model.iter_next(iter)
df.set_ssn(ssn_list)
df.write_file(data_file_format)
try:
retcode = subprocess.call("voacapl -s " + voacapl_args, shell=True)
if rt == 'c':
result_dialog = VOATextFileViewDialog(self.itshfbc_path+os.sep+'run'+os.sep+output_filename)
return_code = result_dialog.run()
if rt == 'g':
graph = VOAP2PPlotGUI(self.itshfbc_path+os.sep+'run'+os.sep+output_filename,
parent=self.main_window, exit_on_close=False)
graph.quit_application()
except OSError, e:
print "Voacapl execution failed:", e
break
def show_about_dialog(self, widget):
about = gtk.AboutDialog()
about.set_program_name("voacap-gui")
about.set_version("0.10")
about.set_authors(("J.Watson (HZ1JW/M0DNS)", "Fernando M. Maresca (LU2DFM)"))
about.set_comments(_("A voacap GUI"))
about.set_website("http://www.qsl.net/hz1jw")
about.set_logo(gtk.gdk.pixbuf_new_from_file(os.path.join(os.path.realpath(os.path.dirname(sys.argv[0])), "voacap.png")))
about.run()
about.destroy()
def build_new_template_file(self):
fn = os.path.join(self.prefs_dir,'area_templ.ex')
        s = _('''# rough format for area plot templates:
# lines starting with # are ignored
# each data line consists of four values separated by spaces
# each template is preceded by a name enclosed in square brackets:
# [template name]
# tags
# year month utchour freq
# 2010 11 22 14.250
# year: four-digit year
# month: number of the month, 1=January
# utchour: UTC time HOUR, 00 to 23
# freq: frequency in MHz
# example: all months at midnight on 14.100 MHz
[All months midnight 14.100 Mhz]
#year month utchour freq
2010 01 00 14.10
2010 02 00 14.10
2010 03 00 14.10
2010 04 00 14.10
2010 05 00 14.10
2010 06 00 14.10
2010 07 00 14.10
2010 08 00 14.10
2010 09 00 14.10
2010 10 00 14.10
2010 11 00 14.10
2010 12 00 14.10
[All months at 1600z 7.500 MHz]
#month utchour freq
2010 01 16 7.5
2010 02 16 7.5
2010 03 16 7.5
2010 04 16 7.5
2010 05 16 7.5
2010 06 16 7.5
2010 07 16 7.5
2010 08 16 7.5
2010 09 16 7.5
2010 10 16 7.5
2010 11 16 7.5
2010 12 16 7.5
\n
''')
with open(fn, 'w') as templates_def_fd:
templates_def_fd.write(s)
self.area_templates_file = fn
def quit_application(self, widget):
self.save_user_prefs()
        gtk.main_quit()
sys.exit(0)
if __name__ == "__main__":
app = VOACAP_GUI()
try:
gtk.main()
except KeyboardInterrupt:
sys.exit(1)
| gpl-2.0 | 8,430,356,252,253,323,000 | 42.22753 | 151 | 0.564609 | false |
nehresma/gocardless-python | test/test_params.py | 1 | 8989 | import datetime
import mock
import unittest
import six
from six.moves import urllib
import gocardless
from gocardless import utils, urlbuilder
class ExpiringLimitTestCase(object):
"""superclass factoring out tests for expiring limit param objects"""
def test_interval_length_is_positive(self):
pars = self.create_params(10, "1321230", 1, "day")
with self.assertRaises(ValueError):
pars = self.create_params(10, "1123210", -1, "day")
def test_interval_unit_is_valid(self):
for interval_unit in ["day", "week", "month"]:
pars = self.create_params(10, 10, "11235432", interval_unit)
with self.assertRaises(ValueError):
pars = self.create_params(10, 10, "1432233123", "invalid")
def _future_date_tester(self, argname):
invalid_date = datetime.datetime.now() - datetime.timedelta(100)
valid_date = datetime.datetime.now() + datetime.timedelta(2000)
par1 = self.create_params(10, 10, "23423421", "day", **{argname:valid_date})
with self.assertRaises(ValueError):
par1 = self.create_params(10, 10, "2342341", "day",
**{argname:invalid_date})
def test_expires_at_in_future(self):
self._future_date_tester("expires_at")
def test_interval_count_positive(self):
with self.assertRaises(ValueError):
self.create_params(10, 10, "merchid", "day", interval_count=-1)
class PreAuthParamsTestCase(ExpiringLimitTestCase, unittest.TestCase):
def default_args_construct(self, extra_options):
"""
For testing optional arguments, builds the param object with valid
required arguments and adds optionl arguments as keywords from
`extra_options`
:param extra_options: Extra optional keyword arguments to pass to
the constructor.
"""
return urlbuilder.\
PreAuthorizationParams(12, "3456", 6, "month", **extra_options)
def create_params(self, *args, **kwargs):
return urlbuilder.PreAuthorizationParams(*args, **kwargs)
def test_max_amount_is_positive(self):
self.assertRaises(ValueError, \
urlbuilder.PreAuthorizationParams, -1, "1232532", 4, "month")
def test_interval_length_is_a_positive_integer(self):
self.assertRaises(ValueError, \
urlbuilder.PreAuthorizationParams, 12, "!2343", -3, "month")
def test_interval_unit_is_one_of_accepted(self):
for unit_type in ["month", "day", "week"]:
pa = urlbuilder.PreAuthorizationParams(12, "1234", 3, unit_type)
self.assertRaises(ValueError, \
urlbuilder.PreAuthorizationParams, 21,"1234", 4, "soem other unit")
def test_expires_at_is_later_than_now(self):
earlier = datetime.datetime.now() - datetime.timedelta(1)
self.assertRaises(ValueError, self.default_args_construct, \
{"expires_at":earlier})
def test_interval_count_is_postive_integer(self):
self.assertRaises(ValueError, self.default_args_construct, \
{"interval_count":-1})
class PreAuthParamsToDictTestCase(unittest.TestCase):
def setUp(self):
self.all_params = {
"max_amount":12,
"interval_unit":"day",
"interval_length":10,
"merchant_id":"1234435",
"name":"aname",
"description":"adesc",
"interval_count":123,
"currency":"GBP",
"expires_at":datetime.datetime.strptime("2020-01-01", "%Y-%m-%d"),
"calendar_intervals":True
}
self.required_keys = [
"max_amount", "interval_unit", "interval_length", "merchant_id"]
def create_from_params_dict(self, in_params):
params = in_params.copy()
pa = urlbuilder.PreAuthorizationParams(params.pop("max_amount"), \
params.pop("merchant_id"), \
params.pop("interval_length"), \
params.pop("interval_unit"),\
**params)
return pa
def assert_inverse(self, keys):
params = dict([[k,v] for k,v in six.iteritems(self.all_params) \
if k in keys])
pa = self.create_from_params_dict(params)
self.assertEqual(params, pa.to_dict())
def test_to_dict_all_params(self):
self.assert_inverse(list(self.all_params.keys()))
def test_to_dict_only_required(self):
self.assert_inverse(self.required_keys)
class BillParamsTestCase(unittest.TestCase):
def create_params(self, *args, **kwargs):
return urlbuilder.BillParams(*args, **kwargs)
def test_amount_is_positive(self):
params = self.create_params(10, "merchid")
with self.assertRaises(ValueError):
par2 = self.create_params(-1, "merchid")
def test_to_dict_required(self):
pars = self.create_params(10, "merchid")
res = pars.to_dict()
expected = {"amount":10, "merchant_id":"merchid"}
self.assertEqual(res, expected)
def test_to_dict_optional(self):
pars = self.create_params(10, "merchid", name="aname", description="adesc")
res = pars.to_dict()
expected = {"amount":10,
"name":"aname",
"description":"adesc",
"merchant_id":"merchid"
}
self.assertEqual(res, expected)
def test_resource_name_is_bills(self):
pars = urlbuilder.BillParams(10, "merchid")
self.assertEqual(pars.resource_name, "bills")
class SubscriptionParamsTestCase(ExpiringLimitTestCase, unittest.TestCase):
def create_params(self, *args, **kwargs):
return urlbuilder.SubscriptionParams(*args, **kwargs)
def test_setup_fee(self):
pars = self.create_params(10, "merchid", 10, "day", setup_fee=20)
expected = {
"merchant_id": "merchid",
"amount": 10,
"interval_length": 10,
"interval_unit" : "day",
"setup_fee": 20
}
self.assertEqual(expected, pars.to_dict())
def test_start_at_in_future(self):
valid_date = datetime.datetime.now() + datetime.timedelta(200)
invalid_date = datetime.datetime.now() - datetime.timedelta(100)
par1 = self.create_params(10,"merchid", 10, "day", start_at=valid_date)
with self.assertRaises(ValueError):
par2 = self.create_params(10, "merchid", 10, "day",
start_at=invalid_date)
def test_expires_at_after_start_at(self):
date1 = datetime.datetime.now() + datetime.timedelta(100)
date2 = datetime.datetime.now() + datetime.timedelta(200)
par1 = self.create_params(10, "merchid", 10, "day",
expires_at=date2, start_at=date1)
with self.assertRaises(ValueError):
par2 = self.create_params(10, "merchid", 10, "day",
expires_at=date1, start_at=date2)
def test_to_dict_only_required(self):
expected = {
"merchant_id":"merchid",
"amount":10,
"interval_length":10,
"interval_unit":"day"}
pars = self.create_params(10, "merchid", 10, "day")
self.assertEqual(expected, pars.to_dict())
def test_to_dict_all(self):
start_at = datetime.datetime.now() + datetime.timedelta(1000)
expires_at =datetime.datetime.now() + datetime.timedelta(2000)
expected = {
"merchant_id":"merchid",
"amount":10,
"interval_length":10,
"interval_unit":"day",
"interval_count":5,
"start_at":start_at.isoformat()[:-7] + "Z",
"expires_at":expires_at.isoformat()[:-7] + "Z",
"name":"aname",
"description":"adesc",
}
par = self.create_params(10, "merchid", 10, "day", start_at=start_at,
expires_at=expires_at, interval_count=5, name="aname",
description="adesc")
self.assertEqual(expected, par.to_dict())
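    # Note on the expected strings above: datetime.now() carries microseconds,
    # so isoformat() ends in '.%f'; slicing with [:-7] strips that fractional
    # part before the trailing 'Z' is appended. A datetime with zero
    # microseconds would need different handling.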
class PrepopDataTestCase(unittest.TestCase):
def setUp(self):
self.mock_prepop = {"first_name": "Tom",
"last_name": "Blomfield",
"email": "[email protected]"
}
def assert_prepop(self, params):
self.assertEqual(params.to_dict()["user"], self.mock_prepop)
def test_bill_params(self):
params = urlbuilder.BillParams(10, "amerchid", user=self.mock_prepop)
self.assert_prepop(params)
def test_sub_params(self):
params = urlbuilder.SubscriptionParams(10, "merchid", 3, "day", user=self.mock_prepop)
self.assert_prepop(params)
def test_pre_auth_params(self):
params = urlbuilder.PreAuthorizationParams(10, "amerchid", 5, "day", user=self.mock_prepop)
self.assert_prepop(params)
| mit | -7,502,890,767,632,523,000 | 36.92827 | 99 | 0.590833 | false |
genyang/classifip | classifip/representations/intervalsProbability.py | 1 | 8808 | import numpy as np
from credalset import CredalSet
class IntervalsProbability(CredalSet):
"""Class of probability intervals: probabilistic bounds on singletons
:param lproba: a 2xn array containing upper (1st row) and lower bounds
:type lproba: :class:`~numpy.array`
:param nbDecision: number of elements of the space
:type nbDecision: integer
>>> from numpy import array
>>> ip=array([[0.5, 0.5, 0.5], [0.1, 0.1, 0.1]])
>>> from classifip.representations.intervalsProbability import IntervalsProbability
>>> intprob=IntervalsProbability(ip)
>>> print(intprob)
y0 y1 y2
--------------------
    upper bound | 0.500 0.500 0.500
lower bound | 0.100 0.100 0.100
>>> ip2=array([[0.4, 0.5, 0.6], [0., 0.1, 0.2]])
>>> intprob2=IntervalsProbability(ip2)
>>> print intprob & intprob2
y0 y1 y2
--------------------
upper bound | 0.400 0.500 0.500
lower bound | 0.100 0.100 0.200
>>> print intprob | intprob2
y0 y1 y2
--------------------
upper bound | 0.500 0.500 0.600
lower bound | 0.000 0.100 0.100
>>> print intprob + intprob2
y0 y1 y2
--------------------
upper bound | 0.450 0.500 0.550
lower bound | 0.050 0.100 0.150
>>> ip3=array([[0.7, 0.5, 0.2], [0.4, 0.2, 0.1]])
>>> intprob3=IntervalsProbability(ip3)
>>> intprob3.isreachable()
1
>>> intprob3.getmaximindecision()
0
>>> intprob3.getmaximaxdecision()
0
>>> intprob3.getintervaldomdecision()
array([ 1., 1., 0.])
>>> intprob3.getmaximaldecision()
array([ 1., 1., 0.])
"""
def __init__(self,lproba):
"""Instanciate probability interval bounds
:param lproba: a 2xn array containing upper (1st row) and lower bounds
:type lproba: :class:`~numpy.array`
"""
if lproba.__class__.__name__ != 'ndarray':
raise Exception('Expecting a numpy array as argument')
if lproba[:,1].size != 2:
raise Exception('Array should contain two rows: top for upper prob, bottom for lower prob')
if lproba.ndim != 2:
raise Exception('Bad dimension of array: should contain 2 dimensions')
self.lproba=lproba
self.nbDecision=lproba[0].size
if np.all(lproba[0] >=lproba[1]) != 1:
raise Exception('Some upper bounds lower than lower bounds')
def isproper(self):
"""Check if probability intervals induce a non-empty probability set.
:returns: 0 (empty/incur sure loss) or 1 (non-empty/avoid sure loss).
:rtype: integer
"""
if self.lproba[1,:].sum()<=1 and self.lproba[0,:].sum()>=1:
return 1
else:
return 0
def getlowerprobability(self,subset):
"""Compute lower probability of an event expressed in binary code.
:param subset: the event of interest (a 1xn vector containing 1 for elements
in the event, 0 otherwise.)
:param type: np.array
:returns: lower probability value
:rtype: float
"""
if subset.__class__.__name__!='ndarray':
raise Exception('Expecting a numpy array as argument')
if subset.size != self.nbDecision:
raise Exception('Subset incompatible with the frame size')
if self.isreachable()==0:
self.setreachableprobability()
lowerProbability=max(self.lproba[1,subset[:]==1].sum(),1-self.lproba[0,subset[:]==0].sum())
return lowerProbability
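    # The bound above is the usual probability-interval formula
    #   P_lower(A) = max( sum_{x in A} l(x), 1 - sum_{x not in A} u(x) ),
    # and getupperprobability() below uses the conjugate expression.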
def getupperprobability(self,subset):
"""Compute upper probability of an event expressed in binary code.
:param subset: the event of interest (a 1xn vector containing 1 for elements
in the event, 0 otherwise.)
:param type: np.array
:returns: upper probability value
:rtype: float
"""
if subset.__class__.__name__!='ndarray':
raise Exception('Expecting a numpy array as argument')
if subset.size != self.nbDecision:
raise Exception('Subset incompatible with the frame size')
if self.isreachable()==0:
self.setreachableprobability()
upperProbability=min(self.lproba[0,subset[:]==1].sum(),1-self.lproba[1,subset[:]==0].sum())
return upperProbability
def getlowerexpectation(self,function):
"""Compute the lower expectation of a given (bounded) function by using
the Choquet integral
:param function: the function values
:param type: np.array
:returns: lower expectation value
:rtype: float
"""
lowerexpe=0.
if function.__class__.__name__!='ndarray':
raise Exception('Expecting a numpy array as argument')
if function.size != self.nbDecision:
raise Exception('number of elements incompatible with the frame size')
function=function.astype(float)
sortedf=np.sort(function)
indexedf=np.argsort(function)
lowerexpe=lowerexpe+sortedf[0]
for i in range(self.nbDecision)[1:]:
addedval=sortedf[i]-sortedf[i-1]
event=np.zeros(self.nbDecision)
event[indexedf[i:]]=1
lowerexpe=lowerexpe+addedval*self.getlowerprobability(event)
return lowerexpe
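    # Illustrative example (ad hoc values): with the bounds from the class
    # docstring, getlowerexpectation(array([1., 0., 0.])) evaluates to 0.1,
    # i.e. the Choquet integral of an indicator function reduces to the
    # lower probability of the corresponding event.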
def isreachable(self):
"""Check if the probability intervals are reachable (are coherent)
:returns: 0 (not coherent/tight) or 1 (tight/coherent).
:rtype: integer
"""
for i in range(self.nbDecision):
subset=np.ones(self.nbDecision)
subset[i]=0
if self.lproba[0,i] + self.lproba[1,subset[:]==1].sum() > 1.0:
return 0
if self.lproba[1,i] + self.lproba[0,subset[:]==1].sum() < 1.0:
return 0
return 1
def setreachableprobability(self):
"""Make the bounds reachable.
"""
if self.isproper()==1:
lreachableProba=np.zeros((2,self.nbDecision))
for i in range(self.nbDecision):
subset=np.ones(self.nbDecision)
subset[i]=0
lb=max(self.lproba[1,i],1-self.lproba[0,subset[:]==1].sum())
ub=min(self.lproba[0,i],1-self.lproba[1,subset[:]==1].sum())
lreachableProba[1,i]=lb
lreachableProba[0,i]=ub
self.lproba[:]=lreachableProba[:]
else:
raise Exception('intervals inducing empty set: operation not possible')
def __str__(self):
"""Print the current bounds
"""
str1,str2="upper bound |","lower bound |"
str3=" "
i=0
for interval in range(self.nbDecision):
str3+=" y%d " %i
str1+=" %.3f" % self.lproba[0,interval]
str2+=" %.3f" % self.lproba[1,interval]
i+=1
str3+="\n"
str3+=" "
str3+="--------------------"
str3+="\n"
str3+=str1
str3+="\n"
str3+=str2
str3+="\n"
return str3
def __and__(self,other):
"""Compute the intersection of two probability intervals
"""
mini=np.maximum(self.lproba[1,:],other.lproba[1,:])
maxi=np.minimum(self.lproba[0,:],other.lproba[0,:])
if mini.sum() >= 0.9999999 or maxi.sum() <= 0.9999999:
raise Exception('empty intersection')
for i in range(self.nbDecision):
if mini[i] >= maxi[i] - 0.0000001:
raise Exception('empty intersection')
fusedproba=np.zeros((2,self.nbDecision))
fusedproba[1,:]=mini
fusedproba[0,:]=maxi
result=IntervalsProbability(fusedproba)
result.setreachableprobability()
return result
def __or__(self,other):
"""Compute the union of two probability intervals
"""
fusedproba=np.zeros((2,self.nbDecision))
fusedproba[1,:]=np.minimum(self.lproba[1,:],other.lproba[1,:])
fusedproba[0,:]=np.maximum(self.lproba[0,:],other.lproba[0,:])
result=IntervalsProbability(fusedproba)
return result
def __add__(self,other):
"""Compute the average of two probability intervals
"""
fusedproba=np.zeros((2,self.nbDecision))
fusedproba[1,:]=np.mean([self.lproba[1,:],other.lproba[1,:]],axis=0)
fusedproba[0,:]=np.mean([self.lproba[0,:],other.lproba[0,:]],axis=0)
result=IntervalsProbability(fusedproba)
return result
| gpl-2.0 | -5,486,011,638,803,645,000 | 36.322034 | 103 | 0.557221 | false |
jeromeku/Python-Financial-Tools | portfolio.py | 1 | 8295 | # portfolio.py This class represents a portfolio of stocks. It supports optimization
# of assets via a quadratic program.
#
# The following is an example of how the portfolio class may be used to represent a
# portfolio of assets representing major technology companies:
# portfolio = Portfolio(["MSFT","GOOG","IBM"])
# print "The value at risk: %.2f" % portfolio.calculate_parametric_risk(.05,1000)
# print "The expected shortfall: %.2f" % portfolio.calculate_parametric_risk(.05,1000,True)
import numpy as np
from stock import Stock
from cvxopt import matrix
from cvxopt.blas import dot
from cvxopt import solvers
from scipy import stats
from pprint import pprint
solvers.options["show_progress"] = False
class Portfolio(object):
def __init__(self,assets,risk_free = None,position = None):
# The position refers to the dollar amount invested into this particular
# portfolio. The position can be allocated so that it corresponds to the
# portfolio with the maximum sharpe's ratio, or to the portfolio with the
# minimum risk.
self.position = position if position is not None else None
self.assets = [Stock(stock["ticker"],stock["date_range"]) if type(stock) is dict else Stock(stock) for stock in assets]
if risk_free is not None:
self.risk_free = Stock(risk_free["ticker"],risk_free["date_range"]) if type(risk_free) is dict else Stock(risk_free)
else:
self.risk_free = Stock("^IRX")
self.n = len(self.assets)
self.statistics = self.calculate_statistics()
self.optimization = self.optimize_portfolio()
self.returns = self.calculate_portfolio_returns()
def __str__(self):
print_string = "Assets in portfolio: [" + " ".join([asset.ticker for asset in self.assets]) + "]\n\n"
for asset in self.assets:
print_string += asset.__str__() + "\n\n"
print_string += "The weights for each asset in the portfolio:\n"
for i in range(self.n):
print_string += "\t" + self.assets[i].ticker + "\t: " + str(self.optimization["max_sharpe_weights"][i][0]) + "\n"
print_string += "\nExpected return: %.4f" % self.returns
return print_string
def calculate_portfolio_returns(self):
returns = 0.0
for i in range(self.n):
returns += self.assets[i].statistics["expected_return"] * self.optimization["max_sharpe_weights"][i][0]
return returns
def calculate_statistics(self):
statistics = {}
returns = np.zeros((len(self.assets[0].statistics["returns"]),self.n))
for i in range(self.n):
returns[:,i] = self.assets[i].statistics["returns"]
statistics["expected_asset_returns"] = np.array([asset.statistics["expected_return"] for asset in self.assets])
statistics["covariance"] = np.cov(returns,rowvar = 0)
# Due to the behavior of the numpy "diag" function, scalar inputs will fail and
# produce an error. This instance occurs when there is only a single asset in the
# portfolio. In this case, simply exclude the call to "diag" and calculate the
# standard deviation and the square root of a scalar covariance "matrix".
if statistics["covariance"].shape == ():
statistics["standard_deviation"] = np.sqrt(statistics["covariance"])
else:
statistics["standard_deviation"] = np.sqrt(np.diag(statistics["covariance"]))
return statistics
def calculate_parametric_risk(self,alpha,expected_shortfall = False,position = None):
if position is None and self.position is not None:
position = self.position
elif position is None and self.position is None:
print "Either specify a position for the portfolio object or provide one as an input parameter."
return np.nan
mu = self.statistics["expected_asset_returns"]
S = self.statistics["covariance"]
w = self.optimization["max_sharpe_weights"]
portfolio_mu = np.dot(mu,w)
portfolio_sigma = np.sqrt(np.dot(np.dot(w.T,S),w))[0]
quantile = stats.norm.ppf(alpha)
if expected_shortfall:
risk = position * (-portfolio_mu + portfolio_sigma * (stats.norm.pdf(quantile) / alpha))
else:
risk = -position * (portfolio_mu + quantile * portfolio_sigma)
return risk
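    # Illustrative usage (hypothetical numbers): with a 1000 position,
    #   var_95 = portfolio.calculate_parametric_risk(.05, position = 1000)
    #   es_95  = portfolio.calculate_parametric_risk(.05, True, 1000)
    # Under the normality assumption used above,
    #   VaR_alpha = -position * (mu_p + z_alpha * sigma_p)
    #   ES_alpha  =  position * (-mu_p + sigma_p * phi(z_alpha) / alpha)
    # where z_alpha is the alpha-quantile and phi the standard normal pdf.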
def optimize_kelly_criterion(self):
# This code attempts to reproduce the optimization routine proposed by
# Vasily Nekrasov using the Kelly criterion. In particular, this code
# uses as reference the following work:
#
# Nekrasov, Vasily. 2013. "Kelly Criterion for Multivariate Portfolios:
# A Model-Free Approach".
kelly_optimization = {}
n = self.n
r = self.risk_free.statistics["expected_daily_return"]
S = matrix(1.0 / ((1 + r) ** 2) * self.statistics["covariance"])
r_assets = matrix([asset.statistics["expected_daily_return"] for asset in self.assets])
q = matrix(1.0 / (1 + r) * (r_assets - r))
G, h, A, b = self.optimization_constraint_matrices()
# Notice that the "linear" term in the quadratic optimization formulation is made
# negative. This is because Nekrasov maximizes the function, whereas CXVOPT is forced
# to minimize. By making the linear term negative, we arrive at an equivalent
# formulation.
portfolio_weights = solvers.qp(S,-q,G,h,A,b)["x"]
kelly_optimization["weights"] = np.array([portfolio_weights[i] for i in range(n)])
return kelly_optimization
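    # Sketch of the optimization solved above: maximize the quadratic
    # approximation of the expected log-growth,
    #   w' q - 0.5 * w' S w   subject to  w >= 0,  sum(w) = 1,
    # which CVXOPT's qp() handles as  min 0.5 * w' S w + (-q)' w,
    # hence the negated linear term passed to solvers.qp.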
def optimize_portfolio(self):
optimization = {}
n = self.n
S = matrix(2 * self.statistics["covariance"])
expected_returns = matrix(self.statistics["expected_asset_returns"])
G, h, A, b = self.optimization_constraint_matrices()
mu_array = [10**(5.0*t/100-1.0) for t in range(100)]
portfolio_weights = [solvers.qp(mu*S,-expected_returns,G,h,A,b)["x"] for mu in mu_array]
        returns = np.array([dot(expected_returns,w) for w in portfolio_weights])
        risk = np.array([np.sqrt(dot(w,S*w)) for w in portfolio_weights])
# Calculate the portfolio with the greatest "reward-to-risk" ratio, which
# is Sharpe's ratio. Notice that it is not necessary to specify the risk
# free rate in the calculation of Sharpe's ratio, as without loss of generality
# it may be assumed to be zero. In either case, the same portfolio will
# achieve the maximum. However, since the risk free asset defaults to a
# Treasury bill, we take no action regarding this observation.
mu_free = self.risk_free.statistics["expected_return"]
sharpe_ratio = (returns - mu_free) / risk
max_sharpe_index = sharpe_ratio == max(sharpe_ratio)
min_variance_index = risk == min(risk)
optimization["returns"] = returns
optimization["risk"] = risk
# If possible, try to decrease the number of for loops used to extract the
# optimal weights of the portfolio. At the time of writing this, it seems
# that the matrix data structure is somewhat bizarre. Therefore, in order to
# generate the desired numpy array object, so many for loops turned out to
# be necessary.
max_sharpe_weights = [portfolio_weights[i] for i in range(len(portfolio_weights)) if max_sharpe_index[i]]
min_variance_weights = [portfolio_weights[i] for i in range(len(portfolio_weights)) if min_variance_index[i]]
optimization["max_sharpe_weights"] = np.zeros((n,1))
optimization["min_variance_weights"] = np.zeros((n,1))
for i in range(len(max_sharpe_weights[0])):
optimization["max_sharpe_weights"][i] = max_sharpe_weights[0][i]
for i in range(len(min_variance_weights[0])):
optimization["min_variance_weights"][i] = min_variance_weights[0][i]
return optimization
def optimization_constraint_matrices(self):
n = self.n
G = matrix(0.0, (n,n))
G[::n+1] = -1.0
h = matrix(0.0, (n,1))
A = matrix(1.0, (1,n))
b = matrix(1.0)
return G, h, A, b
| mit | -9,167,423,260,467,966,000 | 43.837838 | 128 | 0.639662 | false |
qliu/globe_nocturne | globenocturne/globenocturneapp/migrations/0002_auto_20150416_1956.py | 1 | 2732 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('globenocturneapp', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='DMSPDataset',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
('wms_layer', models.CharField(max_length=100, verbose_name=b'WMS Layer')),
],
options={
'db_table': 'dmsp_dataset',
'verbose_name': 'DMSP Dataset',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='DMSPProduct',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
],
options={
'db_table': 'dmsp_product',
'verbose_name': 'DMSP Product',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Satellite',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=10)),
],
options={
'db_table': 'satellite',
'verbose_name': 'Satellite',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='SatYear',
fields=[
('year', models.IntegerField(serialize=False, primary_key=True)),
],
options={
'db_table': 'sat_year',
'verbose_name': 'Year',
},
bases=(models.Model,),
),
migrations.AddField(
model_name='dmspdataset',
name='product',
field=models.ForeignKey(verbose_name=b'DMSP Product', to='globenocturneapp.DMSPProduct'),
preserve_default=True,
),
migrations.AddField(
model_name='dmspdataset',
name='satellite',
field=models.ForeignKey(verbose_name=b'Satellite', to='globenocturneapp.Satellite'),
preserve_default=True,
),
migrations.AddField(
model_name='dmspdataset',
name='year',
field=models.ForeignKey(verbose_name=b'Year', to='globenocturneapp.SatYear'),
preserve_default=True,
),
]
| gpl-2.0 | 2,225,764,835,229,984,000 | 33.15 | 114 | 0.504758 | false |
SEMAFORInformatik/femagtools | examples/calculation/ld_lq_fast.py | 1 | 3356 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Ld-Lq-Identification with Femag
"""
import os
import femagtools
import femagtools.machine
import logging
import numpy as np
feapars = {
"num_move_steps": 25,
"calculationMode": "ld_lq_fast",
"magn_temp": 60.0,
"i1_max": 150.0,
"beta_max": 0.0,
"beta_min": -60.0,
"num_cur_steps": 3,
"num_beta_steps": 4,
"skew_angle": 0.0,
"num_par_wdgs": 1,
"speed": 50.0
}
magnetMat = [{
"name": "M395",
"remanenc": 1.17,
"temcoefbr": -0.001,
"spmaweight": 7.5,
"magntemp": 20.0,
"temcoefhc": -0.001,
"hcb": 810000.4,
"relperm": 1.05,
"magncond": 833333,
"magnwidth": 15.0e-3,
"magnlength": 100.0e-3,
"hc_min": 760000.0}
]
magnetizingCurve = "../magnetcurves"
pmMotor = {
"name": "PM 270 L8",
"desc": "PM Motor 270mm 8 poles VMAGN",
"poles": 8,
"outer_diam": 0.26924,
"bore_diam": 0.16192,
"inner_diam": 0.11064,
"airgap": 0.00075,
"lfe": 0.08356,
"stator": {
"num_slots": 48,
"num_slots_gen": 12,
"mcvkey_yoke": "M330-50A",
"nodedist": 4.0,
"statorRotor3": {
"slot_height": 0.0335,
"slot_h1": 0.001,
"slot_h2": 0.0,
"slot_width": 0.00193,
"slot_r1": 0.0001,
"slot_r2": 0.00282,
"wedge_width1": 0.00295,
"wedge_width2": 0.0,
"middle_line": 0.0,
"tooth_width": 0.0,
"slot_top_sh": 0.0}
},
"magnet": {
"nodedist": 1.0,
"material": "M395",
"mcvkey_yoke": "M330-50A",
"magnetIronV": {
"magn_angle": 145.0,
"magn_height": 0.00648,
"magn_width": 0.018,
"condshaft_r": 0.05532,
"magn_num": 1.0,
"air_triangle": 1,
"iron_hs": 0.0001,
"gap_ma_iron": 0.0002,
"iron_height": 0.00261,
"magn_rem": 1.2,
"iron_shape": 0.0802
}
},
"windings": {
"num_phases": 3,
"num_layers": 1,
"num_wires": 9,
"coil_span": 6.0,
"cufilfact": 0.4,
"culength": 1.4,
"slot_indul": 0.5e-3
}
}
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(message)s')
workdir = os.path.join(os.path.expanduser('~'), 'femag')
try:
os.makedirs(workdir)
except OSError:
pass
femag = femagtools.Femag(workdir,
magnetizingCurves=magnetizingCurve,
magnets=magnetMat)
r = femag(pmMotor, feapars)
print(r.type)
# find speed at u1max
u1max = 340
tq = 170
ld = r.ldq['ld']
lq = r.ldq['lq']
i1 = r.ldq['i1']
beta = r.ldq['beta']
psim = r.ldq['psim']
p = r.machine['p']
r1 = 0.0
pm = femagtools.machine.PmRelMachineLdq(3, p,
psim,
ld,
lq,
r1,
beta,
i1)
tq = 170.0
u1 = 340.0
iqx, idx = pm.iqd_torque(tq)
w1 = pm.w1_u(u1, iqx, idx)
betaopt, i1 = femagtools.machine.betai1(iqx, idx)
print("f1 {0:8.1f} Hz, I1 {1:8.1f} A, Beta {2:4.1f} °".format(
w1/2/np.pi, i1, betaopt/np.pi*180))
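# Note (added for clarity): w1 is the electrical angular frequency at which u1 is
# reached, so the corresponding mechanical shaft speed of this machine with p pole
# pairs is n = w1 / (2*pi*p) revolutions per second.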
| bsd-2-clause | 4,781,478,290,324,042,000 | 22.298611 | 63 | 0.467064 | false |
Symmetric/calico-docker | calico_containers/calico_ctl/status.py | 1 | 2475 | # Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Usage:
calicoctl status
Description:
Print current status information regarding calico-node container
and the BIRD routing daemon.
"""
import re
from utils import docker_client
def status(arguments):
"""
Main dispatcher for status commands. Calls the corresponding helper
function.
:param arguments: A dictionary of arguments already processed through
this file's docstring with docopt
:return: None
"""
calico_node_info = filter(lambda container: "/calico-node" in
container["Names"],
docker_client.containers())
if len(calico_node_info) == 0:
print "calico-node container not running"
else:
print "calico-node container is running. Status: %s" % \
calico_node_info[0]["Status"]
apt_cmd = docker_client.exec_create("calico-node", ["/bin/bash", "-c",
"apt-cache policy calico-felix"])
result = re.search(r"Installed: (.*?)\s", docker_client.exec_start(apt_cmd))
if result is not None:
print "Running felix version %s" % result.group(1)
print "IPv4 Bird (BGP) status"
bird_cmd = docker_client.exec_create("calico-node",
["/bin/bash", "-c",
"echo show protocols | "
"birdc -s /etc/service/bird/bird.ctl"])
print docker_client.exec_start(bird_cmd)
print "IPv6 Bird (BGP) status"
bird6_cmd = docker_client.exec_create("calico-node",
["/bin/bash", "-c",
"echo show protocols | "
"birdc6 -s "
"/etc/service/bird6/bird6.ctl"])
print docker_client.exec_start(bird6_cmd)
| apache-2.0 | 4,725,994,529,181,571,000 | 38.919355 | 84 | 0.585859 | false |
ULHPC/easybuild-easyblocks | easybuild/easyblocks/h/hdf5.py | 1 | 4242 | ##
# Copyright 2009-2017 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing HDF5, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
"""
import os
import easybuild.tools.environment as env
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.modules import get_software_root
from easybuild.tools.systemtools import get_shared_lib_ext
class EB_HDF5(ConfigureMake):
"""Support for building/installing HDF5"""
def configure_step(self):
"""Configure build: set require config and make options, and run configure script."""
# configure options for dependencies
deps = [
("Szip", "--with-szlib"),
("zlib", "--with-zlib"),
]
for (dep, opt) in deps:
root = get_software_root(dep)
if root:
self.cfg.update('configopts', '%s=%s' % (opt, root))
fcomp = 'FC="%s"' % os.getenv('F90')
self.cfg.update('configopts', "--with-pic --with-pthread --enable-shared")
self.cfg.update('configopts', "--enable-cxx --enable-fortran %s" % fcomp)
# MPI and C++ support enabled requires --enable-unsupported, because this is untested by HDF5
# also returns False if MPI is not supported by this toolchain
if self.toolchain.options.get('usempi', None):
self.cfg.update('configopts', "--enable-unsupported --enable-parallel")
else:
self.cfg.update('configopts', "--disable-parallel")
# make options
self.cfg.update('buildopts', fcomp)
        # set RUNPARALLEL if MPI is enabled (and supported by this toolchain)
if self.toolchain.options.get('usempi', None):
env.setvar('RUNPARALLEL', 'mpirun -np \$\${NPROCS:=2}')
super(EB_HDF5, self).configure_step()
# default make and make install are ok
def sanity_check_step(self):
"""
Custom sanity check for HDF5
"""
# also returns False if MPI is not supported by this toolchain
if self.toolchain.options.get('usempi', None):
extra_binaries = ["bin/%s" % x for x in ["h5perf", "h5pcc", "h5pfc", "ph5diff"]]
else:
extra_binaries = ["bin/%s" % x for x in ["h5cc", "h5fc"]]
libs = ['', '_cpp', '_fortran', '_hl_cpp', '_hl', 'hl_fortran']
shlib_ext = get_shared_lib_ext()
custom_paths = {
'files': ["bin/h5%s" % x for x in ["2gif", "c++", "copy", "debug", "diff",
"dump", "import", "jam","ls", "mkgrp",
"perf_serial", "redeploy", "repack",
"repart", "stat", "unjam"]] +
["bin/gif2h5"] + extra_binaries +
["lib/libhdf5%s.%s" % (l, shlib_ext) for l in libs],
'dirs': ['include'],
}
super(EB_HDF5, self).sanity_check_step(custom_paths=custom_paths)
| gpl-2.0 | 1,027,973,136,351,787,100 | 39.788462 | 101 | 0.621405 | false |
reflectometry/direfl | direfl/api/__init__.py | 1 | 1336 | # This program is public domain
# Author: Paul Kienzle
"""
Surround variation reflectometry
The surround variation reflectometry package consists of three interacting
pieces: simulation, phase reconstruction and phase inversion. Before the
experiment starts, you should provide an expected profile to the simulator.
This will indicate whether inversion is feasible for the given structure, and
allow you to determine noise sensitivity and the effects of various substrate
and surround media. Once you are comfortable with the parameters, you can
perform your experiment by measuring back reflectivity through the film on a
substrate.
Two experimental runs are needed where the only difference in the setup is that
one of the surround materials (either the incident or substrate layer) is
changed. This generates two datasets each containing a real and an imaginary
reflectivity for your sample as a reversed film with the substrate material on
either side. Next you perform inversion on the real portion of the reflection
amplitude which returns the scattering length density profile. This profile
can then be used to compute the expected reflectivity for the original
measurements, which, if all goes well, should agree perfectly.
See :class:`invert.Inversion`, :func:`invert.reconstruct` and
:class:`simulate.Simulation` for details.
"""
| mit | -1,315,653,074,551,409,400 | 54.666667 | 79 | 0.817365 | false |
rahulguptakota/paper-To-Reviewer-Matching-System | train.py | 1 | 7082 | from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.model_selection import KFold
from random import shuffle
from sklearn.svm import SVC
from sklearn import preprocessing
import numpy as np
files = ["k4","k7","k9","k10"]
# ,"features_k6.txt","features_k7.txt","features_k9.txt","features_k10.txt","features_k12.txt"]
dhawalfile = ["output.csv"]
data = {}
fd = open("db/MeaningfulCitationsDataset/ValenzuelaAnnotations1.csv",'rb')
t = fd.read()
i=0
for line in t.decode().split("\n"):
if i != 0:
line = line.split(",")
try:
data[(line[1],line[2])] = {}
data[(line[1],line[2])]["test"] = line[-1]
# print(line)
except:
pass
i = i + 1
fd.close()
# print(data)
for f in files:
fd = open("features_" + f + ".txt",'rb')
t = fd.read()
i=0
for line in t.decode().split("\n"):
line = line.split(" ")
try:
data[(line[0],line[1])][f] = line[-1]
except:
pass
# print(line)
i = i + 1
fd.close()
# print(data)
data_dhawal = {}
for f in dhawalfile:
fd = open(f,'rb')
t = fd.read()
i=0
for line in t.decode().split("\n"):
line = line.split(",")
# print(line)
data_dhawal[(line[0],line[1])] = {}
data_dhawal[(line[0],line[1])][f] = line[2:]
# print(data[(line[0],line[1])])
data_dhawal[(line[0],line[1])]["test"] = data[(line[0],line[1])]["test"]
for f1 in files:
data_dhawal[(line[0],line[1])][f1] = data[(line[0],line[1])][f1]
# print(line)
i = i + 1
fd.close()
# rahulfile = ["semantic.csv"]
# for f in rahulfile:
# fd = open(f,'rb')
# t = fd.read()
# i=0
# for line in t.decode().split("\n"):
# line = line.split(",")
# print(line)
# data_dhawal[(line[0],line[1])][f] = line[-1]
# # print(line)
# i = i + 1
# fd.close()
# print(data_dhawal)
# exit()
X = []
Y = []
for key in data_dhawal.keys():
# print(key)
# try:
temp = []
for f in files:
temp.append(data_dhawal[key][f])
for t in data_dhawal[key]["output.csv"]:
temp.append(t)
# for t in data_dhawal[key]["semantic.csv"]:
# temp.append(t)
# temp.append(t for t in data_dhawal[key]["output.csv"])
X.append(temp)
Y.append(data_dhawal[key]['test'])
# except:
# pass
# print(X[1])
# exit()
# print(Y)
Y = np.array(Y)
kf = KFold(n_splits=5)
X = preprocessing.scale(X)
globalaccuracy = 0
globalaccuracy1 = 0
globalsvmaccuracy = 0
globaldecisiontreeaccuracy = 0
globalknnaccuracy = 0
for train_index, test_index in kf.split(X):
# print("TRAIN:", train_index, "TEST:", test_index)
X_train, X_test = X[train_index], X[test_index]
# print(Y)
y_train, y_test = Y[train_index], Y[test_index]
# print("The size of X_train, X_test, y_train, y_test is {}, {}, {}, {}".format(np.shape(X_train),np.shape(X_test),np.shape(y_train),np.shape(y_test)))
# svm_model_linear = SVC(kernel = 'rbf', gamma=5).fit(X_train, y_train)
# svm_predictions = svm_model_linear.predict(X_test)
# # model accuracy for X_test
# accuracy = svm_model_linear.score(X_test, y_test)
# # creating a confusion matrix
# cm = confusion_matrix(y_test, svm_predictions)
# print("The accuracy for SVM is ", accuracy)
# print("The confusion matrix for SVM is\n",cm)
# # training a KNN classifier
# knn = KNeighborsClassifier(n_neighbors = 7).fit(X_train, y_train)
clf1 = DecisionTreeClassifier(max_depth=4)
clf2 = KNeighborsClassifier(n_neighbors=4)
clf3 = SVC(kernel='rbf', probability=True)
eclf = VotingClassifier(estimators=[('dt', clf1), ('knn', clf2), ('svc', clf3)])
# clf1 = clf1.fit(X_train,y_train)
# clf2 = clf2.fit(X_train,y_train)
# clf3 = clf3.fit(X_train,y_train)
eclf = eclf.fit(X_train,y_train)
eclf_accuracy = eclf.score(X_test,y_test)
prediction = eclf.predict(X_test)
cm = confusion_matrix(y_test, prediction)
globalaccuracy += eclf_accuracy
# print("The accracy for Voting classifier is ",eclf_accuracy)
# print("The cm for Voting classifier is \n",cm)
svmclf = SVC(kernel='rbf', probability=True)
svmclf = svmclf.fit(X_train,y_train)
svmclf_accuracy = svmclf.score(X_test,y_test)
prediction = svmclf.predict(X_test)
cm = confusion_matrix(y_test, prediction)
globalsvmaccuracy += svmclf_accuracy
# print("The accracy for SVM classifier is ",svmclf_accuracy)
# print("The cm for Voting classifier is \n",cm)
dtclf = DecisionTreeClassifier(max_depth=7)
dtclf = dtclf.fit(X_train,y_train)
dtclf_accuracy = dtclf.score(X_test,y_test)
prediction = dtclf.predict(X_test)
cm = confusion_matrix(y_test, prediction)
globaldecisiontreeaccuracy += dtclf_accuracy
print("The importance of Features in DT is {}".format(dtclf.feature_importances_))
# print("The accracy for SVM classifier is ",dtclf_accuracy)
# print("The cm for Voting classifier is \n",cm)
knnclf = KNeighborsClassifier(n_neighbors=5)
knnclf = knnclf.fit(X_train,y_train)
knnclf_accuracy = knnclf.score(X_test,y_test)
prediction = knnclf.predict(X_test)
cm = confusion_matrix(y_test, prediction)
globalknnaccuracy += knnclf_accuracy
# print("The accracy for SVM classifier is ",knnclf_accuracy)
# print("The cm for Voting classifier is \n",cm)
eclf = VotingClassifier(estimators=[('dt', clf1), ('svc', clf3)], voting='soft', weights=[2,2])
bclf = BaggingClassifier(base_estimator=eclf)
bclf = bclf.fit(X_train,y_train)
bclf_accuracy = bclf.score(X_test,y_test)
prediction = bclf.predict(X_test)
cm = confusion_matrix(y_test, prediction)
globalaccuracy1 += bclf_accuracy
# print("The accracy for bagging Voting classifier is ",bclf_accuracy)
# print("The cm for bagging Voting classifier is \n",cm)
print("The accracy for Voting classifier is ",globalaccuracy/5)
print("The accracy for bagging Voting classifier is ",globalaccuracy1/5)
print("The accracy for SVM classifier is ",globalsvmaccuracy/5)
print("The accracy for Decision Tree classifier is ",globaldecisiontreeaccuracy/5)
print("The accracy for KNN classifier is ",globalknnaccuracy/5)
# adaclf = AdaBoostClassifier(base_estimator=SVC(kernel='linear', probability=True),n_estimators=100)
# # accracy = cross_val_score(adaclf, X_test, y_test)
# # accuracy = cross_val_score(adaclf, X, Y)
# adaclf = adaclf.fit(X_train,y_train)
# adaclf_accuracy = adaclf.score(X_test,y_test)
# prediction = adaclf.predict(X_test)
# cm = confusion_matrix(y_test, prediction)
# print("Accuracy is ",adaclf_accuracy)
# print("The confusion matrix is:\n",cm) | mit | 8,721,934,809,690,223,000 | 34.238806 | 155 | 0.641344 | false |
leahrnh/ticktock_text_api | prepare_data_user_input.py | 1 | 1698 | #!/usr/bin/env python
import sys
import time
import os
import json
import pickle
def readfile(fn):
result = {}
result["Turns"] = {}
current_turn = 0
key_index = 0
keys = ["Turn", "You", "TickTock", "Appropriateness"]
for l in open(fn):
if ":" in l:
key = l.split(":")[0]
value = ":".join(l.split(":")[1:]).strip()
if key == "TurkID" or key == "UserID":
result[key] = value
else:
if keys[key_index%4] != key:
print l
assert(False)
key_index += 1
if key == "Turn":
current_turn = int(value)
result["Turns"][current_turn] = {}
elif key in keys[1:4]:
result["Turns"][current_turn][key] = value
else:
assert(False)
return result
def readall(dir_path):
result = {}
for f in os.listdir(dir_path):
print f
if ".txt" in f and "rating" in f:
full_path = os.path.join(dir_path, f)
result[full_path] = readfile(full_path)
return result
def get_log(rating_logs):
writelist =[]
for f,r in rating_logs.iteritems():
num_turns = len(r["Turns"])
for i in range(1, num_turns + 1):
tmpdict ={}
tmpdict["question"]= r["Turns"][i]["You"]
tmpdict["answer"] = r["Turns"][i]["TickTock"]
tmpdict["app_value"]=r["Turns"][i]["Appropriateness"]
tmpdict["user_id"]=r["TurkID"]
#tmpdict["aSentId"]=2016
writelist.append(tmpdict)
return writelist
rating_logs = readall("/home/ubuntu/zhou/Backend/rating_log/v2")
writelist = get_log(rating_logs)
with open('user_input_v2.txt','w') as f:
for tmpdict in writelist:
f.write(tmpdict["question"]+'\n')
with open('user_input_v2.pkl','w') as f:
pickle.dump(writelist,f)
| gpl-2.0 | -2,080,913,587,433,048,600 | 24.727273 | 64 | 0.585984 | false |
un-brs/mfiles-tools | mfiles_project/mfiles_sync/management/commands/mfcache.py | 1 | 9571 | # -*- coding: utf-8 -*-
import datetime
import calendar
from mfiles_sync.models import (Vault, Document, DocumentView, PropertyDef,
Property, DocumentProperty)
from django.conf import settings
from django.core.management.base import BaseCommand
from win32com.client import gencache
mfiles = gencache.EnsureModule(
'{B9C079AA-92DD-4FB4-A0E0-AA3198955B45}', 0, 1, 0
)
def add_months(sourcedate, months):
month = sourcedate.month - 1 + months
year = sourcedate.year + month // 12
month = month % 12 + 1
# print(year, month)
day = min(sourcedate.day, calendar.monthrange(year, month)[1])
return datetime.date(year, month, day)
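# Examples (added for clarity): add_months(datetime.date(2014, 12, 1), 1) gives
# datetime.date(2015, 1, 1), and the min(...) clamp keeps month-end dates valid,
# e.g. add_months(datetime.date(2015, 1, 31), 1) gives datetime.date(2015, 2, 28).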
class Command(BaseCommand):
help = 'Syncronize MFiles'
def add_arguments(self, parser):
pass
def get_server_vaults(self):
server = mfiles.MFilesServerApplication()
server.Connect(AuthType=mfiles.constants.MFAuthTypeSpecificMFilesUser,
UserName=settings.MFILES_USERNAME,
Password=settings.MFILES_PASSWORD,
NetworkAddress=settings.MFILES_HOST,
Endpoint="2266")
return server.GetVaults()
def process_valuelist(self, db_pdef, mfiles_valuelist):
for mfiles_item in mfiles_valuelist:
db_prop = Property(
mfiles_display_id=mfiles_item.DisplayID, pdef=db_pdef)
db_prop.set_value(mfiles_item.Name)
db_prop.save()
def process_propertydef(self, mfiles_pdef, mfiles_vault, db_vault):
db_pdefs = list(
PropertyDef.objects.filter(
mfiles_id=mfiles_pdef.ID, vault=db_vault)
)
if db_pdefs:
db_pdef = db_pdefs[0]
else:
db_pdef = PropertyDef(
name=mfiles_pdef.Name,
mfiles_id=mfiles_pdef.ID,
vault=db_vault,
dtype=mfiles_pdef.DataType
)
db_pdef.save()
if mfiles_pdef.ValueList:
mfiles_valuelist = (
mfiles_vault.ValueListItemOperations.GetValueListItems(
mfiles_pdef.ValueList
)
)
self.process_valuelist(
db_pdef=db_pdef,
mfiles_valuelist=mfiles_valuelist
)
return db_pdef
def process_property(self, mfiles_typedvalue, db_pdef, db_doc):
if db_pdef.dtype in (PropertyDef.MFDatatypeMultiSelectLookup,
PropertyDef.MFDatatypeLookup):
for lookup in mfiles_typedvalue.GetValueAsLookups():
db_props = list(
db_pdef.property_set.filter(
mfiles_display_id=lookup.DisplayID
)
)
if db_props:
db_prop = db_props[0]
db_docprop = DocumentProperty(doc=db_doc, prop=db_prop)
db_docprop.save()
else:
if mfiles_typedvalue.Value:
db_prop = Property(pdef=db_pdef)
db_prop.set_value(mfiles_typedvalue.Value)
db_prop.save()
db_docprop = DocumentProperty(doc=db_doc, prop=db_prop)
db_docprop.save()
def process_properties(self, mfiles_props, mfiles_vault, db_vault, db_doc):
for mfiles_prop in mfiles_props:
mfiles_pdef = mfiles_vault.PropertyDefOperations.GetPropertyDef(
mfiles_prop.PropertyDef
)
db_pdef = self.process_propertydef(
mfiles_pdef=mfiles_pdef,
mfiles_vault=mfiles_vault,
db_vault=db_vault
)
self.process_property(
mfiles_typedvalue=mfiles_prop.Value,
db_pdef=db_pdef,
db_doc=db_doc
)
def process_object_version(self, mfiles_vault, object_version, db_view,
db_vault):
if object_version.FilesCount != 1:
self.stderr.write(
"'%s' does not contains files" % object_version.Title
)
return
file = object_version.Files.Item(1)
db_doc = Document(
mfiles_id=object_version.ObjVer.ID,
vault=db_vault,
name=file.Title,
ext=file.Extension,
size=file.LogicalSize,
created=object_version.CreatedUtc,
modified=object_version.LastModifiedUtc
)
db_doc.save()
self.stdout.write("Process document '%s.%s'" %
(db_doc.name, db_doc.ext)
)
db_docview = DocumentView(doc=db_doc, view=db_view)
db_docview.save()
mfiles_props = (
mfiles_vault.ObjectOperations.GetObjectVersionAndProperties(
object_version.ObjVer
).Properties
)
self.process_properties(
mfiles_vault=mfiles_vault,
mfiles_props=mfiles_props,
db_vault=db_vault,
db_doc=db_doc
)
def process_view(self, mfiles_vault, mfiles_view, db_view, db_vault):
self.stdout.write(str(db_view))
db_view.condition = (
mfiles_view.SearchConditions.GetAsExportedSearchString(
mfiles.constants.MFSearchFlagReturnLatestVisibleVersion
)
)
db_view.save()
conditions = mfiles_view.SearchConditions
df_date = mfiles.DataFunctionCall()
df_date.SetDataDate()
# ======================================================================
search = mfiles.SearchCondition()
expression = mfiles.Expression()
value = mfiles.TypedValue()
expression.SetPropertyValueExpression(
mfiles.constants.MFBuiltInPropertyDefLastModified,
mfiles.constants.MFParentChildBehaviorNone,
df_date
)
# value.SetValue(mfiles.constants.MFDatatypeDate, '15/12/2014')
search.Set(
expression,
mfiles.constants.MFConditionTypeGreaterThanOrEqual,
value
)
conditions.Add(-1, search)
# ======================================================================
search = mfiles.SearchCondition()
expression = mfiles.Expression()
# value = mfiles.TypedValue()
expression.SetPropertyValueExpression(
mfiles.constants.MFBuiltInPropertyDefLastModified,
mfiles.constants.MFParentChildBehaviorNone,
df_date
)
# value.SetValue(mfiles.constants.MFDatatypeDate, '15/12/2014')
search.Set(
expression, mfiles.constants.MFConditionTypeLessThan, value
)
conditions.Add(-1, search)
# ======================================================================
start = datetime.date(2014, 12, 1)
end = add_months(start, 1)
while start < datetime.date.today():
print("Process date range", start, end)
conditions.Item(conditions.Count - 1).TypedValue.SetValue(
mfiles.constants.MFDatatypeDate, start.strftime('%d/%m/%Y')
)
conditions.Item(conditions.Count).TypedValue.SetValue(
mfiles.constants.MFDatatypeDate, end.strftime('%d/%m/%Y')
)
objs = (
mfiles_vault.ObjectSearchOperations.
SearchForObjectsByConditionsEx(
conditions,
mfiles.constants.MFSearchFlagReturnLatestVisibleVersion,
False,
0
)
)
for object_version in objs:
self.process_object_version(
mfiles_vault=mfiles_vault,
object_version=object_version,
db_view=db_view,
db_vault=db_vault
)
start, end = end, add_months(start, 1)
def process_vault(self, mfiles_vault, db_vault):
self.stdout.write('Vault %s %s' % (db_vault.name,
mfiles_vault.GetGUID()))
db_vault.guid = mfiles_vault.GetGUID()
db_vault.save()
mfiles_views = {
v.Name: v for v in mfiles_vault.ViewOperations.GetViews()
}
for db_view in db_vault.view_set.filter(is_enabled=True):
mfiles_view = mfiles_views.get(db_view.name)
if mfiles_view:
self.process_view(
mfiles_vault=mfiles_vault,
mfiles_view=mfiles_view,
db_view=db_view,
db_vault=db_vault
)
else:
self.stdout.write("Could not find view '%s'" % db_view.name)
def handle(self, *args, **options):
mfiles_svaults = {v.Name: v for v in self.get_server_vaults()}
for db_vault in Vault.objects.filter(is_enabled=True):
mfiles_svault = mfiles_svaults.get(db_vault.name)
if mfiles_svault:
mfiles_vault = mfiles_svault.LogIn()
if mfiles_vault.LoggedIn:
self.process_vault(mfiles_vault, db_vault)
else:
self.stderr.write("Could not login to '%s' vault " %
db_vault.name)
else:
self.stderr.write("Could not find vault %s" % db_vault.name)
| mit | 4,169,048,206,284,975,000 | 34.579926 | 80 | 0.532024 | false |
beblount/Steer-Clear-Backend-Web | env/Lib/site-packages/testfixtures/tests/test_manuel.py | 1 | 6036 | # Copyright (c) 2010-2013 Simplistix Ltd
#
# See license.txt for more details.
import re
from manuel import Document, Region, RegionContainer, Manuel
from mock import Mock
from testfixtures import compare, Comparison as C, TempDirectory
from testfixtures.manuel import Files,FileBlock,FileResult
from unittest import TestCase
class TestContainer(RegionContainer):
def __init__(self,attr,*blocks):
self.regions = []
for block in blocks:
region = Region(0,' ')
setattr(region,attr,block)
self.regions.append(region)
class TestManuel(TestCase):
def tearDown(self):
TempDirectory.cleanup_all()
def test_multiple_files(self):
d = Document("""
.. topic:: file.txt
:class: write-file
line 1
line 2
line 3
.. topic:: file2.txt
:class: read-file
line 4
line 5
line 6
""")
d.parse_with(Files('td'))
compare([
None,
C(FileBlock,
path='file.txt',
content="line 1\n\nline 2\nline 3\n",
action='write'),
C(FileBlock,
path='file2.txt',
content='line 4\n\nline 5\nline 6\n',
action='read'),
],[r.parsed for r in d])
def test_ignore_literal_blocking(self):
d = Document("""
.. topic:: file.txt
:class: write-file
::
line 1
line 2
line 3
""")
d.parse_with(Files('td'))
compare([
None,
C(FileBlock,
path='file.txt',
content="line 1\n\nline 2\nline 3\n",
action='write'),
],[r.parsed for r in d])
def test_file_followed_by_text(self):
d = Document("""
.. topic:: file.txt
:class: write-file
.. code-block:: python
print "hello"
out = 'there'
foo = 'bar'
This is just some normal text!
""")
d.parse_with(Files('td'))
compare([
None,
C(FileBlock,
path='file.txt',
content='.. code-block:: python\n\nprint "hello"'
'\nout = \'there\'\n\nfoo = \'bar\'\n',
action='write'),
None,
],[r.parsed for r in d])
def test_red_herring(self):
d = Document("""
.. topic:: file.txt
:class: not-a-file
print "hello"
out = 'there'
""")
d.parse_with(Files('td'))
compare([r.parsed for r in d],[None])
def test_no_class(self):
d = Document("""
.. topic:: file.txt
print "hello"
out = 'there'
""")
d.parse_with(Files('td'))
compare([r.parsed for r in d],[None])
def test_unclaimed_works(self):
# a test manuel
CLASS = re.compile(r'^\s+:class:',re.MULTILINE)
class Block(object):
def __init__(self,source): self.source = source
def find_class_blocks(document):
for region in document.find_regions(CLASS):
region.parsed = Block(region.source)
document.claim_region(region)
def Test():
return Manuel(parsers=[find_class_blocks])
# now our test
d = Document("""
.. topic:: something-else
:class: not-a-file
line 1
line 2
line 3
""")
d.parse_with(Files('td')+Test())
# now check FileBlock didn't mask class block
compare([
None,
C(Block,
source=' :class:\n'),
None,
],[r.parsed for r in d])
def test_evaluate_non_fileblock(self):
m = Mock()
d = TestContainer('parsed',m)
d.evaluate_with(Files('td'),globs={})
compare([None],[r.evaluated for r in d])
compare(m.call_args_list,[])
compare(m.method_calls,[])
def test_evaluate_read_same(self):
dir = TempDirectory()
dir.write('foo', b'content')
d = TestContainer('parsed',FileBlock('foo','content','read'))
d.evaluate_with(Files('td'),globs={'td':dir})
compare([C(FileResult,
passed=True,
expected=None,
actual=None)],
[r.evaluated for r in d])
def test_evaluate_read_difference(self):
dir = TempDirectory()
dir.write('foo', b'actual')
d = TestContainer('parsed',FileBlock('foo','expected','read'))
d.evaluate_with(Files('td'),globs={'td':dir})
compare([C(FileResult,
passed=False,
path='foo',
expected='expected',
actual='actual')],
[r.evaluated for r in d])
    def test_evaluate_write(self):
dir = TempDirectory()
d = TestContainer('parsed',FileBlock('foo','content','write'))
d.evaluate_with(Files('td'),globs={'td':dir})
compare([C(FileResult,
passed=True,
expected=None,
actual=None)],
[r.evaluated for r in d])
dir.check('foo')
compare(dir.read('foo', 'ascii'), 'content')
def test_formatter_non_fileblock(self):
d = TestContainer('evaluated',object)
d.format_with(Files('td'))
compare(d.formatted(),'')
def test_formatter_passed(self):
d = TestContainer('evaluated',FileResult())
d.format_with(Files('td'))
compare(d.formatted(),'')
def test_formatter_failed(self):
r = FileResult()
r.passed = False
r.path = '/foo/bar'
r.expected = 'same\nexpected\n'
r.actual = 'same\nactual\n'
d = TestContainer('evaluated',r)
d.format_with(Files('td'))
compare('File "<memory>", line 0:\n'
'Reading from "/foo/bar":\n'
'@@ -1,3 +1,3 @@\n'
' same\n'
'-expected\n'
'+actual\n ',
d.formatted()
)
| mit | 1,924,768,134,299,826,000 | 25.017241 | 70 | 0.50381 | false |
bolkedebruin/airflow | airflow/hooks/dbapi_hook.py | 1 | 10860 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from contextlib import closing
from datetime import datetime
from typing import Optional
from sqlalchemy import create_engine
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.typing_compat import Protocol
class ConnectorProtocol(Protocol):
def connect(host, port, username, schema):
...
class DbApiHook(BaseHook):
"""
Abstract base class for sql hooks.
"""
# Override to provide the connection name.
conn_name_attr = None # type: Optional[str]
# Override to have a default connection id for a particular dbHook
default_conn_name = 'default_conn_id'
# Override if this db supports autocommit.
supports_autocommit = False
# Override with the object that exposes the connect method
connector = None # type: Optional[ConnectorProtocol]
def __init__(self, *args, **kwargs):
if not self.conn_name_attr:
raise AirflowException("conn_name_attr is not defined")
elif len(args) == 1:
setattr(self, self.conn_name_attr, args[0])
elif self.conn_name_attr not in kwargs:
setattr(self, self.conn_name_attr, self.default_conn_name)
else:
setattr(self, self.conn_name_attr, kwargs[self.conn_name_attr])
def get_conn(self):
"""Returns a connection object
"""
db = self.get_connection(getattr(self, self.conn_name_attr))
return self.connector.connect(
host=db.host,
port=db.port,
username=db.login,
schema=db.schema)
def get_uri(self):
conn = self.get_connection(getattr(self, self.conn_name_attr))
login = ''
if conn.login:
login = '{conn.login}:{conn.password}@'.format(conn=conn)
host = conn.host
if conn.port is not None:
host += ':{port}'.format(port=conn.port)
uri = '{conn.conn_type}://{login}{host}/'.format(
conn=conn, login=login, host=host)
if conn.schema:
uri += conn.schema
return uri
def get_sqlalchemy_engine(self, engine_kwargs=None):
if engine_kwargs is None:
engine_kwargs = {}
return create_engine(self.get_uri(), **engine_kwargs)
def get_pandas_df(self, sql, parameters=None):
"""
Executes the sql and returns a pandas dataframe
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
import pandas.io.sql as psql
with closing(self.get_conn()) as conn:
return psql.read_sql(sql, con=conn, params=parameters)
def get_records(self, sql, parameters=None):
"""
Executes the sql and returns a set of records.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
with closing(self.get_conn()) as conn:
with closing(conn.cursor()) as cur:
if parameters is not None:
cur.execute(sql, parameters)
else:
cur.execute(sql)
return cur.fetchall()
def get_first(self, sql, parameters=None):
"""
Executes the sql and returns the first resulting row.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
with closing(self.get_conn()) as conn:
with closing(conn.cursor()) as cur:
if parameters is not None:
cur.execute(sql, parameters)
else:
cur.execute(sql)
return cur.fetchone()
def run(self, sql, autocommit=False, parameters=None):
"""
Runs a command or a list of commands. Pass a list of sql
statements to the sql parameter to get them to execute
sequentially
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param autocommit: What to set the connection's autocommit setting to
before executing the query.
:type autocommit: bool
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
if isinstance(sql, str):
sql = [sql]
with closing(self.get_conn()) as conn:
if self.supports_autocommit:
self.set_autocommit(conn, autocommit)
with closing(conn.cursor()) as cur:
for s in sql:
if parameters is not None:
self.log.info("{} with parameters {}".format(s, parameters))
cur.execute(s, parameters)
else:
self.log.info(s)
cur.execute(s)
# If autocommit was set to False for db that supports autocommit,
# or if db does not supports autocommit, we do a manual commit.
if not self.get_autocommit(conn):
conn.commit()
def set_autocommit(self, conn, autocommit):
"""
Sets the autocommit flag on the connection
"""
if not self.supports_autocommit and autocommit:
self.log.warning(
"%s connection doesn't support autocommit but autocommit activated.",
getattr(self, self.conn_name_attr)
)
conn.autocommit = autocommit
def get_autocommit(self, conn):
"""
Get autocommit setting for the provided connection.
Return True if conn.autocommit is set to True.
Return False if conn.autocommit is not set or set to False or conn
does not support autocommit.
:param conn: Connection to get autocommit setting from.
:type conn: connection object.
:return: connection autocommit setting.
:rtype: bool
"""
return getattr(conn, 'autocommit', False) and self.supports_autocommit
def get_cursor(self):
"""
Returns a cursor
"""
return self.get_conn().cursor()
def insert_rows(self, table, rows, target_fields=None, commit_every=1000,
replace=False):
"""
A generic way to insert a set of tuples into a table,
a new transaction is created every commit_every rows
:param table: Name of the target table
:type table: str
:param rows: The rows to insert into the table
:type rows: iterable of tuples
:param target_fields: The names of the columns to fill in the table
:type target_fields: iterable of strings
:param commit_every: The maximum number of rows to insert in one
transaction. Set to 0 to insert all rows in one transaction.
:type commit_every: int
:param replace: Whether to replace instead of insert
:type replace: bool
"""
if target_fields:
target_fields = ", ".join(target_fields)
target_fields = "({})".format(target_fields)
else:
target_fields = ''
i = 0
with closing(self.get_conn()) as conn:
if self.supports_autocommit:
self.set_autocommit(conn, False)
conn.commit()
with closing(conn.cursor()) as cur:
for i, row in enumerate(rows, 1):
lst = []
for cell in row:
lst.append(self._serialize_cell(cell, conn))
values = tuple(lst)
placeholders = ["%s", ] * len(values)
if not replace:
sql = "INSERT INTO "
else:
sql = "REPLACE INTO "
sql += "{0} {1} VALUES ({2})".format(
table,
target_fields,
",".join(placeholders))
cur.execute(sql, values)
if commit_every and i % commit_every == 0:
conn.commit()
self.log.info(
"Loaded %s into %s rows so far", i, table
)
conn.commit()
self.log.info("Done loading. Loaded a total of %s rows", i)
@staticmethod
def _serialize_cell(cell, conn=None):
"""
Returns the SQL literal of the cell as a string.
:param cell: The cell to insert into the table
:type cell: object
:param conn: The database connection
:type conn: connection object
:return: The serialized cell
:rtype: str
"""
if cell is None:
return None
if isinstance(cell, datetime):
return cell.isoformat()
return str(cell)
def bulk_dump(self, table, tmp_file):
"""
Dumps a database table into a tab-delimited file
:param table: The name of the source table
:type table: str
:param tmp_file: The path of the target file
:type tmp_file: str
"""
raise NotImplementedError()
def bulk_load(self, table, tmp_file):
"""
Loads a tab-delimited file into a database table
:param table: The name of the target table
:type table: str
:param tmp_file: The path of the file to load into the table
:type tmp_file: str
"""
raise NotImplementedError()
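# Illustrative subclass sketch (hypothetical names, added to show how this hook is
# meant to be specialised; real hooks ship as separate Airflow modules):
#
#   class MyDbHook(DbApiHook):
#       conn_name_attr = 'my_conn_id'
#       default_conn_name = 'my_db_default'
#       supports_autocommit = True
#       connector = my_dbapi_module  # exposes connect(host, port, username, schema)
#
#   hook = MyDbHook(my_conn_id='my_connection')
#   records = hook.get_records("SELECT 1")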
| apache-2.0 | 3,902,563,915,036,066,000 | 35.079734 | 85 | 0.580203 | false |
drowsy810301/NTHUOJ_web | nthuoj/settings.py | 1 | 4460 | #-*- encoding=UTF-8 -*-
"""
Django settings for nthuoj project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from utils.config_info import get_config
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."),)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'kivl1x)by8$98z6y3b^7texw&+d1arad2qlq-(sn=8g^lw_(+&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = (
'autocomplete_light',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'utils',
'problem',
'index',
'contest',
'users',
'team',
'group',
'status',
'axes',
'bootstrapform',
'djangobower',
'datetimewidget',
'vjudge',
'ckeditor',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'utils.render_helper.CustomHttpExceptionMiddleware',
'axes.middleware.FailedLoginMiddleware',
)
ROOT_URLCONF = 'nthuoj.urls'
WSGI_APPLICATION = 'nthuoj.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
CONFIG_PATH = os.path.join(BASE_DIR, 'nthuoj/config/nthuoj.cfg')
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'OPTIONS': {
'read_default_file': CONFIG_PATH,
},
}
}
# Custom User auth
AUTH_USER_MODEL = 'users.User'
# where @login_required will redirect to
LOGIN_URL = '/users/login/'
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Taipei'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media')
MEDIA_URL = '/media/'
# django-axes 1.3.8 configurations
# https://pypi.python.org/pypi/django-axes/
# redirect to broken page when exceed wrong-try limits
AXES_LOCKOUT_URL = '/users/block_wrong_tries'
# freeze login access for that ip for 0.1*60 = 6 minites
AXES_COOLOFF_TIME = 0.1
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = get_config('email', 'user')
EMAIL_HOST_PASSWORD = get_config('email', 'password')
EMAIL_PORT = 587
# django-ckeditor configurations
CKEDITOR_UPLOAD_PATH = 'uploads/'
CKEDITOR_IMAGE_BACKEND = 'pillow'
CKEDITOR_CONFIGS = {
'default': {
'toolbar': 'full',
},
}
# django-bower settings
BOWER_COMPONENTS_ROOT = os.path.join(PROJECT_ROOT, 'components')
BOWER_INSTALLED_APPS = (
'Chart.js',
'jquery',
'jquery-ui#1.9.2',
'https://github.com/thomaspark/bootswatch.git', # bootswatch
'https://github.com/dimsemenov/Magnific-Popup.git', # Magnific-Popup
'https://github.com/codemirror/CodeMirror.git', # CodeMirror
'http://gregpike.net/demos/bootstrap-file-input/bootstrap.file-input.js', # bootstrap fileinput
'https://github.com/lou/multi-select.git', # multiselect
'https://github.com/riklomas/quicksearch.git', # quicksearch
'https://gantry.googlecode.com/svn/trunk/root/js/jquery.url.min.js', # jquery url plugin
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'djangobower.finders.BowerFinder',
)
#maximum of public users for a single contest
MAX_PUBLIC_USER = 200
#public user username prefix
PUBLIC_USER_PREFIX = "TEAM"
PUBLIC_USER_DEFAULT_PASSWORD = "000"
| mit | -503,740,949,557,418,000 | 25.706587 | 99 | 0.703587 | false |
gotcha/testcounter | counterpartylib/lib/messages/dividend.py | 1 | 8391 | #! /usr/bin/python3
"""Pay out dividends."""
import struct
import decimal
D = decimal.Decimal
import logging
logger = logging.getLogger(__name__)
from counterpartylib.lib import (config, exceptions, util)
FORMAT_1 = '>QQ'
LENGTH_1 = 8 + 8
FORMAT_2 = '>QQQ'
LENGTH_2 = 8 + 8 + 8
ID = 50
def initialise (db):
cursor = db.cursor()
cursor.execute('''CREATE TABLE IF NOT EXISTS dividends(
tx_index INTEGER PRIMARY KEY,
tx_hash TEXT UNIQUE,
block_index INTEGER,
source TEXT,
asset TEXT,
dividend_asset TEXT,
quantity_per_unit INTEGER,
fee_paid INTEGER,
status TEXT,
FOREIGN KEY (tx_index, tx_hash, block_index) REFERENCES transactions(tx_index, tx_hash, block_index))
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
block_index_idx ON dividends (block_index)
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
source_idx ON dividends (source)
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
asset_idx ON dividends (asset)
''')
def validate (db, source, quantity_per_unit, asset, dividend_asset, block_index):
cursor = db.cursor()
problems = []
if asset == config.BTC:
problems.append('cannot pay dividends to holders of {}'.format(config.BTC))
if asset == config.XCP:
if (not block_index >= 317500) or block_index >= 320000 or config.TESTNET: # Protocol change.
problems.append('cannot pay dividends to holders of {}'.format(config.XCP))
if quantity_per_unit <= 0: problems.append('non‐positive quantity per unit')
# Examine asset.
issuances = list(cursor.execute('''SELECT * FROM issuances WHERE (status = ? AND asset = ?) ORDER BY tx_index ASC''', ('valid', asset)))
if not issuances:
problems.append('no such asset, {}.'.format(asset))
return None, None, problems, 0
divisible = issuances[0]['divisible']
# Only issuer can pay dividends.
if block_index >= 320000 or config.TESTNET: # Protocol change.
if issuances[-1]['issuer'] != source:
problems.append('only issuer can pay dividends')
# Examine dividend asset.
if dividend_asset in (config.BTC, config.XCP):
dividend_divisible = True
else:
issuances = list(cursor.execute('''SELECT * FROM issuances WHERE (status = ? AND asset = ?)''', ('valid', dividend_asset)))
if not issuances:
problems.append('no such dividend asset, {}.'.format(dividend_asset))
return None, None, problems, 0
dividend_divisible = issuances[0]['divisible']
# Calculate dividend quantities.
holders = util.holders(db, asset)
outputs = []
addresses = []
dividend_total = 0
for holder in holders:
if block_index < 294500 and not config.TESTNET: # Protocol change.
if holder['escrow']: continue
address = holder['address']
address_quantity = holder['address_quantity']
if block_index >= 296000 or config.TESTNET: # Protocol change.
if address == source: continue
dividend_quantity = address_quantity * quantity_per_unit
if divisible: dividend_quantity /= config.UNIT
if not dividend_divisible: dividend_quantity /= config.UNIT
if dividend_asset == config.BTC and dividend_quantity < config.DEFAULT_MULTISIG_DUST_SIZE: continue # A bit hackish.
dividend_quantity = int(dividend_quantity)
outputs.append({'address': address, 'address_quantity': address_quantity, 'dividend_quantity': dividend_quantity})
addresses.append(address)
dividend_total += dividend_quantity
if not dividend_total: problems.append('zero dividend')
if dividend_asset != config.BTC:
balances = list(cursor.execute('''SELECT * FROM balances WHERE (address = ? AND asset = ?)''', (source, dividend_asset)))
if not balances or balances[0]['quantity'] < dividend_total:
problems.append('insufficient funds ({})'.format(dividend_asset))
fee = 0
if not problems and dividend_asset != config.BTC:
holder_count = len(set(addresses))
if block_index >= 330000 or config.TESTNET: # Protocol change.
fee = int(0.0002 * config.UNIT * holder_count)
if fee:
balances = list(cursor.execute('''SELECT * FROM balances WHERE (address = ? AND asset = ?)''', (source, config.XCP)))
if not balances or balances[0]['quantity'] < fee:
problems.append('insufficient funds ({})'.format(config.XCP))
cursor.close()
return dividend_total, outputs, problems, fee
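# Worked example for the arithmetic above (added for clarity): with a divisible
# asset and a divisible dividend asset, an address holding 15 units
# (address_quantity = 15 * config.UNIT) and quantity_per_unit = 0.5 * config.UNIT
# receives 15 * config.UNIT * 0.5 * config.UNIT / config.UNIT = 7.5 * config.UNIT
# base units, i.e. 7.5 units of the dividend asset.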
def compose (db, source, quantity_per_unit, asset, dividend_asset):
dividend_total, outputs, problems, fee = validate(db, source, quantity_per_unit, asset, dividend_asset, util.CURRENT_BLOCK_INDEX)
if problems: raise exceptions.ComposeError(problems)
logger.info('Total quantity to be distributed in dividends: {} {}'.format(util.value_out(db, dividend_total, dividend_asset), dividend_asset))
if dividend_asset == config.BTC:
return (source, [(output['address'], output['dividend_quantity']) for output in outputs], None)
asset_id = util.get_asset_id(db, asset, util.CURRENT_BLOCK_INDEX)
dividend_asset_id = util.get_asset_id(db, dividend_asset, util.CURRENT_BLOCK_INDEX)
data = struct.pack(config.TXTYPE_FORMAT, ID)
data += struct.pack(FORMAT_2, quantity_per_unit, asset_id, dividend_asset_id)
return (source, [], data)
def parse (db, tx, message):
dividend_parse_cursor = db.cursor()
# Unpack message.
try:
if (tx['block_index'] > 288150 or config.TESTNET) and len(message) == LENGTH_2:
quantity_per_unit, asset_id, dividend_asset_id = struct.unpack(FORMAT_2, message)
asset = util.get_asset_name(db, asset_id, tx['block_index'])
dividend_asset = util.get_asset_name(db, dividend_asset_id, tx['block_index'])
status = 'valid'
elif len(message) == LENGTH_1:
quantity_per_unit, asset_id = struct.unpack(FORMAT_1, message)
asset = util.get_asset_name(db, asset_id, tx['block_index'])
dividend_asset = config.XCP
status = 'valid'
else:
raise exceptions.UnpackError
except (exceptions.UnpackError, exceptions.AssetNameError, struct.error) as e:
dividend_asset, quantity_per_unit, asset = None, None, None
status = 'invalid: could not unpack'
if dividend_asset == config.BTC:
status = 'invalid: cannot pay {} dividends within protocol'.format(config.BTC)
if status == 'valid':
# For SQLite3
quantity_per_unit = min(quantity_per_unit, config.MAX_INT)
dividend_total, outputs, problems, fee = validate(db, tx['source'], quantity_per_unit, asset, dividend_asset, block_index=tx['block_index'])
if problems: status = 'invalid: ' + '; '.join(problems)
if status == 'valid':
# Debit.
util.debit(db, tx['source'], dividend_asset, dividend_total, action='dividend', event=tx['tx_hash'])
if tx['block_index'] >= 330000 or config.TESTNET: # Protocol change.
util.debit(db, tx['source'], config.XCP, fee, action='dividend fee', event=tx['tx_hash'])
# Credit.
for output in outputs:
util.credit(db, output['address'], dividend_asset, output['dividend_quantity'], action='dividend', event=tx['tx_hash'])
# Add parsed transaction to message-type–specific table.
bindings = {
'tx_index': tx['tx_index'],
'tx_hash': tx['tx_hash'],
'block_index': tx['block_index'],
'source': tx['source'],
'asset': asset,
'dividend_asset': dividend_asset,
'quantity_per_unit': quantity_per_unit,
'fee_paid': fee,
'status': status,
}
sql='insert into dividends values(:tx_index, :tx_hash, :block_index, :source, :asset, :dividend_asset, :quantity_per_unit, :fee_paid, :status)'
dividend_parse_cursor.execute(sql, bindings)
dividend_parse_cursor.close()
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| mit | 3,948,772,545,682,078,000 | 42.010256 | 148 | 0.619888 | false |
GammaC0de/pyload | src/pyload/plugins/decrypters/GoogledriveComFolder.py | 1 | 5023 | # -*- coding: utf-8 -*-
import json
from pyload.core.network.http.exceptions import BadHeader
from ..base.decrypter import BaseDecrypter
class GoogledriveComFolder(BaseDecrypter):
__name__ = "GoogledriveComFolder"
__type__ = "decrypter"
__version__ = "0.12"
__status__ = "testing"
__pyload_version__ = "0.5"
__pattern__ = r"https?://(?:www\.)?drive\.google\.com/(?:folderview\?.*id=|drive/(?:.+?/)?folders/)(?P<ID>[-\w]+)"
__config__ = [
("enabled", "bool", "Activated", True),
("use_premium", "bool", "Use premium account if available", True),
(
"folder_per_package",
"Default;Yes;No",
"Create folder for each package",
"Default",
),
("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10),
("dl_subfolders", "bool", "Download subfolders", False),
("package_subfolder", "bool", "Subfolder as a seperate package", False),
]
__description__ = """Drive.google.com folder decrypter plugin"""
__license__ = "GPLv3"
__authors__ = [
("Walter Purcaro", "[email protected]"),
("GammaC0de", "nitzo2001[AT]yahoo[DOT]com"),
]
NAME_PATTERN = r"folderName: '(?P<N>.+?)'"
OFFLINE_PATTERN = r"<TITLE>"
API_URL = "https://www.googleapis.com/drive/v3/"
API_KEY = "AIzaSyAcA9c4evtwSY1ifuvzo6HKBkeot5Bk_U4"
def api_response(self, cmd, **kwargs):
kwargs["key"] = self.API_KEY
try:
json_data = json.loads(
self.load("{}{}".format(self.API_URL, cmd), get=kwargs)
)
self.log_debug(f"API response: {json_data}")
return json_data
except BadHeader as exc:
try:
json_data = json.loads(exc.content)
self.log_error(
"API Error: {}".format(cmd),
json_data["error"]["message"],
"ID: {}".format(self.info["pattern"]["ID"]),
"Error code: {}".format(exc.code),
)
except ValueError:
self.log_error(
"API Error: {}".format(cmd),
exc,
"ID: {}".format(self.info["pattern"]["ID"]),
"Error code: {}".format(exc.code),
)
return None
def enum_folder(self, folder_id):
links = []
json_data = self.api_response(
"files",
q="'{}' in parents".format(folder_id),
pageSize=100,
fields="files/id,files/mimeType,nextPageToken",
)
if json_data is None:
self.fail("API error")
if "error" in json_data:
self.fail(json_data["error"]["message"])
for f in json_data.get("files", []):
if f["mimeType"] != "application/vnd.google-apps.folder":
links.append("https://drive.google.com/file/d/" + f["id"])
elif self.config.get("dl_subfolders"):
if self.config.get("package_subfolder"):
links.append("https://drive.google.com/drive/folders/" + f["id"])
else:
links.extend(self.enum_folder(f["id"]))
next_page = json_data.get("nextPageToken", None)
while next_page:
json_data = self.api_response(
"files",
q="'{}' in parents".format(folder_id),
pageToken=next_page,
pageSize=100,
fields="files/id,files/mimeType,nextPageToken",
)
if json_data is None:
self.fail("API error")
if "error" in json_data:
self.fail(json_data["error"]["message"])
for f in json_data.get("files", []):
if f["mimeType"] != "application/vnd.google-apps.folder":
links.append("https://drive.google.com/file/d/" + f["id"])
elif self.config.get("dl_subfolders"):
if self.config.get("package_subfolder"):
links.append(
"https://drive.google.com/drive/folders/" + f["id"]
)
else:
links.extend(self.enum_folder(f["id"]))
next_page = json_data.get("nextPageToken", None)
return links
def decrypt(self, pyfile):
links = []
json_data = self.api_response("files/{}".format(self.info["pattern"]["ID"]))
if json_data is None:
self.fail("API error")
if "error" in json_data:
if json_data["error"]["code"] == 404:
self.offline()
else:
self.fail(json_data["error"]["message"])
pack_name = json_data.get("name", pyfile.package().name)
links = self.enum_folder(self.info["pattern"]["ID"])
if links:
self.packages = [(pack_name, links, pack_name)]
| agpl-3.0 | -5,063,890,733,911,918,000 | 32.046053 | 118 | 0.495122 | false |
iw3hxn/LibrERP | project_extended/__openerp__.py | 1 | 1877 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2013-2016 Didotech SRL
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Project Extended',
'version': '3.17.28.32',
'category': 'Generic Modules/Projects & Services',
'description': """Tasks list on a dedicated tab on the project form
""",
'author': 'Didotech SRL',
'website': 'http://www.didotech.com',
'license': 'AGPL-3',
"depends": [
'account',
'project',
'sale',
'project_timesheet',
'task_time_control',
'res_users_helper_functions',
'core_extended',
'res_users_kanban',
'mrp',
'hr_timesheet_invoice',
],
"data": [
'security/security.xml',
'security/ir.model.access.csv',
'views/account_analytic_line.xml',
'views/project.xml',
'views/project_task.xml',
'views/project_view_menu.xml',
'views/res_partner.xml',
'views/project_task_work.xml',
'views/account_analytic_account.xml',
],
"active": False,
"installable": True
}
| agpl-3.0 | 9,003,898,129,431,591,000 | 33.759259 | 80 | 0.566329 | false |
mpeyrotc/govector | condominios/forms.py | 1 | 2051 | # coding=utf-8
from django import forms
import pyodbc
class LoginForm(forms.Form):
username = forms.CharField(label="Nombre de Usuario", max_length=50)
password = forms.CharField(label="Contraseña", widget=forms.PasswordInput)
widgets = {
"password" : forms.PasswordInput(),
}
def clean(self):
cleaned_data = super(LoginForm, self).clean()
username = cleaned_data.get("username")
password = cleaned_data.get("password")
try:
server = 'CINTERMEX2003'
database = 'Condominos'
usernameDB = 'sa'
passwordDB = 'sis2333'
driver = '{ODBC Driver 13 for SQL Server}'
cnxn = pyodbc.connect(
'DRIVER=' + driver + ';PORT=61451;SERVER=' + server + ';PORT=1443;DATABASE=' + database + ';UID=' + usernameDB + ';PWD=' + passwordDB)
cur = cnxn.cursor()
querystring = "SELECT Username, Password, RFC FROM Usuarios WHERE Username = '{username}' AND Password = '{password}'".format(username=username, password=password)
cur.execute(querystring)
nombreusuario = cur.fetchall()
cur.close()
if nombreusuario[0][0] == username and nombreusuario[0][1] == password:
return cleaned_data
else:
raise forms.ValidationError("Usuario o contraseña invalidos")
        except forms.ValidationError:
            # Re-raise the invalid-credentials error instead of masking it below.
            raise
        except Exception:
            raise forms.ValidationError("El nombre de usuario no existe")
return cleaned_data
def clean_username(self):
username = self.cleaned_data.get("username")
if not username:
raise forms.ValidationError("Proporcione un nombre de usuario")
return username
def clean_password(self):
password = self.cleaned_data.get("password")
if not password:
raise forms.ValidationError("Proporcione una contraseña")
return password | mit | -3,781,641,314,589,348,400 | 33.15 | 175 | 0.571777 | false |
kartvep/Combaine | combaine/plugins/DistributedStorage/MongoReplicaSet.py | 1 | 2716 | from __AbstractStorage import AbstractDistributedStorage
import pymongo
import hashlib
import time
class MongoReplicaSet(AbstractDistributedStorage):
def __init__(self, **config):
self.hosts = config['hosts']
self.rs = None
self.db = None
self.coll_name = None
self.cache_key_list = list()
def connect(self, namespace):
try:
self.rs = pymongo.Connection(self.hosts, fsync=True)
db, collection = namespace.split('/')
self.coll_name = collection
self.db = self.rs[db]
if collection in self.db.collection_names():
if not self.db[collection].options().get("capped"):
self.db.drop_collection(collection)
self.db.create_collection(collection, capped=True, size=500000000, max=2000)
else:
self.db.create_collection(collection, capped=True, size=500000000, max=2000)
self.db_cursor = self.db[collection]
self.db_cursor.ensure_index("_id")
except Exception, err:
print str(err)
return False
else:
return True
def close(self):
try:
self.rs.close()
except Exception, err:
print err
return False
else:
return True
def insert(self, key, data):
try:
print key
_id = hashlib.md5(key).hexdigest()
print data
value = {"_id" : _id, "key" : key, "value" : data, "time" : int(time.time()) }
#print self.db_cursor.insert(value, continue_on_error=True, w=0, manipulate=False)
print self.db_cursor.save(value, continue_on_error=True, w=1, manipulate=False)
except Exception, err:
print str(err)
return False
else:
return True
def read(self, key, cache=False):
try:
_id = hashlib.md5(key).hexdigest()
ret = self.db_cursor.find_one({"_id" : _id }, fields={"key" : False, "_id" : False, "time" : False})
if ret is not None:
if cache:
self.cache_key_list.append(key)
return ret["value"]
else:
return []
except Exception as err:
print str(err)
return []
def remove(self, key):
try:
return "OK" #for capped
_id = hashlib.md5(key).hexdigest()
return str(self.db_cursor.remove(_id, w=1))
except Exception as err:
print str(err)
return False
else:
return True
PLUGIN_CLASS = MongoReplicaSet
| lgpl-3.0 | 6,870,602,319,103,538,000 | 31.333333 | 112 | 0.529823 | false |
claymation/django-zendesk | djzendesk/tests.py | 1 | 3695 | import base64
import mock
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test import TestCase
def http_basic_auth_string(username, password):
credentials = base64.encodestring('%s:%s' % (username, password)).strip()
auth_string = 'Basic %s' % credentials
return auth_string
@mock.patch.object(settings, 'ZENDESK_CALLBACK_USERNAME', 'foo')
@mock.patch.object(settings, 'ZENDESK_CALLBACK_PASSWORD', 'bar')
@mock.patch('djzendesk.views.target_callback_received')
class DjangoZendeskTestCase(TestCase):
def test_view_requires_post(self, mock_target_callback_received):
url = reverse('zendesk:callback', kwargs={'ticket_id': '123'})
# Test GET
response = self.client.get(url, {'message': 'Hello, world!'})
self.assertEqual(response.status_code, 405)
# Test PUT
response = self.client.put(url, {'message': 'Hello, world!'})
self.assertEqual(response.status_code, 405)
# Test DELETE
response = self.client.delete(url, {'message': 'Hello, world!'})
self.assertEqual(response.status_code, 405)
# Test HEAD
response = self.client.head(url, {'message': 'Hello, world!'})
self.assertEqual(response.status_code, 405)
# Test POST
response = self.client.post(url, {'message': 'Hello, world!'})
self.assertNotEqual(response.status_code, 405)
def test_view_requires_authentication(self, mock_target_callback_received):
url = reverse('zendesk:callback', kwargs={'ticket_id': '123'})
# Test no credentials
response = self.client.post(url, {'message': 'Hello, world!'})
self.assertEqual(response.status_code, 403)
# Test wrong credentials
auth_string = http_basic_auth_string(username='foo', password='bad')
response = self.client.post(url, {'message': 'Hello, world!'}, HTTP_AUTHORIZATION=auth_string)
self.assertEqual(response.status_code, 403)
# Test correct credentials
auth_string = http_basic_auth_string(username='foo', password='bar')
response = self.client.post(url, {'message': 'Hello, world!'}, HTTP_AUTHORIZATION=auth_string)
self.assertNotEqual(response.status_code, 403)
def test_view_requires_message(self, mock_target_callback_received):
url = reverse('zendesk:callback', kwargs={'ticket_id': '123'})
auth_string = http_basic_auth_string(username='foo', password='bar')
# Test without message
response = self.client.post(url, {'blah': 'blah'}, HTTP_AUTHORIZATION=auth_string)
self.assertEqual(response.status_code, 400)
# Test with message
response = self.client.post(url, {'message': 'Hello, world!'}, HTTP_AUTHORIZATION=auth_string)
self.assertNotEqual(response.status_code, 400)
def test_view_ok(self, mock_target_callback_received):
url = reverse('zendesk:callback', kwargs={'ticket_id': '123'})
auth_string = http_basic_auth_string(username='foo', password='bar')
response = self.client.post(url, {'message': 'Hello, world!'}, HTTP_AUTHORIZATION=auth_string)
self.assertContains(response, 'OK')
def test_view_sends_signal(self, mock_target_callback_received):
url = reverse('zendesk:callback', kwargs={'ticket_id': '123'})
auth_string = http_basic_auth_string(username='foo', password='bar')
response = self.client.post(url, {'message': 'Hello, world!'}, HTTP_AUTHORIZATION=auth_string)
mock_target_callback_received.send.assert_called_once_with(
sender=None,
ticket_id='123',
message='Hello, world!',
)
| bsd-3-clause | -4,604,251,067,905,810,000 | 42.470588 | 102 | 0.658457 | false |
taigaio/taiga-back | taiga/users/migrations/0001_initial.py | 1 | 3199 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
import re
import django.core.validators
import taiga.users.models
class Migration(migrations.Migration):
dependencies = [
("contenttypes", "0001_initial"),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(max_length=30, help_text='Required. 30 characters or fewer. Letters, numbers and /./-/_ characters', verbose_name='username', unique=True, validators=[django.core.validators.RegexValidator(re.compile('^[\\w.-]+$', 32), 'Enter a valid username.', 'invalid')])),
('email', models.EmailField(max_length=75, blank=True, verbose_name='email address')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('full_name', models.CharField(max_length=256, blank=True, verbose_name='full name')),
('color', models.CharField(default=taiga.users.models.generate_random_hex_color, max_length=9, blank=True, verbose_name='color')),
('bio', models.TextField(default='', blank=True, verbose_name='biography')),
('photo', models.FileField(null=True, max_length=500, blank=True, verbose_name='photo', upload_to='users/photo')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('default_language', models.CharField(default='', max_length=20, blank=True, verbose_name='default language')),
('default_timezone', models.CharField(default='', max_length=20, blank=True, verbose_name='default timezone')),
('colorize_tags', models.BooleanField(default=False, verbose_name='colorize tags')),
('token', models.CharField(default=None, max_length=200, blank=True, verbose_name='token', null=True)),
('email_token', models.CharField(default=None, max_length=200, blank=True, verbose_name='email token', null=True)),
('new_email', models.EmailField(null=True, max_length=75, blank=True, verbose_name='new email address')),
('github_id', models.IntegerField(null=True, blank=True, verbose_name='github ID')),
],
options={
'verbose_name_plural': 'users',
'verbose_name': 'user',
'ordering': ['username'],
},
bases=(models.Model,),
),
]
| agpl-3.0 | 7,655,461,915,151,665,000 | 65.645833 | 306 | 0.632385 | false |
tiffanyj41/hermes | src/utils/lastfm_etl/lastfm.py | 3 | 8114 | #!/usr/bin/env python
"""Translate the Last.fm data files to JSON.
This script takes the various Last.fm data files and writes them out as
JSON. It removes the Last.fm artist URLs.
Attributes:
ARTISTS (dict): A dictionary that stores information about the artists. The
variables are as follows:
- artist_id (int): A unique identifier for each artist.
- name (str): The name of the artist.
FRIENDS (dict): A dictionary that stores information about the friends
graph. The variables are as follows:
- user_id (int): A unique identifier for each user.
- friend_user_id (int): A unique identifier of a user on the
friends list.
TAGS (dict): A dictionary that stores information about the tags. The
variables are as follows:
- tag_id (int): A unique identifier for each tag.
        - name (str): The name of the tag.
PLAYS (dict): A dictionary that stores information about the number of
plays by each user. The variables are as follows:
- user_id (int): A unique identifier for each user.
- artist_id (int): A unique identifier for each artist.
- plays (int): The number of plays by the user of the artist.
APPLIED_TAGS (dict): A dictionary that stores information about the tags
various users applied to various artists. The variables are as follows:
- user_id (int): A unique identifier for each user.
- artist_id (int): A unique identifier for each artist.
- tag_id (int): A unique identifier for each tag.
- day (int): The day the tag was added.
- month (int): The month the tag was added.
- year (int): The year the tag was added.
"""
from copy import deepcopy
import json
import csv
# JSON objects
ARTISTS = {
"artist_id": None,
"name": None,
}
FRIENDS = {
"user_id": None,
"friend_user_id": None,
}
TAGS = {
"tag_id": None,
"name": None,
}
PLAYS = {
"user_id": None,
"artist_id": None,
"plays": None,
}
APPLIED_TAGS = {
"user_id": None,
"artist_id": None,
"tag_id": None,
"day": None,
"month": None,
"year": None,
}
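# For illustration only (the values below are hypothetical, not taken from the
# dataset): each template above is filled in and written as one JSON object per
# line, so an output line might look like
#     {"user_id": 2, "artist_id": 51, "plays": 13883}
# for PLAYS, or
#     {"tag_id": 1, "name": "metal"}
# for TAGS.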
def convert_str(string):
"""Convert a string from 'iso-8859-1' to 'utf8'."""
return string.decode('iso-8859-1').encode('utf8')
def iter_lines(open_file):
"""Open the Last.fm CSVs and return an iterator over the lines.
Args:
open_file: A file handle object from open().
    Returns:
iterator: An iterator over each line in the file. Each line is a list,
with string elements for each column value.
"""
reader = csv.reader(
open_file,
delimiter='\t',
)
next(reader) # Skip the header
return reader
def parse_artist_line(line):
"""Parse a line from the Artist CSV file.
A line is a list of strings as follows:
line = [
artist_id,
name,
band_url,
band_photo_url,
]
Args:
lines (list): A list of strings as described above.
Returns:
dict: A dictionary containing the keys "artist_id" and
"name".
"""
(artist_id, name, _, _) = line
current_artist = deepcopy(ARTISTS)
current_artist["artist_id"] = int(artist_id)
current_artist["name"] = name
return current_artist
def parse_friends_line(line):
"""Parse a line from the Friends CSV file.
A line is a list of strings as follows:
line = [
user_id,
user_id_of_friend,
]
Args:
lines (list): A list of strings as described above.
Returns:
dict: A dictionary containing the keys "user_id" and "friend_user_id".
"""
(user_id, friend_id) = line
current_friend = deepcopy(FRIENDS)
current_friend["user_id"] = int(user_id)
current_friend["friend_user_id"] = int(friend_id)
return current_friend
def parse_tag_line(line):
"""Parse a line from the Tag CSV file.
A line is a list of strings as follows:
line = [
tag_id,
tag,
]
Args:
lines (list): A list of strings as described above.
Returns:
dict: A dictionary containing the keys "tag_id" and "tag".
"""
(tag_id, tag) = line
current_tag = deepcopy(TAGS)
current_tag["tag_id"] = int(tag_id)
current_tag["name"] = convert_str(tag)
return current_tag
def parse_applied_tag_line(line):
"""Parse a line from the Applied Tags CSV file.
A line is a list of strings as follows:
line = [
user_id,
artist_id,
tag_id,
day,
month,
year,
]
Args:
lines (list): A list of strings as described above.
Returns:
dict: A dictionary containing the keys "user_id", "artist_id",
"tag_id", "day", "month", and "year".
"""
(user_id, artist_id, tag_id, day, month, year) = line
current_tag = deepcopy(APPLIED_TAGS)
current_tag["user_id"] = int(user_id)
current_tag["artist_id"] = int(artist_id)
current_tag["tag_id"] = int(tag_id)
current_tag["day"] = int(day)
current_tag["month"] = int(month)
current_tag["year"] = int(year)
return current_tag
def parse_plays_line(line):
"""Parse a line from the Played Artists CSV file.
A line is a list of strings as follows:
line = [
user_id,
artist_id,
play_count,
]
Args:
lines (list): A list of strings as described above.
Returns:
dict: A dictionary containing the keys "user_id", "artist_id", and
"plays".
"""
(user_id, artist_id, plays) = line
current_plays = deepcopy(PLAYS)
current_plays["user_id"] = int(user_id)
current_plays["artist_id"] = int(artist_id)
current_plays["plays"] = int(plays)
return current_plays
if __name__ == "__main__":
import argparse
# Set up command line flag handling
parser = argparse.ArgumentParser(
description="Transform the Last.FM datasets to JSON",
)
parser.add_argument(
'artists',
type=str,
help="the file containing the artists, normally 'artists.dat'",
)
parser.add_argument(
'tags',
type=str,
help="the file containing the tags, normally 'tags.dat'",
)
parser.add_argument(
'friends',
type=str,
help="the file containing the friends graph, normally 'user_friends.dat'",
)
parser.add_argument(
'applied_tags',
type=str,
help="the file containing the applied tags, normally 'user_taggedartists.dat'",
)
parser.add_argument(
'plays',
type=str,
help="the file containing the play counts, normally 'user_artists.dat'",
)
parser.add_argument(
'-o',
'--output-directory',
type=str,
action="store",
help="the directory to save the output JSON files, by default the current directory",
default="./",
)
args = parser.parse_args()
# Parse the files
processing_queue = (
(args.artists, args.output_directory + "/lastfm_artists.json", parse_artist_line),
(args.tags, args.output_directory + "/lastfm_tags.json", parse_tag_line),
(args.friends, args.output_directory + "/lastfm_friends.json", parse_friends_line),
(args.applied_tags, args.output_directory + "/lastfm_applied_tags.json", parse_applied_tag_line),
(args.plays, args.output_directory + "/lastfm_plays.json", parse_plays_line),
)
for input_file, output_file, function in processing_queue:
with open(input_file, 'rb') as csv_file, open(output_file, 'w') as json_file:
for row in iter_lines(csv_file):
json_file.write(json.dumps(function(row)) + '\n')
| apache-2.0 | 1,649,158,540,482,051,300 | 26.137124 | 105 | 0.579123 | false |
manyunkai/tweixin | weixin/docking/utils/menu.py | 1 | 8631 | # -*-coding:utf-8 -*-
"""
Created on 11/26/2015
@author: Danny<[email protected]>
DannyWork Project.
"""
import json
import tornado.gen
from .request import request_async
from .token import get_access_token
from exception.request import GetSelfMenuFailed
from docking.models.material import Material, NewsMessageItem, NewsMessage, NewsMessageItemsMapping, MaterialNewsMapping
from docking.models.menu import Menu
from docking.models.event import EventRule
from docking.utils.generic import generate_random_key
SELFMENU_INFO_URL = 'https://api.weixin.qq.com/cgi-bin/get_current_selfmenu_info'
@tornado.gen.coroutine
def set_menu_with_materials(account, agent, buttons, parent=0):
for data in buttons:
btype = data.get('type')
menu_params = {
'account_id': account.id,
'account_agent_id': agent.id if agent else 0,
'parent_id': parent,
'name': data['name'],
}
if btype:
if btype == 'view':
menu_params.update({
'type': 'view',
'url': data['url']
})
elif btype in ['click', 'scancode_push', 'scancode_waitmsg', 'pic_sysphoto',
'pic_photo_or_album', 'pic_weixin', 'location_select']:
menu_params.update({
'type': btype,
'key': data['key'][:16]
})
elif btype in ['media_id', 'view_limited']:
menu_params.update({
'type': btype,
'media_id': data['media_id']
})
elif btype == 'text':
params = {
'account_id': account.id,
'account_agent_id': agent.id if agent else 0,
'type': 'T',
'description': data['value']
}
material = yield Material(alias='文本消息', title='文本消息', **params).get_or_create(**params)
key = generate_random_key()
params = {
'account_id': account.id,
'account_agent_id': agent.id if agent else 0,
'key': key,
'material_id': material.id
}
yield EventRule(**params).get_or_create(**params)
menu_params.update({
'type': 'click',
'key': key
})
elif btype == 'img':
params = {
'account_id': account.id,
'account_agent_id': agent.id if agent else 0,
'type': 'P',
'media_id': data['value']
}
material = yield Material(alias='远程图片', **params).get_or_create(**params)
key = generate_random_key()
params = {
'account_id': account.id,
'account_agent_id': agent.id if agent else 0,
'key': key,
'material_id': material.id
}
yield EventRule(**params).get_or_create(**params)
menu_params.update({
'type': 'click',
'key': key
})
elif btype == 'voice':
params = {
'account_id': account.id,
'account_agent_id': agent.id if agent else 0,
'type': 'V',
'media_id': data['value']
}
material = yield Material(alias='远程语音', **params).get_or_create(**params)
key = generate_random_key()
params = {
'account_id': account.id,
'account_agent_id': agent.id if agent else 0,
'key': key,
'material_id': material.id
}
yield EventRule(**params).get_or_create(**params)
menu_params.update({
'type': 'click',
'key': key
})
elif btype == 'video':
params = {
'account_id': account.id,
'account_agent_id': agent.id if agent else 0,
'type': 'F',
'fltype': 'R',
'file': data['value']
}
material = yield Material(alias='远程视频', **params).get_or_create(**params)
key = generate_random_key()
params = {
'account_id': account.id,
'account_agent_id': agent.id if agent else 0,
'key': key,
'material_id': material.id
}
yield EventRule(**params).get_or_create(**params)
menu_params.update({
'type': 'click',
'key': key
})
elif btype == 'news':
news = yield NewsMessage(account_id=account.id,
account_agent_id=agent.id if agent else 0,
alias='图文消息').get_or_create()
ordering = 1
for item in data['news_info']['list']:
params = {
'account_id': account.id,
'account_agent_id': agent.id if agent else 0,
'title': item['title'][:16],
'description': item['digest'],
'pltype': 'R',
'pic_large': item['cover_url'],
'pic_small': item['cover_url'],
'url': item['content_url']
}
item = yield NewsMessageItem(**params).get_or_create()
yield NewsMessageItemsMapping(news_id=news.id, item_id=item.id, ordering=ordering).get_or_create(news_id=news.id, item_id=item.id)
ordering += 1
params = {
'account_id': account.id,
'account_agent_id': agent.id if agent else 0,
'type': 'N',
}
material = yield Material(alias='图文消息', **params).get_or_create(**params)
params = {
'material_id': material.id,
'news_id': news.id,
}
yield MaterialNewsMapping(**params).get_or_create(**params)
key = generate_random_key()
params = {
'account_id': account.id,
'account_agent_id': agent.id if agent else 0,
'key': key,
'material_id': material.id
}
yield EventRule(**params).get_or_create(**params)
menu_params.update({
'type': 'click',
'key': key
})
print('menu_params:', menu_params)
yield Menu(**menu_params).get_or_create(**menu_params)
else:
menu_params['type'] = 'parent'
menu = yield Menu(**menu_params).get_or_create(**menu_params)
yield set_menu_with_materials(account, agent, data['sub_button']['list'], parent=menu.id)
@tornado.gen.coroutine
def init_selfmenu_info(account, agent=None):
"""
    Pull the current self-menu configuration from the WeChat API.
    Note that this method will overwrite the existing stored menus.
:param account: docking.models.Account instance.
:param agent: docking.models.Agent instance.
    :return: True if successful.
"""
access_token = yield get_access_token(account)
params = {
'access_token': access_token
}
response = yield request_async(SELFMENU_INFO_URL, params)
try:
data = json.loads(response.decode('utf8'))
except AttributeError:
raise GetSelfMenuFailed('Error in decoding response data.')
except (TypeError, ValueError) as e:
raise GetSelfMenuFailed('Error in parsing response data: {0}.'.format(e))
# Clear existed menus.
query = {
'account_id': account.id
}
if agent:
query['account_agent_id'] = agent.id
yield Menu(**query).delete()
# Parse the new menus.
try:
yield set_menu_with_materials(account, agent, data['selfmenu_info']['button'])
except Exception as e:
raise GetSelfMenuFailed('Error in parsing response data: {0}.'.format(str(e)))
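# A minimal usage sketch (the caller below is hypothetical; ``account`` and
# ``agent`` are the docking model instances described in the docstring above):
#
# @tornado.gen.coroutine
# def refresh_menus(account, agent=None):
#     yield init_selfmenu_info(account, agent)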
| gpl-2.0 | 6,789,176,383,028,225,000 | 35.645299 | 150 | 0.464723 | false |
Hobsons/hippo | data_sources/redis_queue.py | 1 | 1363 | import redis
from data_sources.hippo_base import HippoDataSource
class RedisQueue(HippoDataSource):
namespace = 'redis'
label = 'Redis Queue'
inputs = {
'host': {'input':'text','label':'Redis Host'},
'port': {'input':'number','label':'Redis Port','default':6379},
'db' : {'input':'number','label':'Redis DB','default':0},
'name': {'input':'text','label':'Redis Queue Key Name'}
}
def __init__(self, *args):
super().__init__(*args, namespace=RedisQueue.namespace, inputs=RedisQueue.inputs)
def process(self):
if not self.name or not self.host:
return
redis_client = redis.StrictRedis(host=self.host, port=int(self.port), db=int(self.db))
count = 0
list_name = self.name
limbo_list_name = 'hippo:queue:' + list_name + '_limbo'
limbo_items = redis_client.lrange(limbo_list_name,0,-1)
if limbo_items:
count = len(limbo_items)
self.create_tasks(limbo_items)
items = []
while count < self.new_task_limit:
i = redis_client.rpoplpush(list_name,limbo_list_name)
if i:
items.append(i)
count += 1
else:
break
if items:
self.create_tasks(items)
redis_client.delete(limbo_list_name) | apache-2.0 | 3,635,951,184,378,449,400 | 30 | 94 | 0.557594 | false |
acrazing/dbapi | dbapi/config.py | 1 | 1038 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2017 The Authors. All Rights Reserved.
# License: MIT.
# Author: acrazing <[email protected]>.
# File: config.
"""
Default configuration.
"""
import logging
import os
import sys
api_config = {
'persist_file': os.path.join(os.path.expanduser("~"), ".__cache__dbapi.json"),
'headers': {
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh,zh-CN;q=0.8,zh-TW;q=0.6,en;q=0.4,en-US;q=0.2',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/538.36 (KHTML, like Gecko) '
'Chrome/57.0.3029.110 Safari/538.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Referer': 'https://www.douban.com/people/junbaoyang/',
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0',
},
'logger': 'dbapi',
'log_level': logging.DEBUG,
'log_destination': sys.stderr,
'timeout': 5.0,
}
| mit | -682,045,576,200,020,600 | 31.1875 | 111 | 0.596117 | false |
zak-k/iris | lib/iris/fileformats/pp.py | 1 | 89450 | # (C) British Crown Copyright 2010 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Provides UK Met Office Post Process (PP) format specific capabilities.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import six
import abc
import collections
from copy import deepcopy
import itertools
import operator
import os
import re
import struct
import warnings
import biggus
import cf_units
import numpy as np
import numpy.ma as ma
import netcdftime
from iris._deprecation import warn_deprecated
import iris.config
import iris.fileformats.rules
import iris.fileformats.pp_rules
import iris.coord_systems
try:
import mo_pack
except ImportError:
mo_pack = None
try:
from iris.fileformats import _old_pp_packing as pp_packing
except ImportError:
pp_packing = None
__all__ = ['load', 'save', 'load_cubes', 'PPField',
'reset_load_rules', 'add_save_rules',
'as_fields', 'load_pairs_from_fields', 'as_pairs',
'save_pairs_from_cube', 'reset_save_rules',
'save_fields', 'STASH', 'EARTH_RADIUS']
EARTH_RADIUS = 6371229.0
# Cube->PP rules are loaded on first use
_save_rules = None
PP_HEADER_DEPTH = 256
PP_WORD_DEPTH = 4
NUM_LONG_HEADERS = 45
NUM_FLOAT_HEADERS = 19
# The header definition for header release 2.
#: A list of (header_name, position_in_header(tuple of)) pairs for
#: header release 2 - using the one-based UM/FORTRAN indexing convention.
UM_HEADER_2 = [
('lbyr', (1, )),
('lbmon', (2, )),
('lbdat', (3, )),
('lbhr', (4, )),
('lbmin', (5, )),
('lbday', (6, )),
('lbyrd', (7, )),
('lbmond', (8, )),
('lbdatd', (9, )),
('lbhrd', (10, )),
('lbmind', (11, )),
('lbdayd', (12, )),
('lbtim', (13, )),
('lbft', (14, )),
('lblrec', (15, )),
('lbcode', (16, )),
('lbhem', (17, )),
('lbrow', (18, )),
('lbnpt', (19, )),
('lbext', (20, )),
('lbpack', (21, )),
('lbrel', (22, )),
('lbfc', (23, )),
('lbcfc', (24, )),
('lbproc', (25, )),
('lbvc', (26, )),
('lbrvc', (27, )),
('lbexp', (28, )),
('lbegin', (29, )),
('lbnrec', (30, )),
('lbproj', (31, )),
('lbtyp', (32, )),
('lblev', (33, )),
('lbrsvd', (34, 35, 36, 37, )),
('lbsrce', (38, )),
('lbuser', (39, 40, 41, 42, 43, 44, 45, )),
('brsvd', (46, 47, 48, 49, )),
('bdatum', (50, )),
('bacc', (51, )),
('blev', (52, )),
('brlev', (53, )),
('bhlev', (54, )),
('bhrlev', (55, )),
('bplat', (56, )),
('bplon', (57, )),
('bgor', (58, )),
('bzy', (59, )),
('bdy', (60, )),
('bzx', (61, )),
('bdx', (62, )),
('bmdi', (63, )),
('bmks', (64, )),
]
# The header definition for header release 3.
#: A list of (header_name, position_in_header(tuple of)) pairs for
#: header release 3 - using the one-based UM/FORTRAN indexing convention.
UM_HEADER_3 = [
('lbyr', (1, )),
('lbmon', (2, )),
('lbdat', (3, )),
('lbhr', (4, )),
('lbmin', (5, )),
('lbsec', (6, )),
('lbyrd', (7, )),
('lbmond', (8, )),
('lbdatd', (9, )),
('lbhrd', (10, )),
('lbmind', (11, )),
('lbsecd', (12, )),
('lbtim', (13, )),
('lbft', (14, )),
('lblrec', (15, )),
('lbcode', (16, )),
('lbhem', (17, )),
('lbrow', (18, )),
('lbnpt', (19, )),
('lbext', (20, )),
('lbpack', (21, )),
('lbrel', (22, )),
('lbfc', (23, )),
('lbcfc', (24, )),
('lbproc', (25, )),
('lbvc', (26, )),
('lbrvc', (27, )),
('lbexp', (28, )),
('lbegin', (29, )),
('lbnrec', (30, )),
('lbproj', (31, )),
('lbtyp', (32, )),
('lblev', (33, )),
('lbrsvd', (34, 35, 36, 37, )),
('lbsrce', (38, )),
('lbuser', (39, 40, 41, 42, 43, 44, 45, )),
('brsvd', (46, 47, 48, 49, )),
('bdatum', (50, )),
('bacc', (51, )),
('blev', (52, )),
('brlev', (53, )),
('bhlev', (54, )),
('bhrlev', (55, )),
('bplat', (56, )),
('bplon', (57, )),
('bgor', (58, )),
('bzy', (59, )),
('bdy', (60, )),
('bzx', (61, )),
('bdx', (62, )),
('bmdi', (63, )),
('bmks', (64, )),
]
# A map from header-release-number to header definition
UM_HEADERS = {2: UM_HEADER_2, 3: UM_HEADER_3}
# Offset value to convert from UM_HEADER positions to PP_HEADER offsets.
UM_TO_PP_HEADER_OFFSET = 1
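# For example, using the one-based positions in UM_HEADER_2 above: 'lbyr' at
# UM position 1 maps to zero-based offset 0 in the raw PP header, and 'bmks'
# at position 64 maps to offset 63 (the subtraction is done in _header_defn
# further down).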
#: A dictionary mapping IB values to their names.
EXTRA_DATA = {
1: 'x',
2: 'y',
3: 'lower_y_domain',
4: 'lower_x_domain',
5: 'upper_y_domain',
6: 'upper_x_domain',
7: 'lower_z_domain',
8: 'upper_z_domain',
10: 'field_title',
11: 'domain_title',
12: 'x_lower_bound',
13: 'x_upper_bound',
14: 'y_lower_bound',
15: 'y_upper_bound',
}
#: Maps lbuser[0] to numpy data type. "default" will be interpreted if
#: no match is found, providing a warning in such a case.
LBUSER_DTYPE_LOOKUP = {1: np.dtype('>f4'),
2: np.dtype('>i4'),
3: np.dtype('>i4'),
-1: np.dtype('>f4'),
-2: np.dtype('>i4'),
-3: np.dtype('>i4'),
'default': np.dtype('>f4'),
}
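# A minimal sketch of how such a lookup might be used (the fallback to
# 'default' for unrecognised values, with a warning, happens in the loading
# code; ``field`` here is hypothetical):
#
#     dtype = LBUSER_DTYPE_LOOKUP.get(field.lbuser[0],
#                                     LBUSER_DTYPE_LOOKUP['default'])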
# LBPROC codes and their English equivalents
LBPROC_PAIRS = ((1, "Difference from another experiment"),
(2, "Difference from zonal (or other spatial) mean"),
(4, "Difference from time mean"),
(8, "X-derivative (d/dx)"),
(16, "Y-derivative (d/dy)"),
(32, "Time derivative (d/dt)"),
(64, "Zonal mean field"),
(128, "Time mean field"),
(256, "Product of two fields"),
(512, "Square root of a field"),
(1024, "Difference between fields at levels BLEV and BRLEV"),
(2048, "Mean over layer between levels BLEV and BRLEV"),
(4096, "Minimum value of field during time period"),
(8192, "Maximum value of field during time period"),
(16384, "Magnitude of a vector, not specifically wind speed"),
(32768, "Log10 of a field"),
(65536, "Variance of a field"),
(131072, "Mean over an ensemble of parallel runs"))
# lbproc_map is a dict mapping lbproc->English and English->lbproc,
# essentially a one-to-one mapping
lbproc_map = {x: y for x, y in
itertools.chain(LBPROC_PAIRS, ((y, x) for x, y in LBPROC_PAIRS))}
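# For example, with the pairs above:
#     lbproc_map[128] == "Time mean field"
#     lbproc_map["Time mean field"] == 128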
class STASH(collections.namedtuple('STASH', 'model section item')):
"""
A class to hold a single STASH code.
Create instances using:
>>> model = 1
>>> section = 2
>>> item = 3
>>> my_stash = iris.fileformats.pp.STASH(model, section, item)
Access the sub-components via:
>>> my_stash.model
1
>>> my_stash.section
2
>>> my_stash.item
3
String conversion results in the MSI format:
>>> print(iris.fileformats.pp.STASH(1, 16, 203))
m01s16i203
"""
__slots__ = ()
def __new__(cls, model, section, item):
"""
Args:
* model
A positive integer less than 100, or None.
* section
A non-negative integer less than 100, or None.
* item
A positive integer less than 1000, or None.
"""
model = cls._validate_member('model', model, 1, 99)
section = cls._validate_member('section', section, 0, 99)
item = cls._validate_member('item', item, 1, 999)
return super(STASH, cls).__new__(cls, model, section, item)
@staticmethod
def from_msi(msi):
"""Convert a STASH code MSI string to a STASH instance."""
if not isinstance(msi, six.string_types):
raise TypeError('Expected STASH code MSI string, got %r' % (msi,))
msi_match = re.match('^\s*m(.*)s(.*)i(.*)\s*$', msi, re.IGNORECASE)
if msi_match is None:
raise ValueError('Expected STASH code MSI string "mXXsXXiXXX", '
'got %r' % (msi,))
return STASH(*msi_match.groups())
@staticmethod
def _validate_member(name, value, lower_limit, upper_limit):
# Returns a valid integer or None.
try:
value = int(value)
if not lower_limit <= value <= upper_limit:
value = None
except (TypeError, ValueError):
value = None
return value
def __str__(self):
model = self._format_member(self.model, 2)
section = self._format_member(self.section, 2)
item = self._format_member(self.item, 3)
return 'm{}s{}i{}'.format(model, section, item)
def _format_member(self, value, num_digits):
if value is None:
result = '?' * num_digits
else:
format_spec = '0' + str(num_digits)
result = format(value, format_spec)
return result
def lbuser3(self):
"""Return the lbuser[3] value that this stash represents."""
return (self.section or 0) * 1000 + (self.item or 0)
def lbuser6(self):
"""Return the lbuser[6] value that this stash represents."""
return self.model or 0
@property
def is_valid(self):
return '?' not in str(self)
def __hash__(self):
return super(STASH, self).__hash__()
def __eq__(self, other):
if isinstance(other, six.string_types):
return super(STASH, self).__eq__(STASH.from_msi(other))
else:
return super(STASH, self).__eq__(other)
def __ne__(self, other):
return not self.__eq__(other)
class SplittableInt(object):
"""
A class to hold integers which can easily get each decimal digit
individually.
>>> three_six_two = SplittableInt(362)
>>> print(three_six_two)
362
>>> print(three_six_two[0])
2
>>> print(three_six_two[2])
3
.. note:: No support for negative numbers
"""
def __init__(self, value, name_mapping_dict=None):
"""
Build a SplittableInt given the positive integer value provided.
Kwargs:
* name_mapping_dict - (dict)
A special mapping to provide name based access to specific integer
positions:
>>> a = SplittableInt(1234, {'hundreds': 2})
>>> print(a.hundreds)
2
>>> a.hundreds = 9
>>> print(a.hundreds)
9
>>> print(a)
1934
"""
if value < 0:
raise ValueError('Negative numbers not supported with splittable'
' integers object')
# define the name lookup first (as this is the way __setattr__ is
# plumbed)
#: A dictionary mapping special attribute names on this object
#: to the slices/indices required to access them.
self._name_lookup = name_mapping_dict or {}
self._value = value
self._calculate_str_value_from_value()
def __int__(self):
return int(self._value)
def _calculate_str_value_from_value(self):
# Reverse the string to get the appropriate index when getting the
# sliced value
self._strvalue = [int(c) for c in str(self._value)[::-1]]
# Associate the names in the lookup table to attributes
for name, index in self._name_lookup.items():
object.__setattr__(self, name, self[index])
def _calculate_value_from_str_value(self):
self._value = np.sum([10**i * val for
i, val in enumerate(self._strvalue)])
def __len__(self):
return len(self._strvalue)
def __getitem__(self, key):
try:
val = self._strvalue[key]
except IndexError:
val = 0
# if the key returns a list of values, then combine them together
# to an integer
if isinstance(val, list):
val = sum([10**i * val for i, val in enumerate(val)])
return val
def __setitem__(self, key, value):
# The setitem method has been overridden so that assignment using
# ``val[0] = 1`` style syntax updates
# the entire object appropriately.
if (not isinstance(value, int) or value < 0):
raise ValueError('Can only set %s as a positive integer value.'
% key)
if isinstance(key, slice):
if ((key.start is not None and key.start < 0) or
(key.step is not None and key.step < 0) or
(key.stop is not None and key.stop < 0)):
raise ValueError('Cannot assign a value with slice objects'
' containing negative indices.')
# calculate the current length of the value of this string
current_length = len(range(*key.indices(len(self))))
# get indices for as many digits as have been requested. Putting
# the upper limit on the number of digits at 100.
indices = range(*key.indices(100))
if len(indices) < len(str(value)):
raise ValueError('Cannot put %s into %s as it has too many'
' digits.' % (value, key))
# Iterate over each of the indices in the slice,
# zipping them together with the associated digit
for index, digit in zip(indices,
str(value).zfill(current_length)[::-1]):
# assign each digit to the associated index
self.__setitem__(index, int(digit))
else:
# If we are trying to set to an index which does not currently
# exist in _strvalue then extend it to the
# appropriate length
if (key + 1) > len(self):
new_str_value = [0] * (key + 1)
new_str_value[:len(self)] = self._strvalue
self._strvalue = new_str_value
self._strvalue[key] = value
for name, index in self._name_lookup.items():
if index == key:
object.__setattr__(self, name, value)
self._calculate_value_from_str_value()
def __setattr__(self, name, value):
# if the attribute is a special value, update the index value which
# will in turn update the attribute value
if name != '_name_lookup' and name in self._name_lookup:
self[self._name_lookup[name]] = value
else:
object.__setattr__(self, name, value)
def __str__(self):
return str(self._value)
def __repr__(self):
return 'SplittableInt(%r, name_mapping_dict=%r)' % (self._value,
self._name_lookup)
def __eq__(self, other):
result = NotImplemented
if isinstance(other, SplittableInt):
result = self._value == other._value
elif isinstance(other, int):
result = self._value == other
return result
def __ne__(self, other):
result = self.__eq__(other)
if result is not NotImplemented:
result = not result
return result
def _compare(self, other, op):
result = NotImplemented
if isinstance(other, SplittableInt):
result = op(self._value, other._value)
elif isinstance(other, int):
result = op(self._value, other)
return result
def __lt__(self, other):
return self._compare(other, operator.lt)
def __le__(self, other):
return self._compare(other, operator.le)
def __gt__(self, other):
return self._compare(other, operator.gt)
def __ge__(self, other):
return self._compare(other, operator.ge)
class BitwiseInt(SplittableInt):
"""
A class to hold an integer, of fixed bit-length, which can easily get/set
each bit individually.
.. deprecated:: 1.8
Please use `int` instead.
.. note::
Uses a fixed number of bits.
Will raise an Error when attempting to access an out-of-range flag.
>>> a = BitwiseInt(511)
>>> a.flag1
1
>>> a.flag8
1
>>> a.flag128
1
>>> a.flag256
1
>>> a.flag512
AttributeError: 'BitwiseInt' object has no attribute 'flag512'
>>> a.flag512 = 1
AttributeError: Cannot set a flag that does not exist: flag512
"""
def __init__(self, value, num_bits=None):
# intentionally empty docstring as all covered in the class docstring.
""" """
warn_deprecated('BitwiseInt is deprecated - please use `int` instead.')
SplittableInt.__init__(self, value)
self.flags = ()
# do we need to calculate the number of bits based on the given value?
self._num_bits = num_bits
if self._num_bits is None:
self._num_bits = 0
while((value >> self._num_bits) > 0):
self._num_bits += 1
else:
# make sure the number of bits is enough to store the given value.
if (value >> self._num_bits) > 0:
raise ValueError("Not enough bits to store value")
self._set_flags_from_value()
def _set_flags_from_value(self):
all_flags = []
# Set attributes "flag[n]" to 0 or 1
for i in range(self._num_bits):
flag_name = 1 << i
flag_value = ((self._value >> i) & 1)
object.__setattr__(self, 'flag%d' % flag_name, flag_value)
# Add to list off all flags
if flag_value:
all_flags.append(flag_name)
self.flags = tuple(all_flags)
def _set_value_from_flags(self):
self._value = 0
for i in range(self._num_bits):
bit_value = pow(2, i)
flag_name = "flag%i" % bit_value
flag_value = object.__getattribute__(self, flag_name)
self._value += flag_value * bit_value
def __iand__(self, value):
"""Perform an &= operation."""
self._value &= value
self._set_flags_from_value()
return self
def __ior__(self, value):
"""Perform an |= operation."""
self._value |= value
self._set_flags_from_value()
return self
def __iadd__(self, value):
"""Perform an inplace add operation"""
self._value += value
self._set_flags_from_value()
return self
def __setattr__(self, name, value):
# Allow setting of the attribute flags
# Are we setting a flag?
if name.startswith("flag") and name != "flags":
# true and false become 1 and 0
if not isinstance(value, bool):
raise TypeError("Can only set bits to True or False")
# Setting an existing flag?
if hasattr(self, name):
# which flag?
flag_value = int(name[4:])
# on or off?
if value:
self |= flag_value
else:
self &= ~flag_value
# Fail if an attempt has been made to set a flag that does not
# exist
else:
raise AttributeError("Cannot set a flag that does not"
" exist: %s" % name)
# If we're not setting a flag, then continue as normal
else:
SplittableInt.__setattr__(self, name, value)
def _make_flag_getter(value):
def getter(self):
warn_deprecated('The `flag` attributes are deprecated - please use '
'integer bitwise operators instead.')
return int(bool(self._value & value))
return getter
def _make_flag_setter(value):
def setter(self, flag):
warn_deprecated('The `flag` attributes are deprecated - please use '
'integer bitwise operators instead.')
if not isinstance(flag, bool):
raise TypeError('Can only set bits to True or False')
if flag:
self._value |= value
else:
self._value &= ~value
return setter
class _FlagMetaclass(type):
NUM_BITS = 18
def __new__(cls, classname, bases, class_dict):
for i in range(cls.NUM_BITS):
value = 2 ** i
name = 'flag{}'.format(value)
class_dict[name] = property(_make_flag_getter(value),
_make_flag_setter(value))
class_dict['NUM_BITS'] = cls.NUM_BITS
return type.__new__(cls, classname, bases, class_dict)
class _LBProc(six.with_metaclass(_FlagMetaclass, BitwiseInt)):
# Use a metaclass to define the `flag1`, `flag2`, `flag4, etc.
# properties.
def __init__(self, value):
"""
Args:
* value (int):
The initial value which will determine the flags.
"""
value = int(value)
if value < 0:
raise ValueError('Negative numbers not supported with '
'splittable integers object')
self._value = value
def __len__(self):
"""
Base ten length.
.. deprecated:: 1.8
The value of a BitwiseInt only makes sense in base-two.
"""
warn_deprecated('Length is deprecated')
return len(str(self._value))
def __setattr__(self, name, value):
object.__setattr__(self, name, value)
def __getitem__(self, key):
"""
Base ten indexing support.
.. deprecated:: 1.8
The value of an _LBProc only makes sense in base-two.
"""
warn_deprecated('Indexing is deprecated')
try:
value = int('0' + str(self._value)[::-1][key][::-1])
except IndexError:
value = 0
# If the key returns a list of values, then combine them
# together to an integer.
if isinstance(value, list):
value = sum(10**i * val for i, val in enumerate(value))
return value
def __setitem__(self, key, value):
"""
Base ten indexing support.
.. deprecated:: 1.8
The value of an _LBProc only makes sense in base-two.
"""
warn_deprecated('Indexing is deprecated')
if (not isinstance(value, int) or value < 0):
msg = 'Can only set {} as a positive integer value.'.format(key)
raise ValueError(msg)
if isinstance(key, slice):
if ((key.start is not None and key.start < 0) or
(key.step is not None and key.step < 0) or
(key.stop is not None and key.stop < 0)):
raise ValueError('Cannot assign a value with slice '
'objects containing negative indices.')
# calculate the current length of the value of this string
current_length = len(range(*key.indices(len(self))))
# Get indices for as many digits as have been requested.
# Putting the upper limit on the number of digits at 100.
indices = range(*key.indices(100))
if len(indices) < len(str(value)):
fmt = 'Cannot put {} into {} as it has too many digits.'
raise ValueError(fmt.format(value, key))
# Iterate over each of the indices in the slice, zipping
# them together with the associated digit.
filled_value = str(value).zfill(current_length)
for index, digit in zip(indices, filled_value[::-1]):
# assign each digit to the associated index
self.__setitem__(index, int(digit))
else:
if value > 9:
raise ValueError('Can only set a single digit')
# Setting a single digit.
factor = 10 ** key
head, tail = divmod(self._value, factor)
head = head // 10
self._value = (head * 10 + value) * factor + tail
def __iadd__(self, value):
self._value += value
return self
def __and__(self, value):
return self._value & value
def __iand__(self, value):
self._value &= value
return self
def __ior__(self, value):
self._value |= value
return self
def __int__(self):
return self._value
def __repr__(self):
return '_LBProc({})'.format(self._value)
def __str__(self):
return str(self._value)
@property
def flags(self):
warn_deprecated('The `flags` attribute is deprecated - please use '
'integer bitwise operators instead.')
return tuple(2 ** i for i in range(self.NUM_BITS)
if self._value & 2 ** i)
class PPDataProxy(object):
"""A reference to the data payload of a single PP field."""
__slots__ = ('shape', 'src_dtype', 'path', 'offset', 'data_len',
'_lbpack', 'boundary_packing', 'mdi', 'mask')
def __init__(self, shape, src_dtype, path, offset, data_len,
lbpack, boundary_packing, mdi, mask):
self.shape = shape
self.src_dtype = src_dtype
self.path = path
self.offset = offset
self.data_len = data_len
self.lbpack = lbpack
self.boundary_packing = boundary_packing
self.mdi = mdi
self.mask = mask
# lbpack
def _lbpack_setter(self, value):
self._lbpack = value
def _lbpack_getter(self):
value = self._lbpack
if not isinstance(self._lbpack, SplittableInt):
mapping = dict(n5=slice(4, None), n4=3, n3=2, n2=1, n1=0)
value = SplittableInt(self._lbpack, mapping)
return value
lbpack = property(_lbpack_getter, _lbpack_setter)
@property
def dtype(self):
return self.src_dtype.newbyteorder('=')
@property
def fill_value(self):
return self.mdi
@property
def ndim(self):
return len(self.shape)
def __getitem__(self, keys):
with open(self.path, 'rb') as pp_file:
pp_file.seek(self.offset, os.SEEK_SET)
data_bytes = pp_file.read(self.data_len)
data = _data_bytes_to_shaped_array(data_bytes,
self.lbpack,
self.boundary_packing,
self.shape, self.src_dtype,
self.mdi, self.mask)
return data.__getitem__(keys)
def __repr__(self):
fmt = '<{self.__class__.__name__} shape={self.shape}' \
' src_dtype={self.dtype!r} path={self.path!r}' \
' offset={self.offset} mask={self.mask!r}>'
return fmt.format(self=self)
def __getstate__(self):
# Because we have __slots__, this is needed to support Pickle.dump()
return [(name, getattr(self, name)) for name in self.__slots__]
def __setstate__(self, state):
# Because we have __slots__, this is needed to support Pickle.load()
# (Use setattr, as there is no object dictionary.)
for (key, value) in state:
setattr(self, key, value)
def __eq__(self, other):
result = NotImplemented
if isinstance(other, PPDataProxy):
result = True
for attr in self.__slots__:
if getattr(self, attr) != getattr(other, attr):
result = False
break
return result
def __ne__(self, other):
result = self.__eq__(other)
if result is not NotImplemented:
result = not result
return result
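# A rough sketch of the deferred loading this proxy provides (the argument
# values are illustrative only; real instances are built by the PP loading
# code below):
#
#     proxy = PPDataProxy((73, 96), np.dtype('>f4'), 'a.pp', offset, data_len,
#                         lbpack, boundary_packing=None, mdi=-1e30, mask=None)
#     chunk = proxy[:10, :10]   # the file is only opened and decoded here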
def _data_bytes_to_shaped_array(data_bytes, lbpack, boundary_packing,
data_shape, data_type, mdi,
mask=None):
"""
Convert the already read binary data payload into a numpy array, unpacking
and decompressing as per the F3 specification.
"""
if lbpack.n1 in (0, 2):
data = np.frombuffer(data_bytes, dtype=data_type)
elif lbpack.n1 == 1:
if mo_pack is not None:
try:
decompress_wgdos = mo_pack.decompress_wgdos
except AttributeError:
decompress_wgdos = mo_pack.unpack_wgdos
elif pp_packing is not None:
msg = 'iris.fileformats.pp_packing has been ' \
'deprecated and will be removed in a future release. ' \
'Install mo_pack to make use of the new unpacking ' \
'functionality.'
warn_deprecated(msg)
decompress_wgdos = pp_packing.wgdos_unpack
else:
msg = 'Unpacking PP fields with LBPACK of {} ' \
'requires mo_pack to be installed'.format(lbpack.n1)
raise ValueError(msg)
data = decompress_wgdos(data_bytes, data_shape[0], data_shape[1], mdi)
elif lbpack.n1 == 4:
if mo_pack is not None and hasattr(mo_pack, 'decompress_rle'):
decompress_rle = mo_pack.decompress_rle
elif pp_packing is not None:
msg = 'iris.fileformats.pp_packing has been ' \
'deprecated and will be removed in a future release. ' \
'Install/upgrade mo_pack to make use of the new unpacking ' \
'functionality.'
warn_deprecated(msg)
decompress_rle = pp_packing.rle_decode
else:
msg = 'Unpacking PP fields with LBPACK of {} ' \
'requires mo_pack to be installed'.format(lbpack.n1)
raise ValueError(msg)
data = decompress_rle(data_bytes, data_shape[0], data_shape[1], mdi)
else:
raise iris.exceptions.NotYetImplementedError(
'PP fields with LBPACK of %s are not yet supported.' % lbpack)
# Ensure we have write permission on the data buffer.
data.setflags(write=True)
# Ensure the data is in the native byte order
if not data.dtype.isnative:
data.byteswap(True)
data.dtype = data.dtype.newbyteorder('=')
if boundary_packing is not None:
# Convert a long string of numbers into a "lateral boundary
# condition" array, which is split into 4 quartiles, North
# East, South, West and where North and South contain the corners.
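        # Schematically, for the unpacked array built below: rows
        # [-boundary_height:] hold the northern rim, rows [:boundary_height]
        # the southern rim, the last/first boundary_width columns of the middle
        # rows hold the eastern and western rims, and the interior stays masked.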
compressed_data = data
data = np.ma.masked_all(data_shape)
boundary_height = boundary_packing.y_halo + boundary_packing.rim_width
boundary_width = boundary_packing.x_halo + boundary_packing.rim_width
y_height, x_width = data_shape
# The height of the east and west components.
mid_height = y_height - 2 * boundary_height
n_s_shape = boundary_height, x_width
e_w_shape = mid_height, boundary_width
# Keep track of our current position in the array.
current_posn = 0
north = compressed_data[:boundary_height*x_width]
current_posn += len(north)
data[-boundary_height:, :] = north.reshape(*n_s_shape)
east = compressed_data[current_posn:
current_posn + boundary_width * mid_height]
current_posn += len(east)
data[boundary_height:-boundary_height,
-boundary_width:] = east.reshape(*e_w_shape)
south = compressed_data[current_posn:
current_posn + boundary_height * x_width]
current_posn += len(south)
data[:boundary_height, :] = south.reshape(*n_s_shape)
west = compressed_data[current_posn:
current_posn + boundary_width * mid_height]
current_posn += len(west)
data[boundary_height:-boundary_height,
:boundary_width] = west.reshape(*e_w_shape)
elif lbpack.n2 == 2:
if mask is None:
raise ValueError('No mask was found to unpack the data. '
'Could not load.')
land_mask = mask.data.astype(np.bool)
sea_mask = ~land_mask
new_data = np.ma.masked_all(land_mask.shape)
if lbpack.n3 == 1:
# Land mask packed data.
new_data.mask = sea_mask
# Sometimes the data comes in longer than it should be (i.e. it
# looks like the compressed data is compressed, but the trailing
# data hasn't been clipped off!).
new_data[land_mask] = data[:land_mask.sum()]
elif lbpack.n3 == 2:
# Sea mask packed data.
new_data.mask = land_mask
new_data[sea_mask] = data[:sea_mask.sum()]
else:
raise ValueError('Unsupported mask compression.')
data = new_data
else:
# Reform in row-column order
data.shape = data_shape
# Mask the array?
if mdi in data:
data = ma.masked_values(data, mdi, copy=False)
return data
# The special headers of the PPField classes which get some improved
# functionality
_SPECIAL_HEADERS = ('lbtim', 'lbcode', 'lbpack', 'lbproc', 'data', 'stash',
't1', 't2')
def _header_defn(release_number):
"""
Returns the zero-indexed header definition for a particular release of
a PPField.
"""
um_header = UM_HEADERS[release_number]
offset = UM_TO_PP_HEADER_OFFSET
return [(name, tuple(position - offset for position in positions))
for name, positions in um_header]
def _pp_attribute_names(header_defn):
"""
Returns the allowed attributes of a PPField:
all of the normal headers (i.e. not the _SPECIAL_HEADERS),
the _SPECIAL_HEADERS with '_' prefixed,
the possible extra data headers.
"""
normal_headers = list(name for name, positions in header_defn
if name not in _SPECIAL_HEADERS)
special_headers = list('_' + name for name in _SPECIAL_HEADERS)
extra_data = list(EXTRA_DATA.values())
special_attributes = ['_raw_header', 'raw_lbtim', 'raw_lbpack',
'boundary_packing']
return normal_headers + special_headers + extra_data + special_attributes
class PPField(six.with_metaclass(abc.ABCMeta, object)):
"""
A generic class for PP fields - not specific to a particular
header release number.
A PPField instance can easily access the PP header "words" as attributes
with some added useful capabilities::
for field in iris.fileformats.pp.load(filename):
print(field.lbyr)
print(field.lbuser)
print(field.lbuser[0])
print(field.lbtim)
print(field.lbtim.ia)
print(field.t1)
"""
# NB. Subclasses must define the attribute HEADER_DEFN to be their
# zero-based header definition. See PPField2 and PPField3 for examples.
__slots__ = ()
def __init__(self, header=None):
# Combined header longs and floats data cache.
self._raw_header = header
self.raw_lbtim = None
self.raw_lbpack = None
self.boundary_packing = None
if header is not None:
self.raw_lbtim = header[self.HEADER_DICT['lbtim'][0]]
self.raw_lbpack = header[self.HEADER_DICT['lbpack'][0]]
def __getattr__(self, key):
"""
This method supports deferred attribute creation, which offers a
significant loading optimisation, particularly when not all attributes
are referenced and therefore created on the instance.
When an 'ordinary' HEADER_DICT attribute is required, its associated
        header offset is used to look up the data value/s from the combined
header longs and floats data cache. The attribute is then set with this
value/s on the instance. Thus future lookups for this attribute will be
optimised, avoiding the __getattr__ lookup mechanism again.
When a 'special' HEADER_DICT attribute (leading underscore) is
required, its associated 'ordinary' (no leading underscore) header
        offset is used to look up the data value/s from the combined header
longs and floats data cache. The 'ordinary' attribute is then set
with this value/s on the instance. This is required as 'special'
        attributes have supporting property convenience functionality based on
the attribute value e.g. see 'lbpack' and 'lbtim'. Note that, for
'special' attributes the interface is via the 'ordinary' attribute but
the underlying attribute value is stored within the 'special'
attribute.
"""
try:
loc = self.HEADER_DICT[key]
except KeyError:
if key[0] == '_' and key[1:] in self.HEADER_DICT:
# Must be a special attribute.
loc = self.HEADER_DICT[key[1:]]
else:
cls = self.__class__.__name__
msg = '{!r} object has no attribute {!r}'.format(cls, key)
raise AttributeError(msg)
if len(loc) == 1:
value = self._raw_header[loc[0]]
else:
start = loc[0]
stop = loc[-1] + 1
value = tuple(self._raw_header[start:stop])
# Now cache the attribute value on the instance.
if key[0] == '_':
# First we need to assign to the attribute so that the
# special attribute is calculated, then we retrieve it.
setattr(self, key[1:], value)
value = getattr(self, key)
else:
setattr(self, key, value)
return value
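    # For illustration: with the release-2 definition above, a first access to
    # ``field.lbyr`` reads self._raw_header[0] and caches it on the instance,
    # ``field.lbrsvd`` returns the tuple self._raw_header[33:37], and a
    # 'special' name such as ``lbtim`` is routed through its property setter
    # so later accesses use the cached self._lbtim.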
@abc.abstractproperty
def t1(self):
pass
@abc.abstractproperty
def t2(self):
pass
def __repr__(self):
"""Return a string representation of the PP field."""
# Define an ordering on the basic header names
attribute_priority_lookup = {name: loc[0] for name, loc
in self.HEADER_DEFN}
# With the attributes sorted the order will remain stable if extra
# attributes are added.
public_attribute_names = list(attribute_priority_lookup.keys()) + \
list(EXTRA_DATA.values())
self_attrs = [(name, getattr(self, name, None))
for name in public_attribute_names]
self_attrs = [pair for pair in self_attrs if pair[1] is not None]
# Output any masked data as separate `data` and `mask`
# components, to avoid the standard MaskedArray output
# which causes irrelevant discrepancies between NumPy
# v1.6 and v1.7.
if ma.isMaskedArray(self._data):
# Force the fill value to zero to have the minimum
# impact on the output style.
self_attrs.append(('data.data', self._data.filled(0)))
self_attrs.append(('data.mask', self._data.mask))
else:
self_attrs.append(('data', self._data))
# sort the attributes by position in the pp header followed,
# then by alphabetical order.
attributes = sorted(self_attrs, key=lambda pair:
(attribute_priority_lookup.get(pair[0], 999),
pair[0]))
return 'PP Field' + ''.join(['\n %s: %s' % (k, v)
for k, v in attributes]) + '\n'
@property
def stash(self):
"""
A stash property giving access to the associated STASH object,
now supporting __eq__
"""
if (not hasattr(self, '_stash') or
self.lbuser[6] != self._stash.lbuser6() or
self.lbuser[3] != self._stash.lbuser3()):
self._stash = STASH(self.lbuser[6], self.lbuser[3] // 1000,
self.lbuser[3] % 1000)
return self._stash
@stash.setter
def stash(self, stash):
if isinstance(stash, six.string_types):
self._stash = STASH.from_msi(stash)
elif isinstance(stash, STASH):
self._stash = stash
else:
raise ValueError('Cannot set stash to {!r}'.format(stash))
# Keep the lbuser up to date.
self.lbuser = list(self.lbuser)
self.lbuser[6] = self._stash.lbuser6()
self.lbuser[3] = self._stash.lbuser3()
@property
def lbtim(self):
return self._lbtim
@lbtim.setter
def lbtim(self, value):
value = int(value)
self.raw_lbtim = value
self._lbtim = SplittableInt(value, {'ia': slice(2, None), 'ib': 1,
'ic': 0})
# lbcode
def _lbcode_setter(self, new_value):
if not isinstance(new_value, SplittableInt):
# add the ix/iy values for lbcode
new_value = SplittableInt(new_value,
{'iy': slice(0, 2), 'ix': slice(2, 4)})
self._lbcode = new_value
lbcode = property(lambda self: self._lbcode, _lbcode_setter)
# lbpack
def _lbpack_setter(self, new_value):
if not isinstance(new_value, SplittableInt):
self.raw_lbpack = new_value
# add the n1/n2/n3/n4/n5 values for lbpack
name_mapping = dict(n5=slice(4, None), n4=3, n3=2, n2=1, n1=0)
new_value = SplittableInt(new_value, name_mapping)
else:
self.raw_lbpack = new_value._value
self._lbpack = new_value
lbpack = property(lambda self: self._lbpack, _lbpack_setter)
@property
def lbproc(self):
return self._lbproc
@lbproc.setter
def lbproc(self, value):
if not isinstance(value, _LBProc):
value = _LBProc(value)
self._lbproc = value
@property
def data(self):
"""
The :class:`numpy.ndarray` representing the multidimensional data
of the pp file
"""
# Cache the real data on first use
if isinstance(self._data, biggus.Array):
data = self._data.masked_array()
if ma.count_masked(data) == 0:
data = data.data
self._data = data
return self._data
@data.setter
def data(self, value):
self._data = value
@property
def calendar(self):
"""Return the calendar of the field."""
# TODO #577 What calendar to return when ibtim.ic in [0, 3]
calendar = cf_units.CALENDAR_GREGORIAN
if self.lbtim.ic == 2:
calendar = cf_units.CALENDAR_360_DAY
elif self.lbtim.ic == 4:
calendar = cf_units.CALENDAR_365_DAY
return calendar
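    # Illustrative sketch (assumed value, not part of the original class):
    # lbtim.ic selects the calendar, e.g.
    #
    #     field.lbtim = 122       # ia=1, ib=2, ic=2
    #     field.calendar          # -> cf_units.CALENDAR_360_DAY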
def _read_extra_data(self, pp_file, file_reader, extra_len,
little_ended=False):
"""Read the extra data section and update the self appropriately."""
dtype_endian_char = '<' if little_ended else '>'
# While there is still extra data to decode run this loop
while extra_len > 0:
dtype = '%cL' % dtype_endian_char
extra_int_code = struct.unpack_from(dtype,
file_reader(PP_WORD_DEPTH))[0]
extra_len -= PP_WORD_DEPTH
ib = extra_int_code % 1000
ia = extra_int_code // 1000
data_len = ia * PP_WORD_DEPTH
if ib == 10:
dtype = '%c%ds' % (dtype_endian_char, data_len)
field_title = struct.unpack_from(dtype, file_reader(data_len))
self.field_title = field_title[0].rstrip(b'\00').decode()
elif ib == 11:
dtype = '%c%ds' % (dtype_endian_char, data_len)
domain_title = struct.unpack_from(dtype,
file_reader(data_len))
self.domain_title = domain_title[0].rstrip(b'\00').decode()
elif ib in EXTRA_DATA:
attr_name = EXTRA_DATA[ib]
dtype = np.dtype('%cf%d' % (dtype_endian_char, PP_WORD_DEPTH))
values = np.fromfile(pp_file, dtype=dtype, count=ia)
# Ensure the values are in the native byte order
if not values.dtype.isnative:
values.byteswap(True)
values.dtype = values.dtype.newbyteorder('=')
setattr(self, attr_name, values)
else:
raise ValueError('Unknown IB value for extra data: %s' % ib)
extra_len -= data_len
@property
def x_bounds(self):
if hasattr(self, "x_lower_bound") and hasattr(self, "x_upper_bound"):
return np.column_stack((self.x_lower_bound, self.x_upper_bound))
@property
def y_bounds(self):
if hasattr(self, "y_lower_bound") and hasattr(self, "y_upper_bound"):
return np.column_stack((self.y_lower_bound, self.y_upper_bound))
def save(self, file_handle):
"""
Save the PPField to the given file object
(typically created with :func:`open`).
::
# to append the field to a file
with open(filename, 'ab') as fh:
a_pp_field.save(fh)
# to overwrite/create a file
with open(filename, 'wb') as fh:
a_pp_field.save(fh)
.. note::
The fields which are automatically calculated are: 'lbext',
'lblrec' and 'lbuser[0]'. Some fields are not currently
populated, these are: 'lbegin', 'lbnrec', 'lbuser[1]'.
"""
# Before we can actually write to file, we need to calculate the header
# elements. First things first, make sure the data is big-endian
data = self.data
if isinstance(data, ma.core.MaskedArray):
data = data.filled(fill_value=self.bmdi)
if data.dtype.newbyteorder('>') != data.dtype:
# take a copy of the data when byteswapping
data = data.byteswap(False)
data.dtype = data.dtype.newbyteorder('>')
# Create the arrays which will hold the header information
lb = np.empty(shape=NUM_LONG_HEADERS,
dtype=np.dtype(">u%d" % PP_WORD_DEPTH))
b = np.empty(shape=NUM_FLOAT_HEADERS,
dtype=np.dtype(">f%d" % PP_WORD_DEPTH))
# Populate the arrays from the PPField
for name, pos in self.HEADER_DEFN:
try:
header_elem = getattr(self, name)
except AttributeError:
raise AttributeError("PPField.save() could not find %s" % name)
if pos[0] <= NUM_LONG_HEADERS - UM_TO_PP_HEADER_OFFSET:
index = slice(pos[0], pos[-1] + 1)
if isinstance(header_elem, SplittableInt):
header_elem = int(header_elem)
lb[index] = header_elem
else:
index = slice(pos[0] - NUM_LONG_HEADERS,
pos[-1] - NUM_LONG_HEADERS + 1)
b[index] = header_elem
# Although all of the elements are now populated, we still need to
# update some of the elements in case
# things have changed (for example, the data length etc.)
# Set up a variable to represent the datalength of this PPField in
# WORDS.
len_of_data_payload = 0
# set up a list to hold the extra data which will need to be encoded
# at the end of the data
extra_items = []
# iterate through all of the possible extra data fields
for ib, extra_data_attr_name in six.iteritems(EXTRA_DATA):
# try to get the extra data field, returning None if it doesn't
# exist
extra_elem = getattr(self, extra_data_attr_name, None)
if extra_elem is not None:
# The special case of character extra data must be caught
if isinstance(extra_elem, six.string_types):
ia = len(extra_elem)
# pad any strings up to a multiple of PP_WORD_DEPTH
# (this length is # of bytes)
ia = (PP_WORD_DEPTH - (ia-1) % PP_WORD_DEPTH) + (ia-1)
extra_elem = extra_elem.ljust(ia, '\00')
# ia is now the datalength in WORDS of the string
ia //= PP_WORD_DEPTH
else:
# ia is the datalength in WORDS
ia = np.product(extra_elem.shape)
# flip the byteorder if the data is not big-endian
if extra_elem.dtype.newbyteorder('>') != extra_elem.dtype:
# take a copy of the extra data when byte swapping
extra_elem = extra_elem.byteswap(False)
extra_elem.dtype = extra_elem.dtype.newbyteorder('>')
# add the number of bytes to the len_of_data_payload variable
# + the extra integer which will encode ia/ib
len_of_data_payload += PP_WORD_DEPTH * ia + PP_WORD_DEPTH
integer_code = 1000 * ia + ib
extra_items.append([integer_code, extra_elem])
if ia >= 1000:
raise IOError('PP files cannot write extra data with more'
' than 1000 elements. Tried to write "%s"'
' which has %s elements.'
                                  % (extra_data_attr_name, ia)
)
# populate lbext in WORDS
lb[self.HEADER_DICT['lbext'][0]] = len_of_data_payload // PP_WORD_DEPTH
# Put the data length of pp.data into len_of_data_payload (in BYTES)
lbpack = lb[self.HEADER_DICT['lbpack'][0]]
if lbpack == 0:
len_of_data_payload += data.size * PP_WORD_DEPTH
elif lbpack == 1:
if mo_pack is not None:
try:
compress_wgdos = mo_pack.compress_wgdos
except AttributeError:
compress_wgdos = mo_pack.pack_wgdos
packed_data = compress_wgdos(data.astype(np.float32),
b[self.HEADER_DICT['bacc'][0]-45],
b[self.HEADER_DICT['bmdi'][0]-45])
len_of_data_payload += len(packed_data)
else:
msg = 'Writing packed pp data with lbpack of {} ' \
'requires mo_pack to be installed.'.format(lbpack)
raise NotImplementedError(msg)
        # populate lblrec in WORDS
lb[self.HEADER_DICT['lblrec'][0]] = len_of_data_payload // \
PP_WORD_DEPTH
# populate lbuser[0] to have the data's datatype
if data.dtype == np.dtype('>f4'):
lb[self.HEADER_DICT['lbuser'][0]] = 1
elif data.dtype == np.dtype('>f8'):
warnings.warn("Downcasting array precision from float64 to float32"
" for save.If float64 precision is required then"
" please save in a different format")
data = data.astype('>f4')
lb[self.HEADER_DICT['lbuser'][0]] = 1
elif data.dtype == np.dtype('>i4'):
# NB: there is no physical difference between lbuser[0] of 2 or 3
# so we encode just 2
lb[self.HEADER_DICT['lbuser'][0]] = 2
else:
raise IOError('Unable to write data array to a PP file. '
'The datatype was %s.' % data.dtype)
# NB: lbegin, lbnrec, lbuser[1] not set up
        # Now that we have done the manoeuvring required, write to the file...
if not hasattr(file_handle, 'write'):
raise TypeError('The file_handle argument must be an instance of a'
' Python file object, but got %r. \n e.g. '
'open(filename, "wb") to open a binary file with'
' write permission.' % type(file_handle))
pp_file = file_handle
# header length
pp_file.write(struct.pack(">L", PP_HEADER_DEPTH))
# 45 integers
lb.tofile(pp_file)
# 19 floats
b.tofile(pp_file)
# Header length (again)
pp_file.write(struct.pack(">L", PP_HEADER_DEPTH))
# Data length (including extra data length)
pp_file.write(struct.pack(">L", int(len_of_data_payload)))
# the data itself
if lbpack == 0:
data.tofile(pp_file)
elif lbpack == 1:
pp_file.write(packed_data)
else:
msg = 'Writing packed pp data with lbpack of {} ' \
'is not supported.'.format(lbpack)
raise NotImplementedError(msg)
# extra data elements
for int_code, extra_data in extra_items:
pp_file.write(struct.pack(">L", int(int_code)))
if isinstance(extra_data, six.string_types):
pp_file.write(struct.pack(">%ss" % len(extra_data),
extra_data.encode()))
else:
extra_data = extra_data.astype(np.dtype('>f4'))
extra_data.tofile(pp_file)
# Data length (again)
pp_file.write(struct.pack(">L", int(len_of_data_payload)))
##############################################################
#
# From here on define helper methods for PP -> Cube conversion.
#
def time_unit(self, time_unit, epoch='epoch'):
return cf_units.Unit('%s since %s' % (time_unit, epoch),
calendar=self.calendar)
def coord_system(self):
"""Return a CoordSystem for this PPField.
Returns:
Currently, a :class:`~iris.coord_systems.GeogCS` or
:class:`~iris.coord_systems.RotatedGeogCS`.
"""
geog_cs = iris.coord_systems.GeogCS(EARTH_RADIUS)
def degrees_ne(angle, ref_angle):
"""
Return whether an angle differs significantly from a set value.
The inputs are in degrees.
The difference is judged significant if more than 0.0001 degrees.
"""
return abs(angle - ref_angle) > 0.0001
if (degrees_ne(self.bplat, 90.0) or (degrees_ne(self.bplon, 0.0) and
degrees_ne(self.bplon, 180.0))):
# NOTE: when bplat,bplon=90,0 this encodes an unrotated system.
# However, the rotated system which is *equivalent* to an unrotated
# one actually has blat,bplon=90,180, due to a quirk in the
# definition equations.
# So we accept BPLON of 0 *or* 180 to mean 'unrotated'.
geog_cs = iris.coord_systems.RotatedGeogCS(
self.bplat, self.bplon, ellipsoid=geog_cs)
return geog_cs
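    # Illustrative sketch (assumed values, not part of the original class):
    # bplat=90 with bplon of 0 or 180 means an unrotated grid and a plain
    # GeogCS is returned; anything else wraps it in a RotatedGeogCS, e.g.
    #
    #     field.bplat, field.bplon = 37.5, 177.5
    #     field.coord_system()    # -> RotatedGeogCS(37.5, 177.5, ...)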
def _x_coord_name(self):
# TODO: Remove once we have the ability to derive this in the rules.
x_name = "longitude"
if isinstance(self.coord_system(), iris.coord_systems.RotatedGeogCS):
x_name = "grid_longitude"
return x_name
def _y_coord_name(self):
# TODO: Remove once we have the ability to derive this in the rules.
y_name = "latitude"
if isinstance(self.coord_system(), iris.coord_systems.RotatedGeogCS):
y_name = "grid_latitude"
return y_name
def copy(self):
"""
Returns a deep copy of this PPField.
Returns:
A copy instance of the :class:`PPField`.
"""
return self._deepcopy({})
def __deepcopy__(self, memo):
return self._deepcopy(memo)
def _deepcopy(self, memo):
field = self.__class__()
for attr in self.__slots__:
if hasattr(self, attr):
value = getattr(self, attr)
# Cope with inability to deepcopy a 0-d NumPy array.
if attr == '_data' and value is not None and value.ndim == 0:
setattr(field, attr, np.array(deepcopy(value[()], memo)))
else:
setattr(field, attr, deepcopy(value, memo))
return field
def __eq__(self, other):
result = NotImplemented
if isinstance(other, PPField):
result = True
for attr in self.__slots__:
attrs = [hasattr(self, attr), hasattr(other, attr)]
if all(attrs):
self_attr = getattr(self, attr)
other_attr = getattr(other, attr)
if isinstance(self_attr, biggus.NumpyArrayAdapter):
self_attr = self_attr.concrete
if isinstance(other_attr, biggus.NumpyArrayAdapter):
other_attr = other_attr.concrete
if not np.all(self_attr == other_attr):
result = False
break
elif any(attrs):
result = False
break
return result
def __ne__(self, other):
result = self.__eq__(other)
if result is not NotImplemented:
result = not result
return result
class PPField2(PPField):
"""
A class to hold a single field from a PP file, with a
header release number of 2.
"""
HEADER_DEFN = _header_defn(2)
HEADER_DICT = dict(HEADER_DEFN)
__slots__ = _pp_attribute_names(HEADER_DEFN)
def _get_t1(self):
if not hasattr(self, '_t1'):
self._t1 = netcdftime.datetime(self.lbyr, self.lbmon, self.lbdat,
self.lbhr, self.lbmin)
return self._t1
def _set_t1(self, dt):
self.lbyr = dt.year
self.lbmon = dt.month
self.lbdat = dt.day
self.lbhr = dt.hour
self.lbmin = dt.minute
self.lbday = int(dt.strftime('%j'))
if hasattr(self, '_t1'):
delattr(self, '_t1')
t1 = property(_get_t1, _set_t1, None,
"A netcdftime.datetime object consisting of the lbyr, lbmon,"
" lbdat, lbhr, and lbmin attributes.")
def _get_t2(self):
if not hasattr(self, '_t2'):
self._t2 = netcdftime.datetime(self.lbyrd, self.lbmond,
self.lbdatd, self.lbhrd,
self.lbmind)
return self._t2
def _set_t2(self, dt):
self.lbyrd = dt.year
self.lbmond = dt.month
self.lbdatd = dt.day
self.lbhrd = dt.hour
self.lbmind = dt.minute
self.lbdayd = int(dt.strftime('%j'))
if hasattr(self, '_t2'):
delattr(self, '_t2')
t2 = property(_get_t2, _set_t2, None,
"A netcdftime.datetime object consisting of the lbyrd, "
"lbmond, lbdatd, lbhrd, and lbmind attributes.")
class PPField3(PPField):
"""
A class to hold a single field from a PP file, with a
header release number of 3.
"""
HEADER_DEFN = _header_defn(3)
HEADER_DICT = dict(HEADER_DEFN)
__slots__ = _pp_attribute_names(HEADER_DEFN)
def _get_t1(self):
if not hasattr(self, '_t1'):
self._t1 = netcdftime.datetime(self.lbyr, self.lbmon, self.lbdat,
self.lbhr, self.lbmin, self.lbsec)
return self._t1
def _set_t1(self, dt):
self.lbyr = dt.year
self.lbmon = dt.month
self.lbdat = dt.day
self.lbhr = dt.hour
self.lbmin = dt.minute
self.lbsec = dt.second
if hasattr(self, '_t1'):
delattr(self, '_t1')
t1 = property(_get_t1, _set_t1, None,
"A netcdftime.datetime object consisting of the lbyr, lbmon,"
" lbdat, lbhr, lbmin, and lbsec attributes.")
def _get_t2(self):
if not hasattr(self, '_t2'):
self._t2 = netcdftime.datetime(self.lbyrd, self.lbmond,
self.lbdatd, self.lbhrd,
self.lbmind, self.lbsecd)
return self._t2
def _set_t2(self, dt):
self.lbyrd = dt.year
self.lbmond = dt.month
self.lbdatd = dt.day
self.lbhrd = dt.hour
self.lbmind = dt.minute
self.lbsecd = dt.second
if hasattr(self, '_t2'):
delattr(self, '_t2')
t2 = property(_get_t2, _set_t2, None,
"A netcdftime.datetime object consisting of the lbyrd, "
"lbmond, lbdatd, lbhrd, lbmind, and lbsecd attributes.")
PP_CLASSES = {
2: PPField2,
3: PPField3
}
def make_pp_field(header):
# Choose a PP field class from the value of LBREL
lbrel = header[21]
if lbrel not in PP_CLASSES:
raise ValueError('Unsupported header release number: {}'.format(lbrel))
pp_field = PP_CLASSES[lbrel](header)
return pp_field
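# A minimal usage sketch (assumed header sequence, not part of the original
# module): make_pp_field is normally driven by _field_gen below, but it can
# be called directly with the 64 raw header words; word 21 (LBREL) selects
# the field class.
#
#     field = make_pp_field(header_words)        # header_words[21] in (2, 3)
#     isinstance(field, (PPField2, PPField3))    # -> True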
LoadedArrayBytes = collections.namedtuple('LoadedArrayBytes', 'bytes, dtype')
def load(filename, read_data=False, little_ended=False):
"""
Return an iterator of PPFields given a filename.
Args:
* filename - string of the filename to load.
Kwargs:
* read_data - boolean
Flag whether or not the data should be read, if False an empty
data manager will be provided which can subsequently load the data
on demand. Default False.
* little_ended - boolean
If True, file contains all little-ended words (header and data).
To iterate through all of the fields in a pp file::
for field in iris.fileformats.pp.load(filename):
print(field)
"""
return _interpret_fields(_field_gen(filename,
read_data_bytes=read_data,
little_ended=little_ended))
def _interpret_fields(fields):
"""
Turn the fields read with load and FF2PP._extract_field into useable
fields. One of the primary purposes of this function is to either convert
"deferred bytes" into "deferred arrays" or "loaded bytes" into actual
    numpy arrays (via the _create_field_data function).
"""
land_mask = None
landmask_compressed_fields = []
for field in fields:
# Store the first reference to a land mask, and use this as the
# definitive mask for future fields in this generator.
if land_mask is None and field.lbuser[6] == 1 and \
(field.lbuser[3] // 1000) == 0 and \
(field.lbuser[3] % 1000) == 30:
land_mask = field
# Handle land compressed data payloads,
# when lbpack.n2 is 2.
if (field.raw_lbpack // 10 % 10) == 2:
if land_mask is None:
landmask_compressed_fields.append(field)
continue
# Land compressed fields don't have a lbrow and lbnpt.
field.lbrow, field.lbnpt = land_mask.lbrow, land_mask.lbnpt
data_shape = (field.lbrow, field.lbnpt)
_create_field_data(field, data_shape, land_mask)
yield field
if landmask_compressed_fields:
if land_mask is None:
warnings.warn('Landmask compressed fields existed without a '
'landmask to decompress with. The data will have '
'a shape of (0, 0) and will not read.')
mask_shape = (0, 0)
else:
mask_shape = (land_mask.lbrow, land_mask.lbnpt)
for field in landmask_compressed_fields:
field.lbrow, field.lbnpt = mask_shape
_create_field_data(field, (field.lbrow, field.lbnpt), land_mask)
yield field
def _create_field_data(field, data_shape, land_mask):
"""
Modifies a field's ``_data`` attribute either by:
* converting DeferredArrayBytes into a biggus array,
* converting LoadedArrayBytes into an actual numpy array.
"""
if isinstance(field._data, LoadedArrayBytes):
loaded_bytes = field._data
field._data = _data_bytes_to_shaped_array(loaded_bytes.bytes,
field.lbpack,
field.boundary_packing,
data_shape,
loaded_bytes.dtype,
field.bmdi, land_mask)
else:
# Wrap the reference to the data payload within a data proxy
# in order to support deferred data loading.
fname, position, n_bytes, dtype = field._data
proxy = PPDataProxy(data_shape, dtype,
fname, position, n_bytes,
field.raw_lbpack,
field.boundary_packing,
field.bmdi, land_mask)
field._data = biggus.NumpyArrayAdapter(proxy)
def _field_gen(filename, read_data_bytes, little_ended=False):
"""
Returns a generator of "half-formed" PPField instances derived from
the given filename.
A field returned by the generator is only "half-formed" because its
`_data` attribute represents a simple one-dimensional stream of
bytes. (Encoded as an instance of either LoadedArrayBytes or
DeferredArrayBytes, depending on the value of `read_data_bytes`.)
This is because fields encoded with a land/sea mask do not contain
sufficient information within the field to determine the final
two-dimensional shape of the data.
"""
dtype_endian_char = '<' if little_ended else '>'
with open(filename, 'rb') as pp_file:
# Get a reference to the seek method on the file
        # (this is accessed three times per header, so caching the bound
        # methods provides a small performance boost)
pp_file_seek = pp_file.seek
pp_file_read = pp_file.read
field_count = 0
# Keep reading until we reach the end of file
while True:
# Move past the leading header length word
pp_file_seek(PP_WORD_DEPTH, os.SEEK_CUR)
# Get the LONG header entries
dtype = '%ci%d' % (dtype_endian_char, PP_WORD_DEPTH)
header_longs = np.fromfile(pp_file, dtype=dtype,
count=NUM_LONG_HEADERS)
# Nothing returned => EOF
if len(header_longs) == 0:
break
# Get the FLOAT header entries
dtype = '%cf%d' % (dtype_endian_char, PP_WORD_DEPTH)
header_floats = np.fromfile(pp_file, dtype=dtype,
count=NUM_FLOAT_HEADERS)
header = tuple(header_longs) + tuple(header_floats)
# Make a PPField of the appropriate sub-class (depends on header
# release number)
try:
pp_field = make_pp_field(header)
except ValueError as e:
msg = 'Unable to interpret field {}. {}. Skipping ' \
'the remainder of the file.'.format(field_count,
str(e))
warnings.warn(msg)
break
# Skip the trailing 4-byte word containing the header length
pp_file_seek(PP_WORD_DEPTH, os.SEEK_CUR)
# Read the word telling me how long the data + extra data is
# This value is # of bytes
len_of_data_plus_extra = struct.unpack_from(
'%cL' % dtype_endian_char,
pp_file_read(PP_WORD_DEPTH))[0]
if len_of_data_plus_extra != pp_field.lblrec * PP_WORD_DEPTH:
wmsg = ('LBLREC has a different value to the integer recorded '
'after the header in the file ({} and {}). '
'Skipping the remainder of the file.')
warnings.warn(wmsg.format(pp_field.lblrec * PP_WORD_DEPTH,
len_of_data_plus_extra))
break
# calculate the extra length in bytes
extra_len = pp_field.lbext * PP_WORD_DEPTH
# Derive size and datatype of payload
data_len = len_of_data_plus_extra - extra_len
dtype = LBUSER_DTYPE_LOOKUP.get(pp_field.lbuser[0],
LBUSER_DTYPE_LOOKUP['default'])
if little_ended:
# Change data dtype for a little-ended file.
dtype = str(dtype)
if dtype[0] != '>':
msg = ("Unexpected dtype {!r} can't be converted to "
"little-endian")
                    raise ValueError(msg.format(dtype))
dtype = np.dtype('<' + dtype[1:])
if read_data_bytes:
# Read the actual bytes. This can then be converted to a numpy
# array at a higher level.
pp_field._data = LoadedArrayBytes(pp_file.read(data_len),
dtype)
else:
# Provide enough context to read the data bytes later on.
pp_field._data = (filename, pp_file.tell(), data_len, dtype)
# Seek over the actual data payload.
pp_file_seek(data_len, os.SEEK_CUR)
# Do we have any extra data to deal with?
if extra_len:
pp_field._read_extra_data(pp_file, pp_file_read, extra_len,
little_ended=little_ended)
# Skip that last 4 byte record telling me the length of the field I
# have already read
pp_file_seek(PP_WORD_DEPTH, os.SEEK_CUR)
field_count += 1
yield pp_field
def reset_load_rules():
"""
Resets the PP load process to use only the standard conversion rules.
.. deprecated:: 1.7
"""
warn_deprecated('reset_load_rules was deprecated in v1.7.')
def _ensure_save_rules_loaded():
"""Makes sure the standard save rules are loaded."""
# Uses these module-level variables
global _save_rules
if _save_rules is None:
# Load the pp save rules
rules_filename = os.path.join(iris.config.CONFIG_PATH,
'pp_save_rules.txt')
with iris.fileformats.rules._disable_deprecation_warnings():
_save_rules = iris.fileformats.rules.RulesContainer(
rules_filename, iris.fileformats.rules.ProcedureRule)
def add_save_rules(filename):
"""
Registers a rules file for use during the PP save process.
Registered files are processed after the standard conversion rules, and in
the order they were registered.
.. deprecated:: 1.10
If you need to customise pp field saving, please refer to the functions
:func:`as_fields`, :func:`save_pairs_from_cube` and :func:`save_fields`
for an alternative solution.
"""
warn_deprecated(
'custom pp save rules are deprecated from v1.10.\n'
'If you need to customise pp field saving, please refer to the '
'functions iris.fileformats.pp.as_fields, '
'iris.fileformats.pp.save_pairs_from_cube and '
'iris.fileformats.pp.save_fields for an alternative solution.')
_ensure_save_rules_loaded()
_save_rules.import_rules(filename)
def reset_save_rules():
"""
Resets the PP save process to use only the standard conversion rules.
.. deprecated:: 1.10
If you need to customise pp field saving, please refer to the functions
:func:`as_fields`, :func:`save_pairs_from_cube` and :func:`save_fields`
for an alternative solution.
"""
warn_deprecated(
'custom pp save rules are deprecated from v1.10.\n'
'If you need to customise pp field saving, please refer to the '
'functions iris.fileformats.pp.as_fields, '
'iris.fileformats.pp.save_pairs_from_cube and '
'iris.fileformats.pp.save_fields for an alternative solution.')
# Uses this module-level variable
global _save_rules
_save_rules = None
# Stash codes not to be filtered (reference altitude and pressure fields).
_STASH_ALLOW = [STASH(1, 0, 33), STASH(1, 0, 1)]
def _convert_constraints(constraints):
"""
Converts known constraints from Iris semantics to PP semantics
ignoring all unknown constraints.
"""
constraints = iris._constraints.list_of_constraints(constraints)
pp_constraints = {}
unhandled_constraints = False
def _make_func(stashobj):
"""
Provides unique name-space for each lambda function's stashobj
variable.
"""
return lambda stash: stash == stashobj
for con in constraints:
if isinstance(con, iris.AttributeConstraint) and \
list(con._attributes.keys()) == ['STASH']:
# Convert a STASH constraint.
# The attribute can be a STASH object, a stashcode string, or a
# callable.
stashobj = con._attributes['STASH']
if callable(stashobj):
call_func = stashobj
elif isinstance(stashobj, (six.string_types, STASH)):
call_func = _make_func(stashobj)
else:
raise TypeError("STASH constraints should be either a"
" callable, string or STASH object")
if 'stash' not in pp_constraints:
pp_constraints['stash'] = [call_func]
else:
pp_constraints['stash'].append(call_func)
else:
# only keep the pp constraints set if they are all handled as
# pp constraints
unhandled_constraints = True
def pp_filter(field):
"""
return True if field is to be kept,
False if field does not match filter
"""
res = True
if field.stash not in _STASH_ALLOW:
if pp_constraints.get('stash'):
res = False
for call_func in pp_constraints['stash']:
if call_func(str(field.stash)):
res = True
break
return res
if pp_constraints and not unhandled_constraints:
result = pp_filter
else:
result = None
return result
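# Illustrative sketch (hypothetical constraint, not part of the original
# module): a STASH AttributeConstraint becomes a per-field filter, while any
# other constraint disables PP-level filtering (the function returns None).
#
#     constraint = iris.AttributeConstraint(STASH='m01s16i203')
#     pp_filter = _convert_constraints(constraint)
#     pp_filter(field)    # -> True only for matching (or always-kept) fields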
def load_cubes(filenames, callback=None, constraints=None):
"""
Loads cubes from a list of pp filenames.
Args:
* filenames - list of pp filenames to load
Kwargs:
* constraints - a list of Iris constraints
* callback - a function which can be passed on to
:func:`iris.io.run_callback`
.. note::
The resultant cubes may not be in the order that they are in the file
(order is not preserved when there is a field with orography
references)
"""
return _load_cubes_variable_loader(filenames, callback, load,
constraints=constraints)
def load_cubes_little_endian(filenames, callback=None, constraints=None):
"""
Loads cubes from a list of pp filenames containing little-endian data.
Args:
* filenames - list of pp filenames to load
Kwargs:
* constraints - a list of Iris constraints
* callback - a function which can be passed on to
:func:`iris.io.run_callback`
.. note::
The resultant cubes may not be in the order that they are in the file
(order is not preserved when there is a field with orography
references)
"""
return _load_cubes_variable_loader(filenames, callback, load,
{'little_ended': True},
constraints=constraints)
def load_pairs_from_fields(pp_fields):
"""
Convert an iterable of PP fields into an iterable of tuples of
(Cubes, PPField).
Args:
* pp_fields:
An iterable of :class:`iris.fileformats.pp.PPField`.
Returns:
        An iterable of (:class:`iris.cube.Cube`, :class:`iris.fileformats.pp.PPField`) pairs.
This capability can be used to filter out fields before they are passed to
the load pipeline, and amend the cubes once they are created, using
PP metadata conditions. Where this filtering
    removes a significant number of fields, the resulting load speed-up can
    be significant:
>>> import iris
>>> from iris.fileformats.pp import load_pairs_from_fields
>>> filename = iris.sample_data_path('E1.2098.pp')
>>> filtered_fields = []
>>> for field in iris.fileformats.pp.load(filename):
... if field.lbproc == 128:
... filtered_fields.append(field)
>>> cube_field_pairs = load_pairs_from_fields(filtered_fields)
>>> for cube, field in cube_field_pairs:
... cube.attributes['lbproc'] = field.lbproc
... print(cube.attributes['lbproc'])
128
This capability can also be used to alter fields before they are passed to
the load pipeline. Fields with out of specification header elements can
be cleaned up this way and cubes created:
>>> filename = iris.sample_data_path('E1.2098.pp')
>>> cleaned_fields = list(iris.fileformats.pp.load(filename))
>>> for field in cleaned_fields:
... if field.lbrel == 0:
... field.lbrel = 3
>>> cubes_field_pairs = list(load_pairs_from_fields(cleaned_fields))
"""
load_pairs_from_fields = iris.fileformats.rules.load_pairs_from_fields
return load_pairs_from_fields(pp_fields, iris.fileformats.pp_rules.convert)
def _load_cubes_variable_loader(filenames, callback, loading_function,
loading_function_kwargs=None,
constraints=None):
import iris.fileformats.um._fast_load as um_fast_load
pp_filter = None
if constraints is not None:
pp_filter = _convert_constraints(constraints)
if um_fast_load.STRUCTURED_LOAD_CONTROLS.loads_use_structured:
# For structured loads, pass down the pp_filter function as an extra
# keyword to the low-level generator function.
loading_function_kwargs = loading_function_kwargs or {}
loading_function_kwargs['pp_filter'] = pp_filter
# Also do *not* use this filter in generic rules processing, as for
# structured loading, the 'field' of rules processing is no longer a
# PPField but a FieldCollation.
pp_filter = None
# Make a loader object for the generic rules code.
loader = iris.fileformats.rules.Loader(
um_fast_load._basic_load_function,
loading_function_kwargs,
um_fast_load._convert_collation)
else:
loader = iris.fileformats.rules.Loader(
loading_function, loading_function_kwargs or {},
iris.fileformats.pp_rules.convert)
result = iris.fileformats.rules.load_cubes(filenames, callback, loader,
pp_filter)
if um_fast_load.STRUCTURED_LOAD_CONTROLS.loads_use_structured:
# We need an additional concatenate-like operation to combine cubes
# from different files. Unfortunately, the 'merge' call provided in
# the iris_load_xx functions cannot do this.
result = um_fast_load._combine_structured_cubes(result)
return result
def save(cube, target, append=False, field_coords=None):
"""
Use the PP saving rules (and any user rules) to save a cube to a PP file.
Args:
* cube - A :class:`iris.cube.Cube`
* target - A filename or open file handle.
Kwargs:
* append - Whether to start a new file afresh or add the cube(s)
to the end of the file.
Only applicable when target is a filename, not a file
handle.
Default is False.
* field_coords - list of 2 coords or coord names which are to be used
for reducing the given cube into 2d slices,
which will ultimately determine the x and y
coordinates of the resulting fields.
If None, the final two dimensions are chosen
for slicing.
See also :func:`iris.io.save`.
"""
fields = as_fields(cube, field_coords, target)
save_fields(fields, target, append=append)
def as_pairs(cube, field_coords=None, target=None):
"""
.. deprecated:: 1.10
Please use :func:`iris.fileformats.pp.save_pairs_from_cube` for the same
functionality.
"""
warn_deprecated('as_pairs is deprecated in v1.10; please use'
' save_pairs_from_cube instead.')
return save_pairs_from_cube(cube, field_coords=field_coords,
target=target)
def save_pairs_from_cube(cube, field_coords=None, target=None):
"""
Use the PP saving rules (and any user rules) to convert a cube or
iterable of cubes to an iterable of (2D cube, PP field) pairs.
Args:
* cube:
A :class:`iris.cube.Cube`
Kwargs:
* field_coords:
List of 2 coords or coord names which are to be used for
reducing the given cube into 2d slices, which will ultimately
determine the x and y coordinates of the resulting fields.
If None, the final two dimensions are chosen for slicing.
* target:
A filename or open file handle.
"""
# Open issues
# Could use rules in "sections" ... e.g. to process the extensive
# dimensions; ...?
# Could pre-process the cube to add extra convenient terms?
# e.g. x-coord, y-coord ... but what about multiple coordinates on the
# dimension?
# How to perform the slicing?
# Do we always slice in the last two dimensions?
# Not all source data will contain lat-lon slices.
# What do we do about dimensions with multiple coordinates?
# Deal with:
# LBLREC - Length of data record in words (incl. extra data)
# Done on save(*)
# LBUSER[0] - Data type
# Done on save(*)
# LBUSER[1] - Start address in DATA (?! or just set to "null"?)
# BLEV - Level - the value of the coordinate for LBVC
# *) With the current on-save way of handling LBLREC and LBUSER[0] we can't
# check if they've been set correctly without *actually* saving as a binary
# PP file. That also means you can't use the same reference.txt file for
# loaded vs saved fields (unless you re-load the saved field!).
# Set to (or leave as) "null":
# LBEGIN - Address of start of field in direct access dataset
# LBEXP - Experiment identification
# LBPROJ - Fields file projection number
# LBTYP - Fields file field type code
# LBLEV - Fields file level code / hybrid height model level
# Build confidence by having a PP object that records which header items
# have been set, and only saves if they've all been set?
# Watch out for extra-data.
# On the flip side, record which Cube metadata has been "used" and flag up
# unused?
_ensure_save_rules_loaded()
n_dims = len(cube.shape)
if n_dims < 2:
raise ValueError('Unable to save a cube of fewer than 2 dimensions.')
if field_coords is not None:
# cast the given coord/coord names into cube coords
field_coords = cube._as_list_of_coords(field_coords)
if len(field_coords) != 2:
raise ValueError('Got %s coordinates in field_coords, expecting'
' exactly 2.' % len(field_coords))
else:
# default to the last two dimensions
# (if result of coords is an empty list, will raise an IndexError)
# NB watch out for the ordering of the dimensions
field_coords = (cube.coords(dimensions=n_dims-2)[0],
cube.coords(dimensions=n_dims-1)[0])
# Save each named or latlon slice2D in the cube
for slice2D in cube.slices(field_coords):
# Start with a blank PPField
pp_field = PPField3()
# Set all items to 0 because we need lbuser, lbtim
# and some others to be present before running the rules.
for name, positions in pp_field.HEADER_DEFN:
# Establish whether field name is integer or real
default = 0 if positions[0] <= NUM_LONG_HEADERS - \
UM_TO_PP_HEADER_OFFSET else 0.0
# Establish whether field position is scalar or composite
if len(positions) > 1:
default = [default] * len(positions)
setattr(pp_field, name, default)
# Some defaults should not be 0
pp_field.lbrel = 3 # Header release 3.
pp_field.lbcode = 1 # Grid code.
pp_field.bmks = 1.0 # Some scaley thing.
pp_field.lbproc = 0
# From UM doc F3: "Set to -99 if LBEGIN not known"
pp_field.lbuser[1] = -99
# Set the data
pp_field.data = slice2D.data
# Run the PP save rules on the slice2D, to fill the PPField,
# recording the rules that were used
rules_result = _save_rules.verify(slice2D, pp_field)
verify_rules_ran = rules_result.matching_rules
# Log the rules used
if target is None:
target = 'None'
elif not isinstance(target, six.string_types):
target = target.name
with iris.fileformats.rules._disable_deprecation_warnings():
iris.fileformats.rules.log('PP_SAVE', str(target),
verify_rules_ran)
yield (slice2D, pp_field)
def as_fields(cube, field_coords=None, target=None):
"""
Use the PP saving rules (and any user rules) to convert a cube to
an iterable of PP fields.
Args:
* cube:
A :class:`iris.cube.Cube`
Kwargs:
* field_coords:
List of 2 coords or coord names which are to be used for
reducing the given cube into 2d slices, which will ultimately
determine the x and y coordinates of the resulting fields.
If None, the final two dimensions are chosen for slicing.
* target:
A filename or open file handle.
"""
return (field for cube, field in save_pairs_from_cube(
cube, field_coords=field_coords, target=target))
def save_fields(fields, target, append=False):
"""
Save an iterable of PP fields to a PP file.
Args:
* fields:
An iterable of PP fields.
* target:
A filename or open file handle.
Kwargs:
* append:
Whether to start a new file afresh or add the cube(s) to the end
of the file.
Only applicable when target is a filename, not a file handle.
Default is False.
* callback:
A modifier/filter function.
See also :func:`iris.io.save`.
"""
# Open issues
# Deal with:
# LBLREC - Length of data record in words (incl. extra data)
# Done on save(*)
# LBUSER[0] - Data type
# Done on save(*)
# LBUSER[1] - Start address in DATA (?! or just set to "null"?)
# BLEV - Level - the value of the coordinate for LBVC
# *) With the current on-save way of handling LBLREC and LBUSER[0] we can't
# check if they've been set correctly without *actually* saving as a binary
# PP file. That also means you can't use the same reference.txt file for
# loaded vs saved fields (unless you re-load the saved field!).
# Set to (or leave as) "null":
# LBEGIN - Address of start of field in direct access dataset
# LBEXP - Experiment identification
# LBPROJ - Fields file projection number
# LBTYP - Fields file field type code
# LBLEV - Fields file level code / hybrid height model level
if isinstance(target, six.string_types):
pp_file = open(target, "ab" if append else "wb")
filename = target
elif hasattr(target, "write"):
if hasattr(target, "mode") and "b" not in target.mode:
raise ValueError("Target not binary")
filename = target.name if hasattr(target, 'name') else None
pp_file = target
else:
raise ValueError("Can only save pp to filename or writable")
try:
# Save each field
for pp_field in fields:
# Write to file
pp_field.save(pp_file)
finally:
if isinstance(target, six.string_types):
pp_file.close()
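# A short usage sketch (assumed cube and filename, not part of the original
# module): fields produced by as_fields can be amended before being written.
#
#     fields = list(as_fields(cube, target='out.pp'))
#     for field in fields:
#         pass                        # amend header elements here if needed
#     save_fields(fields, 'out.pp')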
| gpl-3.0 | 3,401,959,726,195,242,500 | 34.216535 | 79 | 0.55488 | false |
kgaipal/workspace | test-code/json.py | 1 | 9566 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Requirements: python packages: websocket-client
# Installation: pip install websocket-client
# https://pypi.python.org/pypi/websocket-client/
class StateTableRecord:
def __init__(self, recordId):
self.recordId = recordId
self.fields = {} # map of fieldName:fieldValue
def __str__(self):
str_field = ""
for f, v in self.fields.iteritems():
str_field += str(f) + ":" + str(v) + "\n"
return str_field
class StateTable:
def __init__(self, tableId, name, fieldsInfo):
self.tableId = tableId
self.name = name
self.fieldsInfo = fieldsInfo
self.records = {} # map of "recordId":StateTableRecord
def updateRecordField(self, recordId, field, value):
if not recordId in self.records:
self.records[recordId] = StateTableRecord(recordId)
self.records[recordId].fields[field] = value
def deleteRecord(self, recordId):
if recordId in self.records:
del self.records[recordId]
def truncate(self):
self.records = {}
def __str__(self):
header = "Table '%s'; fields: %d; records: %d\n" % (
self.name, len(self.fieldsInfo), len(self.records))
str_records = ""
for record in self.records.values():
str_records += str(record) + "\n"
return header+str_records
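# Illustrative sketch (made-up values, not part of the original script) of how
# a StateTable is populated and cleared:
#
# table = StateTable("queue", "Queue", [("id", "ID"), ("name", "Name")])
# table.updateRecordField("1", "name", "General")
# print table # header line plus one record
# table.deleteRecord("1")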
STATE_TABLES = {
"queue" : StateTable(
"queue", "Queue", [
("id", "ID"),
("name", "Name"),
("code_name", "Code Name"),
("type", "Type"),
("support_team_id", "Support Team ID"),
("created_timestamp", "Created Timestamp")
]),
"support_session" : StateTable(
"support_session", "Support Session", [
("id", "ID"),
("lsid", "LSID"),
("queue_id", "Queue ID"),
("queue_entry_timestamp", "Queue Entry Timestamp"),
("customer_name", "Customer Name"),
("customer_company", "Customer Company"),
("customer_company_code", "Customer Company Code"),
("customer_description", "Customer Description"),
("start_method", "Start Method"),
("priority", "Priority"),
("estimated_pickup_timestamp", "Estimated Pickup Timestamp"),
("created_timestamp", "Created Timestamp")
]),
"support_session_attribute" : StateTable(
"support_session_attribute", "Support Session Attribute", [
("id", "ID"),
("support_session_id", "Support Session ID"),
("code_name", "Code Name"),
("value", "Value"),
("created_timestamp", "Created Timestamp")
]),
"support_session_skill" : StateTable(
"support_session_skill", "Support Session Skill", [
("id", "ID"),
("support_session_id", "Support Session ID"),
("code_name", "Code Name"),
("value", "Value"),
("created_timestamp", "Created Timestamp")
]),
"customer_client" : StateTable(
"customer_client", "Customer Client", [
("id", "ID"),
("support_session_id", "Support Session ID"),
("support_session_id", "Operating System"),
("hostname", "Hostname"),
("client_type", "Client Type"),
("elevated", "Elevated"),
("created_timestamp", "Created Timestamp")
]),
"representative" : StateTable(
"representative", "Representative", [
("id", "ID"),
("user_id", "User ID"),
("username", "Username"),
("private_display_name", "Private Display Name"),
("public_display_name", "Public Display Name"),
("routing_available", "Routing Available"),
("routing_idle", "Routing Idle"),
("routing_busy", "Routing Busy"),
("routing_enabled", "Routing Enabled"),
("skill_code_names", "Skill Code Names"),
("queue_id", "Queue ID"),
("created_timestamp", "Created Timestamp")
]),
"representative_queue" : StateTable(
"representative_queue", "Representative Queue", [
("id", "ID"),
("user_id", "User ID"),
("username", "Username"),
("private_display_name", "Private Display Name"),
("public_display_name", "Public Display Name"),
("routing_available", "Routing Available"),
("routing_idle", "Routing Idle"),
("routing_busy", "Routing Busy"),
("routing_enabled", "Routing Enabled"),
("skill_code_names", "Skill Code Names"),
("queue_id", "Queue ID"),
("created_timestamp", "Created Timestamp")
]),
"representative_support_session" : StateTable(
"representative_support_session", "Representative Support Session", [
("id", "ID"),
("support_session_id", "Support Session ID"),
("representative_id", "Representative ID"),
("created_timestamp", "Created Timestamp")
])
}
import json
import websocket
class State:
DISCONNECTED = -1
APP_CHOSER = 0
AUTHENTICATE = 1
SUBSCRIBE = 2
UPDATES = 3
class Client:
def __init__(self, site, company, enable_trace=True):
self.state = State.DISCONNECTED
self.site = site
self.company = company
self.enable_trace = enable_trace
def on_open(self, ws):
self.state = State.APP_CHOSER
ws.send("NS01" + self.company + "\ningredi state api\n")
def on_close(self, ws):
self.disconnect()
raise Exception("connection closed")
def on_error(self, ws, error):
raise Exception(str(error))
def on_message(self, ws, message):
message = message.decode('utf-8', 'ignore')
if self.state == State.APP_CHOSER:
if message != "0 Application chosen\n":
raise Exception("Application choser failed")
auth = {
'type': "authenticate",
'credentials': {
'username': "kgaipal",
'password': "password"
}
}
ws.send(json.dumps(auth) + "\n")
self.state = State.AUTHENTICATE
elif self.state == State.AUTHENTICATE:
resp = json.loads(message)
if resp["success"] != True:
raise Exception("Authentication failed")
subscription = {
'type': "subscribe",
'tables': "all"
}
ws.send(json.dumps(subscription) + "\n")
self.state = State.SUBSCRIBE
elif self.state == State.SUBSCRIBE:
resp = json.loads(message)
if resp["success"] != True:
raise Exception("Subscription failed")
self.state = State.UPDATES
elif self.state == State.UPDATES:
model_update = json.loads(message)
updated = False
if model_update["type"] == "update_model":
if model_update.has_key("insert"):
self.parse_inserts(model_update["insert"])
updated = True
if model_update.has_key("update"):
self.parse_updates(model_update["update"])
updated = True
if model_update.has_key("delete"):
self.parse_deletes(model_update["delete"])
updated = True
elif model_update["type"] == "truncate_model":
for table in STATE_TABLES.values():
table.truncate()
updated = True
if updated:
print "\n"
self.printAllTables()
else:
raise Exception("Unkown state: " + str(self.state))
def printAllTables(self):
for table in STATE_TABLES.values():
if table is not None:
print str(table) + "\n**"
else:
print "<empty>\n**"
def parse_inserts(self, msg):
self.parse_updates(msg) # same structure
def parse_updates(self, msg):
for table in msg:
for recId, record in msg[table].items():
for field, value in record.items():
STATE_TABLES[table].updateRecordField(recId, field, value)
def parse_deletes(self, msg):
for table in msg:
for recId in msg[table]:
print "KGAIPAL: " + str(recId)
STATE_TABLES[table].deleteRecord(recId)
def connect(self):
if self.state != State.DISCONNECTED:
self.disconnect()
# start new connection
websocket.enableTrace(self.enable_trace)
ws = websocket.WebSocketApp(
"wss://" + self.site + "/nw",
on_message = self.on_message,
on_error = self.on_error,
on_close = self.on_close,
on_open = self.on_open)
ws.run_forever()
def disconnect(self):
if self.state == State.DISCONNECTED:
raise Exception("already disconnected")
self.state = State.DISCONNECTED
print "disconnected"
if __name__ == "__main__":
try:
c = Client("kgaipalrtd.dev.bomgar.com", "kgaipalrtd", False)
c.connect()
except Exception, e:
print str(e)
c.disconnect()
| gpl-3.0 | -3,622,807,888,267,472,400 | 32.215278 | 78 | 0.523625 | false |
MrYsLab/python_banyan | projects/OneGPIO/arduino_uno/arduino_gateway.py | 1 | 22028 | """
Copyright (c) 2018-2019 Alan Yorinks All rights reserved.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
Version 3 as published by the Free Software Foundation, or
(at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU AFFERO GENERAL PUBLIC LICENSE
along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import argparse
import asyncio
import logging
import pathlib
import signal
import sys
from pymata_express.private_constants import PrivateConstants
from pymata_express.pymata_express import PymataExpress
from python_banyan.gateway_base_aio import GatewayBaseAIO
# noinspection PyAbstractClass,PyMethodMayBeStatic,PyRedundantParentheses,DuplicatedCode
class ArduinoGateway(GatewayBaseAIO):
# This class implements the GatewayBase interface adapted for asyncio.
# It supports Arduino boards, tested with Uno.
# NOTE: This class requires the use of Python 3.7 or above
# serial_port = None
def __init__(self, *subscriber_list, back_plane_ip_address=None,
subscriber_port='43125',
publisher_port='43124', process_name='ArduinoGateway',
event_loop=None, keep_alive=False, com_port=None,
arduino_instance_id=None, log=False):
"""
Set up the gateway for operation
:param subscriber_list: a tuple or list of subscription topics.
:param back_plane_ip_address: ip address of backplane or none if local
:param subscriber_port: backplane subscriber port
:param publisher_port: backplane publisher port
:param process_name: name to display on the console
:param event_loop: optional parameter to pass in an asyncio
event loop
:param keep_alive: if True, enable FirmataExpress keep-alives
:param com_port: force pymata-express to use this comport
        :param arduino_instance_id: set an arduino instance id that must
                                    be programmed into the FirmataExpress
                                    sketch.
:param log: enable logging
"""
# set up logging if requested
self.log = log
if self.log:
fn = str(pathlib.Path.home()) + "/ardgw.log"
self.logger = logging.getLogger(__name__)
logging.basicConfig(filename=fn, filemode='w', level=logging.DEBUG)
sys.excepthook = self.my_handler
# set the event loop to be used. accept user's if provided
self.event_loop = event_loop
# instantiate pymata express to control the arduino
# if user want to pass in a com port, then pass it in
try:
if com_port:
self.arduino = PymataExpress(loop=self.event_loop,
com_port=com_port)
# if user wants to set an instance id, then pass it in
elif arduino_instance_id:
self.arduino = PymataExpress(loop=self.event_loop,
arduino_instance_id=arduino_instance_id)
# default settings
else:
self.arduino = PymataExpress(loop=self.event_loop)
except RuntimeError:
if self.log:
logging.exception("Exception occurred", exc_info=True)
raise
# extract pin info from self.arduino
self.number_of_digital_pins = len(self.arduino.digital_pins)
self.number_of_analog_pins = len(self.arduino.analog_pins)
self.first_analog_pin = self.arduino.first_analog_pin
# Initialize the parent
super(ArduinoGateway, self).__init__(subscriber_list=subscriber_list,
event_loop=self.event_loop,
back_plane_ip_address=back_plane_ip_address,
subscriber_port=subscriber_port,
publisher_port=publisher_port,
process_name=process_name,
)
self.first_analog_pin = self.arduino.first_analog_pin
self.keep_alive = keep_alive
def init_pins_dictionary(self):
"""
This method will initialize the pins dictionary contained
in gateway base parent class. This method is called by
the gateway base parent in its init method.
        NOTE: this is a non-asyncio method.
"""
report = self.event_loop.run_until_complete(self.arduino.get_capability_report())
x = 0
pin = 0
while x < len(report):
while report[x] != 127:
mode = report[x]
if mode == PrivateConstants.INPUT:
self.pins_dictionary[pin] = \
[GatewayBaseAIO.DIGITAL_INPUT_MODE, 0, False]
elif mode == PrivateConstants.ANALOG:
self.pins_dictionary[pin + self.first_analog_pin] = \
[GatewayBaseAIO.ANALOG_INPUT_MODE, 0, False]
x += 1
x += 1
pin += 1
# set up entry for i2c as pin 200 ( a pseudo pin number)
self.pins_dictionary[200] = GatewayBaseAIO.DIGITAL_INPUT_MODE
async def main(self):
# call the inherited begin method located in banyan_base_aio
await self.begin()
# start the keep alive on the Arduino if enabled
if self.keep_alive:
await self.arduino.keep_alive()
# sit in an endless loop to receive protocol messages
while True:
await self.receive_loop()
    # The following methods are called
    # by the gateway base class in its incoming_message_processing
    # method. They override the default methods in the gateway_base.
async def digital_write(self, topic, payload):
"""
This method performs a digital write
:param topic: message topic
:param payload: {"command": "digital_write", "pin": “PIN”, "value": “VALUE”}
"""
await self.arduino.digital_write(payload["pin"], payload['value'])
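    # Illustrative sketch (assumed Banyan client, not part of the original
    # gateway): a component publishing to the default 'to_arduino_gateway'
    # topic drives the handlers in this section, e.g. to set pin 13 high:
    #
    #     payload = {'command': 'digital_write', 'pin': 13, 'value': 1}
    #     await client.publish_payload(payload, 'to_arduino_gateway')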
async def disable_analog_reporting(self, topic, payload):
"""
This method disables analog input reporting for the selected pin.
:param topic: message topic
:param payload: {"command": "disable_analog_reporting", "pin": “PIN”, "tag": "TAG"}
"""
await self.arduino.disable_analog_reporting(payload["pin"])
async def disable_digital_reporting(self, topic, payload):
"""
This method disables digital input reporting for the selected pin.
:param topic: message topic
:param payload: {"command": "disable_digital_reporting", "pin": “PIN”, "tag": "TAG"}
"""
await self.arduino.disable_digital_reporting(payload["pin"])
async def enable_analog_reporting(self, topic, payload):
"""
This method enables analog input reporting for the selected pin.
:param topic: message topic
:param payload: {"command": "enable_analog_reporting", "pin": “PIN”, "tag": "TAG"}
"""
await self.arduino.enable_analog_reporting(payload["pin"])
async def enable_digital_reporting(self, topic, payload):
"""
This method enables digital input reporting for the selected pin.
:param topic: message topic
:param payload: {"command": "enable_digital_reporting", "pin": “PIN”, "tag": "TAG"}
"""
await self.arduino.enable_digital_reporting(payload["pin"])
async def i2c_read(self, topic, payload):
"""
This method will perform an i2c read by specifying the i2c
device address, i2c device register and the number of bytes
to read.
Call set_mode_i2c first to establish the pins for i2c operation.
:param topic: message topic
:param payload: {"command": "i2c_read", "pin": “PIN”, "tag": "TAG",
"addr": “I2C ADDRESS, "register": “I2C REGISTER”,
"number_of_bytes": “NUMBER OF BYTES”}
:return via the i2c_callback method
"""
await self.arduino.i2c_read(payload['addr'],
payload['register'],
payload['number_of_bytes'], callback=self.i2c_callback)
async def i2c_write(self, topic, payload):
"""
This method will perform an i2c write for the i2c device with
the specified i2c device address, i2c register and a list of byte
to write.
Call set_mode_i2c first to establish the pins for i2c operation.
:param topic: message topic
:param payload: {"command": "i2c_write", "pin": “PIN”, "tag": "TAG",
"addr": “I2C ADDRESS, "register": “I2C REGISTER”,
"data": [“DATA IN LIST FORM”]}
"""
await self.arduino.i2c_write(payload['addr'], payload['data'])
async def play_tone(self, topic, payload):
"""
This method plays a tone on a piezo device connected to the selected
pin at the frequency and duration requested.
Frequency is in hz and duration in milliseconds.
Call set_mode_tone before using this method.
:param topic: message topic
:param payload: {"command": "play_tone", "pin": “PIN”, "tag": "TAG",
“freq”: ”FREQUENCY”, duration: “DURATION”}
"""
await self.arduino.play_tone(payload['pin'],
payload['freq'],
payload['duration'])
async def pwm_write(self, topic, payload):
"""
This method sets the pwm value for the selected pin.
Call set_mode_pwm before calling this method.
:param topic: message topic
:param payload: {“command”: “pwm_write”, "pin": “PIN”,
"tag":”TAG”,
“value”: “VALUE”}
"""
await self.arduino.analog_write(payload["pin"], payload['value'])
async def servo_position(self, topic, payload):
"""
This method will set a servo's position in degrees.
Call set_mode_servo first to activate the pin for
servo operation.
:param topic: message topic
:param payload: {'command': 'servo_position',
"pin": “PIN”,'tag': 'servo',
“position”: “POSITION”}
"""
await self.arduino.servo_write(payload["pin"], payload["position"])
async def set_mode_analog_input(self, topic, payload):
"""
This method sets a GPIO pin as analog input.
:param topic: message topic
:param payload: {"command": "set_mode_analog_input", "pin": “PIN”, "tag":”TAG” }
"""
pin = payload["pin"]
self.pins_dictionary[pin + self.first_analog_pin][GatewayBaseAIO.PIN_MODE] = \
GatewayBaseAIO.ANALOG_INPUT_MODE
await self.arduino.set_pin_mode_analog_input(pin, self.analog_input_callback)
async def set_mode_digital_input(self, topic, payload):
"""
This method sets a pin as digital input.
:param topic: message topic
:param payload: {"command": "set_mode_digital_input", "pin": “PIN”, "tag":”TAG” }
"""
pin = payload["pin"]
self.pins_dictionary[pin][GatewayBaseAIO.PIN_MODE] = GatewayBaseAIO.DIGITAL_INPUT_MODE
await self.arduino.set_pin_mode_digital_input(pin, self.digital_input_callback)
async def set_mode_digital_input_pullup(self, topic, payload):
"""
This method sets a pin as digital input with pull up enabled.
:param topic: message topic
:param payload: message payload
"""
pin = payload["pin"]
self.pins_dictionary[pin][GatewayBaseAIO.PIN_MODE] = GatewayBaseAIO.DIGITAL_INPUT_PULLUP_MODE
await self.arduino.set_pin_mode_digital_input_pullup(pin, self.digital_input_callback)
async def set_mode_digital_output(self, topic, payload):
"""
This method sets a pin as a digital output pin.
:param topic: message topic
:param payload: {"command": "set_mode_digital_output", "pin": PIN, "tag":”TAG” }
"""
pin = payload["pin"]
self.pins_dictionary[pin][GatewayBaseAIO.PIN_MODE] = GatewayBaseAIO.DIGITAL_OUTPUT_MODE
await self.arduino.set_pin_mode_digital_output(pin)
async def set_mode_i2c(self, topic, payload):
"""
This method sets up the i2c pins for i2c operations.
:param topic: message topic
:param payload: {"command": "set_mode_i2c"}
"""
self.pins_dictionary[200][GatewayBaseAIO.PIN_MODE] = GatewayBaseAIO.I2C_MODE
await self.arduino.set_pin_mode_i2c()
async def set_mode_pwm(self, topic, payload):
"""
This method sets a GPIO pin capable of PWM for PWM operation.
:param topic: message topic
:param payload: {"command": "set_mode_pwm", "pin": “PIN”, "tag":”TAG” }
"""
pin = payload["pin"]
self.pins_dictionary[pin][GatewayBaseAIO.PIN_MODE] = GatewayBaseAIO.PWM_OUTPUT_MODE
await self.arduino.set_pin_mode_pwm(pin)
async def set_mode_servo(self, topic, payload):
"""
This method establishes a GPIO pin for servo operation.
:param topic: message topic
:param payload: {"command": "set_mode_servo", "pin": “PIN”, "tag":”TAG” }
"""
pin = payload["pin"]
self.pins_dictionary[pin][GatewayBaseAIO.PIN_MODE] = GatewayBaseAIO.SERVO_MODE
await self.arduino.set_pin_mode_servo(pin)
async def set_mode_sonar(self, topic, payload):
"""
This method sets the trigger and echo pins for sonar operation.
:param topic: message topic
:param payload: {"command": "set_mode_sonar", "trigger_pin": “PIN”, "tag":”TAG”
"echo_pin": “PIN”"tag":”TAG” }
"""
trigger = payload["trigger_pin"]
echo = payload["echo_pin"]
self.pins_dictionary[trigger][GatewayBaseAIO.PIN_MODE] = GatewayBaseAIO.SONAR_MODE
self.pins_dictionary[echo][GatewayBaseAIO.PIN_MODE] = GatewayBaseAIO.SONAR_MODE
await self.arduino.set_pin_mode_sonar(trigger, echo, callback=self.sonar_callback)
async def set_mode_stepper(self, topic, payload):
"""
This method establishes either 2 or 4 GPIO pins to be used in stepper
motor operation.
:param topic:
:param payload:{"command": "set_mode_stepper", "pins": [“PINS”],
"steps_per_revolution": “NUMBER OF STEPS”}
"""
for pin in payload['pins']:
self.pins_dictionary[pin][GatewayBaseAIO.PIN_MODE] = GatewayBaseAIO.STEPPER_MODE
await self.arduino.set_pin_mode_stepper(payload['steps_per_revolution'],
payload['pins'])
async def set_mode_tone(self, topic, payload):
"""
Establish a GPIO pin for tone operation.
:param topic:
:param payload:{"command": "set_mode_tone", "pin": “PIN”, "tag":”TAG” }
"""
pin = payload["pin"]
self.pins_dictionary[pin][GatewayBaseAIO.PIN_MODE] = GatewayBaseAIO.TONE_MODE
await self.arduino.set_pin_mode_tone(pin)
async def stepper_write(self, topic, payload):
"""
Move a stepper motor for the specified number of steps.
:param topic:
:param payload: {"command": "stepper_write", "motor_speed": “SPEED”,
"number_of_steps":”NUMBER OF STEPS” }
"""
await self.arduino.stepper_write(payload['motor_speed'],
payload['number_of_steps'])
# Callbacks
async def digital_input_callback(self, data):
"""
Digital input data change reported by Arduino
:param data:
:return:
"""
# data = [pin mode, pin, current reported value, timestamp]
self.pins_dictionary[data[1]][GatewayBaseAIO.LAST_VALUE] = data[2]
payload = {'report': 'digital_input', 'pin': data[1],
'value': data[2], 'timestamp': data[3]}
await self.publish_payload(payload, 'from_arduino_gateway')
async def analog_input_callback(self, data):
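        """
        Analog input data change reported by Arduino
        :param data:
        :return:
        """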
# data = [pin mode, pin, current reported value, timestamp]
self.pins_dictionary[data[1] + self.arduino.first_analog_pin][GatewayBaseAIO.LAST_VALUE] = data[2]
payload = {'report': 'analog_input', 'pin': data[1],
'value': data[2], 'timestamp': data[3]}
await self.publish_payload(payload, 'from_arduino_gateway')
async def i2c_callback(self, data):
"""
        i2c data reported by the Arduino
:param data:
:return:
"""
        # create a string representation of the data returned
self.pins_dictionary[200] = data[1]
report = ', '.join([str(elem) for elem in data])
payload = {'report': 'i2c_data', 'value': report}
await self.publish_payload(payload, 'from_arduino_gateway')
async def sonar_callback(self, data):
"""
Sonar data change reported by Arduino
:param data:
:return:
"""
self.pins_dictionary[data[1]][GatewayBaseAIO.LAST_VALUE] = data[2]
payload = {'report': 'sonar_data', 'value': data[2]}
await self.publish_payload(payload, 'from_arduino_gateway')
def my_handler(self, tp, value, tb):
"""
for logging uncaught exceptions
:param tp:
:param value:
:param tb:
:return:
"""
self.logger.exception("Uncaught exception: {0}".format(str(value)))
# noinspection DuplicatedCode
def arduino_gateway():
# allow user to bypass the IP address auto-discovery. This is necessary if the component resides on a computer
    # other than the computer running the backplane.
parser = argparse.ArgumentParser()
parser.add_argument("-b", dest="back_plane_ip_address", default="None",
help="None or IP address used by Back Plane")
parser.add_argument("-c", dest="com_port", default="None",
help="Use this COM port instead of auto discovery")
parser.add_argument("-k", dest="keep_alive", default="True",
help="Enable firmata-express keep-alive - set to True or False - default=False")
parser.add_argument("-i", dest="arduino_instance_id", default="None",
help="Set an Arduino Instance ID and match it in FirmataExpress")
parser.add_argument("-l", dest="log", default="False",
help="Set to True to turn logging on.")
parser.add_argument("-m", dest="subscriber_list",
default="to_arduino_gateway", nargs='+',
help="Banyan topics space delimited: topic1 topic2 topic3")
parser.add_argument("-n", dest="process_name",
default="ArduinoGateway", help="Set process name in "
"banner")
parser.add_argument("-p", dest="publisher_port", default='43124',
help="Publisher IP port")
parser.add_argument("-r", dest="publisher_topic",
default="from_rpi_gpio", help="Report topic")
parser.add_argument("-s", dest="subscriber_port", default='43125',
help="Subscriber IP port")
args = parser.parse_args()
subscriber_list = args.subscriber_list
kw_options = {
'publisher_port': args.publisher_port,
'subscriber_port': args.subscriber_port,
'process_name': args.process_name,
}
keep_alive = args.keep_alive.lower()
if keep_alive == 'false':
keep_alive = False
else:
keep_alive = True
kw_options['keep_alive'] = keep_alive
log = args.log.lower()
if log == 'false':
log = False
else:
log = True
kw_options['log'] = log
if args.back_plane_ip_address != 'None':
kw_options['back_plane_ip_address'] = args.back_plane_ip_address
if args.com_port != 'None':
kw_options['com_port'] = args.com_port
if args.arduino_instance_id != 'None':
kw_options['arduino_instance_id'] = int(args.arduino_instance_id)
# get the event loop
# this is for python 3.8
if sys.platform == 'win32':
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
loop = asyncio.get_event_loop()
# replace with the name of your class
app = ArduinoGateway(subscriber_list, **kw_options, event_loop=loop)
try:
loop.run_until_complete(app.main())
except (KeyboardInterrupt, asyncio.CancelledError, RuntimeError):
if app.log:
logging.exception("Exception occurred", exc_info=True)
loop.stop()
loop.close()
sys.exit(0)
# signal handler function called when Control-C occurs
# noinspection PyShadowingNames,PyUnusedLocal
def signal_handler(sig, frame):
print('Exiting Through Signal Handler')
raise KeyboardInterrupt
# listen for SIGINT
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
if __name__ == '__main__':
arduino_gateway()
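# Usage sketch (added note, not from the original file): typical invocations of
# this gateway from the command line. The COM port value below is hypothetical.
#
#     python arduino_gateway.py                     # auto-discover the Arduino
#     python arduino_gateway.py -c COM3 -l True     # fixed COM port, logging on
#     python arduino_gateway.py -b 192.168.1.10     # backplane on another host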
| agpl-3.0 | 5,566,145,373,664,180,000 | 39.913858 | 114 | 0.597171 | false |
dwbro1/SS-Reporter | rpiGPIOActuator.py | 1 | 1544 |
"""
Purpose: Changes the state of the configured pin on command
"""
import sys
import time
import RPi.GPIO as GPIO
class rpiGPIOActuator:
"""Represents an actuator connected to a GPIO pin"""
def __init__(self, connection, logger, params):
"""Sets the output and changes its state when it receives a command"""
self.logger = logger
self.pin = int(params("Pin"))
GPIO.setmode(GPIO.BCM) # uses BCM numbering, not Board numbering
GPIO.setup(self.pin, GPIO.OUT)
GPIO.output(self.pin, GPIO.HIGH)
self.destination = params("Topic")
self.connection = connection
        self.toggle = params("Toggle") == "True"  # params() returns strings; bool() of a non-empty string is always True
self.logger.info('----------Configuring rpiGPIOActuator: pin {0} on destination {1} with toggle {2}'.format(self.pin, self.destination, self.toggle))
self.connection.register(self.destination, self.on_message)
def on_message(self, client, userdata, msg):
"""Process a message"""
self.logger.info('Received command on {0}: {1} Toggle = {2} PIN = {3}'.format(self.destination, msg.payload, self.toggle, self.pin))
        if self.toggle:
self.logger.info('Toggling pin %s HIGH to LOW' % (self.pin))
GPIO.output(self.pin, GPIO.LOW)
time.sleep(.5)
GPIO.output(self.pin, GPIO.HIGH)
self.logger.info('Toggling pin %s LOW to HIGH' % (self.pin))
else:
out = GPIO.LOW if msg.payload == "ON" else GPIO.HIGH
GPIO.output(self.pin, out)
| apache-2.0 | -3,478,561,868,706,700,300 | 35.761905 | 157 | 0.620466 | false |
chrismcginlay/crazy-koala | word_search.py | 1 | 2311 |
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 28 09:55:21 2016
@author: chrismcginlay
"""
grid = [
list("SYNTAXQWERT"),
list("GHFPOSTKDSK"),
list("LKJHCVNBVYR"),
list("CCCBIWUISKT"),
list("LKTSOPSHDER"),
list("XZPOSTSEIGU"),
]
for row in grid:
row.insert(0,"*")
row.append("*")
width = len(grid[0])+2
grid.insert(0,list("*"*width))
grid.append(list("*"*width))
target = "POST"
letter1 = target[0]
letter2 = target[1]
row_index = 0
pass1_loci = list()
pass2_loci = list()
#get all occurrences of letter1, place in list of (col, row) tuples
for row in grid:
row_loci = [i for i,x in enumerate(row) if x==letter1]
for locus in row_loci:
pass1_loci.append((locus, row_index))
row_index+=1
#pass2_loci - search box around letter1, construct list of tuples
for locus1 in pass1_loci:
pass2_loci = list()
L_one_c = locus1[0]
L_one_r = locus1[1]
#in the following note grid[r][c] -vs- pass2_loci((c,r)) transposed rc
if grid[L_one_r-1][L_one_c-1]==letter2:
pass2_loci.append((L_one_c-1, L_one_r-1))
if grid[L_one_r][L_one_c-1]==letter2:
pass2_loci.append((L_one_c-1,L_one_r))
if grid[L_one_r+1][L_one_c-1]==letter2:
pass2_loci.append((L_one_c-1,L_one_r+1))
if grid[L_one_r+1][L_one_c]==letter2:
pass2_loci.append((L_one_c,L_one_r+1))
if grid[L_one_r+1][L_one_c+1]==letter2:
pass2_loci.append((L_one_c+1,L_one_r+1))
if grid[L_one_r][L_one_c+1]==letter2:
pass2_loci.append((L_one_c+1,L_one_r))
if grid[L_one_r-1][L_one_c+1]==letter2:
pass2_loci.append((L_one_c+1,L_one_r-1))
if grid[L_one_r-1][L_one_c]==letter2:
pass2_loci.append((L_one_c,L_one_r-1))
for locus2 in pass2_loci:
#vector index order r,c to match grid
vector = (locus2[1]-L_one_r, locus2[0]-L_one_c)
#use vector to search for rest of target
target_found = False
#start from locus of second letter
r = locus2[1]
c = locus2[0]
for ch in target[2:]:
r+=vector[0]
c+=vector[1]
if grid[r][c]==ch:
target_found = True
else:
target_found = False
break
if target_found:
print("Found the target")
| gpl-3.0 | -3,462,465,035,189,730,000 | 29.012987 | 74 | 0.566421 | false |
ferdkuh/rlkart | src/main.py | 1 | 1996 |
# create timestep counter N and network
# create environments and learner agents
# main loop:
# for t in range(0, max_episode_size):
# get a_t[], v_t[] from network for the state of each agent:
# convert a_t to a single action index
#
# parallel for i in range(0, num_agents)
# new_state, reward = perform a_t[i] in environment[i]
#
# estimate R_tmax+1 for each agent
# compute R_t for each agent
#
# train network
# needed
# network # the neural network ops
# states # array of states [N,84,84,4], shared memory
# shared_action_indices # array of int shape = [N]
# num_agents = 16
# max_episode_size = 30
# agent_manager = 0
# for t in range(0, max_episode_size):
# ops = [network.policy_out, network.value_out]
# feed_dict = { network.states: states }
# # policy_out has shape [num_agents, num_actions]
# # value out has shape [num_agents]
# policy_out, value_out = session.run(ops, feed_dict)
# # get one action index for each agent, write them to the shared memory
# shared_action_indices = sample_action_from_policy(policy_out)
# # run each environment for one timestep
# # blocks current until update is done
# agent_manager.update_agents()
# # copy results from shared array to episode buffer
import multiprocessing as mp
import numpy as np
import ctypes as C
import mariokart
import logging
logging.basicConfig(level=logging.DEBUG, format='(%(threadName)s) %(message)s',)
def sample_action_from_policy(probabilities):
# Subtract a tiny value from probabilities in order to avoid
# "ValueError: sum(pvals[:-1]) > 1.0" in numpy.multinomial
# as seen in: https://github.com/Alfredvc/paac/blob/master/paac.py
probabilities -= np.finfo(np.float32).epsneg
action_indices = [int(np.nonzero(np.random.multinomial(1, p))[0]) for p in probabilities]
return action_indices
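# Added sketch (not part of the original file): building a uniform policy and
# sampling one action per agent, to illustrate the expected shapes.
def demo_sample_uniform_policy(num_agents=16, num_actions=8):
    # Each row is a probability distribution over the actions of one agent.
    probs = np.full((num_agents, num_actions), 1.0 / num_actions,
                    dtype=np.float32)
    return sample_action_from_policy(probs)  # list of `num_agents` ints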
# where can this function live?
#return np.frombuffer(shared, dtype).reshape(shape)
NUM_ACTIONS = 8
ROM_PATH = r"../res/Mario Kart 64 (U) [!].z64"
| gpl-3.0 | -3,481,168,514,002,871,000 | 25.972973 | 90 | 0.700401 | false |
jriehl/numba | numba/targets/hashing.py | 1 | 23337 |
"""
Hash implementations for Numba types
"""
from __future__ import print_function, absolute_import, division
import math
import numpy as np
import sys
import ctypes
from collections import namedtuple
import llvmlite.binding as ll
import llvmlite.llvmpy.core as lc
from llvmlite import ir
from numba.extending import (
overload, overload_method, intrinsic, register_jitable)
from numba import types, errors
from numba.unsafe.bytes import grab_byte, grab_uint64_t
_py34_or_later = sys.version_info[:2] >= (3, 4)
if _py34_or_later:
# This is Py_hash_t, which is a Py_ssize_t, which has sizeof(size_t):
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Include/pyport.h#L91-L96
_hash_width = sys.hash_info.width
_Py_hash_t = getattr(types, 'int%s' % _hash_width)
_Py_uhash_t = getattr(types, 'uint%s' % _hash_width)
# Constants from CPython source, obtained by various means:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Include/pyhash.h
_PyHASH_INF = sys.hash_info.inf
_PyHASH_NAN = sys.hash_info.nan
_PyHASH_MODULUS = _Py_uhash_t(sys.hash_info.modulus)
_PyHASH_BITS = 31 if types.intp.bitwidth == 32 else 61 # mersenne primes
_PyHASH_MULTIPLIER = 0xf4243 # 1000003UL
_PyHASH_IMAG = _PyHASH_MULTIPLIER
_PyLong_SHIFT = sys.int_info.bits_per_digit
_Py_HASH_CUTOFF = sys.hash_info.cutoff
_Py_hashfunc_name = sys.hash_info.algorithm
else:
_hash_width = types.intp.bitwidth
_Py_hash_t = getattr(types, 'int%s' % _hash_width)
_Py_uhash_t = getattr(types, 'uint%s' % _hash_width)
# these are largely just copied in from python 3 as reasonable defaults
_PyHASH_INF = 314159
_PyHASH_NAN = 0
_PyHASH_BITS = 31 if types.intp.bitwidth == 32 else 61 # mersenne primes
_PyHASH_MODULUS = _Py_uhash_t((1 << _PyHASH_BITS) - 1)
_PyHASH_MULTIPLIER = 0xf4243 # 1000003UL
_PyHASH_IMAG = _PyHASH_MULTIPLIER
_PyLong_SHIFT = 30 if types.intp.bitwidth == 64 else 15
_Py_HASH_CUTOFF = 0
# set this as siphash24 for py27... TODO: implement py27 string first!
_Py_hashfunc_name = "siphash24"
# hash(obj) is implemented by calling obj.__hash__()
@overload(hash)
def hash_overload(obj):
def impl(obj):
return obj.__hash__()
return impl
@register_jitable
def process_return(val):
asint = _Py_hash_t(val)
if (asint == int(-1)):
asint = int(-2)
return asint
# This is a translation of CPython's _Py_HashDouble:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Python/pyhash.c#L34-L129
@register_jitable(locals={'x': _Py_uhash_t,
'y': _Py_uhash_t,
'm': types.double,
'e': types.intc,
'sign': types.intc,
'_PyHASH_MODULUS': _Py_uhash_t,
'_PyHASH_BITS': types.intc})
def _Py_HashDouble(v):
if not np.isfinite(v):
if (np.isinf(v)):
if (v > 0):
return _PyHASH_INF
else:
return -_PyHASH_INF
else:
return _PyHASH_NAN
m, e = math.frexp(v)
sign = 1
if (m < 0):
sign = -1
m = -m
# process 28 bits at a time; this should work well both for binary
# and hexadecimal floating point.
x = 0
while (m):
x = ((x << 28) & _PyHASH_MODULUS) | x >> (_PyHASH_BITS - 28)
m *= 268435456.0 # /* 2**28 */
e -= 28
y = int(m) # /* pull out integer part */
m -= y
x += y
if x >= _PyHASH_MODULUS:
x -= _PyHASH_MODULUS
# /* adjust for the exponent; first reduce it modulo _PyHASH_BITS */
if e >= 0:
e = e % _PyHASH_BITS
else:
e = _PyHASH_BITS - 1 - ((-1 - e) % _PyHASH_BITS)
x = ((x << e) & _PyHASH_MODULUS) | x >> (_PyHASH_BITS - e)
x = x * sign
return process_return(x)
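# Worked example (added note): on a 64-bit build, 2.5 == 0.625 * 2**2, so the
# loop above pulls out a single 28-bit chunk and the exponent adjustment
# rotates it within the 61-bit modulus, reproducing CPython's float hash:
#
#     hash(2.5) == 1152921504606846978
#
# Non-finite inputs short-circuit to the _PyHASH_INF / _PyHASH_NAN constants.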
@intrinsic
def _fpext(tyctx, val):
def impl(cgctx, builder, signature, args):
val = args[0]
return builder.fpext(val, lc.Type.double())
sig = types.float64(types.float32)
return sig, impl
# This is a translation of CPython's long_hash, but restricted to the numerical
# domain reachable by int64/uint64 (i.e. no BigInt like support):
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Objects/longobject.c#L2934-L2989
# obdigit is a uint32_t which is typedef'd to digit
# int32_t is typedef'd to sdigit
@register_jitable(locals={'x': _Py_uhash_t,
'p1': _Py_uhash_t,
'p2': _Py_uhash_t,
'p3': _Py_uhash_t,
'p4': _Py_uhash_t,
'_PyHASH_MODULUS': _Py_uhash_t,
'_PyHASH_BITS': types.int32,
'_PyLong_SHIFT': types.int32,
'x.1': _Py_uhash_t})
def _long_impl(val):
    # This function assumes val came from a long int repr with val being a
    # uint64_t; this means having to split the input into PyLong_SHIFT sized
    # chunks in an unsigned hash-wide type; the max Numba can handle is a
    # 64 bit int
# mask to select low _PyLong_SHIFT bits
_tmp_shift = 32 - _PyLong_SHIFT
mask_shift = (~types.uint32(0x0)) >> _tmp_shift
# a 64bit wide max means Numba only needs 3 x 30 bit values max,
# or 5 x 15 bit values max on 32bit platforms
i = (64 // _PyLong_SHIFT) + 1
# alg as per hash_long
x = 0
p3 = (_PyHASH_BITS - _PyLong_SHIFT)
for idx in range(i - 1, -1, -1):
p1 = x << _PyLong_SHIFT
p2 = p1 & _PyHASH_MODULUS
p4 = x >> p3
x = p2 | p4
# the shift and mask splits out the `ob_digit` parts of a Long repr
x += types.uint32((val >> idx * _PyLong_SHIFT) & mask_shift)
if x >= _PyHASH_MODULUS:
x -= _PyHASH_MODULUS
return _Py_hash_t(x)
# This has no CPython equivalent, CPython uses long_hash.
@overload_method(types.Integer, '__hash__')
@overload_method(types.Boolean, '__hash__')
def int_hash(val):
_HASH_I64_MIN = -2 if sys.maxsize <= 2 ** 32 else -4
# this is a bit involved due to the CPython repr of ints
def impl(val):
        # If the magnitude is under PyHASH_MODULUS, just return the value
        # itself as the hash. Two special cases arise when val == -val:
        # 1. it's 0, in which case return 0
        # 2. it's the signed int minimum value; return the value CPython
        #    computes, which Numba cannot compute directly as there's no type
        #    wide enough to hold the shifts.
        #
        # If the magnitude is greater than or equal to PyHASH_MODULUS then, if
        # the value is negative, negate it, compute the hash with the standard
        # wide unsigned implementation, and switch the sign on the result.
mag = abs(val)
if mag < _PyHASH_MODULUS:
if val == -val:
if val == 0:
ret = 0
else: # int64 min, -0x8000000000000000
ret = _Py_hash_t(_HASH_I64_MIN)
else:
ret = _Py_hash_t(val)
else:
needs_negate = False
if val < 0:
val = -val
needs_negate = True
ret = _long_impl(val)
if needs_negate:
ret = -ret
return process_return(ret)
return impl
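# Usage sketch (added note, not part of the original module; assumes a working
# numba install): the overload above is what makes `hash` usable on integers
# inside jitted code.
#
#     from numba import njit
#
#     @njit
#     def jitted_hash(x):
#         return hash(x)          # dispatches to int_hash's impl for integers
#
#     # jitted_hash(2**40) == hash(2**40) on the host interpreter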
# This is a translation of CPython's float_hash:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Objects/floatobject.c#L528-L532
@overload_method(types.Float, '__hash__')
def float_hash(val):
if val.bitwidth == 64:
def impl(val):
hashed = _Py_HashDouble(val)
return hashed
else:
def impl(val):
# widen the 32bit float to 64bit
fpextended = np.float64(_fpext(val))
hashed = _Py_HashDouble(fpextended)
return hashed
return impl
# This is a translation of CPython's complex_hash:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Objects/complexobject.c#L408-L428
@overload_method(types.Complex, '__hash__')
def complex_hash(val):
def impl(val):
hashreal = hash(val.real)
hashimag = hash(val.imag)
# Note: if the imaginary part is 0, hashimag is 0 now,
# so the following returns hashreal unchanged. This is
# important because numbers of different types that
# compare equal must have the same hash value, so that
# hash(x + 0*j) must equal hash(x).
combined = hashreal + _PyHASH_IMAG * hashimag
return process_return(combined)
return impl
# This is a translation of CPython's tuplehash:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Objects/tupleobject.c#L347-L369
@register_jitable(locals={'x': _Py_uhash_t,
'y': _Py_hash_t,
'mult': _Py_uhash_t,
'l': _Py_hash_t, })
def _tuple_hash(tup):
tl = len(tup)
mult = _PyHASH_MULTIPLIER
x = _Py_uhash_t(0x345678)
# in C this is while(--l >= 0), i is indexing tup instead of *tup++
for i, l in enumerate(range(tl - 1, -1, -1)):
y = hash(tup[i])
xxory = (x ^ y)
x = xxory * mult
mult += _Py_hash_t((_Py_uhash_t(82520) + l + l))
x += _Py_uhash_t(97531)
return process_return(x)
# This is an obfuscated translation of CPython's tuplehash:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Objects/tupleobject.c#L347-L369
# The obfuscation occurs for a heterogeneous tuple as each tuple member needs
# a potentially different hash() function called for it. This cannot be done at
# runtime as there's no way to iterate a heterogeneous tuple, so this is
# achieved by essentially unrolling the loop over the members and inserting a
# per-type hash function call for each member, and then simply computing the
# hash value in an inlined/rolling fashion.
@intrinsic
def _tuple_hash_resolve(tyctx, val):
def impl(cgctx, builder, signature, args):
typingctx = cgctx.typing_context
fnty = typingctx.resolve_value_type(hash)
tupty, = signature.args
tup, = args
lty = cgctx.get_value_type(signature.return_type)
x = ir.Constant(lty, 0x345678)
mult = ir.Constant(lty, _PyHASH_MULTIPLIER)
shift = ir.Constant(lty, 82520)
tl = len(tupty)
for i, packed in enumerate(zip(tupty.types, range(tl - 1, -1, -1))):
ty, l = packed
sig = fnty.get_call_type(tyctx, (ty,), {})
impl = cgctx.get_function(fnty, sig)
tuple_val = builder.extract_value(tup, i)
y = impl(builder, (tuple_val,))
xxory = builder.xor(x, y)
x = builder.mul(xxory, mult)
lconst = ir.Constant(lty, l)
mult = builder.add(mult, shift)
mult = builder.add(mult, lconst)
mult = builder.add(mult, lconst)
x = builder.add(x, ir.Constant(lty, 97531))
return x
sig = _Py_hash_t(val)
return sig, impl
@overload_method(types.BaseTuple, '__hash__')
def tuple_hash(val):
if isinstance(val, types.Sequence):
def impl(val):
return _tuple_hash(val)
return impl
else:
def impl(val):
hashed = _Py_hash_t(_tuple_hash_resolve(val))
return process_return(hashed)
return impl
# ------------------------------------------------------------------------------
# String/bytes hashing needs hashseed info, this is from:
# https://stackoverflow.com/a/41088757
# with thanks to Martijn Pieters
#
# Developer note:
# CPython makes use of an internal "hashsecret" which is essentially a struct
# containing some state that is set on CPython initialization and contains magic
# numbers used particularly in unicode/string hashing. This code binds to the
# Python runtime libraries in use by the current process and reads the
# "hashsecret" state so that it can be used by Numba. As this is done at runtime
# the behaviour and influence of the PYTHONHASHSEED environment variable is
# accommodated.
from ctypes import ( # noqa
c_size_t,
c_ubyte,
c_uint64,
pythonapi,
Structure,
Union,
) # noqa
class FNV(Structure):
_fields_ = [
('prefix', c_size_t),
('suffix', c_size_t)
]
class SIPHASH(Structure):
_fields_ = [
('k0', c_uint64),
('k1', c_uint64),
]
class DJBX33A(Structure):
_fields_ = [
('padding', c_ubyte * 16),
('suffix', c_size_t),
]
class EXPAT(Structure):
_fields_ = [
('padding', c_ubyte * 16),
('hashsalt', c_size_t),
]
class _Py_HashSecret_t(Union):
_fields_ = [
# ensure 24 bytes
('uc', c_ubyte * 24),
# two Py_hash_t for FNV
('fnv', FNV),
# two uint64 for SipHash24
('siphash', SIPHASH),
# a different (!) Py_hash_t for small string optimization
('djbx33a', DJBX33A),
('expat', EXPAT),
]
_hashsecret_entry = namedtuple('_hashsecret_entry', ['symbol', 'value'])
# Only a few members are needed at present
def _build_hashsecret():
"""Read hash secret from the Python process
Returns
-------
info : dict
- keys are "djbx33a_suffix", "siphash_k0", siphash_k1".
- values are the namedtuple[symbol:str, value:int]
"""
# Read hashsecret and inject it into the LLVM symbol map under the
# prefix `_numba_hashsecret_`.
pyhashsecret = _Py_HashSecret_t.in_dll(pythonapi, '_Py_HashSecret')
info = {}
def inject(name, val):
symbol_name = "_numba_hashsecret_{}".format(name)
val = ctypes.c_uint64(val)
addr = ctypes.addressof(val)
ll.add_symbol(symbol_name, addr)
info[name] = _hashsecret_entry(symbol=symbol_name, value=val)
inject('djbx33a_suffix', pyhashsecret.djbx33a.suffix)
inject('siphash_k0', pyhashsecret.siphash.k0)
inject('siphash_k1', pyhashsecret.siphash.k1)
return info
_hashsecret = _build_hashsecret()
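# Illustration (added note): the injected mapping has three entries whose
# values depend on the PYTHONHASHSEED of the running process, e.g.
#
#     _hashsecret['siphash_k0'].symbol  -> '_numba_hashsecret_siphash_k0'
#     _hashsecret['siphash_k0'].value   -> ctypes.c_uint64(<process specific>)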
# ------------------------------------------------------------------------------
if _Py_hashfunc_name == 'siphash24':
# This is a translation of CPython's siphash24 function:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Python/pyhash.c#L287-L413
# /* *********************************************************************
# <MIT License>
# Copyright (c) 2013 Marek Majkowski <[email protected]>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# </MIT License>
# Original location:
# https://github.com/majek/csiphash/
# Solution inspired by code from:
# Samuel Neves (supercop/crypto_auth/siphash24/little)
#djb (supercop/crypto_auth/siphash24/little2)
# Jean-Philippe Aumasson (https://131002.net/siphash/siphash24.c)
# Modified for Python by Christian Heimes:
# - C89 / MSVC compatibility
# - _rotl64() on Windows
# - letoh64() fallback
# */
@register_jitable(locals={'x': types.uint64,
'b': types.uint64, })
def _ROTATE(x, b):
return types.uint64(((x) << (b)) | ((x) >> (types.uint64(64) - (b))))
@register_jitable(locals={'a': types.uint64,
'b': types.uint64,
'c': types.uint64,
'd': types.uint64,
's': types.uint64,
't': types.uint64, })
def _HALF_ROUND(a, b, c, d, s, t):
a += b
c += d
b = _ROTATE(b, s) ^ a
d = _ROTATE(d, t) ^ c
a = _ROTATE(a, 32)
return a, b, c, d
@register_jitable(locals={'v0': types.uint64,
'v1': types.uint64,
'v2': types.uint64,
'v3': types.uint64, })
def _DOUBLE_ROUND(v0, v1, v2, v3):
v0, v1, v2, v3 = _HALF_ROUND(v0, v1, v2, v3, 13, 16)
v2, v1, v0, v3 = _HALF_ROUND(v2, v1, v0, v3, 17, 21)
v0, v1, v2, v3 = _HALF_ROUND(v0, v1, v2, v3, 13, 16)
v2, v1, v0, v3 = _HALF_ROUND(v2, v1, v0, v3, 17, 21)
return v0, v1, v2, v3
@register_jitable(locals={'v0': types.uint64,
'v1': types.uint64,
'v2': types.uint64,
'v3': types.uint64,
'b': types.uint64,
'mi': types.uint64,
'tmp': types.Array(types.uint64, 1, 'C'),
't': types.uint64,
'mask': types.uint64,
'jmp': types.uint64,
'ohexefef': types.uint64})
def _siphash24(k0, k1, src, src_sz):
b = types.uint64(src_sz) << 56
v0 = k0 ^ types.uint64(0x736f6d6570736575)
v1 = k1 ^ types.uint64(0x646f72616e646f6d)
v2 = k0 ^ types.uint64(0x6c7967656e657261)
v3 = k1 ^ types.uint64(0x7465646279746573)
idx = 0
while (src_sz >= 8):
mi = grab_uint64_t(src, idx)
idx += 1
src_sz -= 8
v3 ^= mi
v0, v1, v2, v3 = _DOUBLE_ROUND(v0, v1, v2, v3)
v0 ^= mi
# this is the switch fallthrough:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Python/pyhash.c#L390-L400
t = types.uint64(0x0)
boffset = idx * 8
ohexefef = types.uint64(0xff)
if src_sz >= 7:
jmp = (6 * 8)
mask = ~types.uint64(ohexefef << jmp)
t = (t & mask) | (types.uint64(grab_byte(src, boffset + 6)) << jmp)
if src_sz >= 6:
jmp = (5 * 8)
mask = ~types.uint64(ohexefef << jmp)
t = (t & mask) | (types.uint64(grab_byte(src, boffset + 5)) << jmp)
if src_sz >= 5:
jmp = (4 * 8)
mask = ~types.uint64(ohexefef << jmp)
t = (t & mask) | (types.uint64(grab_byte(src, boffset + 4)) << jmp)
if src_sz >= 4:
t &= types.uint64(0xffffffff00000000)
for i in range(4):
jmp = i * 8
mask = ~types.uint64(ohexefef << jmp)
t = (t & mask) | (types.uint64(grab_byte(src, boffset + i)) << jmp)
if src_sz >= 3:
jmp = (2 * 8)
mask = ~types.uint64(ohexefef << jmp)
t = (t & mask) | (types.uint64(grab_byte(src, boffset + 2)) << jmp)
if src_sz >= 2:
jmp = (1 * 8)
mask = ~types.uint64(ohexefef << jmp)
t = (t & mask) | (types.uint64(grab_byte(src, boffset + 1)) << jmp)
if src_sz >= 1:
mask = ~(ohexefef)
t = (t & mask) | (types.uint64(grab_byte(src, boffset + 0)))
b |= t
v3 ^= b
v0, v1, v2, v3 = _DOUBLE_ROUND(v0, v1, v2, v3)
v0 ^= b
v2 ^= ohexefef
v0, v1, v2, v3 = _DOUBLE_ROUND(v0, v1, v2, v3)
v0, v1, v2, v3 = _DOUBLE_ROUND(v0, v1, v2, v3)
t = (v0 ^ v1) ^ (v2 ^ v3)
return t
elif _Py_hashfunc_name == 'fnv':
#TODO: Should this instead warn and switch to siphash24?
raise NotImplementedError("FNV hashing is not implemented")
else:
msg = "Unsupported hashing algorithm in use %s" % _Py_hashfunc_name
raise ValueError(msg)
@intrinsic
def _inject_hashsecret_read(tyctx, name):
"""Emit code to load the hashsecret.
"""
if not isinstance(name, types.StringLiteral):
raise errors.TypingError("requires literal string")
sym = _hashsecret[name.literal_value].symbol
resty = types.uint64
sig = resty(name)
def impl(cgctx, builder, sig, args):
mod = builder.module
try:
# Search for existing global
gv = mod.get_global(sym)
except KeyError:
# Inject the symbol if not already exist.
gv = ir.GlobalVariable(mod, ir.IntType(64), name=sym)
v = builder.load(gv)
return v
return sig, impl
def _load_hashsecret(name):
return _hashsecret[name].value
@overload(_load_hashsecret)
def _impl_load_hashsecret(name):
def imp(name):
return _inject_hashsecret_read(name)
return imp
# This is a translation of CPythons's _Py_HashBytes:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Python/pyhash.c#L145-L191
@register_jitable(locals={'_hash': _Py_uhash_t})
def _Py_HashBytes(val, _len):
if (_len == 0):
return process_return(0)
if (_len < _Py_HASH_CUTOFF):
# TODO: this branch needs testing, needs a CPython setup for it!
# /* Optimize hashing of very small strings with inline DJBX33A. */
_hash = _Py_uhash_t(5381) # /* DJBX33A starts with 5381 */
for idx in range(_len):
_hash = ((_hash << 5) + _hash) + np.uint8(grab_byte(val, idx))
_hash ^= _len
_hash ^= _load_hashsecret('djbx33a_suffix')
else:
tmp = _siphash24(types.uint64(_load_hashsecret('siphash_k0')),
types.uint64(_load_hashsecret('siphash_k1')),
val, _len)
_hash = process_return(tmp)
return process_return(_hash)
# This is an approximate translation of CPython's unicode_hash:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Objects/unicodeobject.c#L11635-L11663
@overload_method(types.UnicodeType, '__hash__')
def unicode_hash(val):
from numba.unicode import _kind_to_byte_width
def impl(val):
kindwidth = _kind_to_byte_width(val._kind)
_len = len(val)
# use the cache if possible
current_hash = val._hash
if current_hash != -1:
return current_hash
else:
# cannot write hash value to cache in the unicode struct due to
# pass by value on the struct making the struct member immutable
return _Py_HashBytes(val._data, kindwidth * _len)
return impl
| bsd-2-clause | 3,617,599,016,980,870,000 | 34.305598 | 119 | 0.577624 | false |
ShipleyCollege/ViPteam1 | ExtractAndAnalyzeCode/ExtractNodes.py | 1 | 2439 |
import os
import glob
import BuildNode
#INPUT_FILENAME = "../Sample Blueprint code/SimpleMoveToActor-Network - Code.txt";
#INPUT_FILENAME = "../Sample Blueprint code/SpawnObjectsWithForLoop - Code.txt";
#INPUT_FILENAME = "../Sample Blueprint code/SpawnRoundTargetPoint - Code.txt";
#INPUT_FILENAME = "../Sample Blueprint code/SpawnRoundTargetPointV2 - Code.txt";
#INPUT_FILENAME = "../Sample Blueprint code/SpawnRoundTargetPointV3 - Code.txt";
#INPUT_FILENAME = "../Sample Blueprint code/JISCTest1.txt";
#INPUT_FILENAME = "../Sample Blueprint code/ResetLevelAfterTimeout.txt";
INPUT_FILENAME = "../Sample Blueprint code/Randomly Spawn Sound Objects - Code.txt";
OUTPUT_FOLDER = "../GeneratedCode"
WORK_FOLDERNAME = "../GeneratedCode/Temp";
# function to remove temporary nodes extracted from network
def removeTempNodes():
if not os.path.exists(WORK_FOLDERNAME):
os.makedirs(WORK_FOLDERNAME)
files = glob.glob(WORK_FOLDERNAME + '/node*.txt')
for f in files:
os.remove(f)
def DoItNow(buildMode, filename, output_folder, debug=False):
global INPUT_FILENAME
INPUT_FILENAME = filename
global OUTPUT_FOLDER
OUTPUT_FOLDER = output_folder
global WORK_FOLDERNAME
WORK_FOLDERNAME = OUTPUT_FOLDER + "/Temp"
DoIt(buildMode, debug=debug)
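# Example call (added sketch; the build mode string and paths below are
# hypothetical placeholders):
#
#     DoItNow("GenerateCPP", "../Sample Blueprint code/JISCTest1.txt",
#             "../GeneratedCode", debug=True)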
def DoIt(buildMode, debug=False):
# make sure we start with an empty directory
removeTempNodes()
# break network into individual nodes
# - each node starts end ends with 'Begin' and 'End' in column 0
nodeNumber = 0
print("Reading network from " + INPUT_FILENAME)
if (not os.path.isfile(INPUT_FILENAME)) or (not os.path.exists(INPUT_FILENAME)):
print("======> Input File Not Found <======")
else:
print("======> Input File Found <=======")
with open(INPUT_FILENAME) as f:
content = f.readlines()
for line in content:
if (line[0:5] == "Begin"):
text_file = open(WORK_FOLDERNAME + "/node" + str(nodeNumber) + ".txt", "w")
text_file.write(line)
if (line[0:3] == "End"):
text_file.close()
nodeNumber += 1
nrc = 0
nodeNumber = 0
files = glob.glob(WORK_FOLDERNAME + '/node*.txt')
for f in files:
print("Calling BuildNode with [" + buildMode + ", " + f + "]")
if debug:
nrc += BuildNode.doIt(OUTPUT_FOLDER, buildMode, f, str(nodeNumber))
else:
nrc += BuildNode.doIt(OUTPUT_FOLDER, buildMode, f, "")
nodeNumber += 1
print("Nodes Extracted : " + str(nodeNumber))
print("Nodes not recognized : " + str(nrc))
# removeTempNodes()
| gpl-3.0 | 5,190,332,986,550,599,000 | 28.385542 | 84 | 0.696597 | false |
ucb-bar/bar-crawl-web | flower/utils/tasks.py | 1 | 2717 |
from __future__ import absolute_import
import datetime
import time
from celery.events.state import Task
from .search import satisfies_search_terms
def iter_tasks(events, limit=None, type=None, worker=None, state=None,
sort_by=None, received_start=None, received_end=None,
started_start=None, started_end=None, search_terms=None,
jobid=None):
i = 0
tasks = events.state.tasks_by_timestamp()
if sort_by is not None:
tasks = sort_tasks(tasks, sort_by)
convert = lambda x: time.mktime(
datetime.datetime.strptime(x, '%Y-%m-%d %H:%M').timetuple()
)
search_terms = search_terms or {}
any_value_search_term = search_terms.get('any', None)
result_search_term = search_terms.get('result', None)
args_search_terms = search_terms.get('args', None)
kwargs_search_terms = search_terms.get('kwargs', None)
for uuid, task in tasks:
if type and task.name != type:
continue
if worker and task.worker and task.worker.hostname != worker:
continue
if state and task.state != state:
continue
if received_start and task.received and\
task.received < convert(received_start):
continue
if received_end and task.received and\
task.received > convert(received_end):
continue
if started_start and task.started and\
task.started < convert(started_start):
continue
if started_end and task.started and\
task.started > convert(started_end):
continue
if not satisfies_search_terms(task, any_value_search_term, result_search_term, args_search_terms, kwargs_search_terms):
continue
if jobid is not None and eval(task.as_dict()['args'])[2] != jobid:
continue
yield uuid, task
i += 1
if i == limit:
break
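# Usage sketch (added note; `events` is the celery events/state holder that
# flower passes around, shown here only for illustration):
#
#     for uuid, task in iter_tasks(events, limit=20, state='SUCCESS',
#                                  sort_by='-received'):
#         print(uuid, task.name)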
sort_keys = {'name': str, 'state': str, 'received': float, 'started': float}
def sort_tasks(tasks, sort_by):
assert sort_by.lstrip('-') in sort_keys
reverse = False
if sort_by.startswith('-'):
sort_by = sort_by.lstrip('-')
reverse = True
for task in sorted(tasks,
key=lambda x: getattr(x[1], sort_by) or sort_keys[sort_by](),
reverse=reverse):
yield task
def get_task_by_id(events, task_id):
if hasattr(Task, '_fields'): # Old version
return events.state.tasks.get(task_id)
else:
_fields = Task._defaults.keys()
task = events.state.tasks.get(task_id)
if task is not None:
task._fields = _fields
return task
| bsd-3-clause | 4,780,227,939,877,630,000 | 34.285714 | 127 | 0.591829 | false |
megaprojectske/megaprojects.co.ke | megaprojects/articles/migrations/0002_auto__add_field_image_reviewed__add_field_article_reviewed.py | 1 | 7695 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Image.reviewed'
db.add_column(u'articles_image', 'reviewed',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'Article.reviewed'
db.add_column(u'articles_article', 'reviewed',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Image.reviewed'
db.delete_column(u'articles_image', 'reviewed')
# Deleting field 'Article.reviewed'
db.delete_column(u'articles_article', 'reviewed')
models = {
u'articles.article': {
'Meta': {'ordering': "['-pubdate']", 'object_name': 'Article'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'body': ('django.db.models.fields.TextField', [], {}),
'changed': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'drupal_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'lead': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'program': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['programs.Program']", 'null': 'True', 'blank': 'True'}),
'pubdate': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 6, 5, 0, 0)'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'articles.image': {
'Meta': {'ordering': "['-article__pubdate', '-created']", 'object_name': 'Image'},
'alt': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['articles.Article']"}),
'changed': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'programs.program': {
'Meta': {'ordering': "['title']", 'object_name': 'Program'},
'abbr': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'body': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'changed': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lead': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'status': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
}
}
complete_apps = ['articles']
| apache-2.0 | 464,978,031,842,884,700 | 68.324324 | 187 | 0.548538 | false |
Smile-SA/odoo_extra | web_printscreen_zb/__openerp__.py | 1 | 1611 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2013 ZestyBeanz Technologies Pvt. Ltd.
# (http://wwww.zbeanztech.com)
# [email protected]
# [email protected]
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Web Printscreen ZB',
'version': '1.0',
'category': 'Web',
'description': """
    Module to export the current active tree view into an Excel report
Update Smile:
Fix native odoo xls export
""",
'author': 'Zesty Beanz Technologies',
'website': 'http://www.zbeanztech.com',
'depends': ['web'],
'data': ['views/web_printscreen_zb.xml'],
'qweb': ['static/src/xml/web_printscreen_export.xml'],
'installable': True,
'auto_install': False,
'web_preload': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -5,816,478,258,936,935,000 | 35.636364 | 78 | 0.599628 | false |
adezfouli/savigp | GP/grad_checker.py | 1 | 2303 |
__author__ = 'AT'
from DerApproximator import get_d1
from numpy import concatenate
from texttable import Texttable
class GradChecker:
""" A class for checking gradients. """
def __init__(self):
pass
@staticmethod
def check(f, f_grad, x0, name, verbose=False):
"""
        Checks whether gradients of function ``f`` at point x0 are the same as the gradients provided by ``f_grad``.
``error`` is the difference between numerical and provided gradients.
'%error' = abs(error) / numerical gradient.
Parameters
----------
f : callable
input function to check gradients against
f_grad : callable
input function which provides gradients
x0 : ndarray
the point at which gradients should be calculated
name : list
a vector with the size of the number of parameters, which provides name for each parameter. This
name will be used when generating output table
verbose : boolean
whether to print output for each parameter separately
Returns
-------
avg : float
average of the percentage error over all the parameters, i.e., mean(%error)
"""
g = f_grad(x0)
if len(g) != len(x0):
raise Exception('dimensions mismatch')
table = Texttable()
table.set_cols_align(["l", "r", "c", "c", "c"])
table.set_cols_valign(["t", "m", "b" , "r", "c"])
rows = []
rows += [["Name ", "analytical ", "numerical ", "error ", "% error "]]
if verbose:
print 'dimensions:', len(x0)
aver_error = 0
for i in range(len(x0)):
def f_i(x):
return f((concatenate((x0[:i], x, x0[(i+1):]))))
t = get_d1(f_i, [x0[i]])
            p_error = None
            if t != 0:
                p_error = abs(t-g[i]) / abs(t)
            rows += [[name[i], g[i], t, abs(t-g[i]), p_error]]
if abs(g[i]) <1e-4 and abs(t) < 1e-4:
pass
else:
aver_error += abs(t-g[i]) / abs(t)
if verbose:
print 'element:', i
table.add_rows(rows)
if verbose:
print(table.draw())
        return aver_error / len(x0)
| apache-2.0 | -3,464,575,333,593,513,000 | 31 | 111 | 0.520625 | false |
tensorflow/federated | tensorflow_federated/python/core/backends/mapreduce/forms.py | 1 | 16487 |
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Standardized representation of logic deployable to MapReduce-like systems."""
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.core.api import computation_base
from tensorflow_federated.python.core.impl.computation import computation_impl
from tensorflow_federated.python.core.impl.types import computation_types
def _check_tensorflow_computation(label, comp):
py_typecheck.check_type(comp, computation_base.Computation, label)
comp_proto = computation_impl.ComputationImpl.get_proto(comp)
which_comp = comp_proto.WhichOneof('computation')
if which_comp != 'tensorflow':
raise TypeError('Expected all computations supplied as arguments to '
'be plain TensorFlow, found {}.'.format(which_comp))
def _is_assignable_from_or_both_none(first, second):
if first is None:
return second is None
return first.is_assignable_from(second)
def _is_two_tuple(t: computation_types.Type) -> bool:
return t.is_struct() and len(t) == 2
def _check_accepts_two_tuple(label: str, comp: computation_base.Computation):
param_type = comp.type_signature.parameter
if not _is_two_tuple(param_type):
raise TypeError(
f'The `{label}` computation accepts a parameter of type\n{param_type}\n'
'that is not a two-tuple.')
def _check_returns_two_tuple(label: str, comp: computation_base.Computation):
result_type = comp.type_signature.result
if not _is_two_tuple(result_type):
raise TypeError(
f'The `{label}` computation returns a result of type\n{result_type}\n'
'that is not a two-tuple.')
class BroadcastForm(object):
"""Standardized representation of server-to-client logic.
This class is designed to represent computations of the form:
```
server_data_type = self.compute_server_context.type_signature.parameter
client_data_type = self.client_processing.type_signature.parameter[1]
@tff.federated_computation(server_data_type, client_data_type)
def _(server_data, client_data):
# Select out the bit of server context to send to the clients.
context_at_server = tff.federated_map(
self.compute_server_context, server_data)
# Broadcast the context to the clients.
context_at_clients = tff.federated_broadcast(context_at_server)
# Compute some value on the clients based on the server context and
# the client data.
return tff.federated_map(
self.client_processing, (context_at_clients, client_data))
```
"""
def __init__(self,
compute_server_context,
client_processing,
server_data_label=None,
client_data_label=None):
for label, comp in (
('compute_server_context', compute_server_context),
('client_processing', client_processing),
):
_check_tensorflow_computation(label, comp)
_check_accepts_two_tuple('client_processing', client_processing)
client_first_arg_type = client_processing.type_signature.parameter[0]
server_context_type = compute_server_context.type_signature.result
if not _is_assignable_from_or_both_none(client_first_arg_type,
server_context_type):
raise TypeError(
'The `client_processing` computation expects an argument tuple with '
f'type\n{client_first_arg_type}\nas the first element (the context '
'type from the server), which does not match the result type\n'
f'{server_context_type}\n of `compute_server_context`.')
self._compute_server_context = compute_server_context
self._client_processing = client_processing
if server_data_label is not None:
py_typecheck.check_type(server_data_label, str)
self._server_data_label = server_data_label
if client_data_label is not None:
py_typecheck.check_type(server_data_label, str)
self._client_data_label = client_data_label
@property
def compute_server_context(self):
return self._compute_server_context
@property
def client_processing(self):
return self._client_processing
@property
def server_data_label(self):
return self._server_data_label
@property
def client_data_label(self):
return self._client_data_label
def summary(self, print_fn=print):
"""Prints a string summary of the `BroadcastForm`.
Args:
print_fn: Print function to use. It will be called on each line of the
summary in order to capture the string summary.
"""
for label, comp in (
('compute_server_context', self.compute_server_context),
('client_processing', self.client_processing),
):
# Add sufficient padding to align first column;
# len('compute_server_context') == 22
print_fn('{:<22}: {}'.format(
label, comp.type_signature.compact_representation()))
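# Usage sketch (added note; the two tf_computations named below are
# placeholders, not defined here): a BroadcastForm is normally built from two
# plain TensorFlow computations and then inspected or compiled by the
# MapReduce backend.
#
#     broadcast_form = BroadcastForm(
#         compute_server_context,  # tff.tf_computation: server_state -> context
#         client_processing,       # tff.tf_computation: (context, client_data) -> result
#         server_data_label='server_state',
#         client_data_label='client_data')
#     broadcast_form.summary()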
class MapReduceForm(object):
"""Standardized representation of logic deployable to MapReduce-like systems.
This class docstring describes the purpose of `MapReduceForm` as a data
structure; for a discussion of the conceptual content of an instance `mrf` of
`MapReduceForm`, including how precisely it maps to a single federated round,
see the [package-level docstring](
https://www.tensorflow.org/federated/api_docs/python/tff/backends/mapreduce).
This standardized representation can be used to describe a range of iterative
processes representable as a single round of MapReduce-like processing, and
deployable to MapReduce-like systems that are only capable of executing plain
TensorFlow code.
Non-iterative processes, or processes that do not originate at the server can
be described by `MapReduceForm`, as well as degenerate cases like computations
which use exclusively one of the two possible aggregation paths.
Instances of this class can be generated by TFF's transformation pipeline and
consumed by a variety of backends that have the ability to orchestrate their
execution in a MapReduce-like fashion. The latter can include systems that run
static data pipelines such Apache Beam or Hadoop, but also platforms like that
which has been described in the following paper:
"Towards Federated Learning at Scale: System Design"
https://arxiv.org/pdf/1902.01046.pdf
It should be noted that not every computation that proceeds in synchronous
rounds is representable as an instance of this class. In particular, this
representation is not suitable for computations that involve multiple phases
of processing, and does not generalize to arbitrary static data pipelines.
Generalized representations that can take advantage of the full expressiveness
of Apache Beam-like systems may emerge at a later time, and will be supported
by a separate set of tools, with a more expressive canonical representation.
The requirement that the variable constituents of the template be in the form
of pure TensorFlow code (not arbitrary TFF constructs) reflects the intent
for instances of this class to be easily converted into a representation that
can be compiled into a system that does *not* have the ability to interpret
the full TFF language (as defined in `computation.proto`), but that does have
the ability to run TensorFlow. Client-side logic in such systems could be
deployed in a number of ways, e.g., as shards in a MapReduce job, to mobile or
embedded devices, etc.
The individual TensorFlow computations that constitute an iterative process
in this form are supplied as constructor arguments. Generally, this class will
not be instantiated by a programmer directly but targeted by a sequence of
transformations that take a `tff.templates.IterativeProcess` and produce the
appropriate pieces of logic.
"""
def __init__(self,
initialize,
prepare,
work,
zero,
accumulate,
merge,
report,
bitwidth,
update,
server_state_label=None,
client_data_label=None):
"""Constructs a representation of a MapReduce-like iterative process.
Note: All the computations supplied here as arguments must be TensorFlow
computations, i.e., instances of `tff.Computation` constructed by the
`tff.tf_computation` decorator/wrapper.
Args:
initialize: The computation that produces the initial server state.
prepare: The computation that prepares the input for the clients.
work: The client-side work computation.
zero: The computation that produces the initial state for accumulators.
accumulate: The computation that adds a client update to an accumulator.
merge: The computation to use for merging pairs of accumulators.
report: The computation that produces the final server-side aggregate for
the top level accumulator (the global update).
bitwidth: The computation that produces the bitwidth for secure sum.
update: The computation that takes the global update and the server state
and produces the new server state, as well as server-side output.
server_state_label: Optional string label for the server state.
client_data_label: Optional string label for the client data.
Raises:
TypeError: If the Python or TFF types of the arguments are invalid or not
compatible with each other.
AssertionError: If the manner in which the given TensorFlow computations
are represented by TFF does not match what this code is expecting (this
is an internal error that requires code update).
"""
for label, comp in (
('initialize', initialize),
('prepare', prepare),
('work', work),
('zero', zero),
('accumulate', accumulate),
('merge', merge),
('report', report),
('bitwidth', bitwidth),
('update', update),
):
_check_tensorflow_computation(label, comp)
prepare_arg_type = prepare.type_signature.parameter
init_result_type = initialize.type_signature.result
if not _is_assignable_from_or_both_none(prepare_arg_type, init_result_type):
raise TypeError(
'The `prepare` computation expects an argument of type {}, '
'which does not match the result type {} of `initialize`.'.format(
prepare_arg_type, init_result_type))
_check_accepts_two_tuple('work', work)
work_2nd_arg_type = work.type_signature.parameter[1]
prepare_result_type = prepare.type_signature.result
if not _is_assignable_from_or_both_none(work_2nd_arg_type,
prepare_result_type):
raise TypeError(
'The `work` computation expects an argument tuple with type {} as '
'the second element (the initial client state from the server), '
'which does not match the result type {} of `prepare`.'.format(
work_2nd_arg_type, prepare_result_type))
_check_returns_two_tuple('work', work)
py_typecheck.check_len(accumulate.type_signature.parameter, 2)
accumulate.type_signature.parameter[0].check_assignable_from(
zero.type_signature.result)
accumulate_2nd_arg_type = accumulate.type_signature.parameter[1]
work_client_update_type = work.type_signature.result[0]
if not _is_assignable_from_or_both_none(accumulate_2nd_arg_type,
work_client_update_type):
raise TypeError(
'The `accumulate` computation expects a second argument of type {}, '
'which does not match the expected {} as implied by the type '
'signature of `work`.'.format(accumulate_2nd_arg_type,
work_client_update_type))
accumulate.type_signature.parameter[0].check_assignable_from(
accumulate.type_signature.result)
py_typecheck.check_len(merge.type_signature.parameter, 2)
merge.type_signature.parameter[0].check_assignable_from(
accumulate.type_signature.result)
merge.type_signature.parameter[1].check_assignable_from(
accumulate.type_signature.result)
merge.type_signature.parameter[0].check_assignable_from(
merge.type_signature.result)
report.type_signature.parameter.check_assignable_from(
merge.type_signature.result)
expected_update_parameter_type = computation_types.to_type([
initialize.type_signature.result,
[report.type_signature.result, work.type_signature.result[1]],
])
if not _is_assignable_from_or_both_none(update.type_signature.parameter,
expected_update_parameter_type):
raise TypeError(
'The `update` computation expects an argument of type {}, '
'which does not match the expected {} as implied by the type '
'signatures of `initialize`, `report`, and `work`.'.format(
update.type_signature.parameter, expected_update_parameter_type))
_check_returns_two_tuple('update', update)
updated_state_type = update.type_signature.result[0]
if not prepare_arg_type.is_assignable_from(updated_state_type):
raise TypeError(
'The `update` computation returns a result tuple whose first element '
f'(the updated state type of the server) is type:\n'
f'{updated_state_type}\n'
f'which is not assignable to the state parameter type of `prepare`:\n'
f'{prepare_arg_type}')
self._initialize = initialize
self._prepare = prepare
self._work = work
self._zero = zero
self._accumulate = accumulate
self._merge = merge
self._report = report
self._bitwidth = bitwidth
self._update = update
if server_state_label is not None:
py_typecheck.check_type(server_state_label, str)
self._server_state_label = server_state_label
if client_data_label is not None:
py_typecheck.check_type(client_data_label, str)
self._client_data_label = client_data_label
@property
def initialize(self):
return self._initialize
@property
def prepare(self):
return self._prepare
@property
def work(self):
return self._work
@property
def zero(self):
return self._zero
@property
def accumulate(self):
return self._accumulate
@property
def merge(self):
return self._merge
@property
def report(self):
return self._report
@property
def bitwidth(self):
return self._bitwidth
@property
def update(self):
return self._update
@property
def server_state_label(self):
return self._server_state_label
@property
def client_data_label(self):
return self._client_data_label
@property
def securely_aggregates_tensors(self) -> bool:
"""Whether the `MapReduceForm` uses secure aggregation."""
# Tensors aggregated over `federated_secure_sum_bitwidth` are output in the
# second tuple element from `work()`.
work_result_type = self.work.type_signature.result
assert len(work_result_type) == 2
return not work_result_type[1].is_equivalent_to(
computation_types.StructType([]))
def summary(self, print_fn=print):
"""Prints a string summary of the `MapReduceForm`.
Args:
print_fn: Print function to use. It will be called on each line of the
summary in order to capture the string summary.
"""
for label, comp in (
('initialize', self.initialize),
('prepare', self.prepare),
('work', self.work),
('zero', self.zero),
('accumulate', self.accumulate),
('merge', self.merge),
('report', self.report),
('bitwidth', self.bitwidth),
('update', self.update),
):
# Add sufficient padding to align first column; len('initialize') == 10
print_fn('{:<10}: {}'.format(
label, comp.type_signature.compact_representation()))
| apache-2.0 | 4,678,343,868,907,108,000 | 39.212195 | 80 | 0.691029 | false |
azam-a/gocd2gmail2slack | gocd2gmail2slack/integrations.py | 1 | 1445 |
import gmail as Gm
import messages as Msg
import slack
from cfg.config import (
WEBHOOK_URL,
GOCD_DASHBOARD_URL,
)
def main():
try:
service, labels, messages_details = initialize()
process(service, labels, messages_details)
    except Exception:
pass
def initialize():
service = Gm.get_service()
labels = Gm.get_labels(service)
initial_messages = Gm.get_messages(service, include_labels=['UNREAD'])
messages_details = Gm.get_messages_details(service, initial_messages)
return (service, labels, messages_details)
def process(service, labels, messages_details):
for item in messages_details:
subject = Msg.get_subject(item)
if Msg.is_gocd_pattern(subject):
gocd_details = Msg.get_gocd_details(subject)
if slack.is_matching_send_rule(gocd_details):
body = Msg.get_body(item)
changeset = Msg.get_changeset_info(body)
text = (slack
.message_builder(gocd_details,
changeset,
GOCD_DASHBOARD_URL))
slack.send_to_slack(text, WEBHOOK_URL)
Gm.add_label(service, Msg.get_id(item),
'SENT_TO_SLACK', labels)
Gm.remove_label(service, Msg.get_id(item),
'UNREAD', labels)
if __name__ == "__main__":
main()
| mit | 4,342,497,860,027,901,400 | 26.264151 | 74 | 0.559862 | false |
MahjongRepository/tenhou-python-bot | project/game/ai/helpers/defence.py | 1 | 16886 | from typing import Optional
class TileDanger:
IMPOSSIBLE_WAIT = {
"value": 0,
"description": "Impossible wait",
}
SAFE_AGAINST_THREATENING_HAND = {
"value": 0,
"description": "Tile can't be used by analyzed threat",
}
# honor tiles
HONOR_THIRD = {
"value": 40,
"description": "Third honor tile (early game)",
}
NON_YAKUHAI_HONOR_SECOND_EARLY = {
"value": 60,
"description": "Second non-yakuhai honor (early game)",
}
NON_YAKUHAI_HONOR_SHONPAI_EARLY = {
"value": 120,
"description": "Shonpai non-yakuhai honor (early game)",
}
YAKUHAI_HONOR_SECOND_EARLY = {
"value": 80,
"description": "Second yakuhai honor (early game)",
}
YAKUHAI_HONOR_SHONPAI_EARLY = {
"value": 160,
"description": "Shonpai yakuhai honor (early game)",
}
DOUBLE_YAKUHAI_HONOR_SECOND_EARLY = {
"value": 120,
"description": "Second double-yakuhai honor (early game)",
}
DOUBLE_YAKUHAI_HONOR_SHONPAI_EARLY = {
"value": 240,
"description": "Shonpai double-yakuhai honor (early game)",
}
NON_YAKUHAI_HONOR_SECOND_MID = {
"value": 80,
"description": "Second non-yakuhai honor (mid game)",
}
NON_YAKUHAI_HONOR_SHONPAI_MID = {
"value": 160,
"description": "Shonpai non-yakuhai honor (mid game)",
}
YAKUHAI_HONOR_SECOND_MID = {
"value": 120,
"description": "Second yakuhai honor (mid game)",
}
DOUBLE_YAKUHAI_HONOR_SECOND_MID = {
"value": 200,
"description": "Second double-yakuhai honor (mid game)",
}
YAKUHAI_HONOR_SHONPAI_MID = {
"value": 240,
"description": "Shonpai yakuhai honor (mid game)",
}
DOUBLE_YAKUHAI_HONOR_SHONPAI_MID = {
"value": 480,
"description": "Shonpai double-yakuhai honor (mid game)",
}
NON_YAKUHAI_HONOR_SECOND_LATE = {
"value": 160,
"description": "Second non-yakuhai honor (late game)",
}
NON_YAKUHAI_HONOR_SHONPAI_LATE = {
"value": 240,
"description": "Shonpai non-yakuhai honor (late game)",
}
YAKUHAI_HONOR_SECOND_LATE = {
"value": 200,
"description": "Second yakuhai honor (late game)",
}
DOUBLE_YAKUHAI_HONOR_SECOND_LATE = {
"value": 300,
"description": "Second double-yakuhai honor (late game)",
}
YAKUHAI_HONOR_SHONPAI_LATE = {
"value": 400,
"description": "Shonpai yakuhai honor (late game)",
}
DOUBLE_YAKUHAI_HONOR_SHONPAI_LATE = {
"value": 600,
"description": "Shonpai double-yakuhai honor (late game)",
}
# kabe tiles
NON_SHONPAI_KABE_STRONG = {
"value": 40,
"description": "Non-shonpai strong kabe tile",
}
SHONPAI_KABE_STRONG = {
"value": 200,
"description": "Shonpai strong kabe tile",
}
NON_SHONPAI_KABE_WEAK = {
"value": 80,
"description": "Non-shonpai weak kabe tile",
}
    # weak shonpai kabe is actually less suspicious than a strong one
SHONPAI_KABE_WEAK = {
"value": 120,
"description": "Shonpai weak kabe tile",
}
NON_SHONPAI_KABE_STRONG_OPEN_HAND = {
"value": 60,
"description": "Non-shonpai strong kabe tile (against open hand)",
}
SHONPAI_KABE_STRONG_OPEN_HAND = {
"value": 300,
"description": "Shonpai strong kabe tile (against open hand)",
}
NON_SHONPAI_KABE_WEAK_OPEN_HAND = {
"value": 120,
"description": "Non-shonpai weak kabe tile (against open hand)",
}
SHONPAI_KABE_WEAK_OPEN_HAND = {
"value": 200,
"description": "Shonpai weak kabe tile (against open hand)",
}
# suji tiles
SUJI_19_NOT_SHONPAI = {
"value": 40,
"description": "Non-shonpai 1 or 9 with suji",
}
SUJI_19_SHONPAI = {
"value": 80,
"description": "Shonpai 1 or 9 with suji",
}
SUJI = {
"value": 120,
"description": "Default suji",
}
SUJI_28_ON_RIICHI = {
"value": 300,
"description": "Suji on 2 or 8 on riichi declaration",
}
SUJI_37_ON_RIICHI = {
"value": 400,
"description": "Suji on 3 or 7 on riichi declaration",
}
SUJI_19_NOT_SHONPAI_OPEN_HAND = {
"value": 100,
"description": "Non-shonpai 1 or 9 with suji (against open hand)",
}
SUJI_19_SHONPAI_OPEN_HAND = {
"value": 200,
"description": "Shonpai 1 or 9 with suji (against open hand)",
}
SUJI_OPEN_HAND = {
"value": 160,
"description": "Default suji (against open hand)",
}
# possible ryanmen waits
RYANMEN_BASE_SINGLE = {
"value": 300,
"description": "Base danger for possible wait in a single ryanmen",
}
RYANMEN_BASE_DOUBLE = {
"value": 500,
"description": "Base danger for possible wait in two ryanmens",
}
# bonus dangers for possible ryanmen waits
BONUS_MATAGI_SUJI = {
"value": 80,
"description": "Additional danger for matagi-suji pattern",
}
BONUS_AIDAYONKEN = {
"value": 80,
"description": "Additional danger for aidayonken pattern",
}
BONUS_EARLY_5 = {
"value": 80,
"description": "Additional danger for 1 and 9 in case of early 5 discarded in that suit",
}
BONUS_EARLY_28 = {
"value": -80,
"description": "Negative danger for 19 after early 28",
}
BONUS_EARLY_37 = {
"value": -60,
"description": "Negative danger for 1289 after early 37",
}
# doras
DORA_BONUS = {
"value": 200,
"description": "Additional danger for tile being a dora",
}
DORA_CONNECTOR_BONUS = {
"value": 80,
"description": "Additional danger for tile being dora connector",
}
# early discards - these are considered only if ryanmen is possible
NEGATIVE_BONUS_19_EARLY_2378 = {
"value": -80,
"description": "Subtracted danger for 1 or 9 because of early 2, 3, 7 or 8 discard",
}
NEGATIVE_BONUS_28_EARLY_37 = {
"value": -40,
"description": "Subtracted danger for 2 or 8 because of early 3 or 7 discard",
}
# bonus danger for different yaku
# they may add up
HONITSU_THIRD_HONOR_BONUS_DANGER = {
"value": 80,
"description": "Additional danger for third honor against honitsu hands",
}
HONITSU_SECOND_HONOR_BONUS_DANGER = {
"value": 160,
"description": "Additional danger for second honor against honitsu hands",
}
HONITSU_SHONPAI_HONOR_BONUS_DANGER = {
"value": 280,
"description": "Additional danger for shonpai honor against honitsu hands",
}
TOITOI_SECOND_YAKUHAI_HONOR_BONUS_DANGER = {
"value": 120,
"description": "Additional danger for second honor against honitsu hands",
}
TOITOI_SHONPAI_NON_YAKUHAI_BONUS_DANGER = {
"value": 160,
"description": "Additional danger for non-yakuhai shonpai tiles agains toitoi hands",
}
TOITOI_SHONPAI_YAKUHAI_BONUS_DANGER = {
"value": 240,
"description": "Additional danger for shonpai yakuhai against toitoi hands",
}
TOITOI_SHONPAI_DORA_BONUS_DANGER = {
"value": 240,
"description": "Additional danger for shonpai dora tiles agains toitoi hands",
}
ATODZUKE_YAKUHAI_HONOR_BONUS_DANGER = {
"value": 400,
"description": "Bonus danger yakuhai tiles for atodzuke yakuhai hands",
}
###############
# The following constants don't follow the logic of other constants, so they are not dictionaries
##############
# count of possible forms
FORM_BONUS_DESCRIPTION = "Forms bonus"
FORM_BONUS_KANCHAN = 3
FORM_BONUS_PENCHAN = 3
FORM_BONUS_SYANPON = 12
FORM_BONUS_TANKI = 12
FORM_BONUS_RYANMEN = 8
# suji counting, (SUJI_COUNT_BOUNDARY - n) * SUJI_COUNT_MODIFIER
# We count how many ryanmen waits are still possible. Maximum n is 18, minimum is 1.
    # If there are many possible ryanmen waits left, we consider the situation less dangerous
    # than if there are few possible ryanmen waits left.
    # If n is 0, we don't consider this as a factor at all, because that means the wait is not a ryanmen.
    # Actually that should mean that non-ryanmen waits are now much more dangerous than before.
SUJI_COUNT_BOUNDARY = 10
SUJI_COUNT_MODIFIER = 20
# borders indicating late round
ALMOST_LATE_ROUND = 10
LATE_ROUND = 12
VERY_LATE_ROUND = 15
@staticmethod
def make_unverified_suji_coeff(value):
return {"value": value, "description": "Additional bonus for number of unverified suji"}
@staticmethod
def is_safe(danger):
return danger == TileDanger.IMPOSSIBLE_WAIT or danger == TileDanger.SAFE_AGAINST_THREATENING_HAND
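# Hedged usage sketch (an addition, not part of the original module): illustrates the
# suji-count arithmetic described above; the helper name and the count of 4 remaining
# ryanmen waits are assumptions chosen purely for demonstration.
def _example_unverified_suji_bonus(possible_ryanmen_left=4):
    # (SUJI_COUNT_BOUNDARY - n) * SUJI_COUNT_MODIFIER = (10 - 4) * 20 = 120
    value = (TileDanger.SUJI_COUNT_BOUNDARY - possible_ryanmen_left) * TileDanger.SUJI_COUNT_MODIFIER
    return TileDanger.make_unverified_suji_coeff(value)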
class DangerBorder:
IGNORE = 1000000
EXTREME = 1200
VERY_HIGH = 1000
HIGH = 800
UPPER_MEDIUM = 700
MEDIUM = 600
LOWER_MEDIUM = 500
UPPER_LOW = 400
LOW = 300
VERY_LOW = 200
EXTREMELY_LOW = 120
LOWEST = 80
BETAORI = 0
one_step_down_dict = dict(
{
IGNORE: EXTREME,
EXTREME: VERY_HIGH,
VERY_HIGH: HIGH,
HIGH: UPPER_MEDIUM,
UPPER_MEDIUM: MEDIUM,
MEDIUM: LOWER_MEDIUM,
LOWER_MEDIUM: UPPER_LOW,
UPPER_LOW: LOW,
LOW: VERY_LOW,
VERY_LOW: EXTREMELY_LOW,
EXTREMELY_LOW: LOWEST,
LOWEST: BETAORI,
BETAORI: BETAORI,
}
)
one_step_up_dict = dict(
{
IGNORE: IGNORE,
EXTREME: IGNORE,
VERY_HIGH: EXTREME,
HIGH: VERY_HIGH,
UPPER_MEDIUM: HIGH,
MEDIUM: UPPER_MEDIUM,
LOWER_MEDIUM: MEDIUM,
UPPER_LOW: LOWER_MEDIUM,
LOW: UPPER_LOW,
VERY_LOW: LOW,
EXTREMELY_LOW: VERY_LOW,
LOWEST: EXTREMELY_LOW,
# betaori means betaori, don't tune it up
BETAORI: BETAORI,
}
)
late_danger_dict = dict(
{
IGNORE: IGNORE,
EXTREME: VERY_HIGH,
VERY_HIGH: HIGH,
HIGH: UPPER_MEDIUM,
UPPER_MEDIUM: MEDIUM,
MEDIUM: LOWER_MEDIUM,
LOWER_MEDIUM: UPPER_LOW,
UPPER_LOW: LOW,
LOW: VERY_LOW,
VERY_LOW: EXTREMELY_LOW,
EXTREMELY_LOW: LOWEST,
LOWEST: BETAORI,
BETAORI: BETAORI,
}
)
very_late_danger_dict = dict(
{
IGNORE: VERY_HIGH,
EXTREME: HIGH,
VERY_HIGH: UPPER_MEDIUM,
HIGH: MEDIUM,
UPPER_MEDIUM: LOWER_MEDIUM,
MEDIUM: UPPER_LOW,
LOWER_MEDIUM: LOW,
UPPER_LOW: VERY_LOW,
LOW: EXTREMELY_LOW,
VERY_LOW: LOWEST,
EXTREMELY_LOW: BETAORI,
LOWEST: BETAORI,
BETAORI: BETAORI,
}
)
@staticmethod
def tune_down(danger_border, steps):
assert steps >= 0
for _ in range(steps):
danger_border = DangerBorder.one_step_down_dict[danger_border]
return danger_border
@staticmethod
def tune_up(danger_border, steps):
assert steps >= 0
for _ in range(steps):
danger_border = DangerBorder.one_step_up_dict[danger_border]
return danger_border
@staticmethod
def tune(danger_border, value):
if value > 0:
return DangerBorder.tune_up(danger_border, value)
elif value < 0:
return DangerBorder.tune_down(danger_border, abs(value))
return danger_border
@staticmethod
def tune_for_round(player, danger_border, shanten):
danger_border_dict = None
if shanten == 0:
if len(player.discards) > TileDanger.LATE_ROUND:
danger_border_dict = DangerBorder.late_danger_dict
if len(player.discards) > TileDanger.VERY_LATE_ROUND:
danger_border_dict = DangerBorder.very_late_danger_dict
elif shanten == 1:
if len(player.discards) > TileDanger.LATE_ROUND:
danger_border_dict = DangerBorder.very_late_danger_dict
elif shanten == 2:
if len(player.discards) > TileDanger.ALMOST_LATE_ROUND:
danger_border_dict = DangerBorder.late_danger_dict
if len(player.discards) > TileDanger.LATE_ROUND:
return DangerBorder.BETAORI
if not danger_border_dict:
return danger_border
return danger_border_dict[danger_border]
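# Hedged usage sketch (an addition, not part of the original module): the step counts
# below are arbitrary values chosen to show how borders move through the tuning dicts.
def _example_tune_danger_border():
    assert DangerBorder.tune(DangerBorder.HIGH, -2) == DangerBorder.MEDIUM
    assert DangerBorder.tune(DangerBorder.HIGH, 1) == DangerBorder.VERY_HIGH
    # BETAORI never tunes up: it always stays BETAORI
    return DangerBorder.tune(DangerBorder.BETAORI, 3)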
class EnemyDanger:
THREAT_RIICHI = {
"id": "threatening_riichi",
"description": "Enemy called riichi",
}
THREAT_OPEN_HAND_AND_MULTIPLE_DORA = {
"id": "threatening_open_hand_dora",
"description": "Enemy opened hand with 3+ dora and now is 6+ step",
}
THREAT_EXPENSIVE_OPEN_HAND = {
"id": "threatening_3_han_meld",
"description": "Enemy opened hand has 3+ han",
}
THREAT_OPEN_HAND_UNKNOWN_COST = {
"id": "threatening_melds",
"description": "Enemy opened hand and we are not sure if it's expensive",
}
class TileDangerHandler:
"""
Place to keep information of tile danger level for each player
"""
values: dict
weighted_cost: Optional[int]
danger_border: dict
can_be_used_for_ryanmen: bool
    # if we estimate that one threat's cost is less than COST_PERCENT_THRESHOLD percent
    # of another threat's cost, we ignore it when choosing a tile for folding
COST_PERCENT_THRESHOLD = 40
def __init__(self):
"""
1, 2, 3 is our opponents seats
"""
self.values = {1: [], 2: [], 3: []}
self.weighted_cost = 0
self.danger_border = {1: {}, 2: {}, 3: {}}
self.can_be_used_for_ryanmen: bool = False
def set_danger(self, player_seat, danger):
self.values[player_seat].append(danger)
def set_danger_border(self, player_seat, danger_border: int, our_hand_cost: int, enemy_hand_cost: int):
self.danger_border[player_seat] = {
"border": danger_border,
"our_hand_cost": our_hand_cost,
"enemy_hand_cost": enemy_hand_cost,
}
def get_danger_reasons(self, player_seat):
return self.values[player_seat]
def get_danger_border(self, player_seat):
return self.danger_border[player_seat]
def get_total_danger_for_player(self, player_seat):
total = sum([x["value"] for x in self.values[player_seat]])
assert total >= 0
return total
def get_max_danger(self):
return max(self._danger_array)
def get_sum_danger(self):
return sum(self._danger_array)
def get_weighted_danger(self):
costs = [
self.get_danger_border(1).get("enemy_hand_cost") or 0,
self.get_danger_border(2).get("enemy_hand_cost") or 0,
self.get_danger_border(3).get("enemy_hand_cost") or 0,
]
max_cost = max(costs)
if max_cost == 0:
return 0
dangers = self._danger_array
weighted = 0
num_dangers = 0
for cost, danger in zip(costs, dangers):
if cost * 100 / max_cost >= self.COST_PERCENT_THRESHOLD:
# divide by 8000 so it's more human-readable
weighted += cost * danger / 8000
num_dangers += 1
assert num_dangers > 0
# this way we balance out tiles that are kinda safe against all the threats
        # and tiles that are genbutsu against one threat and are dangerous against the other
if num_dangers == 1:
danger_multiplier = 1
else:
danger_multiplier = 0.8
weighted *= danger_multiplier
return weighted
def get_min_danger_border(self):
return min(self._borders_array)
def clear_danger(self, player_seat):
self.values[player_seat] = []
self.danger_border[player_seat] = {}
def is_danger_acceptable(self):
for border, danger in zip(self._borders_array, self._danger_array):
if border < danger:
return False
return True
@property
def _danger_array(self):
return [
self.get_total_danger_for_player(1),
self.get_total_danger_for_player(2),
self.get_total_danger_for_player(3),
]
@property
def _borders_array(self):
return [
self.get_danger_border(1).get("border") or 0,
self.get_danger_border(2).get("border") or 0,
self.get_danger_border(3).get("border") or 0,
]
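# Hedged usage sketch (an addition, not part of the original module): a handler tracking
# a single threat; the helper name, hand costs and the SUJI danger are illustrative
# assumptions only.
def _example_weighted_danger():
    handler = TileDangerHandler()
    handler.set_danger(1, TileDanger.SUJI)
    handler.set_danger_border(1, DangerBorder.MEDIUM, our_hand_cost=3900, enemy_hand_cost=8000)
    # single active threat: 8000 * 120 / 8000 * 1.0 == 120
    return handler.get_weighted_danger()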
| mit | 5,193,180,897,741,196,000 | 29.261649 | 107 | 0.574973 | false |
iamsteadman/bambu-urlshortener | bambu_urlshortener/migrations/0001_initial.py | 1 | 1644 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ShortURL'
db.create_table('urlshortener_shorturl', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('url', self.gf('django.db.models.fields.URLField')(unique=True, max_length=255)),
('slug', self.gf('django.db.models.fields.CharField')(unique=True, max_length=7)),
('visits', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
('last_visited', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
))
db.send_create_signal('bambu_urlshortener', ['ShortURL'])
def backwards(self, orm):
# Deleting model 'ShortURL'
db.delete_table('urlshortener_shorturl')
models = {
'bambu_urlshortener.shorturl': {
'Meta': {'object_name': 'ShortURL', 'db_table': "'urlshortener_shorturl'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_visited': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '7'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'}),
'visits': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
}
}
complete_apps = ['bambu_urlshortener'] | apache-2.0 | -556,862,470,174,253,000 | 42.289474 | 109 | 0.600365 | false |
YannThorimbert/ThorPy-1.4.3 | thorpy/elements/slidersetter.py | 1 | 4792 | from thorpy.elements.ghost import Ghost
from thorpy.elements.slider import _SliderXSetter
from thorpy.elements.element import Element
from thorpy.miscgui import functions, style, painterstyle
from thorpy.miscgui import storage
class SliderXSetter(Ghost):
"""Set of text, slider and value"""
def __init__(self,
length,
limvals=None,
text="",
elements=None,
normal_params=None,
namestyle=None,
valuestyle=None,
type_=float,
initial_value=None):
"""Slider for choosing a value.
<length>: single int value specifying the length of slider in pixels.
<limvals>: 2-tuple specifying the min and max values.
<text>: text preceeding the element.
<type_>: the type of the number to be chosen (e.g int or float)
<initial_value>: the initial value. If None, set to minimum value.
"""
namestyle = style.STYLE_SLIDER_NAME if namestyle is None else namestyle
valuestyle=style.STYLE_SLIDER_VALUE if valuestyle is None else valuestyle
Ghost.__init__(self, elements, normal_params)
self._slider_el=_SliderXSetter(length, limvals, "", initial_value=initial_value)
self._slider_el.finish()
self.add_elements([self._slider_el])
self._value_type = type_
self._round_decimals = 2
        self._name_element = self._get_name_element(text, namestyle) # inherited from setter
self._value_element = self._get_value_element(valuestyle)
self.add_elements([self._name_element, self._value_element])
self._name_element.rank = 1
self._slider_el.rank = 2
self._value_element.rank = 3
self.sort_children_by_rank()
self._storer_rect = None
self._refresh_pos()
def finish(self):
Ghost.finish(self)
self._refresh_pos()
self._slider_el._drag_element.set_setter()
value = str(self._slider_el.get_value())
self._value_element.set_text(value)
def set_value(self, value):
self._slider_el.get_dragger().place_at(value)
self.refresh_value()
def show_value(self, show_value):
self._value_element.visible = show_value
def _get_name_element(self, name, namestyle):
painter = functions.obtain_valid_painter(
painterstyle.CHECKER_NAME_PAINTER,
size=style.SIZE)
el = Element(name)
el.set_painter(painter)
if namestyle:
el.set_style(namestyle)
el.finish()
return el
def _get_value_element(self, valuestyle):
painter = functions.obtain_valid_painter(
painterstyle.CHECKER_VALUE_PAINTER,
size=style.CHECK_SIZE)
el = Element(str(self.get_value()))
el.set_painter(painter)
if valuestyle:
el.set_style(valuestyle)
el.finish()
return el
def _refresh_pos(self):
storage.store(self, mode="h")
self.fit_children()
def refresh_value(self):
self._value_element.unblit()
self._value_element.update()
value = str(self.get_value())
self._value_element.set_text(value)
self._value_element.blit()
self._value_element.update()
def get_value(self):
value = self._slider_el.get_value()
return self._value_type(value)
    def get_storer_rect(self): #!!! normally nothing needed here
tmp = self.get_value()
self._value_element.set_text(str(self._slider_el._limvals[1]))
rect = self.get_family_rect()
self._value_element.set_text(str(tmp))
return rect
## def set_font_color(self, color, state=None, center_title=True):
## """set font color for a given state"""
## self._name_element.set_font_color(color, state, center_title)
##
## def set_font_size(self, size, state=None, center_title=True):
## """set font size for a given state"""
## SliderX.set_font_size(self, size, state, center_title)
## self._name_element.set_font_size(size, state, center_title)
##
## def set_font_effects(self, biu, state=None, center=True, preserve=False):
## """biu = tuple : (bold, italic, underline)"""
## SliderX.set_font_effects(self, bio, state, center, preserve)
## self._name_element.set_font_effects(biu, state, center, preserve)
## def pix_to_val(self, pix, x0): #!!!!!
## value = SliderX.pix_to_val(self, pix, x0)
## if self._value_type is float:
## return round(value, self._round_decimals)
## elif self._value_type is int:
## return int(round(value))
def get_help_rect(self):
return self._name_element.get_help_rect()
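# Hedged usage sketch (an addition, not part of the original module): building the setter;
# the length, limits and initial value below are illustrative assumptions, and a running
# ThorPy/pygame application is required for the element to actually be displayed.
def _example_build_slider_setter():
    setter = SliderXSetter(length=150, limvals=(0, 10), text="Value", type_=int, initial_value=5)
    setter.finish()
    return setter.get_value()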
| mit | -7,415,068,993,223,257,000 | 36.732283 | 88 | 0.603923 | false |
stnava/iTensorFlow | src/r2python/readArray.py | 1 | 1759 | import numpy as np
n = 3
filename = '/tmp/kbykbykfloat.raw'
with open(filename, 'rb') as f:
data = np.fromfile(f, dtype=np.float32)
array = np.reshape(data, [n, n, n])
# this array is the "by slice" transpose of the R result ...
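# Hedged sketch (an addition, not in the original script): numpy reshapes in C (row-major)
# order while R fills arrays column-major, so reversing the axes recovers R's ordering.
array_r_order = array.transpose(2, 1, 0)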
# now try to read a RcppCNPy object
m = np.load("/tmp/randmat.npy")
m = np.reshape( np.load("/tmp/randmat3.npy"), [n, n, n])
# now a nifti multi-channel with nibabel
# see http://nipy.org/nibabel/gettingstarted.html
import os
import numpy as np
import nibabel as nib
img = nib.load( '/tmp/kbykbykfloat.nii.gz' )
data = img.get_data()
mimg = nib.load( '/tmp/kbykbykfloatmc.nii.gz' )
mata = mimg.get_data()
# now h5
from __future__ import print_function
import numpy as np
import h5py
fn = '/tmp/myData.h5'
with h5py.File( fn, 'r' ) as hf:
print('List of arrays in this file: \n', hf.keys())
data = hf.get('thdarr')
np_data = np.array(data)
print('Shape of the array dataset_1: \n', np_data.shape)
# now try the multi-channel image read with simpleitk
from skimage import io
import SimpleITK as sitk
fn = '/tmp/kbykbykfloatmc.nii.gz'
myimg = io.imread( fn, plugin='simpleitk').astype(float)
import numpy as np
n = 256
with open( fn, 'rb') as f:
data = np.fromfile( f, dtype=np.double )
array = np.reshape( data, [n, n] )
from PIL import Image
import numpy as np
w, h = 512, 512
data = np.zeros((h, w, 3), dtype=np.uint8)
data[256, 256] = [255, 0, 0]
img = Image.fromarray(data, 'RGB')
img.show()
w, h = 512, 512
data = np.zeros((h, w), dtype='d')
data[256, 256] = 1.5
img = Image.fromarray(data, 'F')
img.show()
from PIL import Image
import numpy as np
from scipy.misc import toimage
o = 182
m = 218
n = 182
# NOTE: `ofn` is not defined earlier in this script; it must point to an existing .npy file
data = np.load( ofn )
array = np.reshape( data, [ o, m, n ] )
toimage(array).show()
| apache-2.0 | 7,712,403,683,331,870,000 | 22.77027 | 60 | 0.664582 | false |
AunShiLord/sympy | sympy/physics/tests/test_pring.py | 4 | 1072 | from sympy.physics.pring import wavefunction, energy
from sympy import pi, integrate, sqrt, exp, simplify, I
from sympy.abc import m, x, r
from sympy.physics.quantum.constants import hbar
def test_wavefunction():
Psi = {
0: (1/sqrt(2 * pi)),
1: (1/sqrt(2 * pi)) * exp(I * x),
2: (1/sqrt(2 * pi)) * exp(2 * I * x),
3: (1/sqrt(2 * pi)) * exp(3 * I * x)
}
for n in Psi:
assert simplify(wavefunction(n, x) - Psi[n]) == 0
def test_norm(n=1):
# Maximum "n" which is tested:
for i in range(n + 1):
assert integrate(
wavefunction(i, x) * wavefunction(-i, x), (x, 0, 2 * pi)) == 1
def test_orthogonality(n=1):
# Maximum "n" which is tested:
for i in range(n + 1):
for j in range(i+1, n+1):
assert integrate(
wavefunction(i, x) * wavefunction(j, x), (x, 0, 2 * pi)) == 0
def test_energy(n=1):
# Maximum "n" which is tested:
for i in range(n+1):
assert simplify(
energy(i, m, r) - ((i**2 * hbar**2) / (2 * m * r**2))) == 0
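# Worked example (an illustrative addition, not part of the original test module):
# for n = 2 the particle-on-a-ring energy is E_2 = (2**2 * hbar**2) / (2*m*r**2)
# = 2*hbar**2 / (m*r**2).
def _example_energy_n2():
    return simplify(energy(2, m, r) - 2 * hbar**2 / (m * r**2)) == 0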
| bsd-3-clause | -7,457,749,085,143,069,000 | 27.972973 | 77 | 0.528918 | false |
olebole/astrometry.net | net/sdss_image.py | 1 | 4000 | from __future__ import print_function
import math
import os
import urllib
if __name__ == '__main__':
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'astrometry.net.settings'
from astrometry.net.log import *
from astrometry.net.tmpfile import *
from astrometry.net import settings
def plot_sdss_image(wcsfn, plotfn, image_scale=1.0, debug_ps=None):
from astrometry.util import util as anutil
from astrometry.blind import plotstuff as ps
# Parse the wcs.fits file
wcs = anutil.Tan(wcsfn, 0)
# grab SDSS tiles with about the same resolution as this image.
pixscale = wcs.pixel_scale()
pixscale = pixscale / image_scale
logmsg('Original image scale is', wcs.pixel_scale(), 'arcsec/pix; scaled', image_scale, '->', pixscale)
# size of SDSS image tiles to request, in pixels
sdsssize = 512
scale = sdsssize * pixscale / 60.
# healpix-vs-north-up rotation
nside = anutil.healpix_nside_for_side_length_arcmin(scale / math.sqrt(2.))
nside = 2 ** int(math.ceil(math.log(nside)/math.log(2.)))
logmsg('Next power-of-2 nside:', nside)
ra,dec = wcs.radec_center()
logmsg('Image center is RA,Dec', ra, dec)
dirnm = os.path.join(settings.SDSS_TILE_DIR, 'nside%i'%nside)
if not os.path.exists(dirnm):
os.makedirs(dirnm)
#hp = anutil.radecdegtohealpix(ra, dec, nside)
#logmsg('Healpix of center:', hp)
radius = wcs.radius()
hps = anutil.healpix_rangesearch_radec(ra, dec, radius, nside)
logmsg('Healpixes in range:', len(hps), ': ', hps)
scale = math.sqrt(2.) * anutil.healpix_side_length_arcmin(nside) * 60. / float(sdsssize)
logmsg('Grabbing SDSS tile with scale', scale, 'arcsec/pix')
size = [int(image_scale*wcs.imagew),int(image_scale*wcs.imageh)]
plot = ps.Plotstuff(outformat='png', wcsfn=wcsfn, size=size)
plot.scale_wcs(image_scale)
img = plot.image
img.format = ps.PLOTSTUFF_FORMAT_JPG
img.resample = 1
for hp in hps:
fn = os.path.join(dirnm, '%i.jpg'%hp)
logmsg('Checking for filename', fn)
if not os.path.exists(fn):
ra,dec = anutil.healpix_to_radecdeg(hp, nside, 0.5, 0.5)
logmsg('Healpix center is RA,Dec', ra, dec)
url = ('http://skyservice.pha.jhu.edu/DR8/ImgCutout/getjpeg.aspx?' +
'ra=%f&dec=%f&scale=%f&opt=&width=%i&height=%i' %
(ra, dec, scale, sdsssize, sdsssize))
urllib.urlretrieve(url, fn)
logmsg('Wrote', fn)
swcsfn = os.path.join(dirnm, '%i.wcs'%hp)
logmsg('Checking for WCS', swcsfn)
if not os.path.exists(swcsfn):
# Create WCS header
cd = scale / 3600.
swcs = anutil.Tan(ra, dec, sdsssize/2 + 0.5, sdsssize/2 + 0.5,
-cd, 0, 0, -cd, sdsssize, sdsssize)
swcs.write_to(swcsfn)
logmsg('Wrote WCS to', swcsfn)
img.set_wcs_file(swcsfn, 0)
img.set_file(fn)
plot.plot('image')
if debug_ps is not None:
fn = debug_ps.getnext()
plot.write(fn)
print('Wrote', fn)
if debug_ps is not None:
out = plot.outline
plot.color = 'white'
plot.alpha = 0.25
for hp in hps:
swcsfn = os.path.join(dirnm, '%i.wcs'%hp)
ps.plot_outline_set_wcs_file(out, swcsfn, 0)
plot.plot('outline')
plot.write(fn)
print('Wrote', fn)
plot.write(plotfn)
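# Hedged illustration (an addition, not part of the original module): the arcminute scale
# of one SDSS tile request for a hypothetical 1.0 arcsec/pixel image; both numbers are
# assumptions used only for demonstration.
def _example_sdss_tile_scale(pixscale=1.0, sdsssize=512):
    # one 512-pixel tile spans 512 * 1.0 / 60 ~= 8.5 arcminutes
    return sdsssize * pixscale / 60.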
if __name__ == '__main__':
import logging
from astrometry.util import util as anutil
logging.basicConfig(format='%(message)s',
level=logging.DEBUG)
wcsfn = 'wcs.fits'
outfn = 'sdss.png'
if True:
wcs = anutil.Tan(wcsfn)
scale = 640. / wcs.get_width()
print('Scale', scale)
from astrometry.util.plotutils import *
ps = PlotSequence('sdss')
plot_sdss_image(wcsfn, outfn, image_scale=scale, debug_ps=ps)
| bsd-3-clause | -633,198,197,810,560,600 | 32.333333 | 107 | 0.59475 | false |
galaxor/Nodewatcher | nodewatcher/web/nodes/management/commands/preparedb.py | 1 | 6030 | import subprocess
import time
import optparse
import os.path
import traceback
from django.conf import settings
from django.core import management
from django.core import serializers
from django.core.management import base as management_base
from django.core.management import color as management_color
from django.db import connection, transaction
# TODO: Change all prints to self.stdout.write for Django 1.3
class Command(management_base.BaseCommand):
"""
This class defines a command for manage.py which prepares
and initializes the database.
"""
args = "[dump_file]"
help = "Prepare and initialize the database. If optional dump_file is specified it is used to populate the database."
requires_model_validation = False
option_list = management_base.BaseCommand.option_list + (
optparse.make_option('--noinput', action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.'),
)
def handle(self, *args, **options):
"""
Prepares and initializes the database.
"""
verbosity = int(options.get('verbosity', 1))
interactive = options.get('interactive', True)
show_traceback = options.get('traceback', False)
def ensure_success(errcode):
if errcode != 0:
raise management_base.CommandError('Command failed to execute, aborting!')
if len(args) > 1:
raise management_base.CommandError('Too many arguments!')
elif len(args) > 0 and not os.path.exists(args[0]):
raise management_base.CommandError("Given dump file '%s' does not exist!" % args[0])
# Determine the database backend
db_backend = settings.DATABASES['default']['ENGINE']
if db_backend.find('postgresql') != -1:
db_backend = 'postgresql'
elif db_backend.find('sqlite') != -1:
db_backend = 'sqlite'
elif db_backend.find('mysql') != -1:
db_backend = 'mysql'
# Close the connection before continuing since the setup script will
# recreate databases
connection.close()
# TODO: manage.py script could be run somewhere else, with some other working directory
if os.path.isfile('scripts/%s_init.sh' % db_backend):
print "!!! NOTE: A setup script exists for your database. Be sure that it"
print "!!! does what you want! You may have to edit the script and YOU"
print "!!! MUST REVIEW IT! Otherwise the script may bork your installation."
if interactive:
print "Press CTRL + C to abort now."
try:
time.sleep(5)
except KeyboardInterrupt:
raise management_base.CommandError('Aborted by user.')
if verbosity >= 1:
print ">>> Executing database setup script 'scripts/%s_init.sh'..." % db_backend
ensure_success(subprocess.call(["scripts/%s_init.sh" % db_backend, settings.DATABASES['default']['NAME']]))
else:
print "!!! NOTE: This command assumes that you have created and configured"
print "!!! a proper database via settings.py! The database MUST be completely"
print "!!! empty (no tables or sequences should be present). If this is not"
print "!!! the case, this operation WILL FAIL!"
if db_backend == 'postgresql':
print "!!!"
print "!!! You are using a PostgreSQL database. Be sure that you have"
print "!!! installed the IP4R extension or schema sync WILL FAIL!"
print "!!! "
print "!!! More information: http://ip4r.projects.postgresql.org"
print "!!!"
if interactive:
print "Press CTRL + C to abort now."
try:
time.sleep(5)
except KeyboardInterrupt:
raise management_base.CommandError('Aborted by user.')
if len(args) > 0:
options['interactive'] = False # We will populate with our data so no need for asking about admin user
if verbosity >= 1:
print ">>> Performing initial database sync..."
management.call_command("syncdb", **options)
if len(args) < 1:
if verbosity >= 1:
print ">>> Initialization completed."
return
if verbosity >= 1:
print ">>> Performing data cleanup..."
try:
cursor = connection.cursor()
cursor.execute("DELETE FROM auth_group_permissions")
cursor.execute("DELETE FROM auth_group")
cursor.execute("DELETE FROM auth_permission")
cursor.execute("DELETE FROM auth_user")
cursor.execute("DELETE FROM django_content_type")
cursor.execute("DELETE FROM django_site")
cursor.execute("DELETE FROM policy_trafficcontrolclass")
transaction.commit_unless_managed()
except:
raise management_base.CommandError('Data cleanup operation failed, aborting!')
if db_backend == 'mysql':
connection.cursor().execute("SET FOREIGN_KEY_CHECKS = 0")
elif db_backend == 'sqlite':
connection.cursor().execute("PRAGMA foreign_keys = 0")
transaction.commit_unless_managed()
if verbosity >= 1:
print ">>> Importing data from '%s'..." % args[0]
transaction.enter_transaction_management()
transaction.managed(True)
models = set()
try:
count = 0
for holder in serializers.deserialize('json', open(args[0], 'r')):
models.add(holder.object.__class__)
holder.save()
count += 1
if verbosity >= 1:
print "Installed %d object(s)" % count
except:
transaction.rollback()
transaction.leave_transaction_management()
if show_traceback:
traceback.print_exc()
raise management_base.CommandError('Data import operation failed, aborting!')
# Reset sequences
for line in connection.ops.sequence_reset_sql(management_color.no_style(), models):
cursor.execute(line)
transaction.commit()
transaction.leave_transaction_management()
connection.close()
# Additional syncdb for fixture overrides
management.call_command("syncdb", **options)
if verbosity >= 1:
print ">>> Import completed."
| agpl-3.0 | -1,226,115,389,493,172,000 | 34.680473 | 119 | 0.656053 | false |
rmst/chi | examples/experimental/bdpg_chains2.py | 1 | 3857 | """ Bayesian Deterministic Policy Gradient evaluated on the
didactic "chain" environment
"""
import tensorflow as tf
from gym import Wrapper
from tensorflow.python.layers.utils import smart_cond
from tensorflow.python.ops.variable_scope import get_local_variable
import chi
from chi import Experiment
from chi import experiment, model
from chi.rl import ReplayMemory
# chi.chi.tf_debug = True
from chi.rl.bdpg import BdpgAgent
from chi.rl.ddpg import DdpgAgent
from chi.rl.util import print_env
@experiment
def bdpg_chains2(self: Experiment, logdir=None, env=1, heads=3, n=50, bootstrap=False, sr=50000):
from tensorflow.contrib import layers
import gym
from gym import spaces
from gym import wrappers
import numpy as np
from tensorflow.contrib.framework import arg_scope
def gym_make(id) -> gym.Env:
return gym.make(id)
chi.set_loglevel('debug')
if env == 0:
import gym_mix
from chi.rl.wrappers import PenalizeAction
env = gym_mix.envs.ChainEnv(n)
env = PenalizeAction(env, .001, 1)
elif env == 1:
# env = gym.make('Pendulum-v0')
env = gym.make('MountainCarContinuous-v0')
if bootstrap:
class Noise(Wrapper):
def __init__(self, env):
super().__init__(env)
self.n = 3
self.observation_space = gym.spaces.Box(
np.concatenate((self.observation_space.low, np.full([self.n], -1))),
np.concatenate((self.observation_space.high, np.full([self.n], 1))))
def _reset(self):
s = super()._reset()
self.noise = np.random.uniform(-1, 1, [self.n])
s = np.concatenate([s, self.noise])
return s
def _step(self, action):
s, r, d, i = super()._step(action)
s = np.concatenate([s, self.noise])
return s, r, d, i
env = Noise(env)
print_env(env)
def pp(x):
# v = get_local_variable('noise', [x.shape[0], 100], initializer=tf.random_normal_initializer)
# y = tf.concat(x, v)
return x
def ac(x):
with tf.name_scope('actor_head'):
x = layers.fully_connected(x, 50, biases_initializer=layers.xavier_initializer())
x = layers.fully_connected(x, 50, biases_initializer=layers.xavier_initializer())
# a = layers.fully_connected(x, env.action_space.shape[0], None, weights_initializer=tf.random_normal_initializer(0, 1e-4))
a = layers.fully_connected(x, env.action_space.shape[0], None)
return a
def cr(x, a):
with tf.name_scope('critic_head'):
x = layers.fully_connected(x, 50, biases_initializer=layers.xavier_initializer())
x = tf.concat([x, a], axis=1)
x = layers.fully_connected(x, 50, biases_initializer=layers.xavier_initializer())
# q = layers.fully_connected(x, 1, None, weights_initializer=tf.random_normal_initializer(0, 1e-4))
q = layers.fully_connected(x, 1, None)
return tf.squeeze(q, 1)
if bootstrap:
agent = DdpgAgent(env, ac, cr, replay_start=sr, noise=lambda a: a)
else:
agent = DdpgAgent(env, ac, cr, replay_start=sr)
threshold = getattr(getattr(env, 'spec', None), 'reward_threshold', None)
for ep in range(100000):
R, info = agent.play_episode()
if ep % 20 == 0:
head = info.get('head')
print(f'Return of episode {ep} after timestep {agent.t}: {R} (head = {head}, threshold = {threshold})')
if ep % 100 == 0 and bootstrap:
pass
#
# @chi.function(logging_policy=lambda _: True)
# def plot():
# # obsp = env.observation_space
# # h = obsp.high
# # l = obsp.low
# # x, y = tf.meshgrid(tf.linspace(l[0], h[0], 100), tf.linspace(l[1], h[1], 100))
# # x = tf.reshape(x, [-1])
# # y = tf.reshape(y, [-1])
# # inp = tf.stack(x, y, axis=1)
#
# x = tf.linspace(0, 30, 100)
# x = tf.py_func(env.batch_features, x, tf.float32, stateful=False)
# s = pp(x)
# a0 = actor(s)
# tf.image
| mit | -7,825,289,444,006,958,000 | 30.104839 | 129 | 0.635468 | false |
phrack/ShootOFF-legacy | training_protocols/shoot_dont_shoot/__init__.py | 1 | 6290 | # Copyright (c) 2015 phrack. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import random
import threading
from training_protocols.ITrainingProtocol import ITrainingProtocol
class ShootDontShoot(ITrainingProtocol):
def __init__(self, main_window, protocol_operations, targets):
self._operations = protocol_operations
self._operations.clear_shots()
self._continue_protocol = True
self._arena_dimensions = self._operations.get_projector_arena_dimensions()
self._missed_targets = 0
self._bad_hits = 0
self._current_shoot_targets = []
self._current_dont_shoot_targets = []
self._wait_event = threading.Event()
self._operations.add_shot_list_columns(("Target",), [60])
if not self._operations.projector_arena_visible():
self._operations.say("This protocol only works on the projector arena.")
else:
self._add_targets(self._current_shoot_targets, "training_protocols/shoot_dont_shoot/shoot.target")
self._add_targets(self._current_dont_shoot_targets, "training_protocols/shoot_dont_shoot/dont_shoot.target")
self._operations.show_text_on_feed("missed targets: 0\nbad hits: 0")
self._new_round_thread = Thread(target=self._new_round,
name="new_round_thread")
self._new_round_thread.start()
def _add_targets(self, target_list, name):
# Put up between zero and three targets
target_count = random.randrange(0, 4)
for i in range(0, target_count):
x = random.randrange(0, self._arena_dimensions[0] - 100)
y = random.randrange(0, self._arena_dimensions[1] - 100)
target_list.append(self._operations.add_projector_target(name, x, y))
def shot_listener(self, shot, shot_list_item, is_hit):
return
def hit_listener(self, region, tags, shot, shot_list_item):
if "subtarget" in tags:
target_name = self._operations.get_target_name(region)
if tags["subtarget"] == "shoot":
self._remove_target(target_name)
self._current_shoot_targets.remove(target_name)
self._operations.append_shot_item_values(shot_list_item,
(tags["subtarget"],))
elif tags["subtarget"] == "dont_shoot":
self._remove_target(target_name)
self._current_dont_shoot_targets.remove(target_name)
self._bad_hits += 1
self._operations.append_shot_item_values(shot_list_item,
(tags["subtarget"],))
self._operations.say("Bad shoot!")
def _new_round(self):
# Wait ten seconds before starting another round
self._wait_event.wait(10)
if self._continue_protocol:
missed = len(self._current_shoot_targets)
self._missed_targets += missed
if missed > 0:
self._operations.say("You missed " + str(missed) + " shoot targets.")
self._operations.clear_shots()
message = "missed targets: %d\nbad hits: %d" % (self._missed_targets, self._bad_hits)
self._operations.show_text_on_feed(message)
self._remove_old_targets(self._current_shoot_targets)
self._current_shoot_targets = []
self._remove_old_targets(self._current_dont_shoot_targets)
self._current_dont_shoot_targets = []
self._add_targets(self._current_shoot_targets, "training_protocols/shoot_dont_shoot/shoot.target")
self._add_targets(self._current_dont_shoot_targets, "training_protocols/shoot_dont_shoot/dont_shoot.target")
if self._continue_protocol:
self._new_round()
def _remove_target(self, target_name):
self._operations.delete_projector_target(target_name)
def _remove_old_targets(self, target_list):
for target in target_list:
self._remove_target(target)
def reset(self, targets):
self._missed_targets = 0
self._bad_hits = 0
if not self._operations.projector_arena_visible():
self._operations.say("This protocol only works on the projector arena.")
else:
self._remove_old_targets(self._current_shoot_targets)
self._current_shoot_targets = []
self._remove_old_targets(self._current_dont_shoot_targets)
self._current_dont_shoot_targets = []
self._add_targets(self._current_shoot_targets, "training_protocols/shoot_dont_shoot/shoot.target")
self._add_targets(self._current_dont_shoot_targets, "training_protocols/shoot_dont_shoot/dont_shoot.target")
message = "missed targets: %d\nbad hits: %d" % (self._missed_targets, self._bad_hits)
self._operations.show_text_on_feed(message)
self._new_round_thread = Thread(target=self._new_round,
name="new_round_thread")
self._new_round_thread.start()
def destroy(self):
self._continue_protocol = False
self._wait_event.set()
self._remove_old_targets(self._current_shoot_targets)
self._remove_old_targets(self._current_dont_shoot_targets)
def get_info():
protocol_info = {}
protocol_info["name"] = "Shoot Don't Shoot"
protocol_info["version"] = "1.0"
protocol_info["creator"] = "phrack"
desc = "This protocol randomly puts up targets and gives you 10 seconds"
desc += "to decide which ones to shoot and which ones to ignore. If "
desc += "you do not shoot a target you are supposed to shoot, it gets "
desc += "added to your missed targets counter and the protocol says "
desc += "how many targets you missed. If you hit a target you were not "
desc += "supposed to hit, the protocol says 'bad shoot!'. Shoot the targets "
desc += "with the red ring, don't shoot the other targets."
protocol_info["description"] = desc
return protocol_info
def load(main_window, protocol_operations, targets):
return ShootDontShoot(main_window, protocol_operations, targets)
| bsd-3-clause | 5,913,113,314,758,436,000 | 42.082192 | 122 | 0.619237 | false |
yiannist/ganeti | lib/server/noded.py | 1 | 42846 | #
#
# Copyright (C) 2006, 2007, 2010, 2011, 2012, 2014 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Ganeti node daemon"""
# pylint: disable=C0103
# C0103: Functions in this module need to have a given name structure,
# and the name of the daemon doesn't match
import os
import sys
import logging
import signal
import codecs
from optparse import OptionParser
from ganeti import backend
from ganeti import constants
from ganeti import objects
from ganeti import errors
from ganeti import jstore
from ganeti import daemon
from ganeti import http
from ganeti import utils
from ganeti.storage import container
from ganeti import serializer
from ganeti import netutils
from ganeti import pathutils
from ganeti import ssconf
import ganeti.http.server # pylint: disable=W0611
queue_lock = None
def _extendReasonTrail(trail, source, reason=""):
"""Extend the reason trail with noded information
The trail is extended by appending the name of the noded functionality
"""
assert trail is not None
trail_source = "%s:%s" % (constants.OPCODE_REASON_SRC_NODED, source)
trail.append((trail_source, reason, utils.EpochNano()))
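# Hedged illustration (an addition, not part of the original module): extending a trail
# received from a client; the helper name and the initial entry are assumptions used
# only for demonstration.
def _ExampleExtendReasonTrail():
  trail = [("gnt:client", "user request", 0)]
  _extendReasonTrail(trail, "instance_start")
  # the appended entry is prefixed with constants.OPCODE_REASON_SRC_NODED
  return trail[-1]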
def _PrepareQueueLock():
"""Try to prepare the queue lock.
@return: None for success, otherwise an exception object
"""
global queue_lock # pylint: disable=W0603
if queue_lock is not None:
return None
# Prepare job queue
try:
queue_lock = jstore.InitAndVerifyQueue(must_lock=False)
return None
except EnvironmentError, err:
return err
def _RequireJobQueueLock(fn):
"""Decorator for job queue manipulating functions.
"""
QUEUE_LOCK_TIMEOUT = 10
def wrapper(*args, **kwargs):
# Locking in exclusive, blocking mode because there could be several
# children running at the same time. Waiting up to 10 seconds.
if _PrepareQueueLock() is not None:
raise errors.JobQueueError("Job queue failed initialization,"
" cannot update jobs")
queue_lock.Exclusive(blocking=True, timeout=QUEUE_LOCK_TIMEOUT)
try:
return fn(*args, **kwargs)
finally:
queue_lock.Unlock()
return wrapper
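# Hedged usage sketch (an addition, not part of the original module): a hypothetical
# helper wrapped by the decorator; its name and arguments are assumptions used only
# for illustration.
@_RequireJobQueueLock
def _ExampleUpdateJobFile(job_id, content):
  # The decorator guarantees the exclusive queue lock is held while this body runs.
  logging.debug("Would update job %s (%s bytes)", job_id, len(content))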
def _DecodeImportExportIO(ieio, ieioargs):
"""Decodes import/export I/O information.
"""
if ieio == constants.IEIO_RAW_DISK:
assert len(ieioargs) == 1
return (objects.Disk.FromDict(ieioargs[0]), )
if ieio == constants.IEIO_SCRIPT:
assert len(ieioargs) == 2
return (objects.Disk.FromDict(ieioargs[0]), ieioargs[1])
return ieioargs
def _DefaultAlternative(value, default):
"""Returns value or, if evaluating to False, a default value.
Returns the given value, unless it evaluates to False. In the latter case the
default value is returned.
@param value: Value to return if it doesn't evaluate to False
@param default: Default value
@return: Given value or the default
"""
if value:
return value
return default
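# Hedged illustration (an addition, not part of the original module): the values below
# are arbitrary examples of the fall-back behaviour.
def _ExampleDefaultAlternative():
  assert _DefaultAlternative("", "fallback") == "fallback"
  assert _DefaultAlternative("explicit", "fallback") == "explicit"
  return True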
class MlockallRequestExecutor(http.server.HttpServerRequestExecutor):
"""Subclass ensuring request handlers are locked in RAM.
"""
def __init__(self, *args, **kwargs):
utils.Mlockall()
http.server.HttpServerRequestExecutor.__init__(self, *args, **kwargs)
class NodeRequestHandler(http.server.HttpServerHandler):
"""The server implementation.
This class holds all methods exposed over the RPC interface.
"""
# too many public methods, and unused args - all methods get params
# due to the API
# pylint: disable=R0904,W0613
def __init__(self):
http.server.HttpServerHandler.__init__(self)
self.noded_pid = os.getpid()
def HandleRequest(self, req):
"""Handle a request.
"""
if req.request_method.upper() != http.HTTP_POST:
raise http.HttpBadRequest("Only the POST method is supported")
path = req.request_path
if path.startswith("/"):
path = path[1:]
method = getattr(self, "perspective_%s" % path, None)
if method is None:
raise http.HttpNotFound()
try:
result = (True, method(serializer.LoadJson(req.request_body)))
except backend.RPCFail, err:
# our custom failure exception; str(err) works fine if the
# exception was constructed with a single argument, and in
# this case, err.message == err.args[0] == str(err)
result = (False, str(err))
except errors.QuitGanetiException, err:
# Tell parent to quit
logging.info("Shutting down the node daemon, arguments: %s",
str(err.args))
os.kill(self.noded_pid, signal.SIGTERM)
# And return the error's arguments, which must be already in
# correct tuple format
result = err.args
except Exception, err: # pylint: disable=W0703
logging.exception("Error in RPC call")
result = (False, "Error while executing backend function: %s" % str(err))
return serializer.DumpJson(result)
# the new block devices --------------------------
@staticmethod
def perspective_blockdev_create(params):
"""Create a block device.
"""
(bdev_s, size, owner, on_primary, info, excl_stor) = params
bdev = objects.Disk.FromDict(bdev_s)
if bdev is None:
raise ValueError("can't unserialize data!")
return backend.BlockdevCreate(bdev, size, owner, on_primary, info,
excl_stor)
@staticmethod
def perspective_blockdev_convert(params):
"""Copy data from source block device to target.
"""
disk_src, disk_dest = params
bdev_src = objects.Disk.FromDict(disk_src)
bdev_dest = objects.Disk.FromDict(disk_dest)
return backend.BlockdevConvert(bdev_src, bdev_dest)
@staticmethod
def perspective_blockdev_pause_resume_sync(params):
"""Pause/resume sync of a block device.
"""
disks_s, pause = params
disks = [objects.Disk.FromDict(bdev_s) for bdev_s in disks_s]
return backend.BlockdevPauseResumeSync(disks, pause)
@staticmethod
def perspective_blockdev_image(params):
"""Image a block device.
"""
bdev_s, image, size = params
bdev = objects.Disk.FromDict(bdev_s)
return backend.BlockdevImage(bdev, image, size)
@staticmethod
def perspective_blockdev_wipe(params):
"""Wipe a block device.
"""
bdev_s, offset, size = params
bdev = objects.Disk.FromDict(bdev_s)
return backend.BlockdevWipe(bdev, offset, size)
@staticmethod
def perspective_blockdev_remove(params):
"""Remove a block device.
"""
bdev_s = params[0]
bdev = objects.Disk.FromDict(bdev_s)
return backend.BlockdevRemove(bdev)
@staticmethod
def perspective_blockdev_rename(params):
"""Remove a block device.
"""
devlist = [(objects.Disk.FromDict(ds), uid) for ds, uid in params[0]]
return backend.BlockdevRename(devlist)
@staticmethod
def perspective_blockdev_assemble(params):
"""Assemble a block device.
"""
bdev_s, idict, on_primary, idx = params
bdev = objects.Disk.FromDict(bdev_s)
instance = objects.Instance.FromDict(idict)
if bdev is None:
raise ValueError("can't unserialize data!")
return backend.BlockdevAssemble(bdev, instance, on_primary, idx)
@staticmethod
def perspective_blockdev_shutdown(params):
"""Shutdown a block device.
"""
bdev_s = params[0]
bdev = objects.Disk.FromDict(bdev_s)
if bdev is None:
raise ValueError("can't unserialize data!")
return backend.BlockdevShutdown(bdev)
@staticmethod
def perspective_blockdev_addchildren(params):
"""Add a child to a mirror device.
Note: this is only valid for mirror devices. It's the caller's duty
to send a correct disk, otherwise we raise an error.
"""
bdev_s, ndev_s = params
bdev = objects.Disk.FromDict(bdev_s)
ndevs = [objects.Disk.FromDict(disk_s) for disk_s in ndev_s]
if bdev is None or ndevs.count(None) > 0:
raise ValueError("can't unserialize data!")
return backend.BlockdevAddchildren(bdev, ndevs)
@staticmethod
def perspective_blockdev_removechildren(params):
"""Remove a child from a mirror device.
    This is only valid for mirror devices, of course. It's the caller's
duty to send a correct disk, otherwise we raise an error.
"""
bdev_s, ndev_s = params
bdev = objects.Disk.FromDict(bdev_s)
ndevs = [objects.Disk.FromDict(disk_s) for disk_s in ndev_s]
if bdev is None or ndevs.count(None) > 0:
raise ValueError("can't unserialize data!")
return backend.BlockdevRemovechildren(bdev, ndevs)
@staticmethod
def perspective_blockdev_getmirrorstatus(params):
"""Return the mirror status for a list of disks.
"""
disks = [objects.Disk.FromDict(dsk_s)
for dsk_s in params[0]]
return [status.ToDict()
for status in backend.BlockdevGetmirrorstatus(disks)]
@staticmethod
def perspective_blockdev_getmirrorstatus_multi(params):
"""Return the mirror status for a list of disks.
"""
(node_disks, ) = params
disks = [objects.Disk.FromDict(dsk_s) for dsk_s in node_disks]
result = []
for (success, status) in backend.BlockdevGetmirrorstatusMulti(disks):
if success:
result.append((success, status.ToDict()))
else:
result.append((success, status))
return result
@staticmethod
def perspective_blockdev_find(params):
"""Expose the FindBlockDevice functionality for a disk.
This will try to find but not activate a disk.
"""
disk = objects.Disk.FromDict(params[0])
result = backend.BlockdevFind(disk)
if result is None:
return None
return result.ToDict()
@staticmethod
def perspective_blockdev_snapshot(params):
"""Create a snapshot device.
    Note that this is only valid for LVM and ExtStorage disks; if we get passed
    something else we raise an exception. The snapshot device can be
    removed by calling the generic block device remove call.
"""
(disk, snap_name, snap_size) = params
cfbd = objects.Disk.FromDict(disk)
return backend.BlockdevSnapshot(cfbd, snap_name, snap_size)
@staticmethod
def perspective_blockdev_grow(params):
"""Grow a stack of devices.
"""
if len(params) < 5:
raise ValueError("Received only %s parameters in blockdev_grow,"
" old master?" % len(params))
cfbd = objects.Disk.FromDict(params[0])
amount = params[1]
dryrun = params[2]
backingstore = params[3]
excl_stor = params[4]
return backend.BlockdevGrow(cfbd, amount, dryrun, backingstore, excl_stor)
@staticmethod
def perspective_blockdev_close(params):
"""Closes the given block devices.
"""
disks = [objects.Disk.FromDict(cf) for cf in params[1]]
return backend.BlockdevClose(params[0], disks)
@staticmethod
def perspective_blockdev_open(params):
"""Opens the given block devices.
"""
disks = [objects.Disk.FromDict(cf) for cf in params[1]]
exclusive = params[2]
return backend.BlockdevOpen(params[0], disks, exclusive)
@staticmethod
def perspective_blockdev_getdimensions(params):
"""Compute the sizes of the given block devices.
"""
disks = [objects.Disk.FromDict(cf) for cf in params[0]]
return backend.BlockdevGetdimensions(disks)
@staticmethod
def perspective_blockdev_setinfo(params):
"""Sets metadata information on the given block device.
"""
(disk, info) = params
disk = objects.Disk.FromDict(disk)
return backend.BlockdevSetInfo(disk, info)
# blockdev/drbd specific methods ----------
@staticmethod
def perspective_drbd_disconnect_net(params):
"""Disconnects the network connection of drbd disks.
Note that this is only valid for drbd disks, so the members of the
disk list must all be drbd devices.
"""
(disks,) = params
disks = [objects.Disk.FromDict(disk) for disk in disks]
return backend.DrbdDisconnectNet(disks)
@staticmethod
def perspective_drbd_attach_net(params):
"""Attaches the network connection of drbd disks.
Note that this is only valid for drbd disks, so the members of the
disk list must all be drbd devices.
"""
disks, multimaster = params
disks = [objects.Disk.FromDict(disk) for disk in disks]
return backend.DrbdAttachNet(disks, multimaster)
@staticmethod
def perspective_drbd_wait_sync(params):
"""Wait until DRBD disks are synched.
Note that this is only valid for drbd disks, so the members of the
disk list must all be drbd devices.
"""
(disks,) = params
disks = [objects.Disk.FromDict(disk) for disk in disks]
return backend.DrbdWaitSync(disks)
@staticmethod
def perspective_drbd_needs_activation(params):
"""Checks if the drbd devices need activation
Note that this is only valid for drbd disks, so the members of the
disk list must all be drbd devices.
"""
(disks,) = params
disks = [objects.Disk.FromDict(disk) for disk in disks]
return backend.DrbdNeedsActivation(disks)
@staticmethod
def perspective_drbd_helper(_):
"""Query drbd helper.
"""
return backend.GetDrbdUsermodeHelper()
# export/import --------------------------
@staticmethod
def perspective_finalize_export(params):
"""Expose the finalize export functionality.
"""
instance = objects.Instance.FromDict(params[0])
snap_disks = []
for disk in params[1]:
if isinstance(disk, bool):
snap_disks.append(disk)
else:
snap_disks.append(objects.Disk.FromDict(disk))
return backend.FinalizeExport(instance, snap_disks)
@staticmethod
def perspective_export_info(params):
"""Query information about an existing export on this node.
The given path may not contain an export, in which case we return
None.
"""
path = params[0]
return backend.ExportInfo(path)
@staticmethod
def perspective_export_list(params):
"""List the available exports on this node.
Note that as opposed to export_info, which may query data about an
export in any path, this only queries the standard Ganeti path
(pathutils.EXPORT_DIR).
"""
return backend.ListExports()
@staticmethod
def perspective_export_remove(params):
"""Remove an export.
"""
export = params[0]
return backend.RemoveExport(export)
# block device ---------------------
@staticmethod
def perspective_bdev_sizes(params):
"""Query the list of block devices
"""
devices = params[0]
return backend.GetBlockDevSizes(devices)
# volume --------------------------
@staticmethod
def perspective_lv_list(params):
"""Query the list of logical volumes in a given volume group.
"""
vgname = params[0]
return backend.GetVolumeList(vgname)
@staticmethod
def perspective_vg_list(params):
"""Query the list of volume groups.
"""
return backend.ListVolumeGroups()
# Storage --------------------------
@staticmethod
def perspective_storage_list(params):
"""Get list of storage units.
"""
(su_name, su_args, name, fields) = params
return container.GetStorage(su_name, *su_args).List(name, fields)
@staticmethod
def perspective_storage_modify(params):
"""Modify a storage unit.
"""
(su_name, su_args, name, changes) = params
return container.GetStorage(su_name, *su_args).Modify(name, changes)
@staticmethod
def perspective_storage_execute(params):
"""Execute an operation on a storage unit.
"""
(su_name, su_args, name, op) = params
return container.GetStorage(su_name, *su_args).Execute(name, op)
# bridge --------------------------
@staticmethod
def perspective_bridges_exist(params):
"""Check if all bridges given exist on this node.
"""
bridges_list = params[0]
return backend.BridgesExist(bridges_list)
# instance --------------------------
@staticmethod
def perspective_instance_os_add(params):
"""Install an OS on a given instance.
"""
inst_s = params[0]
inst = objects.Instance.FromDict(inst_s)
reinstall = params[1]
debug = params[2]
return backend.InstanceOsAdd(inst, reinstall, debug)
@staticmethod
def perspective_instance_run_rename(params):
"""Runs the OS rename script for an instance.
"""
inst_s, old_name, debug = params
inst = objects.Instance.FromDict(inst_s)
return backend.RunRenameInstance(inst, old_name, debug)
@staticmethod
def perspective_instance_shutdown(params):
"""Shutdown an instance.
"""
instance = objects.Instance.FromDict(params[0])
timeout = params[1]
trail = params[2]
_extendReasonTrail(trail, "shutdown")
return backend.InstanceShutdown(instance, timeout, trail)
@staticmethod
def perspective_instance_start(params):
"""Start an instance.
"""
(instance_name, startup_paused, trail) = params
instance = objects.Instance.FromDict(instance_name)
_extendReasonTrail(trail, "start")
return backend.StartInstance(instance, startup_paused, trail)
@staticmethod
def perspective_hotplug_device(params):
"""Hotplugs device to a running instance.
"""
(idict, action, dev_type, ddict, extra, seq) = params
instance = objects.Instance.FromDict(idict)
if dev_type == constants.HOTPLUG_TARGET_DISK:
device = objects.Disk.FromDict(ddict)
elif dev_type == constants.HOTPLUG_TARGET_NIC:
device = objects.NIC.FromDict(ddict)
else:
assert dev_type in constants.HOTPLUG_ALL_TARGETS
return backend.HotplugDevice(instance, action, dev_type, device, extra, seq)
@staticmethod
def perspective_hotplug_supported(params):
"""Checks if hotplug is supported.
"""
instance = objects.Instance.FromDict(params[0])
return backend.HotplugSupported(instance)
@staticmethod
def perspective_instance_metadata_modify(params):
"""Modify instance metadata.
"""
instance = params[0]
return backend.ModifyInstanceMetadata(instance)
@staticmethod
def perspective_migration_info(params):
"""Gather information about an instance to be migrated.
"""
instance = objects.Instance.FromDict(params[0])
return backend.MigrationInfo(instance)
@staticmethod
def perspective_accept_instance(params):
"""Prepare the node to accept an instance.
"""
instance, info, target = params
instance = objects.Instance.FromDict(instance)
return backend.AcceptInstance(instance, info, target)
@staticmethod
def perspective_instance_finalize_migration_dst(params):
"""Finalize the instance migration on the destination node.
"""
instance, info, success = params
instance = objects.Instance.FromDict(instance)
return backend.FinalizeMigrationDst(instance, info, success)
@staticmethod
def perspective_instance_migrate(params):
"""Migrates an instance.
"""
cluster_name, instance, target, live = params
instance = objects.Instance.FromDict(instance)
return backend.MigrateInstance(cluster_name, instance, target, live)
@staticmethod
def perspective_instance_start_postcopy(params):
""" Switches a migrating instance from precopy to postcopy mode
"""
instance, = params
instance = objects.Instance.FromDict(instance)
return backend.StartPostcopy(instance)
@staticmethod
def perspective_instance_finalize_migration_src(params):
"""Finalize the instance migration on the source node.
"""
instance, success, live = params
instance = objects.Instance.FromDict(instance)
return backend.FinalizeMigrationSource(instance, success, live)
@staticmethod
def perspective_instance_get_migration_status(params):
"""Reports migration status.
"""
instance = objects.Instance.FromDict(params[0])
return backend.GetMigrationStatus(instance).ToDict()
@staticmethod
def perspective_instance_reboot(params):
"""Reboot an instance.
"""
instance = objects.Instance.FromDict(params[0])
reboot_type = params[1]
shutdown_timeout = params[2]
trail = params[3]
_extendReasonTrail(trail, "reboot")
return backend.InstanceReboot(instance, reboot_type, shutdown_timeout,
trail)
@staticmethod
def perspective_instance_balloon_memory(params):
"""Modify instance runtime memory.
"""
instance_dict, memory = params
instance = objects.Instance.FromDict(instance_dict)
return backend.InstanceBalloonMemory(instance, memory)
@staticmethod
def perspective_instance_info(params):
"""Query instance information.
"""
(instance_name, hypervisor_name, hvparams) = params
return backend.GetInstanceInfo(instance_name, hypervisor_name, hvparams)
@staticmethod
def perspective_instance_migratable(params):
"""Query whether the specified instance can be migrated.
"""
instance = objects.Instance.FromDict(params[0])
return backend.GetInstanceMigratable(instance)
@staticmethod
def perspective_all_instances_info(params):
"""Query information about all instances.
"""
(hypervisor_list, all_hvparams) = params
return backend.GetAllInstancesInfo(hypervisor_list, all_hvparams)
@staticmethod
def perspective_instance_console_info(params):
"""Query information on how to get console access to instances
"""
return backend.GetInstanceConsoleInfo(params)
@staticmethod
def perspective_instance_list(params):
"""Query the list of running instances.
"""
(hypervisor_list, hvparams) = params
return backend.GetInstanceList(hypervisor_list, hvparams)
# node --------------------------
@staticmethod
def perspective_node_has_ip_address(params):
"""Checks if a node has the given ip address.
"""
return netutils.IPAddress.Own(params[0])
@staticmethod
def perspective_node_info(params):
"""Query node information.
"""
(storage_units, hv_specs) = params
return backend.GetNodeInfo(storage_units, hv_specs)
@staticmethod
def perspective_etc_hosts_modify(params):
"""Modify a node entry in /etc/hosts.
"""
backend.EtcHostsModify(params[0], params[1], params[2])
return True
@staticmethod
def perspective_node_verify(params):
"""Run a verify sequence on this node.
"""
(what, cluster_name, hvparams) = params
return backend.VerifyNode(what, cluster_name, hvparams)
@classmethod
def perspective_node_verify_light(cls, params):
"""Run a light verify sequence on this node.
This call is meant to perform a less strict verification of the node in
certain situations. Right now, it is invoked only when a node is just about
to be added to a cluster, and even then, it performs the same checks as
L{perspective_node_verify}.
"""
return cls.perspective_node_verify(params)
@staticmethod
def perspective_node_start_master_daemons(params):
"""Start the master daemons on this node.
"""
return backend.StartMasterDaemons(params[0])
@staticmethod
def perspective_node_activate_master_ip(params):
"""Activate the master IP on this node.
"""
master_params = objects.MasterNetworkParameters.FromDict(params[0])
return backend.ActivateMasterIp(master_params, params[1])
@staticmethod
def perspective_node_deactivate_master_ip(params):
"""Deactivate the master IP on this node.
"""
master_params = objects.MasterNetworkParameters.FromDict(params[0])
return backend.DeactivateMasterIp(master_params, params[1])
@staticmethod
def perspective_node_stop_master(params):
"""Stops master daemons on this node.
"""
return backend.StopMasterDaemons()
@staticmethod
def perspective_node_change_master_netmask(params):
"""Change the master IP netmask.
"""
return backend.ChangeMasterNetmask(params[0], params[1], params[2],
params[3])
@staticmethod
def perspective_node_leave_cluster(params):
"""Cleanup after leaving a cluster.
"""
return backend.LeaveCluster(params[0])
@staticmethod
def perspective_node_volumes(params):
"""Query the list of all logical volume groups.
"""
return backend.NodeVolumes()
@staticmethod
def perspective_node_demote_from_mc(params):
"""Demote a node from the master candidate role.
"""
return backend.DemoteFromMC()
@staticmethod
def perspective_node_powercycle(params):
"""Tries to powercycle the node.
"""
(hypervisor_type, hvparams) = params
return backend.PowercycleNode(hypervisor_type, hvparams)
@staticmethod
def perspective_node_configure_ovs(params):
"""Sets up OpenvSwitch on the node.
"""
(ovs_name, ovs_link) = params
return backend.ConfigureOVS(ovs_name, ovs_link)
@staticmethod
def perspective_node_crypto_tokens(params):
"""Gets the node's public crypto tokens.
"""
token_requests = params[0]
return backend.GetCryptoTokens(token_requests)
@staticmethod
def perspective_node_ensure_daemon(params):
"""Ensure daemon is running.
"""
(daemon_name, run) = params
return backend.EnsureDaemon(daemon_name, run)
@staticmethod
def perspective_node_ssh_key_add(params):
"""Distributes a new node's SSH key if authorized.
"""
(node_uuid, node_name, potential_master_candidates,
to_authorized_keys, to_public_keys, get_public_keys) = params
return backend.AddNodeSshKey(node_uuid, node_name,
potential_master_candidates,
to_authorized_keys=to_authorized_keys,
to_public_keys=to_public_keys,
get_public_keys=get_public_keys)
@staticmethod
def perspective_node_ssh_keys_renew(params):
"""Generates a new root SSH key pair on the node.
"""
(node_uuids, node_names, master_candidate_uuids,
potential_master_candidates, old_key_type, new_key_type,
new_key_bits) = params
return backend.RenewSshKeys(node_uuids, node_names, master_candidate_uuids,
potential_master_candidates, old_key_type,
new_key_type, new_key_bits)
@staticmethod
def perspective_node_ssh_key_remove(params):
"""Removes a node's SSH key from the other nodes' SSH files.
"""
(node_uuid, node_name,
master_candidate_uuids, potential_master_candidates,
from_authorized_keys, from_public_keys, clear_authorized_keys,
clear_public_keys, readd) = params
return backend.RemoveNodeSshKey(node_uuid, node_name,
master_candidate_uuids,
potential_master_candidates,
from_authorized_keys=from_authorized_keys,
from_public_keys=from_public_keys,
clear_authorized_keys=clear_authorized_keys,
clear_public_keys=clear_public_keys,
readd=readd)
# cluster --------------------------
@staticmethod
def perspective_version(params):
"""Query version information.
"""
return constants.PROTOCOL_VERSION
@staticmethod
def perspective_upload_file(params):
"""Upload a file.
Note that the backend implementation imposes strict rules on which
files are accepted.
"""
return backend.UploadFile(*(params[0]))
@staticmethod
def perspective_upload_file_single(params):
"""Upload a file.
Note that the backend implementation imposes strict rules on which
files are accepted.
"""
return backend.UploadFile(*params)
@staticmethod
def perspective_master_node_name(params):
"""Returns the master node name.
"""
return backend.GetMasterNodeName()
@staticmethod
def perspective_run_oob(params):
"""Runs oob on node.
"""
output = backend.RunOob(params[0], params[1], params[2], params[3])
if output:
result = serializer.LoadJson(output)
else:
result = None
return result
@staticmethod
def perspective_restricted_command(params):
"""Runs a restricted command.
"""
(cmd, ) = params
return backend.RunRestrictedCmd(cmd)
@staticmethod
def perspective_write_ssconf_files(params):
"""Write ssconf files.
"""
(values,) = params
return ssconf.WriteSsconfFiles(values)
@staticmethod
def perspective_get_watcher_pause(params):
"""Get watcher pause end.
"""
return utils.ReadWatcherPauseFile(pathutils.WATCHER_PAUSEFILE)
@staticmethod
def perspective_set_watcher_pause(params):
"""Set watcher pause.
"""
(until, ) = params
return backend.SetWatcherPause(until)
@staticmethod
def perspective_get_file_info(params):
"""Get info on whether a file exists and its properties.
"""
(path, ) = params
return backend.GetFileInfo(path)
# os -----------------------
@staticmethod
def perspective_os_diagnose(params):
"""Query detailed information about existing OSes.
"""
return backend.DiagnoseOS()
@staticmethod
def perspective_os_validate(params):
"""Run a given OS' validation routine.
"""
required, name, checks, params, force_variant = params
return backend.ValidateOS(required, name, checks, params, force_variant)
@staticmethod
def perspective_os_export(params):
"""Export an OS definition into an instance specific package.
"""
instance = objects.Instance.FromDict(params[0])
override_env = params[1]
return backend.ExportOS(instance, override_env)
# extstorage -----------------------
@staticmethod
def perspective_extstorage_diagnose(params):
"""Query detailed information about existing extstorage providers.
"""
return backend.DiagnoseExtStorage()
# hooks -----------------------
@staticmethod
def perspective_hooks_runner(params):
"""Run hook scripts.
"""
hpath, phase, env = params
hr = backend.HooksRunner()
return hr.RunHooks(hpath, phase, env)
# iallocator -----------------
@staticmethod
def perspective_iallocator_runner(params):
"""Run an iallocator script.
"""
name, idata, ial_params_dict = params
ial_params = []
for ial_param in ial_params_dict.items():
if ial_param[1] is not None:
ial_params.append("--" + ial_param[0] + "=" + ial_param[1])
else:
ial_params.append("--" + ial_param[0])
iar = backend.IAllocatorRunner()
return iar.Run(name, idata, ial_params)
# test -----------------------
@staticmethod
def perspective_test_delay(params):
"""Run test delay.
"""
duration = params[0]
status, rval = utils.TestDelay(duration)
if not status:
raise backend.RPCFail(rval)
return rval
# file storage ---------------
@staticmethod
def perspective_file_storage_dir_create(params):
"""Create the file storage directory.
"""
file_storage_dir = params[0]
return backend.CreateFileStorageDir(file_storage_dir)
@staticmethod
def perspective_file_storage_dir_remove(params):
"""Remove the file storage directory.
"""
file_storage_dir = params[0]
return backend.RemoveFileStorageDir(file_storage_dir)
@staticmethod
def perspective_file_storage_dir_rename(params):
"""Rename the file storage directory.
"""
old_file_storage_dir = params[0]
new_file_storage_dir = params[1]
return backend.RenameFileStorageDir(old_file_storage_dir,
new_file_storage_dir)
# jobs ------------------------
@staticmethod
@_RequireJobQueueLock
def perspective_jobqueue_update(params):
"""Update job queue.
"""
(file_name, content) = params
return backend.JobQueueUpdate(file_name, content)
@staticmethod
@_RequireJobQueueLock
def perspective_jobqueue_purge(params):
"""Purge job queue.
"""
return backend.JobQueuePurge()
@staticmethod
@_RequireJobQueueLock
def perspective_jobqueue_rename(params):
"""Rename a job queue file.
"""
# TODO: What if a file fails to rename?
return [backend.JobQueueRename(old, new) for old, new in params[0]]
@staticmethod
@_RequireJobQueueLock
def perspective_jobqueue_set_drain_flag(params):
"""Set job queue's drain flag.
"""
(flag, ) = params
return jstore.SetDrainFlag(flag)
# hypervisor ---------------
@staticmethod
def perspective_hypervisor_validate_params(params):
"""Validate the hypervisor parameters.
"""
(hvname, hvparams) = params
return backend.ValidateHVParams(hvname, hvparams)
# Crypto
@staticmethod
def perspective_x509_cert_create(params):
"""Creates a new X509 certificate for SSL/TLS.
"""
(validity, ) = params
return backend.CreateX509Certificate(validity)
@staticmethod
def perspective_x509_cert_remove(params):
"""Removes a X509 certificate.
"""
(name, ) = params
return backend.RemoveX509Certificate(name)
# Import and export
@staticmethod
def perspective_import_start(params):
"""Starts an import daemon.
"""
(opts_s, instance, component, (dest, dest_args)) = params
opts = objects.ImportExportOptions.FromDict(opts_s)
return backend.StartImportExportDaemon(constants.IEM_IMPORT, opts,
None, None,
objects.Instance.FromDict(instance),
component, dest,
_DecodeImportExportIO(dest,
dest_args))
@staticmethod
def perspective_export_start(params):
"""Starts an export daemon.
"""
(opts_s, host, port, instance, component, (source, source_args)) = params
opts = objects.ImportExportOptions.FromDict(opts_s)
return backend.StartImportExportDaemon(constants.IEM_EXPORT, opts,
host, port,
objects.Instance.FromDict(instance),
component, source,
_DecodeImportExportIO(source,
source_args))
@staticmethod
def perspective_impexp_status(params):
"""Retrieves the status of an import or export daemon.
"""
return backend.GetImportExportStatus(params[0])
@staticmethod
def perspective_impexp_abort(params):
"""Aborts an import or export.
"""
return backend.AbortImportExport(params[0])
@staticmethod
def perspective_impexp_cleanup(params):
"""Cleans up after an import or export.
"""
return backend.CleanupImportExport(params[0])
def CheckNoded(options, args):
"""Initial checks whether to run or exit with a failure.
"""
if args: # noded doesn't take any arguments
print >> sys.stderr, ("Usage: %s [-f] [-d] [-p port] [-b ADDRESS]" %
sys.argv[0])
sys.exit(constants.EXIT_FAILURE)
if options.max_clients < 1:
print >> sys.stderr, ("%s --max-clients argument must be >= 1" %
sys.argv[0])
sys.exit(constants.EXIT_FAILURE)
try:
codecs.lookup("string-escape")
except LookupError:
print >> sys.stderr, ("Can't load the string-escape code which is part"
" of the Python installation. Is your installation"
" complete/correct? Aborting.")
sys.exit(constants.EXIT_FAILURE)
def SSLVerifyPeer(conn, cert, errnum, errdepth, ok):
"""Callback function to verify a peer against the candidate cert map.
Note that we have a chicken-and-egg problem during cluster init and upgrade.
This method checks whether the incoming connection comes from a master
candidate by comparing it to the master certificate map in the cluster
configuration. However, during cluster init and cluster upgrade there
are various RPC calls done to the master node itself, before the candidate
certificate list is established and the cluster configuration is written.
In this case, we cannot check against the master candidate map.
This problem is solved by checking whether the candidate map is empty. An
initialized 2.11 or higher cluster has at least one entry for the master
node in the candidate map. If the map is empty, we know that we are still
in the bootstrap/upgrade phase. In this case, we read the server certificate
digest and compare it to the incoming request.
This means that after an upgrade of Ganeti, the system continues to operate
like before, using server certificates only. After the client certificates
are generated with ``gnt-cluster renew-crypto --new-node-certificates``,
RPC communication is switched to using client certificates and the trick of
using server certificates does not work anymore.
@type conn: C{OpenSSL.SSL.Connection}
@param conn: the OpenSSL connection object
@type cert: C{OpenSSL.X509}
@param cert: the peer's SSL certificate
@type errdepth: integer
@param errdepth: number of the step in the certificate chain starting at 0
for the actual client certificate.
"""
# some parameters are unused, but this is the API
# pylint: disable=W0613
# If we receive a certificate from the certificate chain that is higher
# than the lowest element of the chain, we have to check it against the
# server certificate.
if errdepth > 0:
server_digest = utils.GetCertificateDigest(
cert_filename=pathutils.NODED_CERT_FILE)
match = cert.digest("sha1") == server_digest
if not match:
logging.debug("Received certificate from the certificate chain, which"
" does not match the server certficate. Digest of the"
" received certificate: %s. Digest of the server"
" certificate: %s.", cert.digest("sha1"), server_digest)
return match
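  # errdepth 0 is the peer's own certificate: check it against the master
  # candidate certificate map, falling back to the server certificate during
  # bootstrap/upgrade (see the docstring above).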
elif errdepth == 0:
sstore = ssconf.SimpleStore()
try:
candidate_certs = sstore.GetMasterCandidatesCertMap()
except errors.ConfigurationError:
logging.info("No candidate certificates found. Switching to "
"bootstrap/update mode.")
candidate_certs = None
if not candidate_certs:
candidate_certs = {
constants.CRYPTO_BOOTSTRAP: utils.GetCertificateDigest(
cert_filename=pathutils.NODED_CERT_FILE)}
match = cert.digest("sha1") in candidate_certs.values()
if not match:
logging.debug("Received certificate which is not a certificate of a"
" master candidate. Certificate digest: %s. List of master"
" candidate certificate digests: %s.", cert.digest("sha1"),
str(candidate_certs))
return match
else:
logging.error("Invalid errdepth value: %s.", errdepth)
return False
def PrepNoded(options, _):
"""Preparation node daemon function, executed with the PID file held.
"""
if options.mlock:
request_executor_class = MlockallRequestExecutor
try:
utils.Mlockall()
except errors.NoCtypesError:
logging.warning("Cannot set memory lock, ctypes module not found")
request_executor_class = http.server.HttpServerRequestExecutor
else:
request_executor_class = http.server.HttpServerRequestExecutor
# Read SSL certificate
if options.ssl:
ssl_params = http.HttpSslParams(ssl_key_path=options.ssl_key,
ssl_cert_path=options.ssl_cert)
else:
ssl_params = None
err = _PrepareQueueLock()
if err is not None:
# this might be some kind of file-system/permission error; while
# this breaks the job queue functionality, we shouldn't prevent
# startup of the whole node daemon because of this
logging.critical("Can't init/verify the queue, proceeding anyway: %s", err)
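  # The HTTP server below authenticates clients via the SSLVerifyPeer
  # callback defined above.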
handler = NodeRequestHandler()
mainloop = daemon.Mainloop()
server = http.server.HttpServer(
mainloop, options.bind_address, options.port, options.max_clients,
handler, ssl_params=ssl_params, ssl_verify_peer=True,
request_executor_class=request_executor_class,
ssl_verify_callback=SSLVerifyPeer)
server.Start()
return (mainloop, server)
def ExecNoded(options, args, prep_data): # pylint: disable=W0613
"""Main node daemon function, executed with the PID file held.
"""
(mainloop, server) = prep_data
try:
mainloop.Run()
finally:
server.Stop()
def Main():
"""Main function for the node daemon.
"""
parser = OptionParser(description="Ganeti node daemon",
usage=("%prog [-f] [-d] [-p port] [-b ADDRESS]"
" [-i INTERFACE]"),
version="%%prog (ganeti) %s" %
constants.RELEASE_VERSION)
parser.add_option("--no-mlock", dest="mlock",
help="Do not mlock the node memory in ram",
default=True, action="store_false")
parser.add_option("--max-clients", dest="max_clients",
default=20, type="int",
help="Number of simultaneous connections accepted"
" by noded")
daemon.GenericMain(constants.NODED, parser, CheckNoded, PrepNoded, ExecNoded,
default_ssl_cert=pathutils.NODED_CERT_FILE,
default_ssl_key=pathutils.NODED_CERT_FILE,
console_logging=True,
warn_breach=True)
| bsd-2-clause | -8,683,240,034,905,567,000 | 28.346575 | 80 | 0.66697 | false |
prefetchnta/questlab | bin/x64bin/python/37/Lib/xmlrpc/server.py | 1 | 37658 | r"""XML-RPC Servers.
This module can be used to create simple XML-RPC servers
by creating a server and either installing functions, a
class instance, or by extending the SimpleXMLRPCServer
class.
It can also be used to handle XML-RPC requests in a CGI
environment using CGIXMLRPCRequestHandler.
The Doc* classes can be used to create XML-RPC servers that
serve pydoc-style documentation in response to HTTP
GET requests. This documentation is dynamically generated
based on the functions and methods registered with the
server.
A list of possible usage patterns follows:
1. Install functions:
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_function(pow)
server.register_function(lambda x,y: x+y, 'add')
server.serve_forever()
2. Install an instance:
class MyFuncs:
def __init__(self):
# make all of the sys functions available through sys.func_name
import sys
self.sys = sys
def _listMethods(self):
# implement this method so that system.listMethods
# knows to advertise the sys methods
return list_public_methods(self) + \
['sys.' + method for method in list_public_methods(self.sys)]
def pow(self, x, y): return pow(x, y)
def add(self, x, y) : return x + y
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_introspection_functions()
server.register_instance(MyFuncs())
server.serve_forever()
3. Install an instance with custom dispatch method:
class Math:
def _listMethods(self):
# this method must be present for system.listMethods
# to work
return ['add', 'pow']
def _methodHelp(self, method):
# this method must be present for system.methodHelp
# to work
if method == 'add':
return "add(2,3) => 5"
elif method == 'pow':
return "pow(x, y[, z]) => number"
else:
# By convention, return empty
# string if no help is available
return ""
def _dispatch(self, method, params):
if method == 'pow':
return pow(*params)
elif method == 'add':
return params[0] + params[1]
else:
raise ValueError('bad method')
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_introspection_functions()
server.register_instance(Math())
server.serve_forever()
4. Subclass SimpleXMLRPCServer:
class MathServer(SimpleXMLRPCServer):
def _dispatch(self, method, params):
try:
# We are forcing the 'export_' prefix on methods that are
# callable through XML-RPC to prevent potential security
# problems
func = getattr(self, 'export_' + method)
except AttributeError:
raise Exception('method "%s" is not supported' % method)
else:
return func(*params)
def export_add(self, x, y):
return x + y
server = MathServer(("localhost", 8000))
server.serve_forever()
5. CGI script:
server = CGIXMLRPCRequestHandler()
server.register_function(pow)
server.handle_request()
"""
# Written by Brian Quinlan ([email protected]).
# Based on code written by Fredrik Lundh.
from xmlrpc.client import Fault, dumps, loads, gzip_encode, gzip_decode
from http.server import BaseHTTPRequestHandler
from functools import partial
from inspect import signature
import html
import http.server
import socketserver
import sys
import os
import re
import pydoc
import traceback
try:
import fcntl
except ImportError:
fcntl = None
def resolve_dotted_attribute(obj, attr, allow_dotted_names=True):
"""resolve_dotted_attribute(a, 'b.c.d') => a.b.c.d
Resolves a dotted attribute name to an object. Raises
an AttributeError if any attribute in the chain starts with a '_'.
If the optional allow_dotted_names argument is false, dots are not
supported and this function operates similar to getattr(obj, attr).
"""
if allow_dotted_names:
attrs = attr.split('.')
else:
attrs = [attr]
for i in attrs:
if i.startswith('_'):
raise AttributeError(
'attempt to access private attribute "%s"' % i
)
else:
obj = getattr(obj,i)
return obj
def list_public_methods(obj):
"""Returns a list of attribute strings, found in the specified
object, which represent callable attributes"""
return [member for member in dir(obj)
if not member.startswith('_') and
callable(getattr(obj, member))]
class SimpleXMLRPCDispatcher:
"""Mix-in class that dispatches XML-RPC requests.
This class is used to register XML-RPC method handlers
and then to dispatch them. This class doesn't need to be
    instantiated directly when used by SimpleXMLRPCServer, but it
    can be instantiated when used by the MultiPathXMLRPCServer.
"""
def __init__(self, allow_none=False, encoding=None,
use_builtin_types=False):
self.funcs = {}
self.instance = None
self.allow_none = allow_none
self.encoding = encoding or 'utf-8'
self.use_builtin_types = use_builtin_types
def register_instance(self, instance, allow_dotted_names=False):
"""Registers an instance to respond to XML-RPC requests.
Only one instance can be installed at a time.
If the registered instance has a _dispatch method then that
method will be called with the name of the XML-RPC method and
its parameters as a tuple
e.g. instance._dispatch('add',(2,3))
If the registered instance does not have a _dispatch method
then the instance will be searched to find a matching method
and, if found, will be called. Methods beginning with an '_'
are considered private and will not be called by
SimpleXMLRPCServer.
If a registered function matches an XML-RPC request, then it
will be called instead of the registered instance.
If the optional allow_dotted_names argument is true and the
instance does not have a _dispatch method, method names
containing dots are supported and resolved, as long as none of
the name segments start with an '_'.
*** SECURITY WARNING: ***
        Enabling the allow_dotted_names option allows intruders
to access your module's global variables and may allow
intruders to execute arbitrary code on your machine. Only
use this option on a secure, closed network.
"""
self.instance = instance
self.allow_dotted_names = allow_dotted_names
def register_function(self, function=None, name=None):
"""Registers a function to respond to XML-RPC requests.
The optional name argument can be used to set a Unicode name
for the function.
"""
# decorator factory
if function is None:
return partial(self.register_function, name=name)
if name is None:
name = function.__name__
self.funcs[name] = function
return function
def register_introspection_functions(self):
"""Registers the XML-RPC introspection methods in the system
namespace.
see http://xmlrpc.usefulinc.com/doc/reserved.html
"""
self.funcs.update({'system.listMethods' : self.system_listMethods,
'system.methodSignature' : self.system_methodSignature,
'system.methodHelp' : self.system_methodHelp})
def register_multicall_functions(self):
"""Registers the XML-RPC multicall method in the system
namespace.
see http://www.xmlrpc.com/discuss/msgReader$1208"""
self.funcs.update({'system.multicall' : self.system_multicall})
def _marshaled_dispatch(self, data, dispatch_method = None, path = None):
"""Dispatches an XML-RPC method from marshalled (XML) data.
XML-RPC methods are dispatched from the marshalled (XML) data
using the _dispatch method and the result is returned as
marshalled data. For backwards compatibility, a dispatch
function can be provided as an argument (see comment in
SimpleXMLRPCRequestHandler.do_POST) but overriding the
existing method through subclassing is the preferred means
of changing method dispatch behavior.
"""
try:
params, method = loads(data, use_builtin_types=self.use_builtin_types)
# generate response
if dispatch_method is not None:
response = dispatch_method(method, params)
else:
response = self._dispatch(method, params)
# wrap response in a singleton tuple
response = (response,)
response = dumps(response, methodresponse=1,
allow_none=self.allow_none, encoding=self.encoding)
except Fault as fault:
response = dumps(fault, allow_none=self.allow_none,
encoding=self.encoding)
except:
# report exception back to server
exc_type, exc_value, exc_tb = sys.exc_info()
try:
response = dumps(
Fault(1, "%s:%s" % (exc_type, exc_value)),
encoding=self.encoding, allow_none=self.allow_none,
)
finally:
# Break reference cycle
exc_type = exc_value = exc_tb = None
return response.encode(self.encoding, 'xmlcharrefreplace')
def system_listMethods(self):
"""system.listMethods() => ['add', 'subtract', 'multiple']
Returns a list of the methods supported by the server."""
methods = set(self.funcs.keys())
if self.instance is not None:
# Instance can implement _listMethod to return a list of
# methods
if hasattr(self.instance, '_listMethods'):
methods |= set(self.instance._listMethods())
# if the instance has a _dispatch method then we
# don't have enough information to provide a list
# of methods
elif not hasattr(self.instance, '_dispatch'):
methods |= set(list_public_methods(self.instance))
return sorted(methods)
def system_methodSignature(self, method_name):
"""system.methodSignature('add') => [double, int, int]
Returns a list describing the signature of the method. In the
above example, the add method takes two integers as arguments
and returns a double result.
This server does NOT support system.methodSignature."""
# See http://xmlrpc.usefulinc.com/doc/sysmethodsig.html
return 'signatures not supported'
def system_methodHelp(self, method_name):
"""system.methodHelp('add') => "Adds two integers together"
Returns a string containing documentation for the specified method."""
method = None
if method_name in self.funcs:
method = self.funcs[method_name]
elif self.instance is not None:
# Instance can implement _methodHelp to return help for a method
if hasattr(self.instance, '_methodHelp'):
return self.instance._methodHelp(method_name)
# if the instance has a _dispatch method then we
# don't have enough information to provide help
elif not hasattr(self.instance, '_dispatch'):
try:
method = resolve_dotted_attribute(
self.instance,
method_name,
self.allow_dotted_names
)
except AttributeError:
pass
# Note that we aren't checking that the method actually
# be a callable object of some kind
if method is None:
return ""
else:
return pydoc.getdoc(method)
def system_multicall(self, call_list):
"""system.multicall([{'methodName': 'add', 'params': [2, 2]}, ...]) => \
[[4], ...]
Allows the caller to package multiple XML-RPC calls into a single
request.
See http://www.xmlrpc.com/discuss/msgReader$1208
"""
results = []
for call in call_list:
method_name = call['methodName']
params = call['params']
try:
# XXX A marshalling error in any response will fail the entire
# multicall. If someone cares they should fix this.
results.append([self._dispatch(method_name, params)])
except Fault as fault:
results.append(
{'faultCode' : fault.faultCode,
'faultString' : fault.faultString}
)
except:
exc_type, exc_value, exc_tb = sys.exc_info()
try:
results.append(
{'faultCode' : 1,
'faultString' : "%s:%s" % (exc_type, exc_value)}
)
finally:
# Break reference cycle
exc_type = exc_value = exc_tb = None
return results
def _dispatch(self, method, params):
"""Dispatches the XML-RPC method.
XML-RPC calls are forwarded to a registered function that
matches the called XML-RPC method name. If no such function
exists then the call is forwarded to the registered instance,
if available.
If the registered instance has a _dispatch method then that
method will be called with the name of the XML-RPC method and
its parameters as a tuple
e.g. instance._dispatch('add',(2,3))
If the registered instance does not have a _dispatch method
then the instance will be searched to find a matching method
and, if found, will be called.
Methods beginning with an '_' are considered private and will
not be called.
"""
try:
# call the matching registered function
func = self.funcs[method]
except KeyError:
pass
else:
if func is not None:
return func(*params)
raise Exception('method "%s" is not supported' % method)
if self.instance is not None:
if hasattr(self.instance, '_dispatch'):
# call the `_dispatch` method on the instance
return self.instance._dispatch(method, params)
# call the instance's method directly
try:
func = resolve_dotted_attribute(
self.instance,
method,
self.allow_dotted_names
)
except AttributeError:
pass
else:
if func is not None:
return func(*params)
raise Exception('method "%s" is not supported' % method)
class SimpleXMLRPCRequestHandler(BaseHTTPRequestHandler):
"""Simple XML-RPC request handler class.
Handles all HTTP POST requests and attempts to decode them as
XML-RPC requests.
"""
# Class attribute listing the accessible path components;
# paths not on this list will result in a 404 error.
rpc_paths = ('/', '/RPC2')
#if not None, encode responses larger than this, if possible
encode_threshold = 1400 #a common MTU
#Override form StreamRequestHandler: full buffering of output
#and no Nagle.
wbufsize = -1
disable_nagle_algorithm = True
# a re to match a gzip Accept-Encoding
aepattern = re.compile(r"""
\s* ([^\s;]+) \s* #content-coding
(;\s* q \s*=\s* ([0-9\.]+))? #q
""", re.VERBOSE | re.IGNORECASE)
def accept_encodings(self):
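        # Parse the Accept-Encoding header into a {content-coding: q-value} mapping.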
r = {}
ae = self.headers.get("Accept-Encoding", "")
for e in ae.split(","):
match = self.aepattern.match(e)
if match:
v = match.group(3)
v = float(v) if v else 1.0
r[match.group(1)] = v
return r
def is_rpc_path_valid(self):
if self.rpc_paths:
return self.path in self.rpc_paths
else:
# If .rpc_paths is empty, just assume all paths are legal
return True
def do_POST(self):
"""Handles the HTTP POST request.
Attempts to interpret all HTTP POST requests as XML-RPC calls,
which are forwarded to the server's _dispatch method for handling.
"""
# Check that the path is legal
if not self.is_rpc_path_valid():
self.report_404()
return
try:
# Get arguments by reading body of request.
# We read this in chunks to avoid straining
# socket.read(); around the 10 or 15Mb mark, some platforms
# begin to have problems (bug #792570).
max_chunk_size = 10*1024*1024
size_remaining = int(self.headers["content-length"])
L = []
while size_remaining:
chunk_size = min(size_remaining, max_chunk_size)
chunk = self.rfile.read(chunk_size)
if not chunk:
break
L.append(chunk)
size_remaining -= len(L[-1])
data = b''.join(L)
data = self.decode_request_content(data)
if data is None:
return #response has been sent
# In previous versions of SimpleXMLRPCServer, _dispatch
# could be overridden in this class, instead of in
# SimpleXMLRPCDispatcher. To maintain backwards compatibility,
# check to see if a subclass implements _dispatch and dispatch
# using that method if present.
response = self.server._marshaled_dispatch(
data, getattr(self, '_dispatch', None), self.path
)
except Exception as e: # This should only happen if the module is buggy
# internal error, report as HTTP server error
self.send_response(500)
# Send information about the exception if requested
if hasattr(self.server, '_send_traceback_header') and \
self.server._send_traceback_header:
self.send_header("X-exception", str(e))
trace = traceback.format_exc()
trace = str(trace.encode('ASCII', 'backslashreplace'), 'ASCII')
self.send_header("X-traceback", trace)
self.send_header("Content-length", "0")
self.end_headers()
else:
self.send_response(200)
self.send_header("Content-type", "text/xml")
if self.encode_threshold is not None:
if len(response) > self.encode_threshold:
q = self.accept_encodings().get("gzip", 0)
if q:
try:
response = gzip_encode(response)
self.send_header("Content-Encoding", "gzip")
except NotImplementedError:
pass
self.send_header("Content-length", str(len(response)))
self.end_headers()
self.wfile.write(response)
def decode_request_content(self, data):
#support gzip encoding of request
encoding = self.headers.get("content-encoding", "identity").lower()
if encoding == "identity":
return data
if encoding == "gzip":
try:
return gzip_decode(data)
except NotImplementedError:
self.send_response(501, "encoding %r not supported" % encoding)
except ValueError:
self.send_response(400, "error decoding gzip content")
else:
self.send_response(501, "encoding %r not supported" % encoding)
self.send_header("Content-length", "0")
self.end_headers()
def report_404 (self):
# Report a 404 error
self.send_response(404)
response = b'No such page'
self.send_header("Content-type", "text/plain")
self.send_header("Content-length", str(len(response)))
self.end_headers()
self.wfile.write(response)
def log_request(self, code='-', size='-'):
"""Selectively log an accepted request."""
if self.server.logRequests:
BaseHTTPRequestHandler.log_request(self, code, size)
class SimpleXMLRPCServer(socketserver.TCPServer,
SimpleXMLRPCDispatcher):
"""Simple XML-RPC server.
Simple XML-RPC server that allows functions and a single instance
to be installed to handle requests. The default implementation
attempts to dispatch XML-RPC calls to the functions or instance
installed in the server. Override the _dispatch method inherited
from SimpleXMLRPCDispatcher to change this behavior.
"""
allow_reuse_address = True
# Warning: this is for debugging purposes only! Never set this to True in
    # production code, as it will send out sensitive information (exception
# and stack trace details) when exceptions are raised inside
# SimpleXMLRPCRequestHandler.do_POST
_send_traceback_header = False
def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler,
logRequests=True, allow_none=False, encoding=None,
bind_and_activate=True, use_builtin_types=False):
self.logRequests = logRequests
SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding, use_builtin_types)
socketserver.TCPServer.__init__(self, addr, requestHandler, bind_and_activate)
class MultiPathXMLRPCServer(SimpleXMLRPCServer):
"""Multipath XML-RPC Server
This specialization of SimpleXMLRPCServer allows the user to create
multiple Dispatcher instances and assign them to different
HTTP request paths. This makes it possible to run two or more
'virtual XML-RPC servers' at the same port.
Make sure that the requestHandler accepts the paths in question.
"""
def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler,
logRequests=True, allow_none=False, encoding=None,
bind_and_activate=True, use_builtin_types=False):
SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests, allow_none,
encoding, bind_and_activate, use_builtin_types)
self.dispatchers = {}
self.allow_none = allow_none
self.encoding = encoding or 'utf-8'
def add_dispatcher(self, path, dispatcher):
self.dispatchers[path] = dispatcher
return dispatcher
def get_dispatcher(self, path):
return self.dispatchers[path]
def _marshaled_dispatch(self, data, dispatch_method = None, path = None):
try:
response = self.dispatchers[path]._marshaled_dispatch(
data, dispatch_method, path)
except:
# report low level exception back to server
# (each dispatcher should have handled their own
# exceptions)
exc_type, exc_value = sys.exc_info()[:2]
try:
response = dumps(
Fault(1, "%s:%s" % (exc_type, exc_value)),
encoding=self.encoding, allow_none=self.allow_none)
response = response.encode(self.encoding, 'xmlcharrefreplace')
finally:
# Break reference cycle
exc_type = exc_value = None
return response
class CGIXMLRPCRequestHandler(SimpleXMLRPCDispatcher):
"""Simple handler for XML-RPC data passed through CGI."""
def __init__(self, allow_none=False, encoding=None, use_builtin_types=False):
SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding, use_builtin_types)
def handle_xmlrpc(self, request_text):
"""Handle a single XML-RPC request"""
response = self._marshaled_dispatch(request_text)
print('Content-Type: text/xml')
print('Content-Length: %d' % len(response))
print()
sys.stdout.flush()
sys.stdout.buffer.write(response)
sys.stdout.buffer.flush()
def handle_get(self):
"""Handle a single HTTP GET request.
Default implementation indicates an error because
XML-RPC uses the POST method.
"""
code = 400
message, explain = BaseHTTPRequestHandler.responses[code]
response = http.server.DEFAULT_ERROR_MESSAGE % \
{
'code' : code,
'message' : message,
'explain' : explain
}
response = response.encode('utf-8')
print('Status: %d %s' % (code, message))
print('Content-Type: %s' % http.server.DEFAULT_ERROR_CONTENT_TYPE)
print('Content-Length: %d' % len(response))
print()
sys.stdout.flush()
sys.stdout.buffer.write(response)
sys.stdout.buffer.flush()
def handle_request(self, request_text=None):
"""Handle a single XML-RPC request passed through a CGI post method.
If no XML data is given then it is read from stdin. The resulting
XML-RPC response is printed to stdout along with the correct HTTP
headers.
"""
if request_text is None and \
os.environ.get('REQUEST_METHOD', None) == 'GET':
self.handle_get()
else:
# POST data is normally available through stdin
try:
length = int(os.environ.get('CONTENT_LENGTH', None))
except (ValueError, TypeError):
length = -1
if request_text is None:
request_text = sys.stdin.read(length)
self.handle_xmlrpc(request_text)
# -----------------------------------------------------------------------------
# Self documenting XML-RPC Server.
class ServerHTMLDoc(pydoc.HTMLDoc):
"""Class used to generate pydoc HTML document for a server"""
def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
"""Mark up some plain text, given a context of symbols to look for.
Each context dictionary maps object names to anchor names."""
escape = escape or self.escape
results = []
here = 0
# XXX Note that this regular expression does not allow for the
# hyperlinking of arbitrary strings being used as method
# names. Only methods with names consisting of word characters
# and '.'s are hyperlinked.
pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
r'RFC[- ]?(\d+)|'
r'PEP[- ]?(\d+)|'
r'(self\.)?((?:\w|\.)+))\b')
while 1:
match = pattern.search(text, here)
if not match: break
start, end = match.span()
results.append(escape(text[here:start]))
all, scheme, rfc, pep, selfdot, name = match.groups()
if scheme:
                url = escape(all).replace('"', '&quot;')
results.append('<a href="%s">%s</a>' % (url, url))
elif rfc:
url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif pep:
url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif text[end:end+1] == '(':
results.append(self.namelink(name, methods, funcs, classes))
elif selfdot:
results.append('self.<strong>%s</strong>' % name)
else:
results.append(self.namelink(name, classes))
here = end
results.append(escape(text[here:]))
return ''.join(results)
def docroutine(self, object, name, mod=None,
funcs={}, classes={}, methods={}, cl=None):
"""Produce HTML documentation for a function or method object."""
anchor = (cl and cl.__name__ or '') + '-' + name
note = ''
title = '<a name="%s"><strong>%s</strong></a>' % (
self.escape(anchor), self.escape(name))
if callable(object):
argspec = str(signature(object))
else:
argspec = '(...)'
if isinstance(object, tuple):
argspec = object[0] or argspec
docstring = object[1] or ""
else:
docstring = pydoc.getdoc(object)
decl = title + argspec + (note and self.grey(
'<font face="helvetica, arial">%s</font>' % note))
doc = self.markup(
docstring, self.preformat, funcs, classes, methods)
doc = doc and '<dd><tt>%s</tt></dd>' % doc
return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
def docserver(self, server_name, package_documentation, methods):
"""Produce HTML documentation for an XML-RPC server."""
fdict = {}
for key, value in methods.items():
fdict[key] = '#-' + key
fdict[value] = fdict[key]
server_name = self.escape(server_name)
head = '<big><big><strong>%s</strong></big></big>' % server_name
result = self.heading(head, '#ffffff', '#7799ee')
doc = self.markup(package_documentation, self.preformat, fdict)
doc = doc and '<tt>%s</tt>' % doc
result = result + '<p>%s</p>\n' % doc
contents = []
method_items = sorted(methods.items())
for key, value in method_items:
contents.append(self.docroutine(value, key, funcs=fdict))
result = result + self.bigsection(
'Methods', '#ffffff', '#eeaa77', ''.join(contents))
return result
class XMLRPCDocGenerator:
"""Generates documentation for an XML-RPC server.
    This class is designed as a mix-in and should not
be constructed directly.
"""
def __init__(self):
# setup variables used for HTML documentation
self.server_name = 'XML-RPC Server Documentation'
self.server_documentation = \
"This server exports the following methods through the XML-RPC "\
"protocol."
self.server_title = 'XML-RPC Server Documentation'
def set_server_title(self, server_title):
"""Set the HTML title of the generated server documentation"""
self.server_title = server_title
def set_server_name(self, server_name):
"""Set the name of the generated HTML server documentation"""
self.server_name = server_name
def set_server_documentation(self, server_documentation):
"""Set the documentation string for the entire server."""
self.server_documentation = server_documentation
def generate_html_documentation(self):
"""generate_html_documentation() => html documentation for the server
Generates HTML documentation for the server using introspection for
installed functions and instances that do not implement the
_dispatch method. Alternatively, instances can choose to implement
the _get_method_argstring(method_name) method to provide the
argument string used in the documentation and the
_methodHelp(method_name) method to provide the help text used
in the documentation."""
methods = {}
for method_name in self.system_listMethods():
if method_name in self.funcs:
method = self.funcs[method_name]
elif self.instance is not None:
method_info = [None, None] # argspec, documentation
if hasattr(self.instance, '_get_method_argstring'):
method_info[0] = self.instance._get_method_argstring(method_name)
if hasattr(self.instance, '_methodHelp'):
method_info[1] = self.instance._methodHelp(method_name)
method_info = tuple(method_info)
if method_info != (None, None):
method = method_info
elif not hasattr(self.instance, '_dispatch'):
try:
method = resolve_dotted_attribute(
self.instance,
method_name
)
except AttributeError:
method = method_info
else:
method = method_info
else:
assert 0, "Could not find method in self.functions and no "\
"instance installed"
methods[method_name] = method
documenter = ServerHTMLDoc()
documentation = documenter.docserver(
self.server_name,
self.server_documentation,
methods
)
return documenter.page(html.escape(self.server_title), documentation)
class DocXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
"""XML-RPC and documentation request handler class.
Handles all HTTP POST requests and attempts to decode them as
XML-RPC requests.
Handles all HTTP GET requests and interprets them as requests
for documentation.
"""
def do_GET(self):
"""Handles the HTTP GET request.
Interpret all HTTP GET requests as requests for server
documentation.
"""
# Check that the path is legal
if not self.is_rpc_path_valid():
self.report_404()
return
response = self.server.generate_html_documentation().encode('utf-8')
self.send_response(200)
self.send_header("Content-type", "text/html")
self.send_header("Content-length", str(len(response)))
self.end_headers()
self.wfile.write(response)
class DocXMLRPCServer( SimpleXMLRPCServer,
XMLRPCDocGenerator):
"""XML-RPC and HTML documentation server.
Adds the ability to serve server documentation to the capabilities
of SimpleXMLRPCServer.
"""
def __init__(self, addr, requestHandler=DocXMLRPCRequestHandler,
logRequests=True, allow_none=False, encoding=None,
bind_and_activate=True, use_builtin_types=False):
SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests,
allow_none, encoding, bind_and_activate,
use_builtin_types)
XMLRPCDocGenerator.__init__(self)
class DocCGIXMLRPCRequestHandler( CGIXMLRPCRequestHandler,
XMLRPCDocGenerator):
"""Handler for XML-RPC data and documentation requests passed through
CGI"""
def handle_get(self):
"""Handles the HTTP GET request.
Interpret all HTTP GET requests as requests for server
documentation.
"""
response = self.generate_html_documentation().encode('utf-8')
print('Content-Type: text/html')
print('Content-Length: %d' % len(response))
print()
sys.stdout.flush()
sys.stdout.buffer.write(response)
sys.stdout.buffer.flush()
def __init__(self):
CGIXMLRPCRequestHandler.__init__(self)
XMLRPCDocGenerator.__init__(self)
if __name__ == '__main__':
import datetime
class ExampleService:
def getData(self):
return '42'
class currentTime:
@staticmethod
def getCurrentTime():
return datetime.datetime.now()
with SimpleXMLRPCServer(("localhost", 8000)) as server:
server.register_function(pow)
server.register_function(lambda x,y: x+y, 'add')
server.register_instance(ExampleService(), allow_dotted_names=True)
server.register_multicall_functions()
print('Serving XML-RPC on localhost port 8000')
print('It is advisable to run this example server within a secure, closed network.')
try:
server.serve_forever()
except KeyboardInterrupt:
print("\nKeyboard interrupt received, exiting.")
sys.exit(0)
| lgpl-2.1 | -9,123,508,251,613,869,000 | 35.923464 | 92 | 0.571831 | false |
ZoomerAnalytics/catalyst | catalyst/json_dbapi/__init__.py | 1 | 2214 | import json, datetime, os
from .sqlparser import parse
def Date(year, month, day):
raise NotImplementedError()
def Time(hour, minute, second):
raise NotImplementedError()
def Timestamp(year, month, day, hour, minute, second):
raise NotImplementedError()
def DateFromTicks(ticks):
raise NotImplementedError()
def TimeFromTicks(ticks):
raise NotImplementedError()
def TimeStampFromTicks(ticks):
raise NotImplementedError()
def Binary(value):
return bytes(value)
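# DB-API type objects for this JSON-backed driver, mapped onto Python builtins.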
STRING = str
BINARY = bytes
NUMBER = float
DATETIME = datetime.datetime
ROWID = int
class JsonDBAPICursor(object):
def __init__(self, owner):
self.owner = owner
self.arraysize = 1
self._results = None
@property
def description(self):
raise NotImplementedError()
@property
def rowcount(self):
raise NotImplementedError()
def close(self):
pass
def execute(self, operation, parameters=None):
        stmt = parse(operation)
        # Assumption: parse() returns None for statements it cannot handle;
        # only an unparsed operation is reported as unsupported.
        if stmt is None:
            raise Exception("Operation '%s' not supported" % operation)
        ret, self._results = stmt.execute(parameters)
def executemany(self, operation, parameter_seq):
raise NotImplementedError()
def fetchone(self):
raise NotImplementedError()
def fetchmany(self, size=None):
if size is None:
size = self.arraysize
raise NotImplementedError()
def fetchall(self):
raise NotImplementedError()
def setinputsizes(self, sizes):
pass
def setoutputsize(self, size, column=None):
pass
class JsonDBAPIConnection(object):
def __init__(self, filename):
self.filename = filename
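        # Load the whole JSON document into memory; a missing file
        # starts as an empty database.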
if os.path.isfile(filename):
with open(filename, "r") as f:
self.j = json.load(f)
else:
self.j = {}
def close(self):
pass
def commit(self):
raise NotImplementedError()
def cursor(self):
return JsonDBAPICursor(self)
def rollback(self):
pass
apilevel = "1.0"
threadsafety = 0
paramstyle = "format"
def connect(filename):
return JsonDBAPIConnection(filename)
Error = Exception
DatabaseError = Exception | mit | -2,250,958,578,212,698,600 | 17.771186 | 67 | 0.64318 | false |
pierky/ripe-atlas-tools | tests/commands/loading.py | 1 | 3443 | import mock
import os.path
import unittest
import tempfile
import shutil
import sys
# Python 2.7 does have io.StringIO but StringIO.StringIO is more liberal regarding str
# versus unicode inputs to write()
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from ripe.atlas.tools.commands.base import Command
USER_COMMAND = """
from ripe.atlas.tools.commands.base import Command as BaseCommand
class Command(BaseCommand):
NAME = 'user-command-1'
"""
class TestCommandLoading(unittest.TestCase):
expected_builtins = [
"configure",
"go",
"measure",
"measurement-info",
"measurement-search",
"probe-info",
"probe-search",
"report",
"shibboleet",
"stream",
]
def setUp(self):
# Create a directory for storing user commands and insert the dummy
# command
self.user_command_path = tempfile.mkdtemp()
with open(
os.path.join(self.user_command_path, "user_command_1.py"),
"w"
) as f:
f.write(USER_COMMAND)
def tearDown(self):
shutil.rmtree(self.user_command_path)
@mock.patch(
"ripe.atlas.tools.commands.base.Command._get_user_command_path",
return_value=None,
)
def test_command_loading(self, _get_user_command_path):
_get_user_command_path.return_value = self.user_command_path
available_commands = Command.get_available_commands()
# Check that we have the command list that we expect
self.assertEquals(
sorted(available_commands),
sorted(
[b.replace('-', '_') for b in self.expected_builtins] +
["user_command_1"]
),
)
# Check that we can load (i.e. import) every builtin command
for expected_builtin in self.expected_builtins:
self.assertIn(expected_builtin.replace("-", "_"), available_commands)
cmd_cls = Command.load_command_class(expected_builtin)
self.assertIsNotNone(cmd_cls)
self.assertEquals(cmd_cls.get_name(), expected_builtin)
# Check that we can load the user command
user_cmd_cls = Command.load_command_class("user-command-1")
self.assertIsNotNone(user_cmd_cls)
self.assertEquals(user_cmd_cls.get_name(), "user-command-1")
# Check that load_command_class() returns None for junk commands
unexpected_cmd = Command.load_command_class("no-such-command")
self.assertIsNone(unexpected_cmd)
def test_deprecated_aliases(self):
aliases = [
("measurement", "measurement-info"),
("measurements", "measurement-search"),
("probe", "probe-info"),
("probes", "probe-search"),
]
# Check that each alias is loaded correctly and outputs a warning
stderr = sys.stderr
sys.stderr = StringIO()
try:
for alias, cmd_name in aliases:
sys.stderr.truncate()
cmd_cls = Command.load_command_class(alias)
self.assertIn(
"{} is a deprecated alias for {}".format(alias, cmd_name),
sys.stderr.getvalue(),
)
self.assertIsNotNone(cmd_cls)
self.assertEquals(cmd_cls.get_name(), cmd_name)
finally:
sys.stderr = stderr
| gpl-3.0 | 799,264,350,458,483,000 | 30.87963 | 81 | 0.596282 | false |
7WebPages/elastic | project/wsgi.py | 1 | 1595 | """
WSGI config for simpularity project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from os.path import abspath, dirname
from sys import path
from dj_static import Cling
CONFIG_ROOT = dirname(dirname(abspath(__file__)))
path.append(CONFIG_ROOT)
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "project.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = Cling(get_wsgi_application())
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| mit | 7,352,348,694,332,385,000 | 39.897436 | 79 | 0.790596 | false |
semitki/semitki | api/sonetworks/migrations/0006_auto_20170216_2125.py | 1 | 1630 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-16 21:25
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('sonetworks', '0005_auto_20170202_2319'),
]
operations = [
migrations.CreateModel(
name='GroupedSocialAccounts',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.RenameModel(
old_name='AccountsGroup',
new_name='SocialAccount',
),
migrations.RenameModel(
old_name='UserAccount',
new_name='SocialAccountsGroup',
),
migrations.RemoveField(
model_name='useraccountsgroup',
name='account_group_id',
),
migrations.RemoveField(
model_name='useraccountsgroup',
name='user_account_id',
),
migrations.DeleteModel(
name='UserAccountsGroup',
),
migrations.AddField(
model_name='groupedsocialaccounts',
name='social_account_group_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sonetworks.SocialAccountsGroup'),
),
migrations.AddField(
model_name='groupedsocialaccounts',
name='social_account_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sonetworks.SocialAccount'),
),
]
| mit | 490,810,210,341,913,500 | 30.960784 | 118 | 0.586503 | false |
btaylor66/SREAchievements | sreachievementswebapp/models/person.py | 1 | 1194 | """Database model for a person
"""
import collections
import operator
import json
from sreachievementswebapp.dbmodels import db
from sqlalchemy.ext.hybrid import hybrid_property
m2m_person_achievement = db.Table(
'm2m_person_achievement',
db.Column('achievement_id', db.Integer, db.ForeignKey('achievement.id')),
db.Column('person_id', db.Integer, db.ForeignKey('person.id')),
db.PrimaryKeyConstraint('achievement_id', 'person_id')
)
class Person(db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
username = db.Column(db.String(50), unique=True)
fullname = db.Column(db.String(50))
# team_id = db.Column(db.Integer, db.ForeignKey('teams.team_id'), nullable=True)
achievements = db.relationship('Achievement', secondary=m2m_person_achievement, backref='Person')
# team = db.relationship("Teams", back_populates="users")
# team = db.relationship("Teams")
known_achievements = []
# def __init__(self, username, fullname, team_id, team):
def __init__(self, username, fullname):
self.username = username
self.fullname = fullname
# self.team_id = team_id
# self.team = team
| gpl-3.0 | -3,030,146,035,369,877,000 | 28.85 | 101 | 0.685092 | false |
Ensembles/ert | python/python/ert/job_queue/job_status_type_enum.py | 1 | 2741 | # Copyright (C) 2013 Statoil ASA, Norway.
#
# The file 'job_status_type_enum.py' is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
from cwrap import BaseCEnum
from ert.job_queue import JOB_QUEUE_LIB
class JobStatusType(BaseCEnum):
TYPE_NAME = "job_status_type_enum"
    JOB_QUEUE_NOT_ACTIVE = None # This value is used in external query routines - for jobs which are (currently) not active.
    JOB_QUEUE_WAITING = None # A node which is waiting in the internal queue.
    JOB_QUEUE_SUBMITTED = None # Internal status: it has been submitted - the next status update will (should) place it as pending or running.
    JOB_QUEUE_PENDING = None # A node which is pending - a status returned by the external system, i.e. LSF.
    JOB_QUEUE_RUNNING = None # The job is running.
    JOB_QUEUE_DONE = None # The job is done - but we have not yet checked if the target file is produced.
    JOB_QUEUE_EXIT = None # The job has exited - check attempts to determine if we retry or go to complete_fail.
    JOB_QUEUE_IS_KILLED = None # The job has been killed, following a JOB_QUEUE_DO_KILL - can restart.
    JOB_QUEUE_DO_KILL = None # The job should be killed, either due to user request, or automated measures - the job can NOT be restarted.
JOB_QUEUE_SUCCESS = None
JOB_QUEUE_RUNNING_CALLBACK = None
JOB_QUEUE_FAILED = None
JOB_QUEUE_DO_KILL_NODE_FAILURE = None
JOB_QUEUE_STATUS_FAILURE = None
JobStatusType.addEnum("JOB_QUEUE_NOT_ACTIVE", 1)
JobStatusType.addEnum("JOB_QUEUE_WAITING", 4)
JobStatusType.addEnum("JOB_QUEUE_SUBMITTED", 8)
JobStatusType.addEnum("JOB_QUEUE_PENDING", 16)
JobStatusType.addEnum("JOB_QUEUE_RUNNING", 32)
JobStatusType.addEnum("JOB_QUEUE_DONE", 64)
JobStatusType.addEnum("JOB_QUEUE_EXIT", 128)
JobStatusType.addEnum("JOB_QUEUE_IS_KILLED", 4096)
JobStatusType.addEnum("JOB_QUEUE_DO_KILL", 8192)
JobStatusType.addEnum("JOB_QUEUE_SUCCESS", 16384)
JobStatusType.addEnum("JOB_QUEUE_RUNNING_CALLBACK", 32768)
JobStatusType.addEnum("JOB_QUEUE_FAILED", 65536)
JobStatusType.addEnum("JOB_QUEUE_DO_KILL_NODE_FAILURE", 131072)
JobStatusType.addEnum("JOB_QUEUE_STATUS_FAILURE", 262144)
| gpl-3.0 | 5,301,143,350,003,913,000 | 52.745098 | 152 | 0.721634 | false |
ollej/shoutbridge | src/plugins/GoldQuest.py | 1 | 17185 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
The MIT License
Copyright (c) 2011 Olle Johansson
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from sqlalchemy import Table, Column, Integer, Boolean, String, MetaData, ForeignKey, Sequence, create_engine
from sqlalchemy.orm import mapper, sessionmaker
import random
import cmd
import yaml
from utils.Conf import *
class Hero(object):
id = 0
name = ''
health = None
strength = None
hurt = None
kills = None
gold = None
level = None
alive = None
def __init__(self):
self.hurt = 0
self.kills = 0
self.gold = 0
self.level = 1
self.alive = True
def reroll(self, name=None):
self.health = self.roll(20, 5)
self.strength = self.roll(20, 5)
self.hurt = 0
self.kills = 0
self.gold = 0
self.level = 1
self.alive = True
if name:
self.name = name
else:
self.name = self.random_name()
def search_treasure(self):
luck = self.roll(100)
if luck > 50:
found_gold = self.roll(self.level)
self.gold = self.gold + found_gold
return found_gold
return 0
def injure(self, hurt):
self.hurt = self.hurt + hurt
if self.hurt > self.health:
self.alive = False
def fight(self, monster):
#print("Monster:", monster.health, monster.strength)
while monster.health >= 0 and self.hurt < self.health:
hit = self.roll(self.strength)
killed = monster.injure(hit)
#print("Hit:", hit, "Monster Health:", monster.health)
if not killed:
monster_hit = self.roll(monster.strength)
self.injure(monster_hit)
#print("Monster Hits:", monster_hit, "Hero Hurt:", self.hurt)
if self.hurt > self.health:
self.alive = False
else:
self.kills = self.kills + 1
return self.alive
def rest(self):
if self.hurt > 0:
heal = self.roll(10)
if heal > self.hurt:
heal = self.hurt
self.hurt = self.hurt - heal
return heal
return 0
def go_deeper(self, depth=None):
if not depth:
depth = 1
self.level = self.level + depth
return self.level
def roll(self, sides, times=1):
total = 0
for i in range(times):
total = total + random.randint(1, sides)
return total
def random_name(self):
name = random.choice(['Conan', 'Canon', 'Hercules', 'Robin', 'Dante', 'Legolas', 'Buffy', 'Xena'])
epithet = random.choice(['Barbarian', 'Invincible', 'Mighty', 'Hairy', 'Bastard', 'Slayer'])
return '%s the %s' % (name, epithet)
def get_attributes(self):
attribs = self.__dict__
attribs['status'] = ""
if not self.alive:
attribs['status'] = " (Deceased)"
#for k, v in attribs.items():
# print k, v
return attribs
#return self.__dict__
def get_charsheet(self):
msg = "%(name)s%(status)s - Strength: %(strength)d Health: %(health)d Hurt: %(hurt)d Kills: %(kills)d Gold: %(gold)d Level: %(level)d"
msg = msg % self.get_attributes()
return msg
class Monster(object):
name = None
strength = None
health = None
level = None
def __init__(self, level=None, name=None, boss=False):
if not level:
level = 1
self.strength = random.randint(1, level)
self.health = random.randint(1, level)
if boss:
self.strength = self.strength + level
self.health = self.health + level
if name:
self.name = name
else:
self.name = self.random_name()
def injure(self, hurt):
"""
Injure the monster with hurt points. Returns True if the monster died.
"""
self.health = self.health - hurt
if self.health <= 0:
self.level.kill_monster()
return True
else:
return False
def random_name(self):
return random.choice([
"an orc", "an ogre", "a bunch of goblins", "a giant spider",
"a cyclops", "a minotaur", "a horde of kobolds",
"a rattling skeleton", "a large troll", "a moaning zombie",
"a swarm of vampire bats", "a baby hydra", "a giant monster ant",
"a slithering lizard", "an angry lion", "three hungry bears",
"a hell hound", "a pack of rabid dogs", "a werewolf",
"an ice demon", "a fire wraith", "a groaning ghoul",
"two goblins", "a three-headed hyena", "a giant monster worm",
"a slobbering were-pig"
])
class Level(object):
depth = None
killed = None
looted = None
boss = None
text = None
def __init__(self, depth=None):
self.killed = 0
self.looted = 0
if depth:
self.depth = depth
else:
self.depth = 1
def get_monster(self, name):
if self.killed == self.depth - 1:
boss = True
if self.boss:
name = self.boss
else:
boss = False
if self.has_monsters():
monster = Monster(self.depth, name, boss)
monster.level = self
return monster
def get_loot(self):
loot = 0
if self.can_loot():
self.looted = self.looted + 1
luck = random.randint(1, 100)
if luck > 20:
loot = random.randint(1, self.depth)
elif luck < 5:
loot = 0 - luck
return loot
def kill_monster(self):
if self.has_monsters():
self.killed = self.killed + 1
return True
return False
def has_monsters(self):
if self.killed < self.depth:
return True
return False
def can_loot(self):
if self.looted < self.killed:
return True
return False
class GoldQuest(BridgeClass):
_gamedata = None
cfg = None
hero = None
level = None
def __init__(self, cfg):
"""
Setup Sqlite SQL tables and start a db session.
The database will be saved in C{extras/goldquest.db}
Calls L{setup_tables} to setup table metadata and L{setup_session}
to instantiate the db session.
"""
self.cfg = cfg
try:
debug = self.cfg.get_bool('debug')
except AttributeError:
debug = False
self.read_texts()
self.engine = create_engine('sqlite:///extras/quest.db', echo=debug)
self.setup_tables()
self.setup_session()
self.hero = self.get_alive_hero()
if self.hero and not self.level:
self.level = Level(self.hero.level)
def setup_session(self):
"""
Start a SQLAlchemy db session.
Saves the session instance in C{self.session}
"""
Session = sessionmaker(bind=self.engine)
self.session = Session()
def setup_tables(self):
"""
Defines the tables to use for L{Hero}
The Metadata instance is saved to C{self.metadata}
"""
self.metadata = MetaData()
hero_table = Table('hero', self.metadata,
Column('id', Integer, Sequence('hero_id_seq'), primary_key=True),
Column('name', String(100)),
Column('health', Integer),
Column('strength', Integer),
Column('hurt', Integer),
Column('kills', Integer),
Column('gold', Integer),
Column('level', Integer),
Column('alive', Boolean),
)
mapper(Hero, hero_table)
level_table = Table('level', self.metadata,
Column('id', Integer, Sequence('hero_id_seq'), primary_key=True),
Column('depth', Integer),
Column('killed', Integer),
Column('looted', Integer),
)
mapper(Level, level_table)
self.metadata.create_all(self.engine)
def read_texts(self):
f = open('extras/goldquest.dat')
self._gamedata = yaml.load(f)
f.close()
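    # Sketch of the expected extras/goldquest.dat layout, inferred from how
    # self._gamedata is used in the methods below (the top-level keys appear in
    # the code, but the sample values here are invented for illustration only):
    #
    #   texts:
    #     newhero: ["%(name)s rises to defend the village!"]
    #     nochampion: ["The village has no champion."]
    #   level:
    #     - {level: 2, text: "You reach the second level...", boss: null}
    #   monster:
    #     - {name: "an orc", lowlevel: 1, highlevel: 3}
    #   boss: ["a dragon", "a lich king"]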
def get_text(self, text):
texts = self._gamedata['texts'][text]
if not texts:
return None
elif isinstance(texts, basestring):
return texts
else:
return random.choice(texts)
def get_level_texts(self, depth):
for lvl in self._gamedata['level']:
if lvl['level'] == depth:
return lvl
def get_monster(self, lvl=None):
if not lvl:
lvl = self.level.depth or 1
monsters = []
for monster in self._gamedata['monster']:
            if lvl >= monster['lowlevel'] and (monster['highlevel'] == 0 or lvl <= monster['highlevel']):
monsters.append(monster['name'])
if monsters:
name = random.choice(monsters)
else:
name = None
return self.level.get_monster(name)
def play(self, command):
msg = ""
command = command.strip().lower()
try:
(command, rest) = command.split(' ')
except ValueError:
rest = ""
rest = rest.strip()
if command in ['reroll']:
return self.reroll()
if not self.hero or not self.hero.alive:
return self.get_text('nochampion')
if command in ['rest', 'vila']:
msg = self.rest()
elif command in ['fight', 'kill', 'slay', u'slåss']:
msg = self.fight()
elif command in ['deeper', 'descend', 'vidare']:
msg = self.go_deeper(rest)
elif command in ['loot', 'search', u'sök', 'finna']:
msg = self.search_treasure()
elif command in ['charsheet', 'stats', u'formulär']:
msg = self.show_charsheet()
else:
return None
self.save_data()
return msg
def save_data(self):
self.session.add(self.hero)
self.session.add(self.level)
self.session.commit()
def get_alive_hero(self):
hero = self.session.query(Hero).filter_by(alive=True).first()
return hero
def get_level(self, lvl):
level = self.session.query(Level).filter_by(depth=lvl).first()
if not level:
level = Level(lvl)
texts = self.get_level_texts(lvl)
if texts:
for k, v in texts.items():
if v:
setattr(level, k , v)
if not level.boss:
level.boss = random.choice(self._gamedata['boss'])
return level
def reroll(self):
if self.hero and self.hero.alive:
msg = self.get_text('noreroll') % self.hero.get_attributes()
return msg
else:
# Delete all old Level data.
self.session.query(Level).delete()
# Reroll new hero.
self.hero = Hero()
self.hero.reroll()
self.level = self.get_level(self.hero.level)
self.save_data()
msg = self.get_text('newhero')
msg = msg % self.hero.get_attributes()
msg = msg + " " + self.level.text
return msg
def search_treasure(self):
#loot = self.hero.search_treasure()
attribs = self.hero.get_attributes()
if self.level.can_loot():
loot = self.level.get_loot()
attribs['loot'] = loot
if loot > 0:
msg = self.get_text('foundloot')
# Should be a method on Hero
self.hero.gold = self.hero.gold + loot
elif loot < 0:
attribs['trap_hurt'] = abs(loot)
self.hero.injure(attribs['trap_hurt'])
msg = self.get_text('foundtrap')
else:
msg = self.get_text('nogold')
else:
msg = self.get_text('noloot')
msg = msg % attribs
return msg
def sneak_attack(self):
if self.level.has_monsters():
#self.logprint("Monsters are available to sneak attack.")
unlucky = self.roll(100)
#self.logprint("unlucky:", unlucky)
if unlucky < 20:
#self.logprint("Sneak attack!")
monster = self.get_monster(self.level.depth)
won = self.hero.fight(monster)
if won:
msg = self.get_text('rest_attack_won')
else:
msg = self.get_text('rest_attack_lost')
attribs = self.hero.get_attributes()
attribs['monster_name'] = monster.name
msg = msg % attribs
return msg
def rest(self):
# If there are monsters alive on the level, there is a
# risk of a sneak attack while resting.
msg = self.sneak_attack()
if msg:
return msg
rested = self.hero.rest()
if rested:
if self.hero.hurt:
restmsg = self.get_text('rests')
else:
restmsg = self.get_text('healed')
else:
restmsg = self.get_text('alreadyhealed')
attribs = self.hero.get_attributes()
attribs['rested'] = rested
msg = restmsg % attribs
return msg
def go_deeper(self, levels=1):
try:
levels = int(levels)
except ValueError:
levels = 1
if levels > 10:
levels = 10
depth = self.hero.go_deeper(levels)
self.level = self.get_level(depth)
msg = self.level.text or self.get_text('deeper')
msg = msg % self.hero.get_attributes()
return msg
def fight(self):
monster = self.get_monster(self.level.depth)
attribs = self.hero.get_attributes()
if not monster:
msg = self.get_text('nomonsters')
return msg % attribs
won = self.hero.fight(monster)
if won:
msg = self.get_text('killed')
attribs['slayed'] = self.get_text('slayed')
else:
msg = self.get_text('died')
attribs['monster'] = monster.name
msg = msg % attribs
msg = self.firstupper(msg)
return msg
def roll(self, sides, times=1):
total = 0
for i in range(times):
total = total + random.randint(1, sides)
return total
def show_charsheet(self):
msg = self.get_text('charsheet')
return msg % self.hero.get_attributes()
def firstupper(self, text):
first = text[0].upper()
return first + text[1:]
class Game(cmd.Cmd):
prompt = 'GoldQuest> '
intro = "Welcome to GoldQuest!"
game = None
def preloop(self):
cfg = Conf('../config.ini', 'LOCAL')
self.game = GoldQuest(cfg)
def default(self, line):
ret = self.game.play(line)
if ret:
print ret
def do_fight(self, line):
"Find a new monster and fight it to the death!"
print self.game.play('fight')
def do_charsheet(self, line):
"Show the character sheet for the current hero."
print self.game.play('charsheet')
def do_reroll(self, line):
"Reroll a new hero if the village doesn't have one already."
print self.game.play('reroll')
def do_rest(self, line):
"Makes the hero rest for a while to regain hurt."
print self.game.play('rest')
def do_loot(self, line):
"The hero will search for loot in the hope to find gold."
print self.game.play('loot')
def do_deeper(self, line):
"Tells the hero to go deeper into the dungeon."
if line:
cmd = 'deeper %s' % line
else:
cmd = 'deeper'
print self.game.play(cmd)
def do_quit(self, line):
"Quit Game"
print "A strange game. The only winning move is not to play."
return True
if __name__ == '__main__':
Game().cmdloop()
| mit | -5,522,686,286,455,691,000 | 30.354015 | 142 | 0.545396 | false |
sixty-north/python-transducers | test/test_reducers.py | 1 | 5854 | import unittest
from transducer._util import empty_iter
from transducer.eager import transduce
from transducer.infrastructure import Transducer
from transducer.reducers import expecting_single, appending, conjoining, adding, sending, completing
from transducer.sinks import CollectingSink, SingularSink
from transducer.transducers import mapping
class TestAppending(unittest.TestCase):
def test_zero_items_returns_initial_empty_list(self):
result = transduce(Transducer,
appending(),
empty_iter())
self.assertEqual(result, [])
def test_two_items_returns_two_element_list(self):
result = transduce(Transducer,
appending(),
(23, 78))
self.assertEqual(result, [23, 78])
def test_appending_to_immutable_sequence_raises_attribute_error(self):
with self.assertRaises(AttributeError):
transduce(Transducer,
appending(),
(23, 78),
init=tuple())
class TestConjoining(unittest.TestCase):
def test_zero_items_returns_initial_empty_tuple(self):
result = transduce(Transducer,
conjoining(),
empty_iter())
self.assertEqual(result, tuple())
def test_two_items_returns_two_element_tuple(self):
result = transduce(Transducer,
conjoining(),
[23, 78])
self.assertEqual(result, (23, 78))
def test_conjoining_to_non_sequence_raises_type_error(self):
with self.assertRaises(TypeError):
transduce(Transducer,
conjoining(),
(23, 78),
init=set())
def test_conjoining_preserves_initial_sequence_type(self):
result = transduce(Transducer,
conjoining(),
(23, 78),
init=[])
self.assertEqual(result, [23, 78])
class TestAdding(unittest.TestCase):
def test_zero_items_returns_initial_empty_set(self):
result = transduce(Transducer,
adding(),
empty_iter())
self.assertEqual(result, set())
def test_two_items_returns_two_element_list(self):
result = transduce(Transducer,
adding(),
[23, 78])
self.assertEqual(result, {23, 78})
def test_adding_to_non_set_raises_attribute_error(self):
with self.assertRaises(AttributeError):
transduce(Transducer,
adding(),
(23, 78),
init=tuple())
class TestExpectingSingle(unittest.TestCase):
def test_too_few_items(self):
with self.assertRaises(RuntimeError):
transduce(mapping(lambda x: x*x),
expecting_single(),
                      [])
def test_exactly_one_item(self):
result = transduce(mapping(lambda x: x*x),
expecting_single(),
[42])
self.assertEqual(result, 1764)
def test_too_many_items(self):
with self.assertRaises(RuntimeError):
transduce(mapping(lambda x: x*x),
expecting_single(),
                      [1, 2])
class TestSending(unittest.TestCase):
def test_zero_items_returns_initial_empty_collection(self):
collection = CollectingSink()
transduce(Transducer,
sending(),
empty_iter(),
init=collection())
self.assertEqual(len(collection), 0)
def test_two_items_returns_two_element_list(self):
collection = CollectingSink()
transduce(Transducer,
sending(),
[23, 78],
init=collection())
self.assertEqual(list(collection), [23, 78])
def test_sending_to_non_sink_raises_attribute_error(self):
with self.assertRaises(AttributeError):
transduce(Transducer,
sending(),
(23, 78),
init=set())
def test_two_items_causes_completion(self):
singular_sink = SingularSink()
transduce(Transducer,
sending(),
[23, 78],
init=singular_sink())
self.assertTrue(singular_sink.has_value)
class TestCompleting(unittest.TestCase):
def test_completing_with_summing_zero_items_returns_identity(self):
def add(a, b):
return a + b
summing = completing(add, identity=0)
result = transduce(Transducer,
summing,
[])
self.assertEqual(result, 0)
def test_completing_with_summing_four_items(self):
def add(a, b):
return a + b
summing = completing(add, identity=0)
result = transduce(Transducer,
summing,
[4, 2, 1, 9])
self.assertEqual(result, 16)
def test_completing_with_multiplying_zero_items_returns_identity(self):
def multiply(a, b):
return a * b
multiplying = completing(multiply, identity=1)
result = transduce(Transducer,
multiplying,
[])
self.assertEqual(result, 1)
def test_completing_with_multiplying_four_items(self):
def multiply(a, b):
return a * b
multiplying = completing(multiply, identity=1)
result = transduce(Transducer,
multiplying,
[4, 2, 1, 9])
self.assertEqual(result, 72)
| mit | -6,821,346,721,772,110,000 | 29.489583 | 100 | 0.53109 | false |
merriam/techtree | bin/check_version.py | 1 | 7249 | #!/usr/bin/env python3
"""
A little program to check if there are later versions of packages I rely upon.
This may grow into a full fledged service, but not today.
"""
import subprocess
import re
import sys
class Program:
"""A known program that can be checked to see the installed version
number matches the published version.
A note on security: checking requires running shell programs. If
a recipe runs something bad, like 'rm foo', then that will run.
"""
def __init__(self, name, installed_command, installed_regex,
published_command, published_regex):
self.name = name
self.installed_command = installed_command
        # command can have shell characters, e.g., "cat * | grep -i version"
# must return 0 (Unix all ok code)
self.installed_regex = installed_regex
# run this regex on the output, match version as capture group 1
self.published_command = published_command
self.published_regex = published_regex
def _e_get_version_number(self, for_error_message, command, regex):
# pylint: disable=no-self-use
""" returns (err, version_number). Just internal repeated code. """
# TODO: This just doesn't cleanly grab stderr.
try:
out = subprocess.check_output(command, shell=True)
except subprocess.CalledProcessError:
return "Could not cleanly execute command to check {} version.".format(
for_error_message), None
if type(regex) == str:
out = str(out, "utf-8") # if regex was not bytes, treat output as unicode
try:
version = re.search(regex, out).group(1)
except AttributeError:
return "Could not match version number in {} command output.", None
except IndexError:
return "{} regex matched but did not have a group (parenthesis)", None
return None, version
def err_check(self):
"""return None if this program is up to date with known programs,
else returns a string with the error.
"""
err, installed_version = self._e_get_version_number(
"installed", self.installed_command, self.installed_regex)
if err:
return err
err, published_version = self._e_get_version_number(
"published", self.published_command, self.published_regex)
if err:
return err
if published_version != installed_version:
return "Versions do not match. Installed {}, but published {}".format(
installed_version, published_version)
return None
class KnownPrograms:
""".known_programs{name} is a Program that could be checked. Only
need to create a single instance."""
def __init__(self):
self.known_programs = {}
def add(self, name, installed_command, installed_regex,
published_command, published_regex):
""" Add this to list of known programs """
program = Program(name, installed_command, installed_regex,
published_command, published_regex)
self.known_programs[name] = program
@classmethod
def usual_suspects(cls):
""" return a set of the usual known programs """
known = cls()
known.add('VirtualBox',
'VirtualBox --help',
r'Oracle VM VirtualBox Manager (\d.\d.\d)',
'curl --silent https://www.virtualbox.org/wiki/Downloads',
r'OS X.*/virtualbox/(\d\.\d\.\d)/')
known.add('Docker',
'docker --version',
r'Docker version (\d.\d.\d)',
'curl --silent https://raw.github.com/dotcloud/docker/release/VERSION',
r'(\d.\d.\d)')
return known
def _add_pass(known):
known.add("_pass", "echo True", "(True)", "echo True", "(True)")
def test_simple_pass():
known = KnownPrograms()
_add_pass(known)
assert "_pass" in known.known_programs
assert "_mystery" not in known.known_programs
assert known.known_programs["_pass"].err_check() is None
def _add_failures(known):
# hate to repeat code
known.add("_version_mismatch", "echo True", "(True)", "echo False", "(False)")
known.add("_installed_will_not_run", "//bad_command", "True", "echo False",
"(False)")
known.add("_no_group_in_installed_regex", "echo True", "True", "echo True",
"(True)")
known.add("_no_group_in_publshed_regex", "echo True", "(True)", "echo True",
"True")
known.add("_installed_will_not_match", "echo True", "(bad_regex)", "echo True",
"(True)")
known.add("_published_will_not_run", "echo True", "(True)", "//bad_command",
"(True)")
known.add("_published_will_not_match", "echo True", "(True)", "echo True",
"(bad_regex)")
def test_failures():
known = KnownPrograms()
_add_failures(known)
for program in known.known_programs.values():
assert program.err_check() is not None
class ProgramSuite:
"""A set of installed programs to check.
Each program is identified by a name, which should correspond to a
list of known programs that can be checked.
There are really only a few possible errors: don't know how to
check, failed to run installed programs, failed to run published
programs, version numbers don't match. Faling to run might be in
the exec or matching the version number. These can be strings for now.
"""
def __init__(self, program_list=None):
if program_list == None:
self.programs = []
else:
self.programs = program_list.split()
def check(self, known):
""" return True if everything up to date, else false.
Print status to terminal.
"""
print("Checking versions...")
all_OK = True
for name in self.programs:
if name not in known.known_programs:
print("{}: FAIL Could not match program in list of "
"known programs".format(name))
all_OK = False
else:
err = known.known_programs[name].err_check()
if err:
print("{}: FAIL {}".format(name, err))
all_OK = False
else:
print("{}: PASS".format(name))
if all_OK:
print("Versions are all up to date.")
else:
print("Failure while checking versions.")
return all_OK
def test_suite_passes():
known = KnownPrograms()
_add_pass(known)
_add_failures(known)
assert ProgramSuite("_pass _pass _pass").check(known)
assert ProgramSuite("").check(known)
assert not ProgramSuite("_pass _version_mismatch _pass").check(known)
assert not ProgramSuite("_pass _unknown _pass").check(known)
def test_usual_suspects():
known = KnownPrograms.usual_suspects()
assert "Docker" in known.known_programs.keys()
if __name__ == "__main__":
usual = KnownPrograms.usual_suspects()
is_ok = ProgramSuite("Docker VirtualBox").check(usual)
if is_ok: # Unix has 0 as success, 1 for fail.
sys.exit(0)
else:
sys.exit(1)
| mit | -293,875,068,873,349,600 | 36.559585 | 86 | 0.599117 | false |
bgarrels/sky | sky/view/view.py | 1 | 3660 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import tornado.web
import tornado.autoreload
import tornado
import os
import shutil
from sky.crawler import crawl
from sky.crawler.crawling import get_image_set
from sky.configs import DEFAULT_CRAWL_CONFIG
from sky.helper import extractDomain
from sky.scraper import Scraper
# from textblob import TextBlob
def is_numeric(x):
try:
int(x)
return True
except ValueError:
return False
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.render('page_template.html', items=[], cached=False)
def post(self):
CRAWL_CONFIG = DEFAULT_CRAWL_CONFIG
CRAWL_CONFIG.update({
'collections_path': os.path.join(os.path.expanduser('~'), 'sky_collections/'),
# 'max_workers': 10,
})
args = self.request.arguments
print(args)
for arg in args:
value = args[arg][0].decode('utf8')
if value and arg != 'url' and arg != 'checkboxcache':
print('pre', arg, CRAWL_CONFIG[arg])
if isinstance(CRAWL_CONFIG[arg], list):
CRAWL_CONFIG[arg] = [int(value)] if is_numeric(value) else [value]
else:
CRAWL_CONFIG[arg] = int(value) if is_numeric(value) else value
print('post', arg, CRAWL_CONFIG[arg])
url = self.get_argument('url', '')
use_cache = self.get_argument('checkboxcache', '')
domain = extractDomain(url)
CRAWL_CONFIG['seed_urls'] = [url]
CRAWL_CONFIG['collection_name'] = domain[7:]
if use_cache != 'on':
col_path = os.path.join(CRAWL_CONFIG['collections_path'],
CRAWL_CONFIG['collection_name'])
print(col_path)
if os.path.exists(col_path):
shutil.rmtree(col_path)
crawl.start(CRAWL_CONFIG)
SCRAPE_CONFIG = CRAWL_CONFIG.copy()
SCRAPE_CONFIG.update({
'template_proportion': 0.4,
'max_templates': 100
})
skindex = Scraper(SCRAPE_CONFIG)
skindex.load_local_pages()
skindex.add_template_elements()
res = skindex.process_all(remove_visuals=True,
maxn=CRAWL_CONFIG['max_saved_responses'])
items = []
for num, url in enumerate(res):
if num == CRAWL_CONFIG['max_saved_responses']:
break
dc = res[url]
dc['url'] = url
dc['source_name'] = domain
dc['images'] = [x for x in reversed(dc['images'][:5])]
# dc['blobs'] = [TextBlob(x) for x in dc['body'] if dc['body']]
items.append(dc)
# this is quite out of place like this
print('num unique images', len(get_image_set({x['url']: x for x in items})))
if items and 'money' in items[0]:
items = sorted(items, key=lambda x: len(x['money']), reverse=True)
self.render('page_template.html', items=items, cached=False)
settings = {
'template_path': os.path.join(os.path.dirname(__file__), 'templates'),
'static_path': os.path.join(os.path.dirname(__file__), 'static')
}
def main(host='localhost', port=7900):
# to run the server, type-in $ python view.py
application = tornado.web.Application([
(r"/", MainHandler),
], **settings)
application.listen(int(port), host)
ioloop = tornado.ioloop.IOLoop().instance()
print('serving skyViewer at "{}:{}" from file: {}'.format(host, port, __file__))
ioloop.start()
if __name__ == '__main__':
main()
| bsd-3-clause | -3,173,711,786,345,593,300 | 28.28 | 90 | 0.565574 | false |
jws325/interview_questions | interview_questions.py | 1 | 9719 | """simple answers to interview questions"""
from collections import namedtuple
# 1. reverse an array
def test_reverse():
def reverse(a):
for index, value in enumerate(a):
if index >= len(a) / 2:
break
replace_index = len(a) - 1 - index
temp = a[replace_index]
a[replace_index] = value
a[index] = temp
return a
# test
l = [1, 2, 3]
assert(reverse(l) == [3, 2, 1])
# 2. create and reverse a singly linked list
def test_ll():
class LL(object):
next = None
val = None
def __init__(self, val):
self.val = val
super(LL, self).__init__()
def set_val(self, val):
self.val = val
def get_val(self):
return self.val
def set_next(self, next):
self.next = next
def get_next(self):
return self.next
def reverse_ll(head):
prev = None
cur = head
while cur is not None:
temp = cur.get_next()
cur.set_next(prev)
prev = cur
cur = temp
# test
first = LL(1)
second = LL(2)
third = LL(3)
first.set_next(second)
second.set_next(third)
reverse_ll(first)
assert(
third.get_next().get_val() == 2
and second.get_next().get_val() == 1
and first.get_next() is None
)
# 3. create a function that returns the nth zero-indexed element of the fibonacci sequence
# use dynamic programming to memoize the sub problems
def test_fibonacci():
memoized_vals = {}
def get_memoized(n):
if n in memoized_vals:
return memoized_vals[n]
else:
return_val = fibonacci(n)
memoized_vals[n] = return_val
return return_val
def fibonacci(n):
if n < 0:
raise ValueError('Negative indices are invalid')
elif n < 2:
return n
else:
return get_memoized(n-1) + get_memoized(n-2)
# test
vals_to_assert = (
(0, 0),
(1, 1),
(2, 1),
(3, 2),
(4, 3),
(5, 5),
(6, 8),
)
for n, return_val in vals_to_assert:
assert fibonacci(n) == return_val
try:
fibonacci(-1)
except ValueError:
pass
else:
raise RuntimeError('The fibonacci function failed to throw an error for the negative index')
# 4. Implement pre-order, in-order, post-order, and breadth-first recursive traversals of a binary tree
def test_binary_tree_recursive_traversals():
class BinaryTree(object):
def __init__(self, value, left_child=None, right_child=None):
self.value = value
self.left_child = left_child
self.right_child = right_child
def get_val(self):
return self.value
def get_left_child(self):
return self.left_child
def get_right_child(self):
return self.right_child
def set_left_child(self, left_child):
self.left_child = left_child
def set_right_child(self, right_child):
self.right_child = right_child
# make the tree
"""
1
2 3
4 5
"""
head = BinaryTree(
1,
left_child=BinaryTree(
2, left_child=BinaryTree(4)
),
right_child=BinaryTree(
3, right_child=BinaryTree(5)
)
)
def pre_order_depth_first_search(node, q):
if node:
q.append(node.value)
q.extend(pre_order_depth_first_search(node.get_left_child(), []))
q.extend(pre_order_depth_first_search(node.get_right_child(), []))
return q
assert pre_order_depth_first_search(head, []) == [1, 2, 4, 3, 5]
def in_order_depth_first_search(node, q):
if node:
q.extend(in_order_depth_first_search(node.get_left_child(), []))
q.append(node.value)
q.extend(in_order_depth_first_search(node.get_right_child(), []))
return q
assert in_order_depth_first_search(head, []) == [4, 2, 1, 3, 5]
def post_order_depth_first_search(node, q):
if node:
q.extend(post_order_depth_first_search(node.get_left_child(), []))
q.extend(post_order_depth_first_search(node.get_right_child(), []))
q.append(node.value)
return q
assert post_order_depth_first_search(head, []) == [4, 2, 5, 3, 1]
def breadth_first_search(node, q):
if node:
q.append(node.value)
left_children = breadth_first_search(node.get_left_child(), [])
right_children = breadth_first_search(node.get_right_child(), [])
max_child_level = max(len(left_children), len(right_children))
for x in range(max_child_level):
for children in (left_children, right_children):
if len(children) > x:
q.append(children[x])
return q
assert breadth_first_search(head, []) == [1, 2, 3, 4, 5]
#5. implement binary search
def test_binary_search():
    def binary_search(iterable, search_val, offset=0):
        # 'offset' tracks the slice's position so the returned index refers
        # to the original list, not the current slice
        if not iterable:
            return None
        middle_index = len(iterable) / 2
        middle_val = iterable[middle_index]
        if middle_val == search_val:
            return offset + middle_index
        elif middle_val < search_val:
            return binary_search(iterable[middle_index + 1:], search_val,
                                 offset + middle_index + 1)
        else:
            return binary_search(iterable[: middle_index], search_val, offset)
assert binary_search([1, 2, 3, 4, 5, 6, 7], 4) == 3
#6. Implement Dijkstra's algorithm
def test_djikstra():
Edge = namedtuple('Edge', ('end', 'cost'))
# straight line directed graph
# map from node to directed edges
graph = {
1: (Edge(end=2, cost=1),),
2: (Edge(end=3, cost=1),),
3: (Edge(end=4, cost=1),),
4: (Edge(end=5, cost=1),),
5: (Edge(end=6, cost=1),),
6: (Edge(end=7, cost=1),),
7: (Edge(end=8, cost=1),),
8: (Edge(end=9, cost=1),),
9: (Edge(end=10, cost=1),),
10: tuple(),
}
# given a start node and end node, return nodes on the shortest path and the total cost of that path
def get_next_node(tentative_costs, unvisited):
min_cost = float('inf')
min_cost_node = None
for node in unvisited:
temp_cost = tentative_costs[node]
if temp_cost < min_cost:
min_cost = temp_cost
min_cost_node = node
if min_cost == float('inf'):
min_cost_node = None
return min_cost_node
def get_previous_nodes(graph, current):
previous_nodes = []
for node, edges in graph.iteritems():
for edge in edges:
if edge.end == current:
previous_nodes.append(node)
break
return previous_nodes
def get_previous_node_on_path(graph, current, tentative_costs):
previous_nodes = get_previous_nodes(graph, current)
return get_next_node(tentative_costs, previous_nodes)
def djikstra(graph, start, end):
# map nodes to tentative costs
tentative_costs = {node: 0 if node == start else float('inf') for node in graph.keys()}
unvisited = [node for node in graph.keys()]
while True:
node = get_next_node(tentative_costs, unvisited)
if node is None:
break
node_index = unvisited.index(node)
node = unvisited.pop(node_index)
node_cost = tentative_costs[node]
edges = graph[node]
            for edge_end, edge_cost in edges:
                current_cost = tentative_costs[edge_end]
                new_cost = node_cost + edge_cost
                if new_cost < current_cost:
                    tentative_costs[edge_end] = new_cost
total_cost = tentative_costs[end]
path = [end]
current = end
while current != start:
current = get_previous_node_on_path(graph, current, tentative_costs)
path.insert(0, current)
return total_cost, path
assert djikstra(graph, 1, 10) == (9, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
#7. implement a DFA state machine
def test_dfa_state_machine():
"""
This machine is idle by default.
If the button is pressed once, it starts flashing.
If the button is pressed twice, it buzzes.
If the button is pressed a third time, it goes back to idle.
If the emergency switch is pressed at any time, the machine resets its state and goes idle.
"""
dfa = {
'idle': lambda action: 'flashing' if action == 'button_press' else 'idle',
'flashing': lambda action: 'buzzing' if action == 'button_press' else 'idle',
'buzzing': lambda action: 'idle',
}
class dfa_machine(object):
def __init__(self, dfa, state):
self.dfa = dfa
self.state = state
def transition(self, action):
if action:
                self.state = self.dfa[self.state](action)
def get_state(self):
return self.state
action_state_sequence = (
(None, 'idle'),
('button_press', 'flashing'),
(None, 'flashing'),
('button_press', 'buzzing'),
('button_press', 'idle'),
('button_press', 'flashing'),
('emergency_switch', 'idle')
)
machine = dfa_machine(dfa, 'idle')
for action, result_state in action_state_sequence:
machine.transition(action)
        assert machine.get_state() == result_state | gpl-2.0 | 1,314,242,093,735,377,200 | 25.703297 | 104 | 0.543369 | false |
hanx11/psmonitor | bottle_example.py | 1 | 1289 | from bottle import Bottle
import pymongo
load = Bottle()
conn = pymongo.MongoReplicaSetClient(
'example01.com, example02.com',
replicaSet='rs1',
)
db = conn.reports
@load.get('/<server>')
def get_loaddata(server):
cpu_user = list()
cpu_nice = list()
cpu_system = list()
cpu_idle = list()
cpu_irq = list()
disk_root_free = list()
phymem_free = list()
data_cursor = list()
if server == 'example02':
data_cursor = db.example02.find()
elif server == 'example01':
data_cursor = db.example01.find()
for data in data_cursor:
date = '%s' % data['date']
cpu_user.append([date, data['cpu']['user']])
cpu_nice.append([date, data['cpu']['nice']])
cpu_system.append([date, data['cpu']['system']])
cpu_idle.append([date, data['cpu']['idle']])
cpu_irq.append([date, data['cpu']['irq']])
disk_root_free.append([date, data['disk_root']])
phymem_free.append([date, data['phymem']])
return {
'cpu_user': cpu_user,
'cpu_irq': cpu_irq,
'cpu_system': cpu_system,
'cpu_nice': cpu_nice,
'cpu_idle': cpu_idle,
'disk_root_free': disk_root_free,
'phymem_free': phymem_free,
}
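# Illustrative only: the 'load' sub-application is not started in this module;
# it is typically mounted on (or run by) a parent Bottle app, e.g.
#   from bottle import run
#   run(load, host='localhost', port=8080)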
| mit | -1,367,374,833,513,415,200 | 25.854167 | 56 | 0.547711 | false |
ichika/yoineko | core.py | 1 | 3654 | #!/usr/bin/env python3
import sys
import socket
import json
import db
class NodeBase:
"""base class of node"""
def call(self, addr, msg, wait=True):
"""do request to other node and return result"""
request = bytes(json.dumps(msg), 'utf-8')
print('request', request)
self.socket.sendto(request, addr)
if wait:
response, addr = self.socket.recvfrom(1024)
print('response', response)
return json.loads(response.decode())
class Watashi(NodeBase):
"""my node"""
host = 'localhost'
port = 2000
port_xmpp = 2012
def __init__(self, port=None, db_name='data'):
"""run my node"""
db.init(db_name)
if port:
self.port = port
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.socket.bind((self.host, self.port))
for node in db.Node.select():
print('exist', node, node.addr, node.port)
            self.call((node.addr, node.port), 'hello')
#self.listen()
self.listen_xmpp()
def listen(self):
"""listen for events"""
print('listen', self.host, self.port)
while True:
response, addr = self.socket.recvfrom(1024)
print('receive', response, addr)
self.call(addr, 'asd', wait=False)
def listen_xmpp(self):
"""listen for jabber connections"""
connection_data = b'''<?xml version='1.0'?>
<stream:stream xmlns:stream='http://etherx.jabber.org/streams'id='1'
xmlns='jabber:client' from='localhost'>'''
auth1_data = b'''<iq type='result' from='localhost' id='auth_1'>
<query xmlns='jabber:iq:auth'>
<username/>
<password/>
<resource/>
</query>
</iq>'''
auth2_data = b'''<iq type='result' from='localhost' id='auth_2'/>'''
roster_data = b'''<iq id='aab2a' type='result' from='localhost'>
<query xmlns='jabber:iq:roster'>
<item jid='sabine@yak' name='sabine' subscription='both'>
<group>Family</group>
</item>
</query>
</iq>'''
list_data = b'''<iq id='aab3a' type='result'/><iq id='aab5a' type='result'/>'''
print('listen xmpp', self.host, self.port_xmpp)
self.socket_xmpp = socket.socket()
self.socket_xmpp.bind((self.host, self.port_xmpp))
self.socket_xmpp.listen(5)
connect, addr = self.socket_xmpp.accept()
print('connect xmpp', connect, addr)
# connection
data = connect.recv(1024)
print('receive', data)
connect.send(connection_data)
print('send ', connection_data)
data = connect.recv(1024)
print('receive', data)
connect.send(auth1_data)
print('send ', auth1_data)
data = connect.recv(1024)
print('receive', data)
connect.send(auth2_data)
print('send ', auth2_data)
data = connect.recv(1024)
print('receive', data)
connect.send(roster_data)
print('send ', roster_data)
data = connect.recv(1024)
print('receive', data)
connect.send(list_data)
print('send ', list_data)
data = connect.recv(1024)
print('receive', data)
data = connect.recv(1024)
print('receive', data)
class Node(NodeBase):
"""known node"""
if __name__ == '__main__':
opts = {}
if len(sys.argv) == 3:
opts['port'] = int(sys.argv[1])
opts['db_name'] = sys.argv[2]
Watashi(**opts)
| mit | 278,101,519,890,364,600 | 27.325581 | 87 | 0.536946 | false |
HaraldWeber/client | src/fa/path.py | 1 | 6571 | # -------------------------------------------------------------------------------
# Copyright (c) 2012 Gael Honorez.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the GNU Public License v3.0
# which accompanies this distribution, and is available at
# http://www.gnu.org/licenses/gpl.html
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#-------------------------------------------------------------------------------
import os
import sys
from PyQt4 import QtCore
import logging
import util
logger = logging.getLogger(__name__)
__author__ = 'Thygrrr, Dragonfire'
def steamPath():
try:
import _winreg
steam_key = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, "Software\\Valve\\Steam", 0, (_winreg.KEY_WOW64_64KEY + _winreg.KEY_ALL_ACCESS))
return _winreg.QueryValueEx(steam_key, "SteamPath")[0].replace("/", "\\")
except StandardError, e:
return None
def getGameFolderFA():
settings = QtCore.QSettings("ForgedAllianceForever", "FA Lobby")
settings.beginGroup("ForgedAlliance")
gameFolderFA = unicode(settings.value("app/path"))
settings.endGroup()
return fixFolderPathFA(gameFolderFA)
def setGameFolderFA(newGameFolderFA):
logger.info("Setting game path to: %s" % newGameFolderFA)
settings = QtCore.QSettings("ForgedAllianceForever", "FA Lobby")
settings.beginGroup("ForgedAlliance")
settings.setValue("app/path", newGameFolderFA)
settings.endGroup()
settings.sync()
def getGameFolderSC():
settings = QtCore.QSettings("ForgedAllianceForever", "FA Lobby")
settings.beginGroup("SupremeCommanderVanilla")
gameFolderSC = unicode(settings.value("app/path"))
settings.endGroup()
return gameFolderSC
def setGameFolderSC(newGameFolderSC):
settings = QtCore.QSettings("ForgedAllianceForever", "FA Lobby")
settings.beginGroup("SupremeCommanderVanilla")
settings.setValue("app/path", newGameFolderSC)
settings.endGroup()
settings.sync()
def fixFolderPathFA(gameFolderFA):
"""
Correct the game folder, e.g. if you selected the bin folder or exe.
"""
normPath = os.path.normpath(gameFolderFA)
notAllowed = [u'\\bin', u'\\bin\\SupremeCommander.exe']
for check in notAllowed:
if normPath.endswith(check):
newPath = normPath[:-len(check)]
# check if the new folder is valid
if validatePath(newPath):
setGameFolderFA(newPath)
return newPath
return gameFolderFA
def writeFAPathLua():
"""
Writes a small lua file to disk that helps the new SupComDataPath.lua find the actual install of the game
"""
name = os.path.join(util.APPDATA_DIR, u"fa_path.lua")
code = u"fa_path = '" + getGameFolderFA().replace(u"\\", u"\\\\") + u"'\n"
if getGameFolderSC():
code = code + u"sc_path = '" + getGameFolderSC().replace(u"\\", u"\\\\") + u"'\n"
gamepath_sc = util.settings.value("SupremeCommander/app/path", type=str)
if gamepath_sc:
code = code + u"sc_path = '" + gamepath_sc.replace(u"\\", u"\\\\") + u"'\n"
with open(name, "w+") as lua:
lua.write(code.encode("utf-8"))
lua.flush()
os.fsync(lua.fileno()) # Ensuring the file is absolutely, positively on disk.
def typicalForgedAlliancePaths():
"""
Returns a list of the most probable paths where Supreme Commander: Forged Alliance might be installed
"""
pathlist = [
getGameFolderFA(),
#Retail path
os.path.expandvars("%ProgramFiles%\\THQ\\Gas Powered Games\\Supreme Commander - Forged Alliance"),
#Direct2Drive Paths
#... allegedly identical to impulse paths - need to confirm this
#Impulse/GameStop Paths - might need confirmation yet
os.path.expandvars("%ProgramFiles%\\Supreme Commander - Forged Alliance"),
#Guessed Steam path
os.path.expandvars("%ProgramFiles%\\Steam\\steamapps\\common\\supreme commander forged alliance")
]
#Registry Steam path
steam_path = steamPath()
if steam_path:
pathlist.append(os.path.join(steam_path, "SteamApps", "common", "Supreme Commander Forged Alliance"))
return filter(validatePath, pathlist)
def typicalSupComPaths():
"""
Returns a list of the most probable paths where Supreme Commander might be installed
"""
pathlist = [
getGameFolderSC(),
#Retail path
os.path.expandvars("%ProgramFiles%\\THQ\\Gas Powered Games\\Supreme Commander"),
#Direct2Drive Paths
#... allegedly identical to impulse paths - need to confirm this
#Impulse/GameStop Paths - might need confirmation yet
os.path.expandvars("%ProgramFiles%\\Supreme Commander"),
#Guessed Steam path
os.path.expandvars("%ProgramFiles%\\Steam\\steamapps\\common\\supreme commander")
]
#Registry Steam path
steam_path = steamPath()
if steam_path:
pathlist.append(os.path.join(steam_path, "SteamApps", "common", "Supreme Commander"))
return filter(validatePath, pathlist)
def validatePath(path):
try:
# Supcom only supports Ascii Paths
if not path.decode("ascii"): return False
#We check whether the base path and a gamedata/lua.scd file exists. This is a mildly naive check, but should suffice
if not os.path.isdir(path): return False
if not os.path.isfile(os.path.join(path, r'gamedata', r'lua.scd')): return False
#Reject or fix paths that end with a slash.
#LATER: this can have all sorts of intelligent logic added
#Suggested: Check if the files are actually the right ones, if not, tell the user what's wrong with them.
if path.endswith("/"): return False
if path.endswith("\\"): return False
return True
except:
_, value, _ = sys.exc_info()
logger.error(u"Path validation failed: " + unicode(value))
return False
def autoDetectPath():
for path in typicalForgedAlliancePaths():
if validatePath(path):
return path
return None
| gpl-3.0 | 1,149,006,220,164,790,800 | 34.139037 | 143 | 0.656521 | false |
russellb/nova | nova/vnc/xvp_proxy.py | 1 | 6374 | #!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Eventlet WSGI Services to proxy VNC for XCP protocol."""
import socket
import webob
import eventlet
import eventlet.green
import eventlet.greenio
import eventlet.wsgi
from nova import context
from nova import flags
from nova import log as logging
from nova.openstack.common import cfg
from nova import rpc
from nova import version
from nova import wsgi
LOG = logging.getLogger(__name__)
xvp_proxy_opts = [
cfg.IntOpt('xvpvncproxy_port',
default=6081,
help='Port that the XCP VNC proxy should bind to'),
cfg.StrOpt('xvpvncproxy_host',
default='0.0.0.0',
help='Address that the XCP VNC proxy should bind to'),
]
FLAGS = flags.FLAGS
FLAGS.register_opts(xvp_proxy_opts)
flags.DECLARE('consoleauth_topic', 'nova.consoleauth')
class XCPVNCProxy(object):
"""Class to use the xvp auth protocol to proxy instance vnc consoles."""
def one_way_proxy(self, source, dest):
"""Proxy tcp connection from source to dest."""
while True:
try:
d = source.recv(32384)
except Exception as e:
d = None
# If recv fails, send a write shutdown the other direction
if d is None or len(d) == 0:
dest.shutdown(socket.SHUT_WR)
break
# If send fails, terminate proxy in both directions
try:
# sendall raises an exception on write error, unlike send
dest.sendall(d)
except Exception as e:
source.close()
dest.close()
break
def handshake(self, req, connect_info, sockets):
"""Execute hypervisor-specific vnc auth handshaking (if needed)."""
host = connect_info['host']
port = int(connect_info['port'])
server = eventlet.connect((host, port))
# Handshake as necessary
if connect_info.get('internal_access_path'):
server.sendall("CONNECT %s HTTP/1.1\r\n\r\n" %
connect_info['internal_access_path'])
data = ""
while True:
b = server.recv(1)
if b:
data += b
if data.find("\r\n\r\n") != -1:
if not data.split("\r\n")[0].find("200"):
LOG.audit(_("Error in handshake: %s"), data)
return
break
if not b or len(data) > 4096:
LOG.audit(_("Error in handshake: %s"), data)
return
client = req.environ['eventlet.input'].get_socket()
client.sendall("HTTP/1.1 200 OK\r\n\r\n")
socketsserver = None
sockets['client'] = client
sockets['server'] = server
def proxy_connection(self, req, connect_info, start_response):
"""Spawn bi-directional vnc proxy."""
sockets = {}
t0 = eventlet.spawn(self.handshake, req, connect_info, sockets)
t0.wait()
if not sockets.get('client') or not sockets.get('server'):
LOG.audit(_("Invalid request: %s"), req)
start_response('400 Invalid Request',
[('content-type', 'text/html')])
return "Invalid Request"
client = sockets['client']
server = sockets['server']
t1 = eventlet.spawn(self.one_way_proxy, client, server)
t2 = eventlet.spawn(self.one_way_proxy, server, client)
t1.wait()
t2.wait()
# Make sure our sockets are closed
server.close()
client.close()
def __call__(self, environ, start_response):
try:
req = webob.Request(environ)
LOG.audit(_("Request: %s"), req)
token = req.params.get('token')
if not token:
LOG.audit(_("Request made with missing token: %s"), req)
start_response('400 Invalid Request',
[('content-type', 'text/html')])
return "Invalid Request"
ctxt = context.get_admin_context()
connect_info = rpc.call(ctxt, FLAGS.consoleauth_topic,
{'method': 'check_token',
'args': {'token': token}})
if not connect_info:
LOG.audit(_("Request made with invalid token: %s"), req)
start_response('401 Not Authorized',
[('content-type', 'text/html')])
return "Not Authorized"
return self.proxy_connection(req, connect_info, start_response)
except Exception as e:
LOG.audit(_("Unexpected error: %s"), e)
class SafeHttpProtocol(eventlet.wsgi.HttpProtocol):
"""HttpProtocol wrapper to suppress IOErrors.
The proxy code above always shuts down client connections, so we catch
the IOError that raises when the SocketServer tries to flush the
connection.
"""
def finish(self):
try:
eventlet.green.BaseHTTPServer.BaseHTTPRequestHandler.finish(self)
except IOError:
pass
eventlet.greenio.shutdown_safe(self.connection)
self.connection.close()
def get_wsgi_server():
LOG.audit(_("Starting nova-xvpvncproxy node (version %s)"),
version.version_string_with_vcs())
return wsgi.Server("XCP VNC Proxy",
XCPVNCProxy(),
protocol=SafeHttpProtocol,
host=FLAGS.xvpvncproxy_host,
port=FLAGS.xvpvncproxy_port)
| apache-2.0 | 2,121,944,898,252,933,000 | 33.085561 | 77 | 0.564167 | false |
peragro/peragro-index | damn_index/cli.py | 1 | 3711 | from __future__ import absolute_import
from __future__ import print_function
import sys
import argparse
import json
import copy
from elasticsearch import Elasticsearch
from damn_at.utilities import unique_asset_id_reference_from_fields
'''
pt a ../peragro-test-files/mesh/blender/cube1.blend -f json-pretty\
| pt index elastic\
| pt index stats
'''
def index(asset):
es = Elasticsearch()
ret = es.index(index='damn', doc_type='asset',
id=asset['id'], body=asset)
print(ret)
def create_argparse(parser, subparsers):
subparse = subparsers.add_parser(
"index", # aliases=("i",),
help="Anything to do with indexing",
)
subsubparsers = subparse.add_subparsers(
title='subcommands',
description='valid subcommands',
help='additional help',
)
create_argparse_elastic(subparse, subsubparsers)
create_argparse_generate_search(subparse, subsubparsers)
create_argparse_stats(subparse, subsubparsers)
def create_argparse_elastic(parser, subparsers):
subparse = subparsers.add_parser(
"elastic", # aliases=("transform",),
help="index the given file description to elasticsearch",
)
subparse.add_argument(
'infile', nargs='?',
type=argparse.FileType('r'),
default=sys.stdin)
def transform(args):
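        # Build one Elasticsearch document per asset: each carries a copy of the
        # file record (stripped of its asset list and file-level metadata) plus the
        # asset's own fields, keyed by a reference derived from the file hash,
        # subname and mimetype.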
data = args.infile.read()
data = json.loads(data)
file_hash = data["file"]["hash"]
assets = []
file_copy = copy.deepcopy(data)
file_copy['metadata'] = file_copy.get('metadata', {})
del file_copy['assets']
del file_copy['metadata']
for asset in data['assets']:
subname = asset['asset']['subname']
mimetype = asset['asset']['mimetype']
id = unique_asset_id_reference_from_fields(file_hash, subname, mimetype)
a = {'id': id, 'file': file_copy}
asset['metadata'] = asset.get('metadata', {})
a.update(asset)
assets.append(a)
for asset in assets:
index(asset)
subparse.set_defaults(
func=lambda args:
transform(args),
)
def create_argparse_generate_search(parser, subparsers):
subparse = subparsers.add_parser(
"generate-search", # aliases=("transform",),
help="Generate a faceted search",
)
def search(args):
from damn_at import Analyzer
from damn_at.utilities import get_metadatavalue_fieldname
m = Analyzer().get_supported_metadata()
ret = {'aggs': {},
'query': {'match_all': {}},
'from': 3, 'size': 1, }
for mime, metas in list(m.items()):
for meta, type in metas:
field_name = get_metadatavalue_fieldname(type)
ret['aggs'][meta] = {'terms': {'field': 'metadata.'+meta+'.'+field_name}}
print(json.dumps(ret, indent=2))
subparse.set_defaults(
func=lambda args:
search(args),
)
def create_argparse_stats(parser, subparsers):
subparse = subparsers.add_parser(
"stats", # aliases=("transform",),
help="Generate stats from an ES bulk upload",
)
subparse.add_argument(
'infile', nargs='?',
type=argparse.FileType('r'),
default=sys.stdin)
def stats(args):
data = args.infile.read()
data = json.loads(data)
print('Uploaded: {0:>6}'.format(len(data['items'])))
print('Errors: {0:>6}'.format(data['errors']))
print('took: {:>6} ms'.format(data['took']))
if data['errors']:
sys.exit(1)
subparse.set_defaults(
func=lambda args:
stats(args),
)
| bsd-3-clause | -1,764,579,313,117,547,800 | 28.927419 | 89 | 0.586904 | false |
Daniel-Brosnan-Blazquez/DIT-100 | lib/bitOps.py | 1 | 1763 | # -*- coding: utf-8 -*-
def CheckBit(value, position):
mask = 1 << position
return value & mask == mask
def SetBit(value, position):
return value | (1 << position)
def ClearBit(value, position):
return value & ~(1 << position)
def FlipBit(value, position):
return value ^ (1 << position)
def CheckBits(value, mask):
return value & mask == mask
def SetBits(value, mask):
return value | mask
def ClearBits(value, mask):
return value & (~mask)
def FlipBits(value, mask):
return value ^ mask
def SetValueUnderMask(valueToSetUnderMask, currentValue, mask):
currentValueCleared = ClearBits(currentValue, mask) # clear bits under mask
i = 0
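    # Shift the mask right until its least-significant set bit reaches position 0;
    # i then holds the bit offset at which the new value must be placed.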
while (mask % 2 == 0 and mask != 0x00):
mask = mask >> 1
i += 1
return SetBits(valueToSetUnderMask << i, currentValueCleared)
def GetValueUnderMask(currentValue, mask):
currentValueCleared = ClearBits(currentValue, ~mask) # clear bits not under mask
i = 0
while (mask % 2 == 0 and mask != 0x00):
mask = mask >> 1
i += 1
return currentValueCleared >> i
def GetValueUnderMaskDictMatch(currentValue, mask, dictionary):
value = GetValueUnderMask (currentValue, mask)
# Return name that maps the current value
for key in dictionary.keys():
if dictionary[key] == value:
return key
return None
def TwosComplementToByte(value):
if value >= 0 and value <= 127:
return value
else:
return value - 256
def TwosComplementToCustom(value, signBitPosition):
if value >= 0 and value <= (1<<signBitPosition)-1:
return value
else:
return value - (2 << signBitPosition)
| gpl-3.0 | -3,122,407,504,018,430,000 | 26.435484 | 84 | 0.617697 | false |
ilmir-k/website-addons | website_sales_team/website_sales_team_models.py | 1 | 2489 | # -*- coding: utf-8 -*-
from openerp import SUPERUSER_ID
from openerp import fields
from openerp import models
class ProductTemplate(models.Model):
_inherit = 'product.template'
def _get_default_section_id(self):
return self.env.user.default_section_id
section_id = fields.Many2one('crm.case.section', 'Sales Team', default=_get_default_section_id)
section_member_ids = fields.Many2many('res.users', 'Sales Team members', related='section_id.member_ids')
section_public_categ_ids = fields.Many2many('product.public.category', related='section_id.public_categ_ids')
class CrmCaseSection(models.Model):
_inherit = "crm.case.section"
product_ids = fields.One2many('product.template', 'section_id', string='Products')
website_description = fields.Html('Description for the website', translate=True)
public_categ_ids = fields.Many2many('product.public.category', 'section_public_categ_rel', 'section_id', 'category_id', string='Allowed public categories', help='All child categories are also allowed automatically')
sale_description = fields.Char('Sale description', help='This text is added to email for customer')
class ResUsers(models.Model):
_inherit = 'res.users'
section_ids = fields.Many2many('crm.case.section', 'sale_member_rel', 'member_id', 'section_id', 'Sales Team')
def _get_group(self, cr, uid, context=None):
dataobj = self.pool.get('ir.model.data')
result = []
try:
dummy, group_id = dataobj.get_object_reference(cr, SUPERUSER_ID, 'base', 'group_user')
result.append(group_id)
# dummy,group_id = dataobj.get_object_reference(cr, SUPERUSER_ID, 'base', 'group_partner_manager')
# result.append(group_id)
except ValueError:
# If these groups does not exists anymore
pass
return result
_defaults = {
'groups_id': _get_group,
}
class ProductPublicCategory(models.Model):
_inherit = "product.public.category"
section_ids = fields.Many2many('crm.case.section', 'section_public_categ_rel', 'category_id', 'section_id', string='Sales teams')
class SaleOrder(models.Model):
_inherit = 'sale.order'
parent_id = fields.Many2one('sale.order', 'Parent')
child_ids = fields.One2many('sale.order', 'parent_id', 'Child orders')
_track = {
'state': {'website_sales_team.mt_order_created': lambda self, cr, uid, obj, ctx=None: obj.state in ['draft']}
}
| lgpl-3.0 | 750,377,855,924,190,700 | 37.890625 | 219 | 0.672961 | false |
R2pChyou/starcheat | starcheat/gui/ship.py | 1 | 2414 | """
Qt ship management dialog
"""
import logging
from gui.common import ListEdit
import qt_ship
from PyQt5.QtWidgets import QDialog
class Ship():
def __init__(self, main_window):
self.dialog = QDialog(main_window.window)
self.ui = qt_ship.Ui_Dialog()
self.ui.setupUi(self.dialog)
self.main_window = main_window
self.assets = main_window.assets
self.player = main_window.player
self.ship_upgrades = self.player.get_ship_upgrades()
self.ai = self.player.get_ai()
self.ui.capabilities_button.clicked.connect(self.edit_capabilities)
self.ui.available_missions_button.clicked.connect(self.edit_available)
self.ui.completed_missions_button.clicked.connect(self.edit_completed)
self.update()
def update(self):
self.ui.crew_size.setValue(self.ship_upgrades["crewSize"])
self.ui.upgrade_level.setValue(self.ship_upgrades["shipLevel"])
self.ui.max_fuel.setValue(self.ship_upgrades["maxFuel"])
self.ui.capabilities.setText(", ".join(self.ship_upgrades["capabilities"]))
self.ui.available_missions.setText(", ".join(self.ai["availableMissions"]))
self.ui.completed_missions.setText(", ".join(self.ai["completedMissions"]))
def edit_capabilities(self):
edit = ListEdit(self.dialog, self.ship_upgrades["capabilities"])
ok = edit.dialog.exec()
if ok == 1:
self.ship_upgrades["capabilities"] = edit.get_list()
self.update()
def edit_available(self):
edit = ListEdit(self.dialog, self.ai["availableMissions"])
ok = edit.dialog.exec()
if ok == 1:
self.ai["availableMissions"] = edit.get_list()
self.update()
def edit_completed(self):
edit = ListEdit(self.dialog, self.ai["completedMissions"])
ok = edit.dialog.exec()
if ok == 1:
self.ai["completedMissions"] = edit.get_list()
self.update()
def write_ship(self):
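        # Push the edited values from the dialog back into the player's ship
        # upgrade and AI data before marking the save as modified.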
self.ship_upgrades["crewSize"] = self.ui.crew_size.value()
self.ship_upgrades["maxFuel"] = self.ui.max_fuel.value()
self.ship_upgrades["shipLevel"] = self.ui.upgrade_level.value()
self.player.set_ship_upgrades(self.ship_upgrades)
self.player.set_ai(self.ai)
logging.debug("Wrote ship/ai")
self.main_window.window.setWindowModified(True)
| mit | -6,686,477,986,343,633,000 | 33.985507 | 83 | 0.637531 | false |
genesi/cardapio | src/plugins/tracker_fts.py | 1 | 4314 | #
# Copyright (C) 2010 Cardapio Team ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
class CardapioPlugin(CardapioPluginInterface):
author = _('Cardapio Team')
name = _('Full-text file search')
description = _('Search <b>inside</b> local files and folders indexed with Tracker')
url = ''
help_text = ''
version = '1.43'
plugin_api_version = 1.40
search_delay_type = 'local'
default_keyword = 'ftstracker'
category_name = _('Results within files')
category_icon = 'system-search'
icon = 'system-search'
category_tooltip = _('Results found inside the files in your computer')
hide_from_sidebar = True
def __init__(self, cardapio_proxy, category):
self.c = cardapio_proxy
try:
from os.path import split
from urllib2 import quote, splittype
except Exception, exception:
self.c.write_to_log(self, 'Could not import certain modules', is_error = True)
self.c.write_to_log(self, exception, is_error = True)
self.loaded = False
return
self.split = split
self.quote = quote
self.splittype = splittype
self.tracker = None
bus = dbus.SessionBus()
if bus.request_name('org.freedesktop.Tracker1') == dbus.bus.REQUEST_NAME_REPLY_IN_QUEUE:
tracker_object = bus.get_object('org.freedesktop.Tracker1', '/org/freedesktop/Tracker1/Resources')
self.tracker = dbus.Interface(tracker_object, 'org.freedesktop.Tracker1.Resources')
else:
self.c.write_to_log(self, 'Could not connect to Tracker', is_error = True)
self.loaded = False
bus.release_name('org.freedesktop.Tracker1')
return
if (which("tracker-needle") is not None):
self.action_command = r"tracker-needle '%s'"
else:
self.action_command = r"tracker-search-tool '%s'"
self.action = {
'name' : _('Show additional results'),
'tooltip' : _('Show additional search results in the Tracker search tool'),
'icon name' : 'system-search',
'type' : 'callback',
'command' : self.more_results_action,
'context menu' : None,
}
self.loaded = True
def search(self, text, result_limit):
self.current_query = text
text = self.quote(text).lower()
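        # Ask Tracker for indexed, available files whose full-text index matches
        # the query, returning each hit's URL and MIME type (capped at result_limit).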
self.tracker.SparqlQuery(
"""
SELECT ?uri ?mime
WHERE {
?item a nie:InformationElement;
fts:match "%s";
nie:url ?uri;
nie:mimeType ?mime;
tracker:available true.
}
LIMIT %d
"""
% (text, result_limit),
dbus_interface='org.freedesktop.Tracker1.Resources',
reply_handler=self.prepare_and_handle_search_result,
error_handler=self.handle_search_error
)
# not using: ORDER BY DESC(fts:rank(?item))
def handle_search_error(self, error):
self.c.handle_search_error(self, error)
def prepare_and_handle_search_result(self, results):
formatted_results = []
for result in results:
dummy, canonical_path = self.splittype(result[0])
parent_name, child_name = self.split(canonical_path)
icon_name = result[1]
formatted_result = {
'name' : child_name,
'icon name' : icon_name,
'tooltip' : result[0],
'command' : canonical_path,
'type' : 'xdg',
'context menu' : None,
}
formatted_results.append(formatted_result)
if results:
formatted_results.append(self.action)
self.c.handle_search_result(self, formatted_results, self.current_query)
def more_results_action(self, text):
try:
subprocess.Popen(self.action_command % text, shell = True)
except OSError, e:
self.c.write_to_log(self, 'Error launching plugin action.', is_error = True)
self.c.write_to_log(self, e, is_error = True)
| gpl-3.0 | 8,854,574,007,254,268,000 | 27.012987 | 101 | 0.659017 | false |
LaiTash/OEUO-python | profiles/default/scripts/journal_event.py | 1 | 1099 | from uo.serpent.script import ScriptBase
from uo import manager
import gevent
import re
class BindObj(object):
def __init__(self, regexp, callback):
self.regexp = re.compile(regexp)
self.callback = callback
class JournalScannerScript(ScriptBase):
script_name = "Journal scanner"
def load(self):
"""
:type manager manager
"""
global UO
UO = manager.UO
self.binds = set()
self.old_ref = 0
def bind(self, regexp, callback):
bobj = BindObj(regexp, callback)
self.binds.add(bobj)
def scan(self):
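        # Fetch any journal lines written since the last scan, fire every bound
        # callback whose pattern matches, then advance the journal reference.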
newRef, nCont = UO.ScanJournal(self.old_ref)
for line_i in xrange(nCont):
line, col = UO.GetJournal(line_i)
for bind in self.binds:
if bind.regexp.match(line):
bind.callback(line)
self.old_ref, nCont = UO.ScanJournal(newRef)
def main(self):
self.old_ref, nCont = UO.ScanJournal(self.old_ref)
while True:
self.scan()
gevent.sleep(.1)
| gpl-3.0 | 8,903,835,215,289,260,000 | 23.422222 | 58 | 0.5596 | false |
madarivi/PianoSimulation | Parameters/parametersAd5.py | 1 | 1862 | import numpy as np
# in this file the parameters used in the simulation are set
# string parameters
f1 = 934.60 # fundamental string frequency
l = 0.200 # string length
d = 0.898e-3
rhoV = 7850.
A = np.pi * (d/2.)**2
m_s = A * l * rhoV # total string mass
print m_s
b1 = 1.1 # air damping coefficient
b2 = 2.7e-4 # string internal friction coefficient
rho = m_s/l # linear string density
t_e = rho * 4. * l**2 * f1**2
print t_e
c = (t_e/rho)**.5 # wave velocity
E = 2.02e11
S = np.pi * (d/2.)**2
I = np.pi * d**4 / 64.
epsilon = (I/A) * (E*S) / (t_e*l**2)
print epsilon
kappa = epsilon*(c**2)*(l**2) # string stiffness coefficient
# sampling parameters
t = 3. # simulation time
f_s = 32*44.1e3 # sampling frequency
m = 140 # number of string segments
dx = l/m # spatial grid distance
dt = 1/f_s # time step
n_t = int(t/dt) # number of time steps
labda = c*dt/dx # cfl number
n = m+1 # number of gridpoints
# hammer parameters
m_h = 7.33e-3 # hammer mass
p = 2.793 # hammer felt stiffness exponent
b_h = 1.e-4 # fluid damping coefficient
k = 8.600e10 # hammer felt stiffness
a = 0.12 # relative striking position
v_h = 5. # initial hammer velocity
x0 = a*l # hammer impact point
n0 = int(a*n) # hammer impact index
# boundary parameters
zeta_l = 1.e20 # left end normalized impedance
zeta_b = 1000. # bridge normalized impedance
x = np.linspace(0, l, n) # spatial grid points
g = np.cos(50*np.pi*(x-x0))*(np.abs(x-x0) < .005) # hammer impact window
print "stable?", labda < 0.8, "=> labda:", labda
print f1
print c / (2*l)
| mit | 6,467,938,529,171,412,000 | 33.481481 | 75 | 0.542427 | false |
AllYarnsAreBeautiful/knittingpattern | knittingpattern/__init__.py | 1 | 4042 | """The knitting pattern module.
Load and convert knitting patterns using the convenience functions listed
below.
"""
# there should be no imports
#: the version of the knitting pattern library
__version__ = '0.1.19'
#: an empty knitting pattern set as specification
EMPTY_KNITTING_PATTERN_SET = {"version": "0.1", "type": "knitting pattern",
"patterns": []}
def load_from():
"""Create a loader to load knitting patterns with.
:return: the loader to load objects with
:rtype: knittingpattern.Loader.JSONLoader
Example:
.. code:: python
import knittingpattern, webbrowser
k = knittingpattern.load_from().example("Cafe.json")
webbrowser.open(k.to_svg(25).temporary_path(".svg"))
"""
from .ParsingSpecification import new_knitting_pattern_set_loader
return new_knitting_pattern_set_loader()
def load_from_object(object_):
"""Load a knitting pattern from an object.
:rtype: knittingpattern.KnittingPatternSet.KnittingPatternSet
"""
return load_from().object(object_)
def load_from_string(string):
"""Load a knitting pattern from a string.
:rtype: knittingpattern.KnittingPatternSet.KnittingPatternSet
"""
return load_from().string(string)
def load_from_file(file):
"""Load a knitting pattern from a file-like object.
:rtype: knittingpattern.KnittingPatternSet.KnittingPatternSet
"""
return load_from().file(file)
def load_from_path(path):
"""Load a knitting pattern from a file behind located at `path`.
:rtype: knittingpattern.KnittingPatternSet.KnittingPatternSet
"""
return load_from().path(path)
def load_from_url(url):
"""Load a knitting pattern from a url.
:rtype: knittingpattern.KnittingPatternSet.KnittingPatternSet
"""
return load_from().url(url)
def load_from_relative_file(module, path_relative_to):
"""Load a knitting pattern from a path relative to a module.
:param str module: can be a module's file, a module's name or
a module's path.
:param str path_relative_to: is the path relative to the modules location.
The result is loaded from this.
:rtype: knittingpattern.KnittingPatternSet.KnittingPatternSet
"""
return load_from().relative_file(module, path_relative_to)
def convert_from_image(colors=("white", "black")):
"""Convert and image to a knitting pattern.
:return: a loader
:rtype: knittingpattern.Loader.PathLoader
:param tuple colors: the colors to convert to
.. code:: python
convert_from_image().path("pattern.png").path("pattern.json")
convert_from_image().path("pattern.png").knitting_pattern()
    .. seealso:: :mod:`knittingpattern.convert.image_to_knittingpattern`
"""
from .convert.image_to_knittingpattern import \
convert_image_to_knitting_pattern
return convert_image_to_knitting_pattern(colors=colors)
def new_knitting_pattern(id_, name=None):
"""Create a new knitting pattern.
:return: a new empty knitting pattern.
:param id_: the id of the knitting pattern
:param name: the name of the knitting pattern or :obj:`None` if the
:paramref:`id_` should be used
:rtype: knittingpattern.KnittingPattern.KnittingPattern
.. seealso:: :meth:`KnittingPatternSet.add_new_pattern()
<knittingpattern.KnittingPatternSet.KnittingPatternSet.add_new_pattern>`
"""
knitting_pattern_set = new_knitting_pattern_set()
return knitting_pattern_set.add_new_pattern(id_, name)
def new_knitting_pattern_set():
"""Create a new, empty knitting pattern set.
:rtype: knittingpattern.KnittingPatternSet.KnittingPatternSet
:return: a new, empty knitting pattern set
"""
return load_from_object(EMPTY_KNITTING_PATTERN_SET)
__all__ = ["load_from_object", "load_from_string", "load_from_file",
"load_from_path", "load_from_url", "load_from_relative_file",
"convert_from_image", "load_from", "new_knitting_pattern",
"new_knitting_pattern_set"]
| lgpl-3.0 | 2,694,559,779,322,161,700 | 29.164179 | 78 | 0.690252 | false |
joshuahellier/PhDStuff | codes/kmc/batchJobs/rateCaculation/mainStuff/tempSteadyFlow.py | 1 | 13591 | import sys
import os
import math
import shutil as sh
resultDir = os.environ.get('RESULTS')
tempDir = os.environ.get('TMPDIR')
if resultDir is None or tempDir is None:
    print ("WARNING! $RESULTS or $TMPDIR not set! Attempt to write results will fail!\n")
# Expecting input botConc, topConc, rateConstFull, sysSize, analInterval, numStepsEquilib, numStepsSnapshot, numStepsAnal, numStepsReq, numPasses, timeInterval, fileCode, tempFolderName
from KMCLib import *
from KMCLib.Backend import Backend
import numpy
from RateCalc import *
botConc = float(sys.argv[1])
topConc = float(sys.argv[2])
rateConstFull = float(sys.argv[3])
sysSize = int(sys.argv[4])
analInterval = int(sys.argv[5])
numStepsEquilib = int(sys.argv[6])
numStepsSnapshot = int(sys.argv[7])
numStepsAnal = int(sys.argv[8])
numStepsReq = int(sys.argv[9])
numPasses = int(sys.argv[10])
timeInterval = float(sys.argv[11])
fileInfo = sys.argv[12]
tempFolderName = sys.argv[13]
resultsPlace = resultDir+"/"+fileInfo+"/"
tempPlace = tempDir+"/"+tempFolderName+"/"
#tempPlace = "/tmp/"+tempFolderName+"/"
if not os.path.exists(resultsPlace):
os.makedirs(resultsPlace)
if not os.path.exists(tempPlace):
os.makedirs(tempPlace)
with open(resultsPlace+'settings', 'w') as f:
f.write('BotConcentration = ' + str(botConc) +'\n')
f.write('TopConcentration = ' + str(topConc) +'\n')
f.write('FullRate = ' + str(rateConstFull) +'\n')
f.write('SysSize = ' + str(sysSize) +'\n')
f.write('TimeInterval = ' + str(timeInterval) +'\n')
f.write('AnalInterval = ' +str(analInterval) + '\n')
f.write('NumStepsEquilib = '+str(numStepsEquilib) +'\n')
f.write('NumStepsSnapshot = '+str(numStepsSnapshot)+'\n')
f.write('NumStepsAnal = '+str(numStepsAnal) +'\n')
"""I've put this in the file to make command line input easier"""
# Load the configuration and interactions.
# We're in 1d, so everything's a bit trivial
cell_vectors = [[1.0,0.0,0.0],
[0.0,1.0,0.0],
[0.0,0.0,1.0]]
# Only bothering with one set
basis_points = [[0.0, 0.0, 0.0]]
unit_cell = KMCUnitCell(cell_vectors=cell_vectors,
basis_points=basis_points)
# Define the lattice.
xRep = 1
yRep = 1
zRep = sysSize
numPoints = xRep*(zRep+4)*yRep
lattice = KMCLattice(unit_cell=unit_cell,
repetitions=(xRep,yRep,zRep+4),
periodic=(False, False, True))
# Generate the initial types. There's a double-layered section of "To" at the top and "Bo" at the bottom.
avConc = 0.5*(botConc+topConc)
types = ["V"]*numPoints
types[0] = "BoV"
types[1] = "BoV"
types[-2] = "ToV"
types[-1] = "ToV"
for i in range(int(zRep*avConc)):
# find a site which is not yet occupied by a "O" type.
pos = int(numpy.random.rand()*zRep+2.0)
while (types[pos] != "V"):
pos = int(numpy.random.rand()*zRep+2.0)
# Set the type.
types[pos] = "O"
"""
for i in range(2, numPoints-2):
if i < numPoints/2:
types[i] = "O"
else:
types[i] = "V"
"""
# Setup the configuration.
configuration = KMCConfiguration(lattice=lattice,
types=types,
possible_types=["O","V","ToV","BoV", "ToO", "BoO"])
# Rates.
rateConstEmpty = 1.0
topSpawn = math.sqrt(topConc/(1.0-topConc))
botSpawn = math.sqrt(botConc/(1.0-botConc))
topDespawn = 1.0/topSpawn
botDespawn = 1.0/botSpawn
#
##
###
"""I've put the processes in here to make it easier to adjust them via command line arguments."""
# Fill the list of processes.
processes = []
# Only on the first set of basis_points for O/V
basis_sites = [0]
# Bulk processes
# Up, empty.
#0
elements_before = ["O", "V"]
elements_after = ["V", "O"]
coordinates = [[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Will customise
# Down, empty.
#1
elements_before = ["O", "V"]
elements_after = ["V", "O"]
coordinates = [[0.0, 0.0, 0.0], [0.0, 0.0, -1.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Will customise
# Now for Oxygen annihilation at the top boundary
#2
elements_before = ["O", "ToV"]
elements_after = ["V", "ToV"]
coordinates = [[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Will customise the rate constant
# Oxygen creation at the top boundary
#3
elements_before = ["ToO", "V"]
elements_after = ["ToO", "O"]
coordinates = [[0.0, 0.0, 0.0], [0.0, 0.0, -1.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Now for Oxygen annihilation at the bottom boundary
#4
elements_before = ["O", "BoV"]
elements_after = ["V", "BoV"]
coordinates = [[0.0, 0.0, 0.0], [0.0, 0.0, -1.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Obviously the rate constant will be customised
# Oxygen creation at the bottom boundary
#5
elements_before = ["BoO", "V"]
elements_after = ["BoO", "O"]
coordinates = [[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Boundary Oxygen creation at the bottom boundary
#6
elements_before = ["BoV"]
elements_after = ["BoO"]
coordinates = [[0.0, 0.0, 0.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Boundary Oxygen annihilation at the bottom boundary
#7
elements_before = ["BoO"]
elements_after = ["BoV"]
coordinates = [[0.0, 0.0, 0.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Boundary Oxygen creation at the top boundary
#8
elements_before = ["ToV"]
elements_after = ["ToO"]
coordinates = [[0.0, 0.0, 0.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Boundary Oxygen annihilation at the bottom boundary
#9
elements_before = ["ToO"]
elements_after = ["ToV"]
coordinates = [[0.0, 0.0, 0.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Create the interactions object.
interactions = KMCInteractions(processes, implicit_wildcards=True)
# Define the custom rates calculator, using the lol model as a template
class lolModelRates(KMCRateCalculatorPlugin):
# Class for defining the custom rates function for the KMCLib paper.
def rate(self, geometry, elements_before, elements_after, rate_constant, process_number, global_coordinate):
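        # Hop and boundary-exchange processes (0-5) run at rateConstFull when the
        # local configuration passed in elements_before contains exactly two
        # occupied sites (O/ToO/BoO), and at rateConstEmpty otherwise; the boundary
        # creation/annihilation processes (6-9) use the concentration-derived rates.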
if process_number == 0:
if len([e for e in elements_before if e == "O"]) + len([e for e in elements_before if e == "ToO"]) + len([e for e in elements_before if e == "BoO"]) == 2:
return rateConstFull
else:
return rateConstEmpty
if process_number == 1:
if len([e for e in elements_before if e == "O"]) + len([e for e in elements_before if e == "ToO"]) + len([e for e in elements_before if e == "BoO"]) == 2:
return rateConstFull
else:
return rateConstEmpty
if process_number == 2:
if len([e for e in elements_before if e == "O"]) + len([e for e in elements_before if e == "ToO"]) + len([e for e in elements_before if e == "BoO"]) == 2:
return rateConstFull
else:
return rateConstEmpty
if process_number == 4:
if len([e for e in elements_before if e == "O"]) + len([e for e in elements_before if e == "ToO"]) + len([e for e in elements_before if e == "BoO"]) == 2:
return rateConstFull
else:
return rateConstEmpty
if process_number == 3:
if len([e for e in elements_before if e == "O"]) + len([e for e in elements_before if e == "ToO"]) + len([e for e in elements_before if e == "BoO"]) == 2:
return rateConstFull
else:
return rateConstEmpty
if process_number == 5:
if len([e for e in elements_before if e == "O"]) + len([e for e in elements_before if e == "ToO"]) + len([e for e in elements_before if e == "BoO"]) == 2:
return rateConstFull
else:
return rateConstEmpty
if process_number == 6:
return botSpawn
if process_number == 7:
return botDespawn
if process_number == 8:
return topSpawn
if process_number == 9:
return topDespawn
def cutoff(self):
# Overloaded base class API function
return 1.0
interactions.setRateCalculator(rate_calculator=lolModelRates)
"""End of processes"""
###
##
#
# Create the model.
model = KMCLatticeModel(configuration, interactions)
compositionTracker = Composition(time_interval=timeInterval)
# Define the parameters; not entirely sure if these are sensible or not...
control_parameters_equilib = KMCControlParameters(number_of_steps=numStepsEquilib, analysis_interval=numStepsEquilib/100,
dump_interval=numStepsEquilib/100)
control_parameters_req = KMCControlParameters(number_of_steps=numStepsReq, analysis_interval=numStepsReq/100,
dump_interval=numStepsReq/100)
control_parameters_anal = KMCControlParameters(number_of_steps=numStepsAnal, analysis_interval=1,
dump_interval=numStepsAnal/100)
# Run the simulation - save trajectory to resultsPlace, which should by now exist
model.run(control_parameters_equilib, trajectory_filename=(tempPlace+"equilibTraj.tr"))
with open(tempPlace+"inBot.dat", 'w') as f:
pass
with open(tempPlace+"outBot.dat", 'w') as f:
pass
with open(tempPlace+"inTop.dat", 'w') as f:
pass
with open(tempPlace+"outTop.dat", 'w') as f:
pass
if not os.path.exists(tempPlace+"composition"):
os.makedirs(tempPlace+"composition")
for passNum in range(0, numPasses):
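    # Each pass re-equilibrates the lattice, then records how often particles are
    # created/annihilated at each boundary (processes 2-5) together with the
    # composition profile over time.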
processStatsOxInBot = RateCalc(processes=[5])
processStatsOxOutBot = RateCalc(processes=[4])
processStatsOxInTop = RateCalc(processes=[3])
processStatsOxOutTop = RateCalc(processes=[2])
compositionTracker = Composition(time_interval=timeInterval)
model.run(control_parameters_req, trajectory_filename=(tempPlace+"mainTraj.tr"))
model.run(control_parameters_anal, trajectory_filename=(tempPlace+"mainTraj.tr"), analysis=[processStatsOxInBot, processStatsOxOutBot, processStatsOxInTop, processStatsOxOutTop, compositionTracker])
with open(tempPlace+"inBot.dat", 'a') as f:
processStatsOxInBot.printResults(f)
with open(tempPlace+"outBot.dat", 'a') as f:
processStatsOxOutBot.printResults(f)
with open(tempPlace+"inTop.dat", 'a') as f:
processStatsOxInTop.printResults(f)
with open(tempPlace+"outTop.dat", 'a') as f:
processStatsOxOutTop.printResults(f)
with open(tempPlace+"composition/composition"+str(passNum)+".dat", 'w') as f:
compositionTracker.printResults(f)
if not os.path.exists(resultsPlace+"composition"):
os.makedirs(resultsPlace+"composition")
sh.copy(tempPlace+"inBot.dat", resultsPlace+"inBot.dat")
sh.copy(tempPlace+"outBot.dat", resultsPlace+"outBot.dat")
sh.copy(tempPlace+"inTop.dat", resultsPlace+"inTop.dat")
sh.copy(tempPlace+"outTop.dat", resultsPlace+"outTop.dat")
sh.copy(tempPlace+"mainTraj.tr", resultsPlace+"mainTraj.tr")
for passNum in range(0, numPasses):
sh.copy(tempPlace+"composition/composition"+str(passNum)+".dat", resultsPlace+"composition/composition"+str(passNum)+".dat")
sh.rmtree(tempPlace)
print("Process would appear to have succesfully terminated! How very suspicious...")
| mit | -1,052,233,033,452,451,000 | 35.534946 | 202 | 0.610919 | false |
wideioltd/mimejson | mimejson/mimetype/video_opencv.py | 1 | 3911 | # ############################################################################
# |W|I|D|E|I|O|L|T|D|W|I|D|E|I|O|L|T|D|W|I|D|E|I|O|L|T|D|W|I|D|E|I|O|L|T|D|
# Copyright (c) WIDE IO LTD
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the WIDE IO LTD nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
# |D|O|N|O|T|R|E|M|O|V|E|!|D|O|N|O|T|R|E|M|O|V|E|!|D|O|N|O|T|R|E|M|O|V|E|!|
# ############################################################################
import functools
import os
import cv2.cv as cv
class Serializer:
mimetype = (
"video/FMP4",
"video/DIVX"
)
@staticmethod
def can_apply(obj):
frames = None
required = ("$name$", "$fps$", "$encodage$",
"$frame_size$", "$color$", "$frames_list$")
if not isinstance(obj, dict):
return False
for check in required:
if check not in obj:
return False
frames = obj["$frames_list$"]
if frames is None or not isinstance(frames, list):
return False
for frame in frames:
if not isinstance(frame, cv.iplimage):
return False
return True
@classmethod
def serialize(cls, obj, pathdir):
fn = os.path.join(pathdir, obj["$name$"])
writer = cv.CreateVideoWriter(fn, obj["$encodage$"], obj["$fps$"],
obj["$frame_size$"], obj["$color$"])
write = functools.partial(cv.WriteFrame, writer)
map(write, obj["$frames_list$"])
return {'$path$': fn, '$length$': os.stat(fn).st_size,
'$mimetype$': obj["$mimetype$"]}
@staticmethod
def deserialize(obj, filepath):
video = cv.CaptureFromFile(obj["$path$"])
obj["$frames_list$"] = []
obj["$color$"] = 1
obj["$name$"] = os.path.basename(obj["$path$"])
obj["$fps$"] = int(cv.GetCaptureProperty(video, cv.CV_CAP_PROP_FPS))
obj["$encodage$"] = int(cv.GetCaptureProperty(video,
cv.CV_CAP_PROP_FOURCC))
f_w = int(cv.GetCaptureProperty(video, cv.CV_CAP_PROP_FRAME_WIDTH))
f_h = int(cv.GetCaptureProperty(video, cv.CV_CAP_PROP_FRAME_HEIGHT))
obj["$frame_size$"] = (f_w, f_h)
del obj["$path$"]
nu_frame = cv.GetCaptureProperty(video, cv.CV_CAP_PROP_FRAME_COUNT)
for i in range(int(nu_frame)):
frame = cv.QueryFrame(video)
obj["$frames_list$"].append(cv.CloneImage(frame))
return obj
| bsd-3-clause | 678,036,403,070,895,600 | 43.443182 | 78 | 0.601892 | false |