repo_name stringlengths 6-61 | path stringlengths 4-230 | copies stringlengths 1-3 | size stringlengths 4-6 | text stringlengths 1.01k-850k | license stringclasses 15 values | hash int64 -9,220,477,234,079,998,000 to 9,219,060,020B | line_mean float64 11.6-96.6 | line_max int64 32-939 | alpha_frac float64 0.26-0.9 | autogenerated bool 1 class | ratio float64 1.62-6.1 | config_test bool 2 classes | has_no_keywords bool 2 classes | few_assignments bool 1 class
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Starbow/StarbowWebSite | starbowmodweb/user/models.py | 1 | 3175 | from django.db import models
from django.db.models.signals import post_save
from django.conf import settings
from django.utils import timezone
from django.core import validators
from django.core.mail import send_mail
from django.contrib.auth.models import AbstractBaseUser, UserManager, PermissionsMixin
from django.utils.translation import ugettext_lazy as _
import os
import binascii
def generate_auth_token():
return binascii.b2a_hex(os.urandom(15))
class User(AbstractBaseUser, PermissionsMixin):
""" We had to copy this from contrib.auth.models because we need email to be unique. """
username = models.CharField(_('username'), max_length=30,
unique=True,
help_text=_('<strong>Required!</strong><br>Your username can be composed of letters, digits, and the symbols "@", ".", "+", "-", and "_".'),
validators=[
validators.RegexValidator(r'^[\w.@+-]+$', _('Enter a valid username.'), 'invalid')
])
email = models.EmailField(_('email address'),
unique=True,
help_text=_('<strong>Required!</strong>'),
validators=[
validators.validate_email
])
authtoken = models.CharField(_('auth token'), max_length=48,
unique=True,
help_text=_('The authentication token used to log into the client app'),
default=generate_auth_token)
first_name = models.CharField(_('first name'), max_length=30, blank=True)
last_name = models.CharField(_('last name'), max_length=30, blank=True)
is_staff = models.BooleanField(_('staff status'),
default=False,
help_text=_('Designates whether the user can log into this admin site.'))
is_active = models.BooleanField(_('active'),
default=True,
help_text=_('Designates whether this user should be treated as active. Unselect this instead of deleting accounts.'))
date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
objects = UserManager()
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
class Meta:
verbose_name = _('user')
verbose_name_plural = _('users')
def get_full_name(self):
"""
Returns the first_name plus the last_name, with a space in between.
"""
full_name = '%s %s' % (self.first_name, self.last_name)
return full_name.strip()
def get_short_name(self):
"Returns the short name for the user."
return self.first_name
def email_user(self, subject, message, from_email=None, **kwargs):
"""
Sends an email to this User.
"""
send_mail(subject, message, from_email, [self.email], **kwargs)
class UserProfile(models.Model):
user = models.OneToOneField(settings.AUTH_USER_MODEL, unique=True, related_name='profile')
mybb_loginkey = models.CharField(max_length=100)
mybb_uid = models.IntegerField(null=True)
def create_user_profile(sender, instance, created, **kwargs):
if created:
UserProfile.objects.create(user=instance)
# This is supposed to use settings.AUTH_USER_MODEL but it doesn't seem to work
post_save.connect(create_user_profile, sender=User)
| mit | -6,631,454,537,142,033,000 | 32.421053 | 148 | 0.660787 | false | 4.024081 | false | false | false |
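A minimal usage sketch for the user model above, assuming a configured Django project with this app installed and migrated; the username, address and password are made up:

```python
# Illustrative only: requires DJANGO_SETTINGS_MODULE pointing at this project.
from starbowmodweb.user.models import User

# UserManager.create_user() hashes the password and saves the row; the
# post_save handler above then creates the matching UserProfile.
user = User.objects.create_user("alice", email="alice@example.com", password="s3cret")
print(user.profile)             # created by create_user_profile()
print(len(user.authtoken))      # 30 hex characters from generate_auth_token()
user.email_user("Welcome", "Hello from Starbow")   # wraps send_mail()
```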
wwitzel3/awx | awx/main/utils/filters.py | 1 | 11006 | import re
from functools import reduce
from pyparsing import (
infixNotation,
opAssoc,
Optional,
Literal,
CharsNotIn,
ParseException,
)
from logging import Filter, _levelNames
import six
from django.apps import apps
from django.db import models
from django.conf import settings
from awx.main.utils.common import get_search_fields
__all__ = ['SmartFilter', 'ExternalLoggerEnabled']
class FieldFromSettings(object):
"""
Field interface - defaults to getting value from setting
if otherwise set, provided value will take precedence
over value in settings
"""
def __init__(self, setting_name):
self.setting_name = setting_name
def __get__(self, instance, type=None):
if self.setting_name in getattr(instance, 'settings_override', {}):
return instance.settings_override[self.setting_name]
return getattr(settings, self.setting_name, None)
def __set__(self, instance, value):
if value is None:
if hasattr(instance, 'settings_override'):
instance.settings_override.pop('instance', None)
else:
if not hasattr(instance, 'settings_override'):
instance.settings_override = {}
instance.settings_override[self.setting_name] = value
class ExternalLoggerEnabled(Filter):
# Prevents recursive logging loops from swamping the server
LOGGER_BLACKLIST = (
# loggers that may be called in process of emitting a log
'awx.main.utils.handlers',
'awx.main.utils.formatters',
'awx.main.utils.filters',
'awx.main.utils.encryption',
'awx.main.utils.log',
# loggers that may be called getting logging settings
'awx.conf'
)
lvl = FieldFromSettings('LOG_AGGREGATOR_LEVEL')
enabled_loggers = FieldFromSettings('LOG_AGGREGATOR_LOGGERS')
enabled_flag = FieldFromSettings('LOG_AGGREGATOR_ENABLED')
def __init__(self, **kwargs):
super(ExternalLoggerEnabled, self).__init__()
for field_name, field_value in kwargs.items():
if not isinstance(ExternalLoggerEnabled.__dict__.get(field_name, None), FieldFromSettings):
raise Exception('%s is not a valid kwarg' % field_name)
if field_value is None:
continue
setattr(self, field_name, field_value)
def filter(self, record):
"""
Uses the database settings to determine if the current
external log configuration says that this particular record
should be sent to the external log aggregator
False - should not be logged
True - should be logged
"""
# Logger exceptions
for logger_name in self.LOGGER_BLACKLIST:
if record.name.startswith(logger_name):
return False
# General enablement
if not self.enabled_flag:
return False
# Level enablement
if record.levelno < _levelNames[self.lvl]:
# logging._levelNames -> logging._nameToLevel in python 3
return False
# Logger type enablement
loggers = self.enabled_loggers
if not loggers:
return False
if record.name.startswith('awx.analytics'):
base_path, headline_name = record.name.rsplit('.', 1)
return bool(headline_name in loggers)
else:
if '.' in record.name:
base_name, trailing_path = record.name.split('.', 1)
else:
base_name = record.name
return bool(base_name in loggers)
def string_to_type(t):
if t == u'null':
return None
if t == u'true':
return True
elif t == u'false':
return False
if re.search(r'^[-+]?[0-9]+$',t):
return int(t)
if re.search(r'^[-+]?[0-9]+\.[0-9]+$',t):
return float(t)
return t
def get_model(name):
return apps.get_model('main', name)
class SmartFilter(object):
SEARCHABLE_RELATIONSHIP = 'ansible_facts'
class BoolOperand(object):
def __init__(self, t):
kwargs = dict()
k, v = self._extract_key_value(t)
k, v = self._json_path_to_contains(k, v)
Host = get_model('host')
search_kwargs = self._expand_search(k, v)
if search_kwargs:
kwargs.update(search_kwargs)
q = reduce(lambda x, y: x | y, [models.Q(**{u'%s__icontains' % _k:_v}) for _k, _v in kwargs.items()])
self.result = Host.objects.filter(q)
else:
# detect loops and restrict access to sensitive fields
# this import is intentional here to avoid a circular import
from awx.api.filters import FieldLookupBackend
FieldLookupBackend().get_field_from_lookup(Host, k)
kwargs[k] = v
self.result = Host.objects.filter(**kwargs)
def strip_quotes_traditional_logic(self, v):
if type(v) is six.text_type and v.startswith('"') and v.endswith('"'):
return v[1:-1]
return v
def strip_quotes_json_logic(self, v):
if type(v) is six.text_type and v.startswith('"') and v.endswith('"') and v != u'"null"':
return v[1:-1]
return v
'''
TODO: We should be able to express this in the grammar and let
pyparsing do the heavy lifting.
TODO: separate django filter requests from our custom json filter
request so we don't process the key any further. This could be
accomplished using a whitelist or introspecting the
relationship referred to to see if it's a jsonb type.
'''
def _json_path_to_contains(self, k, v):
if not k.startswith(SmartFilter.SEARCHABLE_RELATIONSHIP):
v = self.strip_quotes_traditional_logic(v)
return (k, v)
# Strip off leading relationship key
if k.startswith(SmartFilter.SEARCHABLE_RELATIONSHIP + '__'):
strip_len = len(SmartFilter.SEARCHABLE_RELATIONSHIP) + 2
else:
strip_len = len(SmartFilter.SEARCHABLE_RELATIONSHIP)
k = k[strip_len:]
pieces = k.split(u'__')
assembled_k = u'%s__contains' % (SmartFilter.SEARCHABLE_RELATIONSHIP)
assembled_v = None
last_v = None
last_kv = None
for i, piece in enumerate(pieces):
new_kv = dict()
if piece.endswith(u'[]'):
new_v = []
new_kv[piece[0:-2]] = new_v
else:
new_v = dict()
new_kv[piece] = new_v
if last_kv is None:
assembled_v = new_kv
elif type(last_v) is list:
last_v.append(new_kv)
elif type(last_v) is dict:
last_kv[last_kv.keys()[0]] = new_kv
last_v = new_v
last_kv = new_kv
v = self.strip_quotes_json_logic(v)
if type(last_v) is list:
last_v.append(v)
elif type(last_v) is dict:
last_kv[last_kv.keys()[0]] = v
return (assembled_k, assembled_v)
def _extract_key_value(self, t):
t_len = len(t)
k = None
v = None
# key
# "something"=
v_offset = 2
if t_len >= 2 and t[0] == "\"" and t[2] == "\"":
k = t[1]
v_offset = 4
# something=
else:
k = t[0]
# value
# ="something"
if t_len > (v_offset + 2) and t[v_offset] == "\"" and t[v_offset + 2] == "\"":
v = u'"' + six.text_type(t[v_offset + 1]) + u'"'
#v = t[v_offset + 1]
# empty ""
elif t_len > (v_offset + 1):
v = u""
# no ""
else:
v = string_to_type(t[v_offset])
return (k, v)
def _expand_search(self, k, v):
if 'search' not in k:
return None
model, relation = None, None
if k == 'search':
model = get_model('host')
elif k.endswith('__search'):
relation = k.split('__')[0]
try:
model = get_model(relation)
except LookupError:
raise ParseException('No related field named %s' % relation)
search_kwargs = {}
if model is not None:
search_fields = get_search_fields(model)
for field in search_fields:
if relation is not None:
k = '{0}__{1}'.format(relation, field)
else:
k = field
search_kwargs[k] = v
return search_kwargs
class BoolBinOp(object):
def __init__(self, t):
self.result = None
i = 2
while i < len(t[0]):
if not self.result:
self.result = t[0][0].result
right = t[0][i].result
self.result = self.execute_logic(self.result, right)
i += 2
class BoolAnd(BoolBinOp):
def execute_logic(self, left, right):
return left & right
class BoolOr(BoolBinOp):
def execute_logic(self, left, right):
return left | right
@classmethod
def query_from_string(cls, filter_string):
'''
TODO:
* handle values with " via: a.b.c.d="hello\"world"
* handle keys with " via: a.\"b.c="yeah"
* handle key with __ in it
'''
filter_string_raw = filter_string
filter_string = six.text_type(filter_string)
unicode_spaces = list(set(six.text_type(c) for c in filter_string if c.isspace()))
unicode_spaces_other = unicode_spaces + [u'(', u')', u'=', u'"']
atom = CharsNotIn(unicode_spaces_other)
atom_inside_quotes = CharsNotIn(u'"')
atom_quoted = Literal('"') + Optional(atom_inside_quotes) + Literal('"')
EQUAL = Literal('=')
grammar = ((atom_quoted | atom) + EQUAL + Optional((atom_quoted | atom)))
grammar.setParseAction(cls.BoolOperand)
boolExpr = infixNotation(grammar, [
("and", 2, opAssoc.LEFT, cls.BoolAnd),
("or", 2, opAssoc.LEFT, cls.BoolOr),
])
try:
res = boolExpr.parseString('(' + filter_string + ')')
except ParseException:
raise RuntimeError(u"Invalid query %s" % filter_string_raw)
if len(res) > 0:
return res[0].result
raise RuntimeError("Parsing the filter_string %s went terribly wrong" % filter_string)
| apache-2.0 | 5,323,298,445,070,222,000 | 31.755952 | 117 | 0.528893 | false | 4.096018 | false | false | false |
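A sketch of how the filter above is typically driven, assuming a configured AWX/Django environment with Host records; the host names and fact values are made-up examples:

```python
# Illustrative only: Django must be set up and the awx app importable.
from awx.main.utils.filters import SmartFilter

# Plain field lookups take the else-branch of BoolOperand.__init__ and are
# validated through FieldLookupBackend.
hosts = SmartFilter.query_from_string(u'name=web01 or name=web02')

# A key under ansible_facts is rewritten by _json_path_to_contains into
# ansible_facts__contains={'ansible_distribution': 'CentOS'}.
centos = SmartFilter.query_from_string(u'ansible_facts__ansible_distribution="CentOS"')
print(hosts.count(), centos.count())
```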
lowRISC/edalize | tests/test_vunit/run.py | 1 | 1480 | # Auto generated by Edalize
def load_module_from_file(name, python_file):
import importlib.util
spec = importlib.util.spec_from_file_location(name, python_file)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
def load_runner_hooks(python_file = r''):
if len(python_file) > 0:
return load_module_from_file('vunit_runner_hooks', python_file)
else:
return __import__('edalize.vunit_hooks', fromlist=['vunit_hooks'])
runner = load_runner_hooks().VUnitRunner()
# Override this hook to allow custom creation configuration of the VUnit instance:
vu = runner.create()
lib = vu.add_library("vunit_test_runner_lib")
lib.add_source_files("sv_file.sv")
lib.add_source_files("vlog_file.v")
lib.add_source_files("vlog05_file.v")
lib.add_source_files("vhdl_file.vhd")
lib.add_source_files("vhdl2008_file", vhdl_standard="2008")
# Override this hook to customize the library, e.g. compile-flags etc.
# This allows full access to vunit.ui.Library interface:
runner.handle_library("vunit_test_runner_lib", lib)
lib = vu.add_library("libx")
lib.add_source_files("vhdl_lfile")
# Override this hook to customize the library, e.g. compile-flags etc.
# This allows full access to vunit.ui.Library interface:
runner.handle_library("libx", lib)
# override this hook to perform final customization and parametrization of VUnit, custom invocation, etc.
runner.main(vu)
| bsd-2-clause | 3,431,572,838,409,874,000 | 35 | 105 | 0.712838 | false | 3.148936 | false | false | false |
pombredanne/PeachPy | peachpy/literal.py | 6 | 15735 | import six
from peachpy.c.types import Type, \
int8_t, int16_t, int32_t, int64_t, \
uint8_t, uint16_t, uint32_t, uint64_t, \
float_, double_
from peachpy.parse import parse_assigned_variable_name
class Constant:
_supported_sizes = [1, 2, 4, 8, 16, 32, 64]
_supported_types = [uint8_t, uint16_t, uint32_t, uint64_t,
int8_t, int16_t, int32_t, int64_t,
float_, double_]
def __init__(self, size, repeats, data, element_ctype, name=None, prename=None):
assert isinstance(size, six.integer_types), "Constant size must be an integer"
assert size in Constant._supported_sizes, "Unsupported size %s: the only supported sizes are %s" \
% (str(size), ", ".join(map(str, sorted(Constant._supported_sizes))))
assert isinstance(repeats, six.integer_types), "The number of contant repeats must be an integer"
assert size % repeats == 0, "The number of constant repeats must divide constant size without remainder"
assert isinstance(element_ctype, Type), "Element type must be an instance of peachpy.c.Type"
assert element_ctype in Constant._supported_types, "The only supported types are %s" \
% ", ".join(Constant._supported_types)
self.size = size
self.repeats = repeats
self.element_ctype = element_ctype
self.data = data
self.name = name
self.prename = prename
self.address = None
self.label = None
self.prefix = None
def __str__(self):
format_spec = "%%0%dX" % (self.size / self.repeats * 2)
return "<" + ", ".join(format_spec % data for data in self.data) + ">"
def __hash__(self):
return hash(self.data) ^ hash(self.size) ^ hash(self.repeats)
def __eq__(self, other):
return isinstance(other, Constant) and self.data == other.data and self.element_ctype == other.element_ctype
def encode(self, encoder):
from peachpy.encoder import Encoder
assert isinstance(encoder, Encoder)
encode_function = {
1: encoder.uint8,
2: encoder.uint16,
4: encoder.uint32,
8: encoder.uint64
}[self.size / self.repeats]
return sum([encode_function(data) for data in self.data], bytearray())
@property
def alignment(self):
if self.size == 10:
return 16
else:
return self.size
@property
def as_hex(self):
from peachpy.encoder import Encoder, Endianness
bytestring = self.encode(Encoder(Endianness.Little))
return "".join("%02X" % byte for byte in bytestring)
def format(self, assembly_format):
if assembly_format == "go":
return "const0x" + self.as_hex + "(SB)"
else:
return str(self)
@staticmethod
def _uint64xN(name, prename, n, *args):
from peachpy.util import is_int, is_int64
assert is_int(n)
args = [arg for arg in args if arg is not None]
if len(args) == 0:
raise ValueError("At least one constant value must be specified")
if len(args) != 1 and len(args) != n:
raise ValueError("Either 1 or %d values must be specified" % n)
for i, number in enumerate(args):
if not is_int(number):
raise TypeError("The value %s is not an integer" % str(number))
if not is_int64(number):
raise ValueError("The number %d is not a 64-bit integer" % number)
if number < 0:
args[i] += 0x10000000000000000
if len(args) == 1:
args = [args[0]] * n
return Constant(8 * n, n, tuple(args), uint64_t)
@staticmethod
def _uint32xN(name, prename, n, *args):
from peachpy.util import is_int, is_int32
assert is_int(n)
args = [arg for arg in args if arg is not None]
if len(args) == 0:
raise ValueError("At least one constant value must be specified")
if len(args) != 1 and len(args) != n:
raise ValueError("Either 1 or %d values must be specified" % n)
for i, number in enumerate(args):
if not is_int(number):
raise TypeError("The value %s is not an integer" % str(number))
if not is_int32(number):
raise ValueError("The number %d is not a 32-bit integer" % number)
if number < 0:
args[i] += 0x100000000
if len(args) == 1:
args = [args[0]] * n
return Constant(4 * n, n, tuple(args), uint32_t)
@staticmethod
def _float64xN(name, prename, n, *args):
args = [arg for arg in args if arg is not None]
if len(args) == 0:
raise ValueError("At least one constant value must be specified")
if len(args) != 1 and len(args) != n:
raise ValueError("Either 1 or %d values must be specified" % n)
args = [Constant._parse_float64(arg) for arg in args]
if len(args) == 1:
args = [args[0]] * n
return Constant(8 * n, n, tuple(args), double_)
@staticmethod
def _float32xN(name, prename, n, *args):
args = [arg for arg in args if arg is not None]
if len(args) == 0:
raise ValueError("At least one constant value must be specified")
if len(args) != 1 and len(args) != n:
raise ValueError("Either 1 or %d values must be specified" % n)
args = [Constant._parse_float32(arg) for arg in args]
if len(args) == 1:
args = [args[0]] * n
return Constant(4 * n, n, tuple(args), float_)
@staticmethod
def uint64(number, name=None):
prename = None
if name is None:
import inspect
prename = parse_assigned_variable_name(inspect.stack(), "Constant.uint64")
return Constant._uint64xN(name, prename, 1, number)
@staticmethod
def uint64x2(number1, number2=None, name=None):
prename = None
if name is None:
import inspect
prename = parse_assigned_variable_name(inspect.stack(), "Constant.uint64x2")
return Constant._uint64xN(name, prename, 2, number1, number2)
@staticmethod
def uint64x4(number1, number2=None, number3=None, number4=None, name=None):
prename = None
if name is None:
import inspect
prename = parse_assigned_variable_name(inspect.stack(), "Constant.uint64x4")
return Constant._uint64xN(name, prename, 4, number1, number2, number3, number4)
@staticmethod
def uint64x8(number1, number2=None, number3=None, number4=None,
number5=None, number6=None, number7=None, number8=None,
name=None):
prename = None
if name is None:
import inspect
prename = parse_assigned_variable_name(inspect.stack(), "Constant.uint64x8")
return Constant._uint64xN(name, prename, 8,
number1, number2, number3, number4, number5, number6, number7, number8)
@staticmethod
def uint32(number, name=None):
prename = None
if name is None:
import inspect
prename = parse_assigned_variable_name(inspect.stack(), "Constant.uint32")
return Constant._uint32xN(name, prename, 1, number)
@staticmethod
def uint32x2(number1, number2=None, name=None):
prename = None
if name is None:
import inspect
prename = parse_assigned_variable_name(inspect.stack(), "Constant.uint32x2")
return Constant._uint32xN(name, prename, 2, number1, number2)
@staticmethod
def uint32x4(number1, number2=None, number3=None, number4=None, name=None):
prename = None
if name is None:
import inspect
prename = parse_assigned_variable_name(inspect.stack(), "Constant.uint32x4")
return Constant._uint32xN(name, prename, 4, number1, number2, number3, number4)
@staticmethod
def uint32x8(number1, number2=None, number3=None, number4=None,
number5=None, number6=None, number7=None, number8=None,
name=None):
prename = None
if name is None:
import inspect
prename = parse_assigned_variable_name(inspect.stack(), "Constant.uint32x8")
return Constant._uint32xN(name, prename, 8,
number1, number2, number3, number4, number5, number6, number7, number8)
@staticmethod
def uint32x16(number1, number2=None, number3=None, number4=None,
number5=None, number6=None, number7=None, number8=None,
number9=None, number10=None, number11=None, number12=None,
number13=None, number14=None, number15=None, number16=None,
name=None):
prename = None
if name is None:
import inspect
prename = parse_assigned_variable_name(inspect.stack(), "Constant.uint32x16")
return Constant._uint32xN(name, prename, 16,
number1, number2, number3, number4, number5, number6, number7, number8,
number9, number10, number11, number12, number13, number14, number15, number16)
@staticmethod
def float64(number, name=None):
prename = None
if name is None:
import inspect
prename = parse_assigned_variable_name(inspect.stack(), "Constant.float64")
return Constant._float64xN(name, prename, 1, number)
@staticmethod
def float64x2(number1, number2=None, name=None):
prename = None
if name is None:
import inspect
prename = parse_assigned_variable_name(inspect.stack(), "Constant.float64x2")
return Constant._float64xN(name, prename, 2, number1, number2)
@staticmethod
def float64x4(number1, number2=None, number3=None, number4=None, name=None):
prename = None
if name is None:
import inspect
prename = parse_assigned_variable_name(inspect.stack(), "Constant.float64x4")
return Constant._float64xN(name, prename, 4, number1, number2, number3, number4)
@staticmethod
def float32(number, name=None):
prename = None
if name is None:
import inspect
prename = parse_assigned_variable_name(inspect.stack(), "Constant.float32")
return Constant._float32xN(name, prename, 1, number)
@staticmethod
def float32x2(number1, number2=None, name=None):
prename = None
if name is None:
import inspect
prename = parse_assigned_variable_name(inspect.stack(), "Constant.float32x2")
return Constant._float32xN(name, prename, 2, number1, number2)
@staticmethod
def float32x4(number1, number2=None, number3=None, number4=None, name=None):
prename = None
if name is None:
import inspect
prename = parse_assigned_variable_name(inspect.stack(), "Constant.float32x4")
return Constant._float32xN(name, prename, 4, number1, number2, number3, number4)
@staticmethod
def float32x8(number1, number2=None, number3=None, number4=None,
number5=None, number6=None, number7=None, number8=None,
name=None):
prename = None
if name is None:
import inspect
prename = parse_assigned_variable_name(inspect.stack(), "Constant.float32x8")
return Constant._float32xN(name, prename, 8,
number1, number2, number3, number4, number5, number6, number7, number8)
@staticmethod
def _convert_to_float32(number):
import array
float_array = array.array('f', [number])
return float_array[0]
@staticmethod
def _parse_float32(number):
if isinstance(number, float):
number = float.hex(Constant._convert_to_float32(number))
elif isinstance(number, str):
# Validity check
try:
number = float.hex(Constant._convert_to_float32(float.fromhex(number)))
except ValueError:
raise ValueError("The string %s is not a hexadecimal floating-point number" % number)
else:
raise TypeError("Unsupported type of constant number %s" % str(number))
if number == "inf" or number == "+inf":
return 0x7F800000
elif number == "-inf":
return 0xFF800000
elif number == "nan":
return 0x7FC00000
is_negative = number.startswith("-")
point_position = number.index('.')
exp_position = number.rindex('p')
number_prefix = number[int(is_negative):point_position]
assert number_prefix == '0x0' or number_prefix == '0x1'
mantissa = number[point_position + 1:exp_position]
if number_prefix == '0x0' and int(mantissa) == 0:
# Zero
return int(is_negative) << 31
else:
exponent = number[exp_position + 1:]
mantissa_bits = len(mantissa) * 4
if mantissa_bits == 23:
mantissa = int(mantissa, 16)
elif mantissa_bits < 23:
mantissa = int(mantissa, 16) << (23 - mantissa_bits)
else:
mantissa = int(mantissa, 16) >> (mantissa_bits - 23)
exponent = int(exponent)
if exponent <= -127:
# Denormals
mantissa = (mantissa + (1 << 23)) >> -(exponent + 126)
exponent = -127
return mantissa + (int(exponent + 127) << 23) + (int(is_negative) << 31)
@staticmethod
def _parse_float64(number):
if isinstance(number, float):
number = float.hex(number)
elif isinstance(number, str):
# Validity check
try:
number = float.hex(float.fromhex(number))
except ValueError:
raise ValueError("The string %s is not a hexadecimal floating-point number" % number)
else:
raise TypeError("Unsupported type of constant number %s" % str(number))
if number == "inf" or number == "+inf":
return 0x7FF0000000000000
if number == "-inf":
return 0xFFF0000000000000
if number == "nan":
return 0x7FF8000000000000
is_negative = number.startswith("-")
point_position = number.index('.')
exp_position = number.rindex('p')
number_prefix = number[int(is_negative):point_position]
assert number_prefix == '0x0' or number_prefix == '0x1'
mantissa = number[point_position + 1:exp_position]
if number_prefix == '0x0':
# Zero
assert int(mantissa) == 0
return int(is_negative) << 63
else:
exponent = number[exp_position + 1:]
mantissa_bits = len(mantissa) * 4
if mantissa_bits == 52:
mantissa = int(mantissa, 16)
elif mantissa_bits < 52:
mantissa = int(mantissa, 16) << (52 - mantissa_bits)
else:
mantissa = int(mantissa, 16) >> (mantissa_bits - 52)
exponent = int(exponent)
if exponent <= -1023:
# Denormals
mantissa = (mantissa + (1 << 52)) >> -(exponent + 1022)
exponent = -1023
elif exponent > 1023:
# Infinity
mantissa = 0
exponent = 1023
return mantissa + (int(exponent + 1023) << 52) + (int(is_negative) << 63)
| bsd-2-clause | -2,774,984,178,690,938,000 | 38.734848 | 116 | 0.579981 | false | 3.851897 | false | false | false |
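A short sketch of the constructors defined above; the bit pattern noted in the comment follows from _parse_float32, and as_hex comes from the encode() helper:

```python
# Illustrative only: PeachPy must be importable.
from peachpy.literal import Constant

ones = Constant.uint32x4(1)          # one value broadcast to all 4 lanes
pair = Constant.uint64x2(1, 2)
half = Constant.float32(0.5)         # _parse_float32(0.5) -> 0x3F000000
print(ones.size, ones.repeats)       # 16 4
print(half.as_hex)                   # little-endian hex dump of the stored data
```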
thinkle/gourmet | gourmet/importers/webextras.py | 1 | 3008 | import socket, gourmet.threadManager, urllib.request, urllib.parse, urllib.error
from gettext import gettext as _
DEFAULT_SOCKET_TIMEOUT=45.0
URLOPEN_SOCKET_TIMEOUT=15.0
socket.setdefaulttimeout(DEFAULT_SOCKET_TIMEOUT)
class URLReader (gourmet.threadManager.SuspendableThread):
def __init__ (self, url):
self.url = url
gourmet.threadManager.SuspendableThread.__init__(
self,
name=_('Downloading %s'%url)
)
def do_run (self):
self.read()
def read (self):
message = _('Retrieving %s'%self.url)
socket.setdefaulttimeout(URLOPEN_SOCKET_TIMEOUT)
sock = urllib.request.urlopen(self.url)
socket.setdefaulttimeout(DEFAULT_SOCKET_TIMEOUT)
bs = 1024 * 8 # bite size...
# Get file size so we can update progress correctly...
self.content_type = None;
if hasattr(sock,'headers'):
fs = int(sock.headers.get('content-length',-1)) # file size..
self.content_type = sock.headers.get('content-type')
print('CONTENT TYPE = ',self.content_type)
else:
fs = -1
block = sock.read(bs)
self.data = block
sofar = bs
while block:
if fs>0:
self.emit('progress',float(sofar)/fs, message)
else:
self.emit('progress',-1, message)
sofar += bs
block = sock.read(bs)
self.data += block
sock.close()
self.emit('progress',1, message)
def read_socket_w_progress (sock, suspendableThread=None, message=None):
"""Read piecemeal reporting progress via our suspendableThread
instance (most likely an importer) as we go."""
if not suspendableThread:
data = sock.read()
else:
bs = 1024 * 8 # bite size...
if hasattr(sock,'headers'):
fs = int(sock.headers.get('content-length',-1)) # file size..
else: fs = -1
block = sock.read(bs)
data = block
sofar = bs
print("FETCHING:",data)
while block:
if fs>0:
suspendableThread.emit('progress',float(sofar)/fs, message)
else:
suspendableThread.emit('progress',-1, message)
sofar += bs
block = sock.read(bs)
data += block
print("FETCHED:",block)
sock.close()
print("FETCHED ",data)
print("DONE FETCHING")
suspendableThread.emit('progress',1, message)
return data
def get_url (url, suspendableThread):
"""Return data from URL, possibly displaying progress."""
if isinstance(url, str):
socket.setdefaulttimeout(URLOPEN_SOCKET_TIMEOUT)
sock = urllib.request.urlopen(url)
socket.setdefaulttimeout(DEFAULT_SOCKET_TIMEOUT)
return read_socket_w_progress(sock,suspendableThread,_('Retrieving %s'%url))
else:
sock = url
return read_socket_w_progress(sock,suspendableThread,_('Retrieving file'))
| gpl-2.0 | 7,861,758,868,572,846,000 | 33.574713 | 84 | 0.59109 | false | 3.926893 | false | false | false |
bendk/thesquirrel | mediabuilder/views.py | 1 | 1803 | # thesquirrel.org
#
# Copyright (C) 2015 Flying Squirrel Community Space
#
# thesquirrel.org is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
# thesquirrel.org is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public
# License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with thesquirrel.org. If not, see <http://www.gnu.org/licenses/>.
import os
from django.conf import settings
from django.http import Http404, HttpResponse
from mediabuilder import bundles
def check_source_path(bundle, path):
"""Check JS Source paths
This method will raise Http404 if path is not one of the given
bundle's source paths.
"""
if not os.path.exists(path):
raise Http404()
for source_path in bundle.source_paths():
if os.path.samefile(path, source_path):
return
raise Http404()
def js_source(request, bundle_name, path):
bundle = bundles.JSBundle.get_bundle(bundle_name)
check_source_path(bundle, path)
path = os.path.join(settings.BASE_DIR, path)
return HttpResponse(open(path).read(),
content_type='application/javascript')
def sass_source(request, bundle_name):
# We can't just send the SASS source to the browser, so we build it here
# and output it.
bundle = bundles.SassBundle.get_bundle(bundle_name)
return HttpResponse(bundle.build_content('nested'),
content_type='text/css')
| agpl-3.0 | 894,466,664,627,895,800 | 35.795918 | 78 | 0.711592 | false | 3.852564 | false | false | false |
birgander2/PyRAT | qgis/PyRATBridge/mainPlugin.py | 1 | 12576 | """
PyRATBridge
===========
This Plugin imports the functionality of PyRAT into QGIS
:author: Felix Weinmann <[email protected]>
"""
from qgis.core import QgsTask, QgsTaskManager, Qgis, QgsProject
from qgis.PyQt.QtWidgets import QAction, QFileDialog, QInputDialog, QDockWidget
from qgis.PyQt.QtCore import Qt
from qgis.utils import iface
import copy
import numpy as np
from os import path
try:
import pyrat
from pyrat.viewer.Dialogs import FlexInputDialog, LayerTreeWidget
pyratImport = True
except ImportError:
pyratImport = False
qgis_types = {
0: None, # UnknownDataType
1: "int8",
2: "uint16",
3: "int16",
4: "uint32",
5: "int32",
6: "float32",
7: "float64",
8: None, # CInt16
9: None, # CInt32
10: "complex32",
11: "complex64",
12: None, # ARGB32. Color, alpha, red, green, blue
13: None, # ARGB32_Premultiplied alpha, red, green, blue
}
class PyRATBridge:
"""This is the main plugin class for GUI and the connection to PyRAT"""
def __init__(self):
self.taskManager = QgsTaskManager()
if pyratImport:
pyrat.viewer.GenPyramid = GenPyramidInterface
pyrat.tools.ProgressBar = ProgressBarToQGIS
def unload(self):
"""Cleanup when disabling the plugin"""
if pyratImport:
PyRATBridge.clearPyRAT()
self.pyratMenu.clear()
iface.removeDockWidget(self.pyratLayerTree)
self.taskManager.cancelAll()
ViewerToQGISInterface.display.clear()
def addMenuEntry(self, pyratTool):
"""Adds a PyRAT Tool to the QGIS-Menu"""
menus = pyratTool.gui['menu'].split('|')
submenu = self.pyratMenu
for menu in menus:
if menu not in [action.text() for action in submenu.actions()]:
submenu = submenu.addMenu(menu)
else:
submenu = [action.menu() for action in submenu.actions() if
action.text() == menu][0]
action = QAction(pyratTool.gui['entry'], iface.mainWindow())
action.triggered.connect(lambda:
PyRATBridge.menuAction(self, pyratTool))
submenu.addAction(action)
def initGui(self):
"""Initalise the Plugin-UI"""
if not pyratImport:
iface.messageBar().pushMessage("PyRAT not found!",
level=Qgis.Critical)
return
if 'PyRAT' not in [action.text() for action in
iface.mainWindow().menuBar().actions()]:
self.pyratMenu = iface.mainWindow().menuBar().addMenu('PyRAT')
else:
self.pyratMenu = [action.menu() for action in
iface.mainWindow().menuBar().actions() if
action.text() == 'PyRAT'][0]
action = QAction("Layer2PyRAT", iface.mainWindow())
action.triggered.connect(PyRATBridge.layerToPyrat)
self.pyratMenu.addAction(action)
action = QAction("PyRAT2Layer", iface.mainWindow())
action.triggered.connect(PyRATBridge.pyratToLayer)
self.pyratMenu.addAction(action)
action = QAction("Cleanup PyRAT", iface.mainWindow())
action.triggered.connect(PyRATBridge.clearPyRAT)
self.pyratMenu.addAction(action)
action = QAction("Show PyRAT GUI", iface.mainWindow())
action.triggered.connect(self.showPyrat)
self.pyratMenu.addAction(action)
self.pyratMenu.addSeparator()
# Init PyRAT-Tools, adapted from pyrat.viewer for qgis
from inspect import getmembers, isclass
modules = [pyrat.load, pyrat.save, pyrat.transform, pyrat.filter,
pyrat.polar, pyrat.insar, pyrat.plugins, pyrat.viewer]
for current_module in modules:
modules = getmembers(current_module, isclass)
for mod in modules:
if issubclass(mod[1], pyrat.Worker):
plugin = mod[1]
if(hasattr(plugin, 'gui') and
plugin.gui['entry'] != "Python console"):
self.addMenuEntry(plugin)
self.pyratLayerTree = QDockWidget("PyRAT Layers", iface.mainWindow())
PyRATBridge.layerTreeWidget = LayerTreeWidget(
parent=self.pyratLayerTree,
viewer=ViewerToQGISInterface)
self.pyratLayerTree.setObjectName("PyRAT Layers")
self.pyratLayerTree.setWidget(PyRATBridge.layerTreeWidget)
iface.addDockWidget(Qt.LeftDockWidgetArea, self.pyratLayerTree)
def menuAction(self, pyratTool):
"""Start pyratTool after Menu-Click"""
para_backup = copy.deepcopy(pyratTool.para)
if 'name' not in dir(pyratTool):
pyratTool.name = pyratTool.__name__
if len(pyratTool.para) > 0:
if pyratTool is pyrat.load.FSAR:
dlg = pyrat.load.FsarImportWidget(parent=iface.mainWindow())
dlg.update()
elif pyratTool is pyrat.load.ESAR:
dlg = pyrat.load.EsarImportWidget(parent=iface.mainWindow())
dlg.update()
elif pyratTool is pyrat.load.UAVSAR:
dlg = pyrat.load.UAVSARImportWidget(parent=iface.mainWindow())
dlg.update()
else:
dlg = FlexInputDialog(pyratTool.para,
parent=iface.mainWindow(),
title=pyratTool.name,
doc=pyratTool.__doc__)
if len(pyratTool.para) == 0 or dlg.exec_() == 1:
task = PyRATTask(pyratTool, para_backup)
self.taskManager.addTask(task)
def layerToPyrat():
"""Imports a QGIS-Layer into PyRAT"""
layers = list()
for layer in QgsProject.instance().layerTreeRoot().layerOrder():
# 1: QgsMapLayer.LayerType.RasterLayer
if layer.type() == 1:
layers.append(layer.name())
layername, s = QInputDialog.getItem(
iface.mainWindow(),
"Select a layer",
"Select a layer to export to PyRAT:",
layers,
editable=False)
if not s:
return
layer = QgsProject.instance().mapLayersByName(layername)[0]
dataProv = layer.dataProvider()
extent = dataProv.extent()
rows = layer.height()
cols = layer.width()
block = dataProv.block(1, extent, cols, rows)
arr = np.frombuffer(block.data(),
dtype=qgis_types[block.dataType()]
).reshape((rows, cols))
pyratlayer = pyrat.adddata(arr)
# Add metadata to the PyRAT-Layer
description = layer.crs().description()
meta = {"info": layer.name(),
"geo_min_east": extent.xMinimum(),
# Subtract 1 due to QGIS inclusive minimum
"geo_min_north": extent.yMinimum() - 1,
"geo_ps_east": layer.rasterUnitsPerPixelX(),
"geo_ps_north": layer.rasterUnitsPerPixelY()}
if description.startswith('WGS 84 / UTM zone '):
zone = int(description[:-1].rsplit(" ", 1)[1])
if description[-1] == "S":
zone = -zone
meta["geo_projection"] = 1
meta["geo_zone"] = zone
pyrat.setmeta(meta)
ViewerToQGISInterface.display[pyratlayer] = {'scaling': 'min->max',
'bwlayer': pyratlayer,
'colour': False}
PyRATBridge.layerTreeWidget.redraw()
def pyratToLayer(layerid=None):
"""Exports a PyRAT-layer into QGIS"""
if type(layerid) is str:
pyrat.data.activateLayer(layerid)
annotation = pyrat.data.getAnnotation()
if 'info' in annotation:
filename = path.join(pyrat.data.tmpdir, annotation['info'] +
".rat")
else:
filename = path.join(pyrat.data.tmpdir, "PyRAT.rat")
filename, s = QFileDialog.getSaveFileName(
iface.mainWindow(),
"Save the PyRAT-Layer",
filename,
"RAT-File (*.rat)")
if not s or filename == "":
return
pyrat.save.rat((filename, "rat"), geo_envi_hdr=True)
iface.addRasterLayer(filename, path.basename(filename).split(".")[0])
def showPyrat(self):
pyrat.show()
def clearPyRAT():
pyrat.pyrat_reset()
ViewerToQGISInterface.display.clear()
PyRATBridge.layerTreeWidget.redraw()
class ViewerToQGISInterface:
"""This Class is a 'viewer' for pyrats LayerTree Widget shown in QGIS"""
config = {'colour': False, 'bwlayer': "/Undefined",
'rgblayer': (None, None, None)}
display = {}
def updateViewer(layer=None):
pass
class GenPyramidInterface:
"""
This class replaces pyrat.viewer.GenPyramid to disable
the scaling method options in the LayerTree Widget in QGIS
"""
def __init__(self, layer=None, force=None, mode=None):
pass
def run(self):
pass
class ProgressBarToQGIS:
"""
Disables the ProgressBar to prevent crashes with opened QGIS Python Console
"""
def __init__(self, message, max, width=None):
pass
def __del__(self):
pass
def update(self, val):
pass
class PyRATTask(QgsTask):
"""This class handles the async execution of a PyRAT-Tool"""
def __init__(self, pyratTool, para_backup):
QgsTask.__init__(self)
self.pyratTool = pyratTool
self.para_backup = para_backup
self.failed = False
self.guionly = False
self.layer = None
self.existinglayers = list()
def run(self):
"""The async executed code"""
self.plugin = self.pyratTool()
self.plugin.crash_handler = self.crashHandler
self.existinglayers = pyrat.data.getLayerIDs()
self.layer = self.plugin.run()
setattr(self.pyratTool, 'para', self.para_backup)
return self.layer is not False
def crashHandler(self, ex):
"""
Overrides the PyRAT crash handler to prevent
the termination of QGIS
"""
try:
raise ex
except AttributeError:
# Gui-only Plugins
self.guionly = True
except Exception:
self.failed = True
raise ex
def finished(self, result):
"""
This function is threadsafe for GUI-Actions and
called after run terminates.
"""
if self.guionly:
self.pyratTool.guirun(iface.mainWindow())
if result and not self.failed:
iface.messageBar().pushMessage(self.pyratTool.name + " finished.",
level=Qgis.Success)
for layer in [newlayer for newlayer in pyrat.data.getLayerIDs()
if newlayer not in self.existinglayers]:
# Show the generated Layer(s) in QGIS
anno = pyrat.data.getAnnotation(layer=layer)
if 'info' not in anno:
anno['info'] = "Pyrat-Layer " + layer
pyrat.data.setAnnotation({'info': anno['info'] + "-" +
self.pyratTool.name},
layer=layer)
ViewerToQGISInterface.display[layer] = {'scaling': 'min->max',
'bwlayer': layer,
'colour': False}
PyRATBridge.pyratToLayer(self.layer)
PyRATBridge.layerTreeWidget.redraw()
else:
iface.messageBar().pushMessage(self.pyratTool.name +
" failed. Look in the (system)" +
" console for more information.",
level=Qgis.Critical)
del self.plugin
| mpl-2.0 | 4,246,527,938,640,214,000 | 34.137931 | 79 | 0.540156 | false | 4.254398 | false | false | false |
midair/Zulip-Voting-Bot | test_voting_bot.py | 1 | 7490 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
import nose
from voting_bot import VotingBot
class VotingBotTest(unittest.TestCase):
def setUp(self):
self.vb = VotingBot
def tearDown(self):
del self.vb
def test_parse_public_message(self):
# voting topic
content = "votingbot new poll\none\ntwo\nthree"
action, title, arg = self.vb._parse_public_message(content)
e_action, e_title = "topic", "new poll"
e_arg = ["one", "two", "three"]
self.assertEqual((action, title, arg), (e_action, e_title, e_arg),
(content, action, title, arg))
content = "votingbot new poll: one, two, three"
action, title, arg = self.vb._parse_public_message(content)
e_action, e_title = "topic", "new poll"
e_arg = ["one", "two", "three"]
self.assertEqual((action, title, arg), (e_action, e_title, e_arg),
(content, action, title, arg))
content = "votingbot new poll one, two, three"
action, title, arg = self.vb._parse_public_message(content)
e_action, e_title = "topic", "new poll"
e_arg = ["one", "two", "three"]
self.assertEqual((action, title, arg), (e_action, e_title, e_arg),
(content, action, title, arg))
content = "votingbot new poll one"
action, title, arg = self.vb._parse_public_message(content)
e_action, e_title = "topic", "new poll"
e_arg = ["one"]
self.assertEqual((action, title, arg), (e_action, e_title, e_arg),
(content, action, title, arg))
# new option for existing topic
content = "votingbot new poll\nadd: four"
action, title, arg = self.vb._parse_public_message(content)
e_action, e_title, e_arg = "option", "new poll", "Four"
self.assertEqual((action, title, arg), (e_action, e_title, e_arg),
(content, action, title, arg))
content = "votingbot new poll: add: four"
action, title, arg = self.vb._parse_public_message(content)
e_action, e_title, e_arg = "option", "new poll", "Four"
self.assertEqual((action, title, arg), (e_action, e_title, e_arg),
(content, action, title, arg))
content = "votingbot new poll: add four"
action, title, arg = self.vb._parse_public_message(content)
e_action, e_title, e_arg = "option", "new poll", "Four"
self.assertEqual((action, title, arg), (e_action, e_title, e_arg),
(content, action, title, arg))
content = "votingbot new poll add four"
action, title, arg = self.vb._parse_public_message(content)
e_action, e_title, e_arg = "option", "new poll", "Four"
self.assertEqual((action, title, arg), (e_action, e_title, e_arg),
(content, action, title, arg))
content = "votingbot new poll ADD four"
action, title, arg = self.vb._parse_public_message(content)
e_action, e_title, e_arg = "option", "new poll", "Four"
self.assertEqual((action, title, arg), (e_action, e_title, e_arg),
(content, action, title, arg))
# vote
content = "votingbot new poll\n1"
action, title, arg = self.vb._parse_public_message(content)
e_action, e_title, e_arg = "vote", "new poll", 1
self.assertEqual((action, title, arg), (e_action, e_title, e_arg),
(content, action, title, arg))
content = "votingbot new poll: 1"
action, title, arg = self.vb._parse_public_message(content)
e_action, e_title, e_arg = "vote", "new poll", 1
self.assertEqual((action, title, arg), (e_action, e_title, e_arg),
(content, action, title, arg))
content = "votingbot new poll 1"
action, title, arg = self.vb._parse_public_message(content)
e_action, e_title, e_arg = "vote", "new poll", 1
self.assertEqual((action, title, arg), (e_action, e_title, e_arg),
(content, action, title, arg))
# results
content = "votingbot new poll\nresults"
action, title, arg = self.vb._parse_public_message(content)
e_action, e_title, e_arg = "results", "new poll", None
self.assertEqual((action, title, arg), (e_action, e_title, e_arg),
(content, action, title, arg))
content = "votingbot new poll: results"
action, title, arg = self.vb._parse_public_message(content)
e_action, e_title, e_arg = "results", "new poll", None
self.assertEqual((action, title, arg), (e_action, e_title, e_arg),
(content, action, title, arg))
content = "votingbot new poll results"
action, title, arg = self.vb._parse_public_message(content)
e_action, e_title, e_arg = "results", "new poll", None
self.assertEqual((action, title, arg), (e_action, e_title, e_arg),
(content, action, title, arg))
content = "votingbot new poll RESULTS"
action, title, arg = self.vb._parse_public_message(content)
e_action, e_title, e_arg = "results", "new poll", None
self.assertEqual((action, title, arg), (e_action, e_title, e_arg),
(content, action, title, arg))
# help
content = "votingbot\nhelp"
action, title, arg = self.vb._parse_public_message(content)
e_action, e_title, e_arg = "help", None, None
self.assertEqual((action, title, arg), (e_action, e_title, e_arg),
(content, action, title, arg))
content = "votingbot: help"
action, title, arg = self.vb._parse_public_message(content)
e_action, e_title, e_arg = "help", None, None
self.assertEqual((action, title, arg), (e_action, e_title, e_arg),
(content, action, title, arg))
content = "votingbot help"
action, title, arg = self.vb._parse_public_message(content)
e_action, e_title, e_arg = "help", None, None
self.assertEqual((action, title, arg), (e_action, e_title, e_arg),
(content, action, title, arg))
content = "votingbot HELP"
action, title, arg = self.vb._parse_public_message(content)
e_action, e_title, e_arg = "help", None, None
self.assertEqual((action, title, arg), (e_action, e_title, e_arg),
(content, action, title, arg))
# @unittest.skip("need a debug_run.py module to run this test")
# class VotingBotIntegrationTest(unittest.TestCase):
# """Integration test for VotingBot.
# It runs a test instance of the bot configured by a debug_run module not
# included in GitHub because it must contain credentials. A template of
# a debug_run for VotingBot is provided instead.
# """
# @classmethod
# def setUpClass(cls):
# from debug_run import get_voting_bot
# cls.vb = get_voting_bot()
# cls.vb.main()
# @classmethod
# def tearDownClass(cls):
# pass
# @unittest.skip("need a debug_run.py module to run this test")
# def test_complete_voting_process(self):
# stream = "test-bot"
# subject = "votingbot tests"
if __name__ == '__main__':
nose.run(defaultTest=__name__)
| mit | -255,182,878,202,769,100 | 41.316384 | 77 | 0.569693 | false | 3.531353 | true | false | false |
JensTimmerman/radical.pilot | docs/architecture/api_draft/unit_manager.py | 1 | 3311 |
from attributes import *
from constants import *
# ------------------------------------------------------------------------------
#
class UnitManager (Attributes) :
"""
UnitManager class -- manages a pool
"""
# --------------------------------------------------------------------------
#
def __init__ (self, url=None, scheduler='default', session=None) :
Attributes.__init__ (self)
# --------------------------------------------------------------------------
#
def add_pilot (self, pid) :
"""
add (Compute or Data)-Pilot(s) to the pool
"""
raise Exception ("%s.add_pilot() is not implemented" % self.__class__.__name__)
# --------------------------------------------------------------------------
#
def list_pilots (self, ptype=ANY) :
"""
List IDs of data and/or compute pilots
"""
raise Exception ("%s.list_pilots() is not implemented" % self.__class__.__name__)
# --------------------------------------------------------------------------
#
def remove_pilot (self, pid, drain=False) :
"""
Remove pilot(s) (does not cancel the pilot(s), but removes all units
from the pilot(s).
`drain` determines what happens to the units which are managed by the
removed pilot(s). If `True`, the pilot removal is delayed until all
units reach a final state. If `False` (the default), then `RUNNING`
units will be canceled, and `PENDING` units will be re-assigned to the
unit managers for re-scheduling to other pilots.
"""
raise Exception ("%s.remove_pilot() is not implemented" % self.__class__.__name__)
# --------------------------------------------------------------------------
#
def submit_unit (self, description) :
"""
Instantiate and return (Compute or Data)-Unit object(s)
"""
raise Exception ("%s.submit_unit() is not implemented" % self.__class__.__name__)
# --------------------------------------------------------------------------
#
def list_units (self, utype=ANY) :
"""
List IDs of data and/or compute units
"""
raise Exception ("%s.list_units() is not implemented" % self.__class__.__name__)
# --------------------------------------------------------------------------
#
def get_unit (self, uids) :
"""
Reconnect to and return (Compute or Data)-Unit object(s)
"""
raise Exception ("%s.get_unit() is not implemented" % self.__class__.__name__)
# --------------------------------------------------------------------------
#
def wait_unit (self, uids, state=[DONE, FAILED, CANCELED], timeout=-1.0) :
"""
Wait for given unit(s) to enter given state
"""
raise Exception ("%s.wait_unit() is not implemented" % self.__class__.__name__)
# --------------------------------------------------------------------------
#
def cancel_units (self, uids) :
"""
Cancel given unit(s)
"""
raise Exception ("%s.cancel_unit() is not implemented" % self.__class__.__name__)
# ------------------------------------------------------------------------------
#
| mit | 5,006,095,443,593,076,000 | 28.5625 | 90 | 0.40592 | false | 5.117465 | false | false | false |
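The class above is only an interface draft (every method raises); a sketch of the intended call sequence, with placeholder names for the pilot id and unit description:

```python
# Hypothetical usage of the draft API; nothing here runs against a real backend.
from constants import DONE, FAILED, CANCELED      # draft constants module
from unit_manager import UnitManager

um = UnitManager(url="redis://localhost", scheduler="default")
um.add_pilot(pilot_id)                            # pilot_id: placeholder
uid = um.submit_unit(compute_unit_description)    # placeholder description
um.wait_unit(uid, state=[DONE, FAILED, CANCELED], timeout=60.0)
um.cancel_units(uid)
um.remove_pilot(pilot_id, drain=True)             # let running units finish first
```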
droptables/Gambit | nba-harvester.py | 1 | 2484 | import json,os, urllib2, uncurl, requests, pymongo, time
from clint.textui import colored
def get_season_results(team):
seasons = ['2012','2013','2014', '2015']
for year in seasons:
seasonsquery="https://erikberg.com/nba/results/"+team+".json?season="+year
print colored.yellow("[*] Getting "+seasonsquery)
r = requests.get(seasonsquery,
headers={
"Authorization": "",
'User-Agent': 'python test',
'From': ''
},
cookies={},
)
for item in r.json():
seasonresults.insert(item)
print colored.green("[+] "+team+year+" complete")
def get_team_stats(team):
teamquery = "https://erikberg.com/nba/team-stats.json?team_id="+team
print colored.yellow("[*] Getting "+teamquery)
r = requests.get(teamquery,
headers={
"Authorization": "",
'User-Agent': '',
'From': 'm'
},
cookies={},
)
teamstats.insert(r.json())
print colored.green("[+] " + teamquery+" complete")
def get_box_score(eventid):
print colored.yellow("[*] Fetching "+ eventid)
boxquery="https://erikberg.com/nba/boxscore/"+eventid+".json"
r = requests.get(boxquery,
headers={
"Authorization": "",
'User-Agent': '',
'From': ''
},
cookies={},
)
print r.headers
boxresult = r.json()
boxresult['eventkey']=eventid
boxscores.insert(boxresult)
print colored.green("[+] "+eventid+" complete.")
if __name__ == '__main__':
client = pymongo.MongoClient('localhost',27017)
db = client.nba
seasonresults = db.seasonresults
teamstats = db.teamstats
boxscores= db.boxscores
teamlist = ["atlanta-hawks", "boston-celtics", "brooklyn-nets", "charlotte-hornets", "chicago-bulls", "cleveland-cavaliers", "dallas-mavericks", "denver-nuggets", "detroit-pistons", "golden-state-warriors", "houston-rockets", "indiana-pacers", "los-angeles-clippers", "los-angeles-lakers", "memphis-grizzlies", "miami-heat", "milwaukee-bucks", "minnesota-timberwolves", "new-orleans-pelicans", "new-york-knicks", "oklahoma-city-thunder", "orlando-magic", "philadelphia-76ers", "phoenix-suns", "portland-trail-blazers", "sacramento-kings", "san-antonio-spurs", "toronto-raptors", "utah-jazz", "washington-wizards"]
#for team in teamlist:
#get_season_results(team)
#get_team_stats(team)
# time.sleep(20)
#for eventid in seasonresults.distinct("event_id"):
for eventid in open('schedule-ids.txt').readlines():
get_box_score(eventid.rstrip())
time.sleep(12)
print colored.green("[+] Fetching complete.")
| gpl-2.0 | -5,111,145,687,165,386,000 | 31.25974 | 607 | 0.660628 | false | 2.80678 | false | false | false |
MikeWinter/bio-data-repository | bdr/forms/widgets.py | 1 | 8068 | """
This module defines customised widgets for use with forms in this application.
"""
from django.forms.widgets import CheckboxInput, MultiWidget, NumberInput, Select, TextInput
__all__ = []
__author__ = "Michael Winter ([email protected])"
__license__ = """
Biological Dataset Repository: data archival and retrieval.
Copyright (C) 2015 Michael Winter
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
# noinspection PyAbstractClass
# The render method is not abstract.
class SelectableTextInput(MultiWidget):
"""
This widget combines a text box with a checkbox to create an optional
input.
If the associated checkbox is deselected when the form is submitted, the
textbox value is ignored.
This widget is intended to be used with the Bootstrap CSS framework. The
two controls are rendered as an input group with the checkbox integrated in
the `:before` position (typically the left-hand side of the text box).
"""
selection_widget = CheckboxInput
value_widget = TextInput
def __init__(self, attrs=None):
super(SelectableTextInput, self).__init__([self.selection_widget, self.value_widget], attrs)
def decompress(self, value):
"""
Return a list of decompressed values for the given compressed value.
The first element is the state of the checkbox. The second element is
the value of the text box; this will be `None` if the checkbox was
deselected.
:param value: A compressed value to be represented by this widget.
:type value: str | unicode
:return: The decompressed interpretation of the value.
:rtype: list of (bool, str | unicode)
"""
if value:
return [True, value]
return [False, None]
def format_output(self, rendered_widgets):
"""
Given a list of rendered widgets (as strings), returns a Unicode string
representing the HTML for the whole lot.
:param rendered_widgets: A list of widgets rendered in HTML.
:type rendered_widgets: list of unicode
:return: A HTML string combining each widget.
:rtype: unicode
"""
return u"""
<div class="input-group">
<span class="input-group-addon">{0}</span>
{1}
</div>
""".format(*rendered_widgets)
# noinspection PyAbstractClass
# Base class implements the render method
class ScaledNumberInput(MultiWidget):
"""
This widget combines a text box with a select menu to enable the user to
specify values at different scales.
The widget normalises the value according to the factor associated with
each scale.
This widget is intended to be used with the Bootstrap CSS framework.
"""
def __init__(self, choices, default, attrs=None):
self._choices = list(choices)
self._default = default
widgets = (NumberInput, Select(choices=choices))
super(ScaledNumberInput, self).__init__(widgets, attrs)
def decompress(self, value):
"""
Return a list of decompressed values for the given compressed value.
The first element is the numeric value. The second element is the scale
type.
:param value: A compressed value to be represented by this widget.
:type value: str | unicode
:return: The decompressed interpretation of the value.
:rtype: list of (bool, str | unicode)
"""
if value is None or value == 0:
return 0, self._default
for factor, _ in sorted(self._choices, key=lambda x: x[0], reverse=True):
if value % factor == 0:
return [value / factor, factor]
return [value, self._default]
def value_from_datadict(self, data, files, name):
"""
Return the normalised value of this widget derived from the submitted
data dictionaries.
:param data: A dictionary of strings submitted by the user via a form.
:type data: dict of (str | unicode)
:param files: A dictionary of files uploaded by the user.
:type files: dict of str
:param name: The key name of this widget.
:type name: str
:return: The value of this widget.
:rtype: str | unicode
"""
number, interval_type = super(ScaledNumberInput, self).value_from_datadict(data, files, name)
return int(float(number) * float(interval_type))
def format_output(self, rendered_widgets):
"""
Given a list of rendered widgets (as strings), returns a Unicode string
representing the HTML for the whole lot.
:param rendered_widgets: A list of widgets rendered in HTML.
:type rendered_widgets: list of unicode
:return: A HTML string combining each widget.
:rtype: unicode
"""
return u"""
<div class="row">
<div class="col-sm-6">
{0}
</div>
<div class="col-sm-6">
{1}
</div>
</div>
""".format(*rendered_widgets)
# noinspection PyAbstractClass
# Base class implements the render method
class ComboTextInput(MultiWidget):
"""
This widget combines a select menu with a text box to create a list of
suggested values and the ability to define a custom value.
This widget is intended to be used with the Bootstrap CSS framework.
"""
def __init__(self, choices, default="", attrs=None):
if attrs is None:
attrs = {}
attrs["data-type"] = "combobox"
self._choices = choices
self._default = default
super(ComboTextInput, self).__init__([Select(choices=self._choices), TextInput], attrs)
def decompress(self, value):
"""
Return a list of decompressed values for the given compressed value.
The first element is the selected, suggested value. The second element
is the customised value.
:param value: A compressed value to be represented by this widget.
:type value: str | unicode
:return: The decompressed interpretation of the value.
        :rtype: list of (str | unicode)
"""
if value is None:
return [self._default, ""]
if value == "":
return ["None", ""]
for val, txt in self._choices:
if value == val:
return [value, ""]
return ["", value]
def value_from_datadict(self, data, files, name):
"""
Return the value of this widget derived from the submitted data
dictionaries.
:param data: A dictionary of strings submitted by the user via a form.
:type data: dict of (str | unicode)
:param files: A dictionary of files uploaded by the user.
:type files: dict of str
:param name: The key name of this widget.
:type name: str
:return: The value of this widget.
:rtype: str | unicode
"""
suggested, custom = super(ComboTextInput, self).value_from_datadict(data, files, name)
value = suggested if suggested != "" else custom
return value if value != "None" else None
class Media(object):
"""
Declares resources that should be included when this form is displayed.
"""
js = ("bdr/js/combo.js",)
| gpl-2.0 | -3,217,710,930,025,375,000 | 34.857778 | 101 | 0.633862 | false | 4.502232 | false | false | false |
joefutrelle/pocean-core | pocean/dsg/timeseries/cr.py | 1 | 1569 | #!python
# coding=utf-8
from pocean.cf import CFDataset
from pocean import logger
class ContiguousRaggedTimeseries(CFDataset):
@classmethod
def is_mine(cls, dsg):
try:
rvars = dsg.filter_by_attrs(cf_role='timeseries_id')
assert len(rvars) == 1
assert dsg.featureType.lower() == 'timeseries'
assert len(dsg.t_axes()) >= 1
assert len(dsg.x_axes()) >= 1
assert len(dsg.y_axes()) >= 1
o_index_vars = dsg.filter_by_attrs(
sample_dimension=lambda x: x is not None
)
assert len(o_index_vars) == 1
assert o_index_vars[0].sample_dimension in dsg.dimensions # Sample dimension
# Allow for string variables
rvar = rvars[0]
# 0 = single
# 1 = array of strings/ints/bytes/etc
# 2 = array of character arrays
assert 0 <= len(rvar.dimensions) <= 2
except AssertionError:
return False
return True
def from_dataframe(self, df, variable_attributes=None, global_attributes=None):
variable_attributes = variable_attributes or {}
global_attributes = global_attributes or {}
raise NotImplementedError
def calculated_metadata(self, df=None, geometries=True, clean_cols=True, clean_rows=True):
# if df is None:
# df = self.to_dataframe(clean_cols=clean_cols, clean_rows=clean_rows)
raise NotImplementedError
def to_dataframe(self):
raise NotImplementedError
| mit | 7,903,866,478,454,717,000 | 31.6875 | 94 | 0.594646 | false | 4.043814 | false | false | false |
tktrungna/leetcode | Python/binary-tree-level-order-traversal-ii.py | 1 | 1989 | """
QUESTION:
Given a binary tree, return the bottom-up level order traversal of its nodes' values. (ie, from left to right, level by level from leaf to root).
For example:
Given binary tree {3,9,20,#,#,15,7},
3
/ \
9 20
/ \
15 7
return its bottom-up level order traversal as:
[
[15,7],
[9,20],
[3]
]
ANSWER:
dfs, bfs
"""
# Definition for a binary tree node
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def levelOrderBottom(self, root):
"""
:type root: TreeNode
:rtype: List[List[int]]
"""
res = []
if not root:
return res
queue = [root]
while len(queue):
ll = [] #last level
cur = []
for q in queue:
if q.left: ll.append(q.left)
if q.right: ll.append(q.right)
cur.append(q.val)
res = [cur] + res
queue = ll
return res
def levelOrder_2(self, root):
if not root:
return []
queue = [root]
res = []
last = 0
while last != len(queue):
n = len(queue)-last
cur = []
for i in xrange(n):
node = queue[last]
last += 1
cur.append(node.val)
if node.left: queue.append(node.left)
if node.right: queue.append(node.right)
res = [cur] + res
return res
def levelOrder_3(self, root):
def dfs(root,level):
if root == None:
return
if len(res) <= level:
res.insert(0,[root.val])
else:
res[len(res)-level-1].append(root.val)
dfs(root.left,level+1)
dfs(root.right,level+1)
res = []
dfs(root,0)
return res
if __name__ == '__main__':
print
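    # Illustrative check (not part of the original solution): the TreeNode class
    # above exists only as a comment, so a minimal copy is defined here to build
    # the sample tree {3,9,20,#,#,15,7} from the problem statement.
    class TreeNode(object):
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None
    root = TreeNode(3)
    root.left, root.right = TreeNode(9), TreeNode(20)
    root.right.left, root.right.right = TreeNode(15), TreeNode(7)
    print Solution().levelOrderBottom(root)  # expected [[15, 7], [9, 20], [3]]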
| mit | -411,411,907,483,932,700 | 22.963855 | 145 | 0.464555 | false | 3.65625 | false | false | false |
Herkemer/pynab | pynab/imdb.py | 1 | 5667 | import unicodedata
import difflib
import datetime
import regex
import requests
import pytz
from pynab.db import db_session, Release, Movie, MetaBlack, Category, DataLog, windowed_query
from pynab import log
import config
PROCESS_CHUNK_SIZE = 500
OMDB_SEARCH_URL = 'http://www.omdbapi.com/?s='
OMDB_DETAIL_URL = 'http://www.omdbapi.com/?i='
def process(limit=None, online=True):
"""Process movies without imdb data and append said data."""
expiry = datetime.datetime.now(pytz.utc) - datetime.timedelta(config.postprocess.get('fetch_blacklist_duration', 7))
with db_session() as db:
# clear expired metablacks
db.query(MetaBlack).filter(MetaBlack.movie != None).filter(MetaBlack.time <= expiry).delete(
synchronize_session='fetch')
query = db.query(Release).filter(Release.movie == None).join(Category).filter(Category.parent_id == 2000)
if online:
query = query.filter(Release.movie_metablack_id == None)
query = query.order_by(Release.posted.desc())
if limit:
releases = query.limit(limit)
else:
releases = windowed_query(query, Release.id, PROCESS_CHUNK_SIZE)
for release in releases:
name, year = parse_movie(release.search_name)
if name and year:
method = 'local'
imdb = db.query(Movie).filter(
Movie.name.ilike('%'.join(clean_name(name).split(' ')))
).filter(Movie.year == year).first()
if not imdb and online:
method = 'online'
movie = search(clean_name(name), year)
if movie and movie['Type'] == 'movie':
imdb = db.query(Movie).filter(Movie.id == movie['imdbID']).first()
if not imdb:
imdb = Movie()
imdb.id = movie['imdbID']
imdb.name = movie['Title']
imdb.year = movie['Year']
db.add(imdb)
if imdb:
log.debug('imdb: [{}] - [{}] - movie data added: {}'.format(
release.id,
release.search_name,
method
))
release.movie = imdb
release.movie_metablack_id = None
db.add(release)
elif not imdb and online:
log.debug('imdb: [{}] - movie data not found: online'.format(
release.search_name
))
mb = MetaBlack(status='ATTEMPTED', movie=release)
db.add(mb)
else:
log.debug('imdb: [{}] - [{}] - movie data not found: local'.format(
release.id,
release.search_name
))
else:
log.debug('imdb: [{}] - [{}] - movie data not found: no suitable regex for movie name'.format(
release.id,
release.search_name
))
db.add(MetaBlack(status='IMPOSSIBLE', movie=release))
db.add(DataLog(description='imdb parse_movie regex', data=release.search_name))
db.commit()
def search(name, year):
"""Search OMDB for a movie and return the IMDB ID."""
# if we managed to parse the year from the name
# include it, since it'll narrow results
if year:
year_query = '&y={}'.format(year.replace('(', '').replace(')', ''))
else:
year_query = ''
data = {}
try:
r = requests.get(OMDB_SEARCH_URL + name + year_query)
data = r.json()
except:
log.critical('There was a problem accessing the IMDB API page.')
return None
if 'Search' in data:
for movie in data['Search']:
# doublecheck, but the api should've searched properly
ratio = difflib.SequenceMatcher(None, clean_name(name), clean_name(movie['Title'])).ratio()
if ratio > 0.8 and year == movie['Year'] and movie['Type'] == 'movie':
return movie
def get_details(id):
r = requests.get(OMDB_DETAIL_URL + id)
data = r.json()
if 'Response' in data:
imdb = {
'_id': data['imdbID'],
'title': data['Title'],
'year': data['Year'],
'genre': data['Genre'].split(',')
}
return imdb
else:
return None
def parse_movie(search_name):
"""Parses a movie name into name / year."""
result = regex.search('^(?P<name>.*)[\.\-_\( ](?P<year>19\d{2}|20\d{2})', search_name, regex.I)
if result:
result = result.groupdict()
if 'year' not in result:
result = regex.search(
'^(?P<name>.*)[\.\-_ ](?:dvdrip|bdrip|brrip|bluray|hdtv|divx|xvid|proper|repack|real\.proper|sub\.?fix|sub\.?pack|ac3d|unrated|1080i|1080p|720p|810p)',
search_name, regex.I)
if result:
result = result.groupdict()
if 'name' in result:
name = regex.sub('\(.*?\)|\.|_', ' ', result['name'])
if 'year' in result:
year = result['year']
else:
year = ''
return name, year
return None, None
def clean_name(name):
"""Cleans a show name for searching (against omdb)."""
name = unicodedata.normalize('NFKD', name)
name = regex.sub('[._\-]', ' ', name)
name = regex.sub('[\':!"#*’,()?$&]', '', name)
return name
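# Illustrative example (not part of the original module): how a release name
# flows through parse_movie() and clean_name(). The release name is made up.
if __name__ == '__main__':
    sample_name, sample_year = parse_movie('Some.Movie.2014.1080p.BluRay.x264-GRP')
    print(sample_name, sample_year)  # roughly: 'Some Movie' '2014'
    print(clean_name("Some.Movie's_Title"))  # roughly: 'Some Movies Title'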
| gpl-2.0 | -2,663,193,650,727,870,500 | 33.754601 | 167 | 0.509974 | false | 4.084355 | false | false | false |
chandler14362/panda3d | tests/gobj/test_geom.py | 10 | 2483 | from panda3d import core
empty_format = core.GeomVertexFormat.get_empty()
def test_geom_decompose_in_place():
vertex_data = core.GeomVertexData("", empty_format, core.GeomEnums.UH_static)
prim = core.GeomTristrips(core.GeomEnums.UH_static)
prim.add_vertex(0)
prim.add_vertex(1)
prim.add_vertex(2)
prim.add_vertex(3)
prim.close_primitive()
geom = core.Geom(vertex_data)
geom.add_primitive(prim)
geom.decompose_in_place()
prim = geom.get_primitive(0)
assert tuple(prim.get_vertex_list()) == (0, 1, 2, 2, 1, 3)
def test_geom_decompose():
vertex_data = core.GeomVertexData("", empty_format, core.GeomEnums.UH_static)
prim = core.GeomTristrips(core.GeomEnums.UH_static)
prim.add_vertex(0)
prim.add_vertex(1)
prim.add_vertex(2)
prim.add_vertex(3)
prim.close_primitive()
geom = core.Geom(vertex_data)
geom.add_primitive(prim)
new_geom = geom.decompose()
new_prim = new_geom.get_primitive(0)
assert tuple(new_prim.get_vertex_list()) == (0, 1, 2, 2, 1, 3)
# Old primitive should still be unchanged
assert prim == geom.get_primitive(0)
def test_geom_calc_sphere_bounds():
# Ensure that it ignores NaN
data = core.GeomVertexData("", core.GeomVertexFormat.get_v3(), core.Geom.UH_static)
vertex = core.GeomVertexWriter(data, "vertex")
vertex.add_data3((float("NaN"), 0, 0))
vertex.add_data3((1, 1, 1))
vertex.add_data3((1, 1, 2))
prim = core.GeomPoints(core.Geom.UH_static)
prim.add_next_vertices(3)
geom = core.Geom(data)
geom.add_primitive(prim)
geom.set_bounds_type(core.BoundingVolume.BT_sphere)
bounds = geom.get_bounds()
assert isinstance(bounds, core.BoundingSphere)
assert bounds.get_center() == (1, 1, 1.5)
assert bounds.get_radius() == 0.5
def test_geom_calc_box_bounds():
# Ensure that it ignores NaN
data = core.GeomVertexData("", core.GeomVertexFormat.get_v3(), core.Geom.UH_static)
vertex = core.GeomVertexWriter(data, "vertex")
vertex.add_data3((float("NaN"), 0, 0))
vertex.add_data3((1, 1, 1))
vertex.add_data3((1, 1, 2))
prim = core.GeomPoints(core.Geom.UH_static)
prim.add_next_vertices(3)
geom = core.Geom(data)
geom.add_primitive(prim)
geom.set_bounds_type(core.BoundingVolume.BT_box)
bounds = geom.get_bounds()
assert isinstance(bounds, core.BoundingBox)
assert bounds.get_min() == (1, 1, 1)
assert bounds.get_max() == (1, 1, 2)
| bsd-3-clause | -4,603,771,188,745,517,600 | 28.559524 | 87 | 0.660491 | false | 2.907494 | false | false | false |
karllessard/tensorflow | tensorflow/python/eager/wrap_function.py | 6 | 26433 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unidiomatic-typecheck
"""Prototype decorator for defining legacy-graph-mode functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import weakref
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import struct_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.eager import lift_to_graph
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import nested_structure_coder
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
class VariableHolder(object):
"""Holds variables for a python function."""
def __init__(self, fn=None, share_variables=False):
self._fn = fn
self._share_variables = share_variables
self._variables_by_name = data_structures.Mapping()
@property
def variables(self):
return self._variables_by_name
def variable_creator_scope(self, next_creator, **kwargs):
"""Creates variables & adds them to collections to match legacy code."""
collections = kwargs.pop("collections", None)
v = None
# Get expected variable name.
with ops.name_scope(
kwargs.get("name", None), "Variable", skip_on_eager=False) as name:
variable_name = ops.name_from_scope_name(name)
kwargs["name"] = name
if self._share_variables:
v = self._variables_by_name.get(variable_name, None)
if v is None:
v = next_creator(**kwargs)
self._variables_by_name[variable_name] = v
if collections is None:
collections = [ops.GraphKeys.GLOBAL_VARIABLES]
if v.trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections:
collections = list(collections) + [ops.GraphKeys.TRAINABLE_VARIABLES]
ops.add_to_collections(collections, v)
return v
def __call__(self, *args, **kwargs):
return self.call_with_variable_creator_scope(self._fn)(*args, **kwargs)
def call_with_variable_creator_scope(self, fn):
def wrapped(*args, **kwargs):
with variable_scope.variable_creator_scope(self.variable_creator_scope):
return fn(*args, **kwargs)
return wrapped
def _get_element_from_tensor_info(tensor_info, graph):
"""Simplified copy of the deprecated `get_tensor_from_tensor_info`."""
encoding = tensor_info.WhichOneof("encoding")
if encoding == "name":
# We may get operations here in some cases. TensorInfo is a bit of a
# misnomer if so.
return graph.as_graph_element(tensor_info.name)
elif encoding == "coo_sparse":
return sparse_tensor.SparseTensor(
graph.get_tensor_by_name(tensor_info.coo_sparse.indices_tensor_name),
graph.get_tensor_by_name(tensor_info.coo_sparse.values_tensor_name),
graph.get_tensor_by_name(
tensor_info.coo_sparse.dense_shape_tensor_name))
elif encoding == "composite_tensor":
struct_coder = nested_structure_coder.StructureCoder()
spec_proto = struct_pb2.StructuredValue(
type_spec_value=tensor_info.composite_tensor.type_spec)
spec = struct_coder.decode_proto(spec_proto)
components = [graph.get_tensor_by_name(component.name) for component in
tensor_info.composite_tensor.components]
return spec._from_components(components) # pylint: disable=protected-access
else:
raise ValueError("Invalid TensorInfo.encoding: %s" % encoding)
def _lift_single_variable(old_variable, graph, variable_holder):
"""Lifts `old_variable` out of the `FuncGraph` `graph`."""
new_variable = resource_variable_ops.UninitializedVariable(
shape=old_variable.shape,
dtype=old_variable.dtype,
name=old_variable.op.name,
trainable=old_variable.trainable,
extra_handle_data=old_variable.handle)
new_variable._initializer_op = old_variable._initializer_op # pylint: disable=protected-access
graph.add_capture(new_variable.handle, old_variable.handle)
# Now that we've added the new variable to graph.captures,
# graph.capture will use that cached value and do some post-processing
# on the capture like recording it on the tape.
graph.capture(new_variable.handle)
# pylint: disable=protected-access
variable_name = new_variable.name.split(":")[0]
variable_holder._variables_by_name[variable_name] = new_variable
graph._weak_variables.append(weakref.ref(new_variable))
# pylint: enable=protected-access
graph.watch_variable(new_variable)
return new_variable
def _lift_unlifted_variables(graph, variable_holder):
"""Finds resource variables and lifts them into the outer context.
When we import a GraphDef inside a wrap_function, no Python graph building
code runs. This means we get VarHandleOps which create variable resources,
but no corresponding Python objects. Leaving them like this works but gives
the user no way to interact with or modify the variables outside the graph.
This method searches for variables and lifts them out as regular variable
objects when possible, indicating to the FuncGraph that they are captures.
Args:
graph: The FuncGraph to lift variables from.
variable_holder: A VariableHolder to record the lifted variables in.
"""
with graph.as_default():
global_collection_variables = ops.get_collection(
ops.GraphKeys.GLOBAL_VARIABLES)
local_collection_variables = ops.get_collection(
ops.GraphKeys.LOCAL_VARIABLES)
existing_captures = {id(c) for c in graph.internal_captures}
lifted_variables = {}
def _should_lift_variable(v):
return ((v._in_graph_mode # pylint: disable=protected-access
and v.graph.building_function)
and isinstance(v, resource_variable_ops.BaseResourceVariable)
and id(v.handle) not in existing_captures)
for old_variable in global_collection_variables:
if _should_lift_variable(old_variable):
new_variable = _lift_single_variable(
old_variable, graph, variable_holder)
lifted_variables[id(old_variable)] = new_variable
existing_captures.add(id(old_variable.handle))
for old_variable in local_collection_variables:
if _should_lift_variable(old_variable):
new_variable = _lift_single_variable(
old_variable, graph, variable_holder)
lifted_variables[id(old_variable)] = new_variable
existing_captures.add(id(old_variable.handle))
if new_variable._in_graph_mode: # pylint: disable=protected-access
outer_graph = new_variable.graph
# Variables are added to the global collection by default. In this
# case we only want the variable in the local collection, so we'll pop
# it out.
global_collection = outer_graph.get_collection_ref(
ops.GraphKeys.GLOBAL_VARIABLES)
global_collection.remove(new_variable)
outer_graph.add_to_collection(
ops.GraphKeys.LOCAL_VARIABLES, new_variable)
# Update the FuncGraph's collections, partly for the user and partly so this
# function is idempotent when it runs again in prune() calls.
for collection_name in [
ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.LOCAL_VARIABLES
]:
mutable_collection = ops.get_collection_ref(collection_name)
for index, current in enumerate(mutable_collection):
mutable_collection[index] = lifted_variables.get(id(current), current)
if not resource_variable_ops.is_resource_variable(
mutable_collection[index]):
logging.log_first_n(
logging.WARN,
"Unable to create a python object for variable {} because it is "
"a reference variable. It may not be visible to training APIs. "
"If this is a problem, consider rebuilding the SavedModel after "
"running tf.compat.v1.enable_resource_variables().".format(
mutable_collection[index]),
5)
# TODO(allenl): make this trackable
class WrappedFunction(function.ConcreteFunction):
"""Wraps a tf V1 piece of code in a function."""
def __init__(self, fn_graph, variable_holder, attrs=None, signature=None):
self._variable_holder = variable_holder
_lift_unlifted_variables(fn_graph, variable_holder)
# We call __init__ after lifting variables so that the function's signature
# properly reflects the new captured inputs.
for f in fn_graph.as_graph_def().library.function:
context.context().add_function_def(f)
self._signature = signature
super(WrappedFunction, self).__init__(fn_graph, attrs=attrs)
def _call_impl(self, args, kwargs, cancellation_manager=None):
if self._arg_keywords is None:
if kwargs:
raise NotImplementedError(
"Keyword arguments not supported when calling a "
"wrap_function-decorated function.")
if self._signature is not None:
args = list(args)
for i, arg in enumerate(args):
if isinstance(self._signature[i], tensor_spec.DenseSpec):
args[i] = ops.convert_to_tensor(arg, self._signature[i].dtype)
return self._call_flat(args, self.captured_inputs)
else:
return super(WrappedFunction, self)._call_impl(
args, kwargs, cancellation_manager)
def prune(self, feeds, fetches, name=None, input_signature=None):
"""Extract a subgraph of this function's underlying graph.
Wraps the subgraph in a new `WrappedFunction` object.
Args:
feeds: Input tensors to the subgraph to extract, as `Tensor` objects.
fetches: Possibly-nested Python data structure containing information
about outputs of the target subgraph. Each entry can either be a
`Tensor` object (for data outputs), an `Operation` object (for control
outputs), or a `TensorInfo` proto. Any additional shape/dtype
information provided in a `TensorInfo` and not present in the original
graph will be added to the returned subgraph.
name: (optional) Name to give to the underlying `FuncGraph` of the
returned object. If no name is provided, the graph's name will be
`"pruned"`.
input_signature: (optional) possibly-nested Python data structure
containing `TensorSpec` objects, with which to populate the returned
functions's `FuncGraph`'s `structured_input_signature` field.
Returns:
A new `WrappedFunction` object containing a copy of the portion of this
object's graph that goes from `feeds` to `fetches`.
"""
# TODO(b/129646028): Add support for CompositeTensors.
name = name or "pruned"
flat_feeds = nest.flatten(feeds, expand_composites=True)
flat_feeds = [self.graph.as_graph_element(t) for t in flat_feeds]
for f in flat_feeds:
if not isinstance(f, ops.Tensor):
raise ValueError("Feeds must be tensors.")
# Ignoring all feeds that are captures allows prune to be called
# using wrapped_func.inputs even when it uses variables
internal_captures = {id(c) for c in self.graph.internal_captures}
flat_feeds = [f for f in flat_feeds if id(f) not in internal_captures]
operation_fetches = []
tensor_fetches = []
tensor_infos = []
def _fetch_preprocessing_callback(fetch):
"""Extract out lists of ops, tensors, and tensor type info.
Turns TensorInfos into Tensors in the original `fetches` structure.
Also extracts ops from `fetches`.
Args:
fetch: The fetch to preprocess: Tensor, TensorInfo, or Operation, or
string identifying a Tensor or Operation.
Returns:
`fetch` converted to a Tensor.
"""
if isinstance(fetch, ops.Operation):
operation_fetches.append(fetch)
return fetch
elif isinstance(fetch, meta_graph_pb2.TensorInfo):
tensor_infos.append(fetch)
decoded = _get_element_from_tensor_info(fetch, self._func_graph)
if (tensor_util.is_tensor(decoded) or
isinstance(decoded, composite_tensor.CompositeTensor)):
tensor_fetches.append(decoded)
else:
operation_fetches.append(decoded)
return decoded
elif isinstance(fetch, (ops.Tensor, composite_tensor.CompositeTensor)):
tensor_fetches.append(fetch)
return fetch
else:
graph_element = self.graph.as_graph_element(fetch)
return _fetch_preprocessing_callback(graph_element)
fetches = nest.map_structure(_fetch_preprocessing_callback, fetches)
# Expand composite tensors into their component dense Tensors.
tensor_fetches = nest.flatten(tensor_fetches, expand_composites=True)
for f in (flat_feeds + tensor_fetches + operation_fetches):
if f.graph is not self._func_graph:
raise ValueError("Can only prune function whose feeds and fetches "
"are from this graph (%s). Input %s is from graph %s" %
(self._func_graph, f, f.graph))
with self._func_graph.as_default():
pruned_graph = func_graph.FuncGraph(name)
lift_map = lift_to_graph.lift_to_graph(
operation_fetches + tensor_fetches,
pruned_graph,
sources=flat_feeds + self.graph.internal_captures,
base_graph=self._func_graph)
# Note that we add the component tensors of any composite tensors to the
# returned function's outputs list; the list must contain these component
# tensors, or the function's sparse outputs won't work properly.
pruned_graph.outputs.extend(lift_map[x] for x in tensor_fetches)
pruned_graph.control_outputs.extend(
[lift_map[operation] for operation in operation_fetches])
pruned_graph.inputs.extend(lift_map[x] for x in flat_feeds)
for external_capture, internal_capture in self.graph.captures:
pruned_graph.add_capture(external_capture, lift_map[internal_capture])
for ti in tensor_infos:
if ti.WhichOneof("encoding") == "name": # Dense tensors only
t = pruned_graph.as_graph_element(ti.name)
if tensor_util.is_tensor(t):
t.set_shape(tensor_shape.TensorShape(ti.tensor_shape))
# pylint: disable=protected-access
for f in self.graph._functions.values():
pruned_graph._add_function(f)
# pylint: enable=protected-access
pruned_graph.variables = self.graph.variables
def _structured_output_mapping(fetched):
"""callback for `nest.map_structure()`"""
lifted = lift_map[fetched]
if isinstance(lifted, ops.Operation):
return None
return lifted
# expand_composites=True here causes composite tensors to be expanded
# into their component dense Tensors, mapped to the new graph, and then
# reconstituted into their original composite form.
pruned_graph.structured_outputs = nest.map_structure(
_structured_output_mapping, fetches, expand_composites=True)
pruned_graph.structured_input_signature = input_signature
pruned_fn = WrappedFunction(
pruned_graph, variable_holder=self._variable_holder)
pruned_fn._num_positional_args = len(flat_feeds) # pylint: disable=protected-access
# TODO(kathywu): Enable keyword arguments if an input signature is specified
pruned_fn._arg_keywords = [tensor.op.name for tensor in flat_feeds] # pylint: disable=protected-access
return pruned_fn
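# Illustrative sketch (not part of the original module): pruning a wrapped
# graph down to one feed and one fetch. The tensor names below are assumptions
# about the wrapped graph's contents.
#
# wrapped = wrap_function(build_graph_fn, signature=[])  # build_graph_fn adds its own placeholders
# pruned = wrapped.prune(
#     feeds=wrapped.graph.get_tensor_by_name("x:0"),
#     fetches=wrapped.graph.get_tensor_by_name("y:0"))
# result = pruned(tf.constant(2.0))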
def _filter_returned_ops(fn):
"""Filtering out any ops returned by function.
Args:
fn: a function
Returns:
A tuple of (
Wrapped function that returns `None` in place of any ops,
dict that maps the index in the flat output structure to the returned op
)
"""
returned_ops = {}
def wrap_and_filter_returned_ops(*args, **kwargs):
outputs = fn(*args, **kwargs)
flat_outputs = nest.flatten(outputs)
for n in range(len(flat_outputs)):
output = flat_outputs[n]
if isinstance(output, ops.Operation):
returned_ops[n] = output
flat_outputs[n] = None
return nest.pack_sequence_as(outputs, flat_outputs)
return wrap_and_filter_returned_ops, returned_ops
class WrappedGraph(object):
"""Class for wrapping multiple TF 1.X functions in a single graph.
Maintains a dictionary mapping names to wrapped functions. See
`tf.compat.v1.wrap_function` to learn more about wrapping V1 functions.
Functions wrapped using this class have access to variables and collections
created in other wrapped functions, using the standard TF 1.X API (
`tf.compat.v1.get_variable` or
`tf.compat.v1.get_default_graph().get_collection(...)`)
Outside a function, variables and collections may be accessed using the
`variables` and `graph` properties.
Example:
```
def add_v1(x):
with tf.compat.v1.variable_scope('vars', reuse=tf.compat.v1.AUTO_REUSE):
v = tf.compat.v1.get_variable('v', shape=[], dtype=tf.int32)
return v + x
def increment_var_v1(x):
with tf.compat.v1.variable_scope('vars', reuse=tf.compat.v1.AUTO_REUSE):
v = tf.compat.v1.get_variable('v', shape=[], dtype=tf.int32)
return v.assign_add(x)
g = WrappedGraph()
add = g.wrap_function(add_v1, [tf.TensorSpec([], tf.int32)])
increment_var = g.wrap_function(increment_var_v1,
[tf.TensorSpec([], tf.int32)])
assert len(g.variables) == 1
assert g.variables[0].numpy() == 0
increment_var(tf.constant(5))
assert g.variables[0].numpy() == 5
```
"""
def __init__(self, variable_holder=None, **kwargs):
self._variable_holder = (
variable_holder or VariableHolder(share_variables=True))
name = kwargs.pop("name", "wrapped_function_graph")
# Always start with empty collections, unless otherwise specified. Setting
# `collections=None` will copy the collections from the outer graph.
collections = kwargs.pop("collections", {})
self.graph = func_graph.FuncGraph(name, collections=collections, **kwargs)
self._wrapped_function = WrappedFunction(self.graph, self._variable_holder)
self._functions = {}
@property
def functions(self):
return self._functions
@property
def variables(self):
return self._variable_holder.variables
def wrap_function(self, fn, signature, name=None):
"""Wraps a TF 1.X function and returns an eager-compatible function.
All functions wrapped in the same `WrappedGraph` will have access to the
same graph (`tf.compat.v1.get_default_graph` to get the graph object
within a function, or `WrappedGraph.graph` to get the graph outside a
function). Variables created within the function will be added to the
`variables` list.
Function inputs: All inputs to the function must be tensors (nested ok),
with their shapes and dtypes defined in the `signature` argument.
Function outputs:
* The 1.X function may return tensors, variables, and ops. The wrapped
eager-compatible function will always return tensors in the same nested
structure.
* Variables are replaced with a tensor containing the latest read values.
* Returned ops are executed, and replaced with None.
* The order of op execution and variable reads in the return is
nondeterministic. For example:
```
def update_var(x):
v = tf.Variable(0)
op = tf.compat.v1.assign(v, x).op
return v, op
g = WrappedGraph()
fn = g.wrap_function(update_var)
read_value, _ = fn(tf.constant(3))
print(read_value.numpy()) # could be 0 or 3
print(g.variables[0].numpy()) # always 3
```
To ensure that ops in the function are executed (e.g. ops added to the
`tf.GraphKeys.UPDATE_OPS` collection), include them in the function returns.
Args:
fn: a 1.X tensorflow function.
signature: a possibly nested sequence of `TensorSpecs` specifying the
shapes and dtypes of the arguments.
name: an optional string name for the function. The function will be saved
with key `name` in the `functions` dictionary.
Returns:
An eager-compatible function.
"""
return self._wrap_function(fn, signature=signature, name=name)
def _wrap_function(self,
fn,
args=None,
kwargs=None,
signature=None,
name=None):
"""Internal wrap function method with extended func_graph arguments."""
fn_with_filter_and_scope, returned_ops = _filter_returned_ops(
self._variable_holder.call_with_variable_creator_scope(fn))
func_graph.func_graph_from_py_func(
None, # Name is unused.
fn_with_filter_and_scope,
args=args,
kwargs=kwargs,
signature=signature,
add_control_dependencies=False,
func_graph=self.graph)
    # This code relies on questionable behavior from `func_graph_from_py_func`.
    # If an existing FuncGraph is passed into the `func_graph` arg, the inputs
    # and structured outputs are overwritten. Pretty sure this is a bug,
    # because the structured outputs don't match up with the outputs...
fn_inputs = self.graph.inputs[:-len(self.graph.captures)]
# Return filtered ops to the flattened outputs.
flat_fn_outputs = nest.flatten(self.graph.structured_outputs)
for index, op in returned_ops.items():
flat_fn_outputs[index] = op
fn_outputs = nest.pack_sequence_as(self.graph.structured_outputs,
flat_fn_outputs)
name = name or fn.__name__
wrapped_function = self._wrapped_function.prune(
fn_inputs, fn_outputs, name, self.graph.structured_input_signature)
self._functions[name] = wrapped_function
return wrapped_function
@tf_export(v1=["wrap_function"])
def wrap_function(fn, signature, name=None):
"""Wraps the TF 1.x function fn into a graph function.
The python function `fn` will be called once with symbolic arguments specified
in the `signature`, traced, and turned into a graph function. Any variables
created by `fn` will be owned by the object returned by `wrap_function`. The
resulting graph function can be called with tensors which match the
signature.
```python
def f(x, do_add):
v = tf.Variable(5.0)
if do_add:
op = v.assign_add(x)
else:
op = v.assign_sub(x)
with tf.control_dependencies([op]):
return v.read_value()
f_add = tf.compat.v1.wrap_function(f, [tf.TensorSpec((), tf.float32), True])
assert float(f_add(1.0)) == 6.0
assert float(f_add(1.0)) == 7.0
# Can call tf.compat.v1.wrap_function again to get a new trace, a new set
# of variables, and possibly different non-template arguments.
  f_sub = tf.compat.v1.wrap_function(f, [tf.TensorSpec((), tf.float32), False])
assert float(f_sub(1.0)) == 4.0
assert float(f_sub(1.0)) == 3.0
```
Both `tf.compat.v1.wrap_function` and `tf.function` create a callable
TensorFlow graph. But while `tf.function` runs all stateful operations
(e.g. `tf.print`) and sequences operations to provide the same semantics as
eager execution, `wrap_function` is closer to the behavior of `session.run` in
TensorFlow 1.x. It will not run any operations unless they are required to
compute the function's outputs, either through a data dependency or a control
dependency. Nor will it sequence operations.
Unlike `tf.function`, `wrap_function` will only trace the Python function
once. As with placeholders in TF 1.x, shapes and dtypes must be provided to
`wrap_function`'s `signature` argument.
Since it is only traced once, variables and state may be created inside the
function and owned by the function wrapper object.
Args:
fn: python function to be wrapped
signature: the placeholder and python arguments to be passed to the wrapped
function
name: Optional. The name of the function.
Returns:
the wrapped graph function.
"""
holder = VariableHolder(fn)
func_graph_name = "wrapped_function"
if name is not None:
func_graph_name = "wrapped_function_" + name
return WrappedFunction(
func_graph.func_graph_from_py_func(
func_graph_name,
holder,
args=None,
kwargs=None,
signature=signature,
add_control_dependencies=False,
collections={}),
variable_holder=holder,
signature=signature)
def function_from_graph_def(graph_def, inputs, outputs):
"""Creates a ConcreteFunction from a GraphDef.
Args:
graph_def: A GraphDef to make a function out of.
inputs: A Tensor name or nested structure of names in `graph_def` which
should be inputs to the function.
outputs: A Tensor name or nested structure of names in `graph_def` which
should be outputs of the function.
Returns:
A ConcreteFunction.
"""
def _imports_graph_def():
importer.import_graph_def(graph_def, name="")
wrapped_import = wrap_function(_imports_graph_def, [])
import_graph = wrapped_import.graph
return wrapped_import.prune(
nest.map_structure(import_graph.as_graph_element, inputs),
nest.map_structure(import_graph.as_graph_element, outputs))
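# Illustrative sketch (not part of the original module): rebuilding a callable
# from a serialized GraphDef. The tensor names below are assumptions about the
# graph being imported.
#
# graph_def = some_concrete_fn.graph.as_graph_def()
# rebuilt = function_from_graph_def(graph_def, inputs="x:0", outputs="Identity:0")
# rebuilt(tf.constant(1.0))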
| apache-2.0 | 6,419,640,718,305,964,000 | 39.417431 | 107 | 0.689555 | false | 3.950531 | false | false | false |
autopkg/orchard-recipes | SharedProcessors/AFSAuth.py | 4 | 2219 | #! /usr/bin/env python
#
# Copyright 2016 University of Oxford
#
# Author
# Name: Gary Ballantine
# Email: gary.ballantine at it.ox.ac.uk
# GitHub: AltMeta
# Distributed under terms of the MIT license.
"""
Authorises AFS via shelling out and using a kerberos keytab
and performing aklog
"""
import os, subprocess
from autopkglib import Processor, ProcessorError
__all__ = ["AFSAuth"]
class AFSAuth(Processor):
input_variables = {
'auth_method': {
'description': 'keytab is the only option atm',
'required': False,
'default': 'keytab',
},
'aklog_path': {
'description': 'Path to aklog binary',
'required': False,
'default': '/usr/local/bin/aklog',
},
}
output_variables = {
'test': {
'description': 'for testing',
'required': False,
},
}
def gettoken(self):
keytabname = os.environ.get("KEYTABNAME", None)
principal = os.environ.get("PRINCIPAL",None)
if keytabname is None:
raise ProcessorError('Missing keytab environment variable')
self.output('Using Keytab %s with principal %s'
% (keytabname, principal), verbose_level=3)
self.output('Calling kinit ...', verbose_level=5)
try:
subprocess.call(["kinit","-t",keytabname,principal])
except Exception as kiniterror:
raise ProcessorError('Problem running kinit %s' % kiniterror)
aklog = self.env['aklog_path']
if aklog is None:
raise ProcessorError('Missing aklog_path setting')
self.output('Calling aklog %s ...' % aklog, verbose_level=5 )
try:
subprocess.call([ aklog ])
except Exception as aklogerror:
raise ProcessorError('Problem running aklog %s' % aklogerror)
def main(self):
auth_method = self.env['auth_method']
if auth_method != 'keytab':
raise ProcessorError('Unsupported authentication method: %s' % (auth_method) )
self.gettoken()
if __name__ == '__main__':
PROCESSOR = AFSAuth()
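    # Illustrative note (not part of the original processor): AFSAuth reads
    # KEYTABNAME and PRINCIPAL from the environment before shelling out to
    # kinit/aklog; the values below are made up.
    #
    #   KEYTABNAME=/path/to/service.keytab PRINCIPAL=svc/[email protected] \
    #       autopkg run SomeRecipe.download
    #
    # Most autopkg processors also call PROCESSOR.execute_shell() at this point.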
| gpl-3.0 | 1,215,392,677,082,226,000 | 27.088608 | 98 | 0.569626 | false | 3.767402 | false | false | false |
ffsdmad/test | auto_gen_module/flask2/forms.py | 1 | 2058 |
# NOTE: this generated module uses the WTForms API (Form, fields, validators)
# but does not include the import itself; the line below is an assumption based
# on that usage (TextField/required() imply an older WTForms release).
from wtforms import Form, fields, validators


# form for the domens table
class DomensForm(Form):
id = fields.TextField(u'id' , default=u'None', validators=[validators.required()])
cdate = fields.TextField(u'cdate' , default=u'None', validators=[validators.required()])
udate = fields.TextField(u'udate' , default=u'None', validators=[validators.required()])
name = fields.TextField(u'name' , default=u'None', validators=[validators.required()])
title = fields.TextField(u'title' , default=u'None', validators=[validators.required()])
counter = fields.TextAreaField (u'counter' , default=u'None', validators=[validators.required()])
description = fields.TextAreaField (u'description' , default=u'None', validators=[validators.required()])
keywords = fields.TextField(u'keywords' , default=u'None', validators=[validators.required()])
phone = fields.TextField(u'phone' , default=u'None', validators=[validators.required()])
template_path = fields.TextField(u'template_path' , default=u'None', validators=[validators.required()])
address = fields.TextField(u'address' , default=u'None', validators=[validators.required()])
# form for the users table
class UsersForm(Form):
id = fields.TextField(u'id' , default=u'None', validators=[validators.required()])
first_name = fields.TextField(u'first_name' , default=u'None', validators=[validators.required()])
last_name = fields.TextField(u'last_name' , default=u'None', validators=[validators.required()])
login = fields.TextField(u'login' , default=u'None', validators=[validators.required()])
email = fields.TextField(u'email' , default=u'None', validators=[validators.required()])
password = fields.TextField(u'password' , default=u'None', validators=[validators.required()])
role = fields.TextField(u'role' , default=u'None', validators=[validators.required()])
active = fields.TextField(u'active' , default=u'None', validators=[validators.required()])
cdate = fields.TextField(u'cdate' , default=u'None', validators=[validators.required()])
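# Illustrative sketch (not part of the generated module): validating one of
# these auto-generated WTForms classes. The field values below are made up.
#
# form = UsersForm(login=u'admin', email=u'[email protected]')
# if not form.validate():
#     print(form.errors)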
| gpl-2.0 | -3,196,176,525,192,246,300 | 62.8125 | 110 | 0.704212 | false | 3.601411 | false | false | false |
batxes/4c2vhic | Six_zebra_models/Six_zebra_models_final_output_0.1_-0.1_13000/mtx1_models/Six_zebra_models48514.py | 4 | 13934 | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "particle_0 geometry" not in marker_sets:
s=new_marker_set('particle_0 geometry')
marker_sets["particle_0 geometry"]=s
s= marker_sets["particle_0 geometry"]
mark=s.place_marker((13012, 8631.32, 11090.1), (0.7, 0.7, 0.7), 507.685)
if "particle_1 geometry" not in marker_sets:
s=new_marker_set('particle_1 geometry')
marker_sets["particle_1 geometry"]=s
s= marker_sets["particle_1 geometry"]
mark=s.place_marker((13877, 9048.87, 10761.3), (0.7, 0.7, 0.7), 479.978)
if "particle_2 geometry" not in marker_sets:
s=new_marker_set('particle_2 geometry')
marker_sets["particle_2 geometry"]=s
s= marker_sets["particle_2 geometry"]
mark=s.place_marker((12113.2, 8961.57, 10062.5), (0.7, 0.7, 0.7), 681.834)
if "particle_3 geometry" not in marker_sets:
s=new_marker_set('particle_3 geometry')
marker_sets["particle_3 geometry"]=s
s= marker_sets["particle_3 geometry"]
mark=s.place_marker((10056, 8884.11, 8907.61), (0.7, 0.7, 0.7), 522.532)
if "particle_4 geometry" not in marker_sets:
s=new_marker_set('particle_4 geometry')
marker_sets["particle_4 geometry"]=s
s= marker_sets["particle_4 geometry"]
mark=s.place_marker((9414.66, 8846.33, 8530.96), (0, 1, 0), 751.925)
if "particle_5 geometry" not in marker_sets:
s=new_marker_set('particle_5 geometry')
marker_sets["particle_5 geometry"]=s
s= marker_sets["particle_5 geometry"]
mark=s.place_marker((9608.39, 10969.3, 8523.77), (0.7, 0.7, 0.7), 437.001)
if "particle_6 geometry" not in marker_sets:
s=new_marker_set('particle_6 geometry')
marker_sets["particle_6 geometry"]=s
s= marker_sets["particle_6 geometry"]
mark=s.place_marker((8060.51, 10707.2, 7411.06), (0.7, 0.7, 0.7), 710.767)
if "particle_7 geometry" not in marker_sets:
s=new_marker_set('particle_7 geometry')
marker_sets["particle_7 geometry"]=s
s= marker_sets["particle_7 geometry"]
mark=s.place_marker((7549.91, 12021.5, 6425.36), (0.7, 0.7, 0.7), 762.077)
if "particle_8 geometry" not in marker_sets:
s=new_marker_set('particle_8 geometry')
marker_sets["particle_8 geometry"]=s
s= marker_sets["particle_8 geometry"]
mark=s.place_marker((6882.09, 11947.1, 4985.25), (0.7, 0.7, 0.7), 726.799)
if "particle_9 geometry" not in marker_sets:
s=new_marker_set('particle_9 geometry')
marker_sets["particle_9 geometry"]=s
s= marker_sets["particle_9 geometry"]
mark=s.place_marker((5665.35, 11809.5, 3601.1), (0.7, 0.7, 0.7), 885.508)
if "particle_10 geometry" not in marker_sets:
s=new_marker_set('particle_10 geometry')
marker_sets["particle_10 geometry"]=s
s= marker_sets["particle_10 geometry"]
mark=s.place_marker((6065.3, 10648.6, 2353.08), (0.7, 0.7, 0.7), 778.489)
if "particle_11 geometry" not in marker_sets:
s=new_marker_set('particle_11 geometry')
marker_sets["particle_11 geometry"]=s
s= marker_sets["particle_11 geometry"]
mark=s.place_marker((6710.12, 11458.7, 492.015), (0.7, 0.7, 0.7), 790.333)
if "particle_12 geometry" not in marker_sets:
s=new_marker_set('particle_12 geometry')
marker_sets["particle_12 geometry"]=s
s= marker_sets["particle_12 geometry"]
mark=s.place_marker((7347.21, 12348.2, -1256.37), (0.7, 0.7, 0.7), 707.721)
if "particle_13 geometry" not in marker_sets:
s=new_marker_set('particle_13 geometry')
marker_sets["particle_13 geometry"]=s
s= marker_sets["particle_13 geometry"]
mark=s.place_marker((8371.96, 12356.3, -16.2648), (0.7, 0.7, 0.7), 651.166)
if "particle_14 geometry" not in marker_sets:
s=new_marker_set('particle_14 geometry')
marker_sets["particle_14 geometry"]=s
s= marker_sets["particle_14 geometry"]
mark=s.place_marker((6720.45, 12319.7, -225.849), (0.7, 0.7, 0.7), 708.61)
if "particle_15 geometry" not in marker_sets:
s=new_marker_set('particle_15 geometry')
marker_sets["particle_15 geometry"]=s
s= marker_sets["particle_15 geometry"]
mark=s.place_marker((5510.13, 11963.1, 738.339), (0.7, 0.7, 0.7), 490.595)
if "particle_16 geometry" not in marker_sets:
s=new_marker_set('particle_16 geometry')
marker_sets["particle_16 geometry"]=s
s= marker_sets["particle_16 geometry"]
mark=s.place_marker((5387.18, 11772.2, 2205.22), (0.7, 0.7, 0.7), 591.565)
if "particle_17 geometry" not in marker_sets:
s=new_marker_set('particle_17 geometry')
marker_sets["particle_17 geometry"]=s
s= marker_sets["particle_17 geometry"]
mark=s.place_marker((4958.32, 11468.4, 3712.17), (0.7, 0.7, 0.7), 581.287)
if "particle_18 geometry" not in marker_sets:
s=new_marker_set('particle_18 geometry')
marker_sets["particle_18 geometry"]=s
s= marker_sets["particle_18 geometry"]
mark=s.place_marker((5501.74, 12814.9, 4799.53), (0.7, 0.7, 0.7), 789.529)
if "particle_19 geometry" not in marker_sets:
s=new_marker_set('particle_19 geometry')
marker_sets["particle_19 geometry"]=s
s= marker_sets["particle_19 geometry"]
mark=s.place_marker((4487.21, 12764.1, 5971.04), (0.7, 0.7, 0.7), 623.587)
if "particle_20 geometry" not in marker_sets:
s=new_marker_set('particle_20 geometry')
marker_sets["particle_20 geometry"]=s
s= marker_sets["particle_20 geometry"]
mark=s.place_marker((2945.66, 12866.7, 6966.53), (0.7, 0.7, 0.7), 1083.56)
if "particle_21 geometry" not in marker_sets:
s=new_marker_set('particle_21 geometry')
marker_sets["particle_21 geometry"]=s
s= marker_sets["particle_21 geometry"]
mark=s.place_marker((1576.07, 13682.3, 7619.42), (0.7, 0.7, 0.7), 504.258)
if "particle_22 geometry" not in marker_sets:
s=new_marker_set('particle_22 geometry')
marker_sets["particle_22 geometry"]=s
s= marker_sets["particle_22 geometry"]
mark=s.place_marker((2094.41, 12355.5, 7436.96), (0.7, 0.7, 0.7), 805.519)
if "particle_23 geometry" not in marker_sets:
s=new_marker_set('particle_23 geometry')
marker_sets["particle_23 geometry"]=s
s= marker_sets["particle_23 geometry"]
mark=s.place_marker((2472.54, 10614, 6287.82), (0.7, 0.7, 0.7), 631.708)
if "particle_24 geometry" not in marker_sets:
s=new_marker_set('particle_24 geometry')
marker_sets["particle_24 geometry"]=s
s= marker_sets["particle_24 geometry"]
mark=s.place_marker((2275.44, 9200.72, 4677.96), (0.7, 0.7, 0.7), 805.942)
if "particle_25 geometry" not in marker_sets:
s=new_marker_set('particle_25 geometry')
marker_sets["particle_25 geometry"]=s
s= marker_sets["particle_25 geometry"]
mark=s.place_marker((2110.01, 8543.95, 3872.56), (1, 0.7, 0), 672.697)
if "particle_26 geometry" not in marker_sets:
s=new_marker_set('particle_26 geometry')
marker_sets["particle_26 geometry"]=s
s= marker_sets["particle_26 geometry"]
mark=s.place_marker((2688.2, 6479.45, 5549.21), (0.7, 0.7, 0.7), 797.863)
if "particle_27 geometry" not in marker_sets:
s=new_marker_set('particle_27 geometry')
marker_sets["particle_27 geometry"]=s
s= marker_sets["particle_27 geometry"]
mark=s.place_marker((2346.99, 4770.64, 6172.19), (1, 0.7, 0), 735.682)
if "particle_28 geometry" not in marker_sets:
s=new_marker_set('particle_28 geometry')
marker_sets["particle_28 geometry"]=s
s= marker_sets["particle_28 geometry"]
mark=s.place_marker((2063.94, 4918.25, 7382.58), (0.7, 0.7, 0.7), 602.14)
if "particle_29 geometry" not in marker_sets:
s=new_marker_set('particle_29 geometry')
marker_sets["particle_29 geometry"]=s
s= marker_sets["particle_29 geometry"]
mark=s.place_marker((898.218, 5181.26, 9371.25), (0.7, 0.7, 0.7), 954.796)
if "particle_30 geometry" not in marker_sets:
s=new_marker_set('particle_30 geometry')
marker_sets["particle_30 geometry"]=s
s= marker_sets["particle_30 geometry"]
mark=s.place_marker((1966.32, 5287.84, 9067.58), (0.7, 0.7, 0.7), 1021.88)
if "particle_31 geometry" not in marker_sets:
s=new_marker_set('particle_31 geometry')
marker_sets["particle_31 geometry"]=s
s= marker_sets["particle_31 geometry"]
mark=s.place_marker((1619.21, 3722.08, 9071.97), (0.7, 0.7, 0.7), 909.323)
if "particle_32 geometry" not in marker_sets:
s=new_marker_set('particle_32 geometry')
marker_sets["particle_32 geometry"]=s
s= marker_sets["particle_32 geometry"]
mark=s.place_marker((1698.9, 1985.06, 10547.8), (0.7, 0.7, 0.7), 621.049)
if "particle_33 geometry" not in marker_sets:
s=new_marker_set('particle_33 geometry')
marker_sets["particle_33 geometry"]=s
s= marker_sets["particle_33 geometry"]
mark=s.place_marker((3081.43, 1509.46, 10720.7), (0.7, 0.7, 0.7), 525.154)
if "particle_34 geometry" not in marker_sets:
s=new_marker_set('particle_34 geometry')
marker_sets["particle_34 geometry"]=s
s= marker_sets["particle_34 geometry"]
mark=s.place_marker((4513.52, 1779.17, 11208.1), (0.7, 0.7, 0.7), 890.246)
if "particle_35 geometry" not in marker_sets:
s=new_marker_set('particle_35 geometry')
marker_sets["particle_35 geometry"]=s
s= marker_sets["particle_35 geometry"]
mark=s.place_marker((5630.34, 1636.18, 12573), (0.7, 0.7, 0.7), 671.216)
if "particle_36 geometry" not in marker_sets:
s=new_marker_set('particle_36 geometry')
marker_sets["particle_36 geometry"]=s
s= marker_sets["particle_36 geometry"]
mark=s.place_marker((6005.41, 2413.96, 14027.9), (0.7, 0.7, 0.7), 662.672)
if "particle_37 geometry" not in marker_sets:
s=new_marker_set('particle_37 geometry')
marker_sets["particle_37 geometry"]=s
s= marker_sets["particle_37 geometry"]
mark=s.place_marker((4718.21, 3372.32, 13784.8), (0.7, 0.7, 0.7), 646.682)
if "particle_38 geometry" not in marker_sets:
s=new_marker_set('particle_38 geometry')
marker_sets["particle_38 geometry"]=s
s= marker_sets["particle_38 geometry"]
mark=s.place_marker((3604.62, 2350.52, 13450.1), (0.7, 0.7, 0.7), 769.945)
if "particle_39 geometry" not in marker_sets:
s=new_marker_set('particle_39 geometry')
marker_sets["particle_39 geometry"]=s
s= marker_sets["particle_39 geometry"]
mark=s.place_marker((3524.31, 2154.19, 11460.5), (0.7, 0.7, 0.7), 606.92)
if "particle_40 geometry" not in marker_sets:
s=new_marker_set('particle_40 geometry')
marker_sets["particle_40 geometry"]=s
s= marker_sets["particle_40 geometry"]
mark=s.place_marker((3878.54, 954.22, 11318.3), (0.7, 0.7, 0.7), 622.571)
if "particle_41 geometry" not in marker_sets:
s=new_marker_set('particle_41 geometry')
marker_sets["particle_41 geometry"]=s
s= marker_sets["particle_41 geometry"]
mark=s.place_marker((3896.11, 2181.21, 10715.5), (0.7, 0.7, 0.7), 466.865)
if "particle_42 geometry" not in marker_sets:
s=new_marker_set('particle_42 geometry')
marker_sets["particle_42 geometry"]=s
s= marker_sets["particle_42 geometry"]
mark=s.place_marker((4388.44, 2667.28, 11088.9), (0.7, 0.7, 0.7), 682.933)
if "particle_43 geometry" not in marker_sets:
s=new_marker_set('particle_43 geometry')
marker_sets["particle_43 geometry"]=s
s= marker_sets["particle_43 geometry"]
mark=s.place_marker((3989.01, 2093.55, 10873.5), (0.7, 0.7, 0.7), 809.326)
if "particle_44 geometry" not in marker_sets:
s=new_marker_set('particle_44 geometry')
marker_sets["particle_44 geometry"]=s
s= marker_sets["particle_44 geometry"]
mark=s.place_marker((2514.18, 2435.99, 9842.08), (0.7, 0.7, 0.7), 796.72)
if "particle_45 geometry" not in marker_sets:
s=new_marker_set('particle_45 geometry')
marker_sets["particle_45 geometry"]=s
s= marker_sets["particle_45 geometry"]
mark=s.place_marker((2953.14, 3358.87, 7142.93), (0.7, 0.7, 0.7), 870.026)
if "particle_46 geometry" not in marker_sets:
s=new_marker_set('particle_46 geometry')
marker_sets["particle_46 geometry"]=s
s= marker_sets["particle_46 geometry"]
mark=s.place_marker((3828.42, 2784.49, 5578.65), (0.7, 0.7, 0.7), 909.577)
if "particle_47 geometry" not in marker_sets:
s=new_marker_set('particle_47 geometry')
marker_sets["particle_47 geometry"]=s
s= marker_sets["particle_47 geometry"]
mark=s.place_marker((4816.17, 2393.81, 5122.71), (0, 1, 0), 500.536)
if "particle_48 geometry" not in marker_sets:
s=new_marker_set('particle_48 geometry')
marker_sets["particle_48 geometry"]=s
s= marker_sets["particle_48 geometry"]
mark=s.place_marker((5623.48, 576.744, 5103.53), (0.7, 0.7, 0.7), 725.276)
if "particle_49 geometry" not in marker_sets:
s=new_marker_set('particle_49 geometry')
marker_sets["particle_49 geometry"]=s
s= marker_sets["particle_49 geometry"]
mark=s.place_marker((5830.56, -2063.82, 4883.1), (0.7, 0.7, 0.7), 570.331)
if "particle_50 geometry" not in marker_sets:
s=new_marker_set('particle_50 geometry')
marker_sets["particle_50 geometry"]=s
s= marker_sets["particle_50 geometry"]
mark=s.place_marker((5146.08, -2244.87, 6415.63), (0.7, 0.7, 0.7), 492.203)
if "particle_51 geometry" not in marker_sets:
s=new_marker_set('particle_51 geometry')
marker_sets["particle_51 geometry"]=s
s= marker_sets["particle_51 geometry"]
mark=s.place_marker((2883.08, -442.984, 6060.7), (0, 1, 0), 547.7)
if "particle_52 geometry" not in marker_sets:
s=new_marker_set('particle_52 geometry')
marker_sets["particle_52 geometry"]=s
s= marker_sets["particle_52 geometry"]
mark=s.place_marker((3395.36, -120.583, 6565.48), (0.7, 0.7, 0.7), 581.921)
if "particle_53 geometry" not in marker_sets:
s=new_marker_set('particle_53 geometry')
marker_sets["particle_53 geometry"]=s
s= marker_sets["particle_53 geometry"]
mark=s.place_marker((3967.13, -590.962, 8337.25), (0.7, 0.7, 0.7), 555.314)
if "particle_54 geometry" not in marker_sets:
s=new_marker_set('particle_54 geometry')
marker_sets["particle_54 geometry"]=s
s= marker_sets["particle_54 geometry"]
mark=s.place_marker((4901.43, -401.329, 9552.23), (0.7, 0.7, 0.7), 404.219)
if "particle_55 geometry" not in marker_sets:
s=new_marker_set('particle_55 geometry')
marker_sets["particle_55 geometry"]=s
s= marker_sets["particle_55 geometry"]
mark=s.place_marker((5714.31, 1256.76, 9531.74), (0.7, 0.7, 0.7), 764.234)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| gpl-3.0 | 4,177,656,246,144,539,000 | 45.758389 | 75 | 0.700014 | false | 2.625094 | false | false | false |
kambysese/mne-python | mne/datasets/brainstorm/bst_resting.py | 12 | 1477 | # Authors: Mainak Jas <[email protected]>
#
# License: BSD (3-clause)
from functools import partial
from ...utils import verbose
from ..utils import (has_dataset, _data_path, _get_version, _version_doc,
_data_path_doc_accept)
has_brainstorm_data = partial(has_dataset, name='brainstorm.bst_resting')
_description = u"""
URL: http://neuroimage.usc.edu/brainstorm/DatasetResting
- One subject
- Two runs of 10 min of resting state recordings
- Eyes open
"""
@verbose
def data_path(path=None, force_update=False, update_path=True, download=True,
*, accept=False, verbose=None): # noqa: D103
return _data_path(path=path, force_update=force_update,
update_path=update_path, name='brainstorm',
download=download, archive_name='bst_resting.tar.gz',
accept=accept)
_data_path_doc = _data_path_doc_accept.format(
name='brainstorm', conf='MNE_DATASETS_BRAINSTORM_DATA_PATH')
_data_path_doc = _data_path_doc.replace('brainstorm dataset',
'brainstorm (bst_resting) dataset')
data_path.__doc__ = _data_path_doc
def get_version(): # noqa: D103
return _get_version('brainstorm.bst_resting')
get_version.__doc__ = _version_doc.format(name='brainstorm')
def description():
"""Get description of brainstorm (bst_resting) dataset."""
for desc in _description.splitlines():
print(desc)
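# Illustrative usage sketch (not part of the original module):
#
# from mne.datasets.brainstorm import bst_resting
# raw_dir = bst_resting.data_path(accept=True)  # downloads the data on first use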
| bsd-3-clause | -2,993,587,314,765,377,000 | 30.425532 | 77 | 0.645227 | false | 3.282222 | false | false | false |
facundoq/toys | cic-fly/src/cic/ImageReader.py | 1 | 3783 | '''
Created on 14/05/2013
@author: facuq
'''
from numpy import *
from scipy import ndimage
import os;
from Utils import is_hidden
from matplotlib.image import imread
from PIL import TiffImagePlugin
def imsave(fname, arr, vmin=None, vmax=None, cmap=None, format=None,
origin=None, dpi=100):
"""
Saves a 2D :class:`numpy.array` as an image with one pixel per element.
The output formats available depend on the backend being used.
Arguments:
*fname*:
A string containing a path to a filename, or a Python file-like object.
If *format* is *None* and *fname* is a string, the output
format is deduced from the extension of the filename.
*arr*:
A 2D array.
Keyword arguments:
*vmin*/*vmax*: [ None | scalar ]
*vmin* and *vmax* set the color scaling for the image by fixing the
values that map to the colormap color limits. If either *vmin* or *vmax*
is None, that limit is determined from the *arr* min/max value.
*cmap*:
cmap is a colors.Colormap instance, eg cm.jet.
If None, default to the rc image.cmap value.
*format*:
One of the file extensions supported by the active
backend. Most backends support png, pdf, ps, eps and svg.
*origin*
[ 'upper' | 'lower' ] Indicates where the [0,0] index of
the array is in the upper left or lower left corner of
the axes. Defaults to the rc image.origin value.
*dpi*
The DPI to store in the metadata of the file. This does not affect the
resolution of the output image.
"""
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
figsize = [x / float(dpi) for x in (arr.shape[1], arr.shape[0])]
fig = Figure(figsize=figsize, dpi=dpi, frameon=False)
canvas = FigureCanvas(fig)
im = fig.figimage(arr, cmap=cmap, vmin=vmin, vmax=vmax, origin=origin)
fig.savefig(fname, dpi=dpi, format=format)
class ImageReader(object):
'''
classdocs
'''
def __init__(self):
'''
Constructor
'''
def read_all(self,folder_path):
images= self.read_images(folder_path);
white=self.read_white(folder_path);
return (images,white)
def is_image(self,filepath):
path,ext= os.path.splitext(filepath)
filename=path.split('\\')[-1]
ext=ext.lower()
return filename.find("total")==-1 and filename.find("~")==-1 and not is_hidden(filepath) and ['.tif','.png'].count(ext)>0
def obtain_filenames(self,path):
def image_file(filename,filepath):
return filename.find("blanco")==-1 and self.is_image( filepath)
files = [os.path.join(path, f) for f in os.listdir(path) if image_file(f, os.path.join(path, f))]
return files
def obtain_white_filename(self,path):
def image_file(filename,filepath):
return filename.find("blanco")!=-1 and filename.find("~")==-1 and not is_hidden(filepath)
files = [os.path.join(path, f) for f in os.listdir(path) if image_file(f, os.path.join(path, f))]
if len(files)==0:
return None
else:
return files[0]
def read_image(self,path):
print path
return ndimage.rotate(imread(path),-90)[:,1:-1,:]
def read_images(self, folder_path):
files=self.obtain_filenames(folder_path)
return map(self.read_image,files )
def read_white(self, folder_path):
file_name=self.obtain_white_filename(folder_path)
if file_name==None:
return None
else:
return transpose(imread(file_name))
if __name__ == '__main__':
pass | agpl-3.0 | -4,321,829,481,087,383,600 | 33.4 | 129 | 0.618821 | false | 3.708824 | false | false | false |
cts2/rf2db | rf2db/db/RF2TransitiveChildren.py | 1 | 3835 | # -*- coding: utf-8 -*-
# Copyright (c) 2014, Mayo Clinic
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of the Mayo Clinic nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
""" Function to build the SNOMED CT Transitive Children file -- a helper table that
returns the number of children given a source concept and depth
"""
from rf2db.db.RF2DBConnection import RF2DBConnection
from rf2db.db.RF2FileCommon import RF2FileWrapper
from rf2db.db.RF2TransitiveClosure import TransitiveClosureDB
class TransitiveChildrenDB(RF2FileWrapper):
table = 'transitive_children'
closuredb = TransitiveClosureDB
createSTMT = ("CREATE TABLE IF NOT EXISTS %(table)s (\n"
" `parent` bigint(20) NOT NULL,\n"
" `depth` int NOT NULL,\n"
" `count` int NOT NULL DEFAULT 0,\n"
" PRIMARY KEY (parent, depth));"
)
def __init__(self, *args, **kwargs):
RF2FileWrapper.__init__(self, *args, **kwargs)
def loadTable(self, rf2file):
db = RF2DBConnection()
print("Populating transitive children table")
tcdb = self.closuredb()
if not tcdb.hascontent():
print("Error: Transitive children load requires transitive closure table")
return
tname = self._fname
tcdbname = TransitiveClosureDB.fname()
db.execute_query("""INSERT IGNORE INTO %(tname)s
SELECT DISTINCT parent, depth, 0 FROM %(tcdbname)s""" % locals())
db.commit()
print("Computing number of children")
db.execute_query("""UPDATE %(tname)s t,
(SELECT c.parent, c.depth, count(t.parent) AS dc
FROM %(tcdbname)s t, %(tname)s c
WHERE t.parent=c.parent AND t.depth<=c.depth
GROUP BY c.parent, c.depth) tc
SET t.count = tc.dc
WHERE t.parent=tc.parent AND t.depth=tc.depth""" % locals())
db.commit()
def numDescendants(self, sctid, maxDepth=0, **_):
# The following assumes that count can't increase as depth increases
query = "SELECT max(count) FROM %s WHERE parent = %s " % (self._fname, sctid)
if maxDepth:
query += " AND depth <= %s " % maxDepth
db = RF2DBConnection()
db.execute(query)
return next(db) | bsd-3-clause | -9,186,857,485,229,114,000 | 45.216867 | 87 | 0.653716 | false | 4.20966 | false | false | false |
J216/gimp_be | gimp_be/image/exif.py | 1 | 2335 | import os
import json
from time import sleep
from subprocess import Popen, PIPE
from datetime import datetime
from sys import platform
if "win" in platform:
exiftool_location="exiftool.exe"
elif "linux" in platform:
exiftool_location="exiftool"
else:
exiftool_location="exiftool.exe"
def getEXIFTags(file_name):
p = Popen([exiftool_location, '-j',file_name], stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
try:
output, err = p.communicate(b"")
tags=json.loads(output)[0]
p.terminate()
except:
tags="failed to load"
return tags
def setEXIFTag(file_name, tag='Comment', info='8888888-8888888-8888888-888888888888'):
cmd=exiftool_location+' -' + tag +'="'+ info.replace('"','')+'" "'+file_name+'"'
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
try:
output, err = p.communicate(b"")
result=(err,output,cmd)
p.terminate()
except:
result=""
if os.path.isfile(file_name+'_original'):
os.remove(file_name+'_original')
return result
def setEXIFTags(file_name, tags={"XPComment":"Test complete1!","Comment":"Test Complete2"}):
from subprocess import call
tag_string=""
for key in tags.keys():
tag_string=tag_string+' -' + key +'="'+ str(tags[key]).replace('"','')+'"'
cmd=exiftool_location+tag_string+' "'+file_name+'"'
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
try:
output, err = p.communicate(b"")
result=(err,output,cmd)
p.terminate()
except:
result=""
if os.path.isfile(file_name+'_original'):
os.remove(file_name+'_original')
return result
def increaseRating(file_name):
t=getEXIFTags(file_name)
if "Rating" in t.keys():
if not str(t['Rating'])=="5":
r=str(int(t['Rating'])+1)
return setEXIFTag(file_name,"Rating",r)
else:
return setEXIFTag(file_name,"Rating","5")
def decreaseRating(file_name):
t=getEXIFTags(file_name)
if "Rating" in t.keys():
if not str(t['Rating'])=="0":
r=str(int(t['Rating'])-1)
return setEXIFTag(file_name,"Rating",r)
else:
return setEXIFTag(file_name,"Rating","0")
sleep(1)
    if os.path.isfile(file_name+'_original'):
        os.remove(file_name+'_original')
| mit | 7,110,081,704,028,435,000 | 30.133333 | 100 | 0.610278 | false | 3.284107 | false | false | false |
akbertram/appengine-mapreduce | python/test/mapreduce/operation/counters_test.py | 4 | 1244 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from testlib import mox
import unittest
from mapreduce import context
from mapreduce import operation as op
class IncrementTest(unittest.TestCase):
"""Test Increment operation."""
def testIncrement(self):
"""Test applying Increment operation."""
m = mox.Mox()
ctx = context.Context(None, None)
ctx.counters = m.CreateMock(context.Counters)
operation = op.counters.Increment("test", 12)
# Record calls
ctx.counters.increment("test", 12)
m.ReplayAll()
try: # test, verify
operation(ctx)
m.VerifyAll()
finally:
m.UnsetStubs()
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -271,654,941,731,302,980 | 23.392157 | 74 | 0.70418 | false | 3.92429 | true | false | false |
briansan/rams | RAMS/comm/JoystickClientInterface.py | 1 | 1996 | """Client Interface for Pygame.
Pygame used to parse keyboard and mouse commands as well as display
images in a window
"""
from PygameClientInterface import PygameClientInterface
class JoystickClientInterface(PygameClientInterface):
"""
__init__ function -> setup XMLRPC as well as pygame
"""
def __init__(self, server_uri, update_image=True, joystick_id=0,
has_control=True):
import pygame
pygame.joystick.init()
self.__joystick = pygame.joystick.Joystick(joystick_id)
self.__joystick.init()
self.__velocity_factor = 1.
self.__has_control = has_control
# Use base class to setup XMLRPC server
PygameClientInterface.__init__(self, server_uri, update_image)
def hasControl(self):
"""Return True if user wants to send control commands.
Tab key and shift-tab toggle this.
"""
# Check if control flag should be toggled.
import pygame
if pygame.key.get_pressed()[pygame.K_TAB]:
self.__has_control = not (
pygame.key.get_mods() & pygame.KMOD_LSHIFT)
if self.__has_control:
print('Take control')
else:
print('Release control')
return self.__has_control
def drive(self):
pitch = -self.__joystick.get_axis(1) * self.__velocity_factor
yaw = -self.__joystick.get_axis(2) * self.__velocity_factor
self._proxy.setVel(0, pitch, 0)
self._proxy.setRot(yaw)
def processClients(self):
exclusive_control = self.hasControl()
if exclusive_control:
self.drive()
self.setWaypoint()
return PygameClientInterface.processClients(
self, exclusive_control=exclusive_control)
def setWaypoint(self):
"""Process waypoint setting functions."""
import pygame
if pygame.key.get_pressed()[pygame.K_y]:
self._proxy.setWayPoint()
| bsd-3-clause | 1,351,942,430,450,730,000 | 26.722222 | 70 | 0.60521 | false | 4.098563 | false | false | false |
drocco007/vox_commands | archive/java_python.py | 1 | 1953 | from dragonfly import (Grammar, AppContext, MappingRule, Dictation,
Key, Text, FocusWindow, IntegerRef, Function)
#---------------------------------------------------------------------------
# Create this module's grammar and the context under which it'll be active.
context = AppContext(executable='java', title='py')
grammar = Grammar('pycharm Python commands', context=context)
#---------------------------------------------------------------------------
# Create a mapping rule which maps things you can say to actions.
#
# Note the relationship between the *mapping* and *extras* keyword
# arguments. The extras is a list of Dragonfly elements which are
# available to be used in the specs of the mapping. In this example
# the Dictation("text")* extra makes it possible to use "<text>"
# within a mapping spec and "%(text)s" within the associated action.
example_rule = MappingRule(
name='pycharm Python commands',
mapping={
'Document comment': Text('"""') + Key('enter'),
'dunder <text>': Text('__%(text)s__'),
'defun': Text('def') + Key('tab'),
'Set trace': Text('import pdb; pdb.set_trace()\n'),
'for <text> in <text2>': Text('for %(text)s in %(text2)s:') + Key('enter'),
        'for <text> in X range <n>': Text('for %(text)s in xrange(%(n)d):') + Key('enter')
},
extras=[ # Special elements in the specs of the mapping.
Dictation("text"),
Dictation("text2"),
IntegerRef("n", 1, 10000), # Times to repeat the sequence.
],
)
# Add the action rule to the grammar instance.
grammar.add_rule(example_rule)
#---------------------------------------------------------------------------
# Load the grammar instance and define how to unload it.
grammar.load()
# Unload function which will be called by natlink at unload time.
def unload():
global grammar
if grammar: grammar.unload()
grammar = None
| mit | 5,275,517,119,165,565,000 | 35.849057 | 89 | 0.570405 | false | 4.120253 | false | false | false |
Telefonica/toolium-examples | web/pageobjects/tables.py | 1 | 1474 | # -*- coding: utf-8 -*-
u"""
Copyright 2016 Telefónica Investigación y Desarrollo, S.A.U.
This file is part of Toolium.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from selenium.webdriver.common.by import By
from toolium.pageelements import *
from toolium.pageobjects.page_object import PageObject
class Row(Group):
def init_page_elements(self):
self.last_name = Text(By.XPATH, './td[1]')
self.first_name = Text(By.XPATH, './td[2]')
self.email = Text(By.XPATH, './td[3]')
self.due = Text(By.XPATH, './td[4]')
self.web = Text(By.XPATH, './td[5]')
self.edit = Link(By.XPATH, './/a[1]')
self.delete = Link(By.XPATH, './/a[2]')
class Table(Group):
def init_page_elements(self):
self.rows = PageElements(By.XPATH, './tbody/tr', page_element_class=Row)
class TablesPageObject(PageObject):
def init_page_elements(self):
self.table1 = Table(By.ID, 'table1')
self.table2 = Table(By.ID, 'table2')
| apache-2.0 | 6,622,172,321,602,832,000 | 32.454545 | 80 | 0.684103 | false | 3.330317 | false | false | false |
paul-voyles/femsim-hrmc | potentials/reformat_potential.py | 1 | 1506 | import sys, os, re
def reformat(f):
of = 'reformatted/'+f
with open(f, 'r') as input, open(of, 'w') as output:
line = input.readline() # Comment 1
output.write(line)
line = input.readline() # Comment 2
output.write(line)
line = input.readline() # Comment 3
output.write(line)
line = input.readline() # Number of elements and elements
output.write(line)
sline = line.strip().split()
while '' in sline:
sline.remove('')
nelements = int(sline[0])
line = input.readline() # nrho drho nr dr cutoff
output.write(line)
sline = line.strip().split()
while '' in sline:
sline.remove('')
nrho = int(sline[0])
drho = float(sline[1])
nr = int(sline[2])
dr = float(sline[3])
cutoff = float(sline[4])
line = 'holder'
while line != []:
line = input.readline().strip().split()
while '' in line:
line.remove('')
try:
[float(x) for x in line]
for num in line:
output.write(num+'\n')
except:
output.write(' '.join(line) + '\n')
def main():
f = sys.argv[1]
if not os.path.exists(os.path.split(os.path.abspath(f))[0] + '/reformatted'):
os.makedirs(os.path.split(os.path.abspath(f))[0] + '/reformatted')
reformat(f)
if __name__ == '__main__':
main()
| mit | -8,091,060,057,435,095,000 | 28.529412 | 81 | 0.498008 | false | 3.628916 | false | false | false |
seppovic/check_mk-plugins | notify-via-isms/web/plugins/wato/notify-via-isms_notifications.py | 1 | 2644 | #!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
register_notification_parameters("isms.pl",
Dictionary(
optional_keys = ["splitmax", "timeout", "debug"],
elements = [
( "ismsserver",
TextAscii(
title = _("isms-Server"),
help = _("IP or Hostname of the isms Server")
),
),
( "user",
TextAscii(
title = _("Username"),
help = _("Username used to connect to the isms Server")
),
),
( "password",
TextAscii(
title = _("Password"),
help = _("Password used to connect to the isms Server"),
default_value = ""
),
),
( "host_message",
TextAreaUnicode(
title = _("Message for notifications regarding a host"),
help = _("Here you are allowed to use all macros that are defined in the "
"notification context."),
rows = 9,
cols = 58,
monospaced = True,
default_value = """
$NOTIFICATIONTYPE$ $HOSTNAME$> is $HOSTSTATE$ /$SHORTDATETIME$/ $HOSTOUTPUT$
""",
),
),
( "service_message",
TextAreaUnicode(
title = _("Message for notifications regarding a service"),
help = _("Here you are allowed to use all macros that are defined in the "
"notification context."),
rows = 9,
cols = 58,
monospaced = True,
default_value = """
Nagios Alert Type: $NOTIFICATIONTYPE$
Host: $HOSTNAME$
Service: $SERVICEDESC$
Info: $SERVICEOUTPUT$
""",
),
),
( "timeout",
Integer(
title = _("Timeout"),
help = _("Timeout in seconds"),
default_value = 10
),
),
( "splitmax",
Integer(
title = _("Max. Messages to send for one Notification."),
help = _("Split message into 160 character pieces up to X msgs, 0 means no limitation."),
default_value = 1
),
),
( "debug",
FixedValue(
True,
title = _("debug"),
totext = _("debug messages are printed to ~/var/log/notify.log"),
)
),
])
)
| gpl-2.0 | -1,686,057,100,508,082,200 | 32.897436 | 107 | 0.417549 | false | 5.194499 | false | false | false |
skuda/client-python | kubernetes/client/models/v1_tcp_socket_action.py | 1 | 3244 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1TCPSocketAction(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, port=None):
"""
V1TCPSocketAction - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'port': 'IntstrIntOrString'
}
self.attribute_map = {
'port': 'port'
}
self._port = port
@property
def port(self):
"""
Gets the port of this V1TCPSocketAction.
Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:return: The port of this V1TCPSocketAction.
:rtype: IntstrIntOrString
"""
return self._port
@port.setter
def port(self, port):
"""
Sets the port of this V1TCPSocketAction.
Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param port: The port of this V1TCPSocketAction.
:type: IntstrIntOrString
"""
if port is None:
raise ValueError("Invalid value for `port`, must not be `None`")
self._port = port
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| apache-2.0 | 573,503,810,028,661,440 | 26.965517 | 133 | 0.536067 | false | 4.336898 | false | false | false |
huguesv/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/menuinst/__init__.py | 4 | 2394 | # Copyright (c) 2008-2011 by Enthought, Inc.
# Copyright (c) 2013-2015 Continuum Analytics, Inc.
# All rights reserved.
from __future__ import absolute_import
import logging
import sys
import json
from os.path import abspath, basename, exists, join
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
if sys.platform.startswith('linux'):
from .linux import Menu, ShortCut
elif sys.platform == 'darwin':
from .darwin import Menu, ShortCut
elif sys.platform == 'win32':
from .win32 import Menu, ShortCut
from .win_elevate import isUserAdmin, runAsAdmin
def _install(path, remove=False, prefix=sys.prefix, mode=None):
if abspath(prefix) == abspath(sys.prefix):
env_name = None
else:
env_name = basename(prefix)
data = json.load(open(path))
try:
menu_name = data['menu_name']
except KeyError:
menu_name = 'Python-%d.%d' % sys.version_info[:2]
shortcuts = data['menu_items']
m = Menu(menu_name, prefix=prefix, env_name=env_name, mode=mode)
if remove:
for sc in shortcuts:
ShortCut(m, sc).remove()
m.remove()
else:
m.create()
for sc in shortcuts:
ShortCut(m, sc).create()
def install(path, remove=False, prefix=sys.prefix, recursing=False):
"""
install Menu and shortcuts
"""
# this sys.prefix is intentional. We want to reflect the state of the root installation.
if sys.platform == 'win32' and not exists(join(sys.prefix, '.nonadmin')):
if isUserAdmin():
_install(path, remove, prefix, mode='system')
else:
from pywintypes import error
try:
if not recursing:
retcode = runAsAdmin([join(sys.prefix, 'python'), '-c',
"import menuinst; menuinst.install(%r, %r, %r, %r)" % (
path, bool(remove), prefix, True)])
else:
retcode = 1
except error:
retcode = 1
if retcode != 0:
logging.warn("Insufficient permissions to write menu folder. "
"Falling back to user location")
_install(path, remove, prefix, mode='user')
else:
_install(path, remove, prefix, mode='user')
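# Illustrative usage sketch (not part of the original module; the JSON path is a
# made-up example): install the shortcuts described by a menu JSON file, then
# remove them again by passing remove=True.
#     import menuinst
#     menuinst.install(r'C:\Miniconda3\Menu\console_shortcut.json')
#     menuinst.install(r'C:\Miniconda3\Menu\console_shortcut.json', remove=True)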
| apache-2.0 | -2,998,005,029,663,971,300 | 30.5 | 97 | 0.576859 | false | 3.950495 | false | false | false |
schwehr/gdal-autotest2 | python/gcore/asyncreader_test.py | 1 | 3937 | #!/usr/bin/env python
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is a complete rewrite of a file licensed as follows:
#
# Copyright (c) 2010, Even Rouault <even dot rouault at mines dash paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""Test async reader.
Rewrite of
http://trac.osgeo.org/gdal/browser/trunk/autotest/gcore/asyncreader.py
"""
import contextlib
import unittest
from osgeo import gdal
import unittest
from autotest2.gcore import gcore_util
from autotest2.gdrivers import gdrivers_util
@contextlib.contextmanager
def AsyncReader(src, xoff, yoff, xsize, ysize, buf=None, buf_xsize=None,
buf_ysize=None, buf_type=None, band_list=None, options=None):
options = options or []
asyncreader = src.BeginAsyncReader(xoff, yoff, xsize, ysize, buf, buf_xsize,
buf_ysize, buf_type, band_list, options)
yield asyncreader
src.EndAsyncReader(asyncreader)
class AsyncReaderTest(unittest.TestCase):
def testAsyncReader(self):
filepath = gcore_util.GetTestFilePath('rgbsmall.tif')
src = gdal.Open(filepath)
x_size = src.RasterXSize
y_size = src.RasterYSize
bands = src.RasterCount
asyncreader = src.BeginAsyncReader(0, 0, src.RasterXSize, src.RasterYSize)
buf = asyncreader.GetBuffer()
self.assertEqual(asyncreader.GetNextUpdatedRegion(0),
[gdal.GARIO_COMPLETE, 0, 0, x_size, y_size])
src.EndAsyncReader(asyncreader)
expected = [src.GetRasterBand(i).Checksum() for i in range(1, bands + 1)]
asyncreader = None
src = None
drv = gdal.GetDriverByName(gdrivers_util.GTIFF_DRIVER)
dst = drv.Create('/vsimem/asyncresult.tif', x_size, y_size, bands)
dst.WriteRaster(0, 0, x_size, y_size, buf)
checksum = [dst.GetRasterBand(i).Checksum() for i in range(1, bands + 1)]
dst = None
gdal.Unlink('/vsimem/asyncresult.tif')
self.assertEqual(checksum, expected)
def testAsyncReaderContextManager(self):
filepath = gcore_util.GetTestFilePath('rgbsmall.tif')
src = gdal.Open(filepath)
x_size = src.RasterXSize
y_size = src.RasterYSize
with AsyncReader(src, 0, 0, x_size, y_size) as asyncreader:
self.assertEqual(asyncreader.GetNextUpdatedRegion(0),
[gdal.GARIO_COMPLETE, 0, 0, x_size, y_size])
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 2,296,637,170,337,337,900 | 35.453704 | 81 | 0.723139 | false | 3.621895 | true | false | false |
ActiveState/code | recipes/Python/578817_lrutimestamp__cache_entry_aging/recipe-578817.py | 1 | 3016 | #!/usr/bin/python3
""" Test script for lru_timestamp function.
usage: lru.py [-h] [-r REFRESH] [-s SLEEP]
optional arguments:
-h, --help show this help message and exit
-r REFRESH, --refresh REFRESH
refresh interval (default 60 min)
-s SLEEP, --sleep SLEEP
sleep interval (default 10 min)
"""
import argparse
import datetime
import functools
import random
import time
def lru_timestamp(refresh_interval=60):
""" Return a timestamp string for @lru_cache decorated functions.
The returned timestamp is used as the value of an extra parameter
to @lru_cache decorated functions, allowing for more control over
how often cache entries are refreshed. The lru_timestamp function
should be called with the same refresh_interval value for a given
@lru_cache decorated function. The returned timestamp is for the
benefit of the @lru_cache decorator and is normally not used by
the decorated function.
Positional arguments:
refresh_interval -- in minutes (default 60), values less than 1
are coerced to 1, values more than 1440 are
coerced to 1440
"""
if not isinstance(refresh_interval, int):
raise TypeError('refresh_interval must be an int from 1-1440')
dt = datetime.datetime.now()
if refresh_interval > 60:
refresh_interval = min(refresh_interval, 60*24)
fmt = '%Y%m%d'
minutes = dt.hour * 60
else:
refresh_interval = max(1, refresh_interval)
fmt = '%Y%m%d%H'
minutes = dt.minute
ts = dt.strftime(fmt)
age = minutes // refresh_interval
return '{0}:{1:d}'.format(ts, age)
@functools.lru_cache()
def calulate(x, y, timestamp):
""" Return random int for testing lru_timestamp function."""
print('performing calculation (not from cache), timestamp:', timestamp)
return random.randint(x, y)
def init():
""" Return parsed command line args."""
random.seed()
parser = argparse.ArgumentParser(fromfile_prefix_chars='@')
parser.add_argument('-r', '--refresh', type=int, dest='refresh',
default=60, help='refresh interval (default 60 min)')
parser.add_argument('-s', '--sleep', type=int, dest='sleep', default=10,
help='sleep interval (default 10 min)')
return parser.parse_args()
def main():
""" Script main."""
args = init()
print('refresh interval (min):', args.refresh)
print('sleep interval (min):', args.sleep)
print()
refresh = args.refresh
doze = args.sleep * 60
#num = calulate(1, 1000, lru_timestamp('junk'))
#num = calulate(1, 1000, lru_timestamp(1.22))
#num = calulate(1, 1000, lru_timestamp(-1))
#num = calulate(1, 1000, lru_timestamp(2000))
while True:
num = calulate(1, 1000, lru_timestamp(refresh))
print('calculation returned', num)
time.sleep(doze)
if __name__ == '__main__':
main()
| mit | 5,343,727,916,261,897,000 | 28 | 77 | 0.627984 | false | 3.891613 | false | false | false |
jondelmil/aiovectortiler | aiovectortiler/config_handler.py | 1 | 3044 | import yaml
import logging
logger = logging.getLogger(__name__)
class Configs:
server = None
recipes = {}
DB = None
plugins = None
@classmethod
def init_server_configs(cls, server_configs):
with open(server_configs) as s_c:
cls.server = yaml.load(s_c.read())
@classmethod
def init_layer_recipes(cls, recipe_configs):
recipe_name = recipe_configs.split('/')[-1]
if recipe_name[-4:] == '.yml':
recipe_name = recipe_name[:-4]
elif recipe_name[-5:] == '.yaml':
recipe_name = recipe_name[:-5]
else:
raise ValueError('File in layer recipes folder does not have a YAML extension: {0}'.format(recipe_configs))
with open(recipe_configs) as r_c:
load_recipe = yaml.load(r_c.read())
cls.recipes[recipe_name] = Recipe(load_recipe)
# add the recipe name based on the file name
# this is needed by the tilejson query
cls.recipes[recipe_name].name = recipe_name
logger.info('Adding layer: {0}'.format(recipe_name))
'''
Plugins.load()
Plugins.hook('before_load', config=Configs)
def load_recipe(data):
name = data.get('name', 'default')
if name in RECIPES:
raise ValueError('Recipe with name {} already exist'.format(name))
data['name'] = name
RECIPES[name] = Recipe(data)
if len(RECIPES) == 1 and name != 'default':
RECIPES['default'] = RECIPES[data['name']]
for recipe in Configs.layers:
with Path(recipe).open() as f:
load_recipe(yaml.load(f.read()))
Plugins.hook('load', config=config, recipes=RECIPES)
'''
# The following model structures for recipes / layers / queries allow searching up the chain
# for attributes: if an attribute is not found at the recipe level, the server configs are checked.
class Recipe(dict):
def __init__(self, data):
super().__init__(data)
self.load_layers(data['layers'])
def load_layers(self, layers):
self.layers = {}
for layer in layers:
self.layers[layer['name']] = Layer(self, layer)
def __getattr__(self, attr):
return self.get(attr, Configs.server.get(attr, None))
class Layer(dict):
def __init__(self, recipe, layer_data):
self.recipe = recipe
super().__init__(layer_data)
self.load_queries(layer_data['queries'])
def load_queries(self, queries):
self.queries = []
for query in queries:
self.queries.append(Query(self, query))
def __getattr__(self, attr):
return self.get(attr, getattr(self.recipe, attr))
@property
def id(self):
return '{0}:{1}'.format(self.recipe.name, self.name)
@property
def description(self):
return self.get('description', 'no description provided')
class Query(dict):
def __init__(self, layer, data):
self.layer = layer
super().__init__(data)
def __getattr__(self, attr):
return self.get(attr, getattr(self.layer, attr))
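# Illustrative sketch (not part of the original module): the attribute fallback
# described above resolves Query -> Layer -> Recipe -> Configs.server.
# The recipe contents and the 'srid' value below are made-up examples.
#     Configs.server = {'srid': 3857}
#     demo = Recipe({'name': 'demo', 'layers': [
#         {'name': 'roads', 'queries': [{'sql': 'SELECT way FROM roads'}]}]})
#     query = demo.layers['roads'].queries[0]
#     query.sql   # found on the query itself
#     query.srid  # falls back through the layer and recipe to Configs.server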
| mit | 1,751,416,291,269,608,000 | 27.716981 | 119 | 0.605125 | false | 3.744157 | true | false | false |
simpletrain/pybingwallpaper | src/winsetter.py | 1 | 3113 | #!/usr/bin/env python3
import log
import sys
import subprocess
from importlib import import_module
from setter import *
from os.path import dirname, splitext
if sys.platform == 'win32':
winreg = import_module('winreg')
Image = import_module('PIL.Image')
win32gui = import_module('win32.win32gui')
def convert_photo_to_bmp(inpath, outpath):
if splitext(inpath)[1] == '.bmp':
return
Image.open(inpath).save(outpath)
SPI_SETDESKWALLPAPER = 0x0014
class Win32WallpaperSetter(WallpaperSetter):
KEY = winreg.HKEY_CURRENT_USER
SUB_KEY = 'Control Panel\\Desktop'
VALUE_NAME = 'Wallpaper'
BACKUP = True
def _read_value(self, k, valuename = None):
if not valuename: valuename = self.VALUE_NAME
try:
value = winreg.QueryValueEx(k, valuename)
if value[1] != winreg.REG_SZ:
self._logger.fatal('cannot handle non-REG_SZ value %s', value)
return None
except:
self._logger.warn('error encountered during reading value %s', valuename, exc_info=1)
return None
self._logger.debug('read {} from {} get {}'.format(valuename, k, value))
return value
def _set_value(self, k, v, valuename = None):
if not valuename: valuename = self.VALUE_NAME
self._logger.debug('set %s\\%s\\%s to %s', self.KEY, self.SUB_KEY, valuename, v)
try:
winreg.SetValueEx(k, valuename, 0, winreg.REG_SZ, v)
except:
self._logger.error('error encountered during setting value %s', valuename, exc_info=1)
return False
self._logger.debug('set {} of {} to {} succeeds'.format(valuename, k, v))
return True
def set(self, path, args):
k = None
inpath = path.replace('/', '\\')
path = "{}\\wallpaper.bmp".format(dirname(inpath))
# windows only supports BMP, convert before setting
try:
convert_photo_to_bmp(inpath, path)
except Exception as ex:
self._logger.exception(ex)
return False
            ret = True
            try:
k = winreg.OpenKey(self.KEY, self.SUB_KEY, 0, winreg.KEY_READ|winreg.KEY_SET_VALUE)
lastvalue = self._read_value(k)
if lastvalue and self.BACKUP:
ret = self._set_value(k, lastvalue[0], self.VALUE_NAME+'Backup')
self._set_value(k, '0', 'TileWallpaper')
self._set_value(k, '10', 'WallpaperStyle')
win32gui.SystemParametersInfo(SPI_SETDESKWALLPAPER, path, 1+2)
except Exception as ex:
ret = False
self._logger.exception(ex)
finally:
if k: k.Close()
return ret
register('win', Win32WallpaperSetter)
if __name__ == '__main__':
log.setDebugLevel(log.DEBUG)
setter = Win32WallpaperSetter()
setter.set(r'w.jpg', None)
| mit | 5,449,361,964,481,104,000 | 36.963415 | 102 | 0.551879 | false | 3.852723 | false | false | false |
will-Do/avocado-vt | selftests/unit/test_utils_misc.py | 1 | 7838 | #!/usr/bin/python
import os
import tempfile
import unittest
import sys
from avocado.utils import process
# simple magic for using scripts within a source tree
basedir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if os.path.isdir(os.path.join(basedir, 'virttest')):
sys.path.append(basedir)
from virttest.unittest_utils import mock
from virttest import utils_misc
from virttest import cartesian_config
from virttest import build_helper
class TestUtilsMisc(unittest.TestCase):
def test_cpu_vendor_intel(self):
cpu_info = """processor : 0
vendor_id : GenuineIntel
cpu family : 6
model : 58
model name : Intel(R) Core(TM) i7-3770 CPU @ 3.40GHz
"""
vendor = utils_misc.get_cpu_vendor(cpu_info, False)
self.assertEqual(vendor, 'GenuineIntel')
def test_cpu_vendor_amd(self):
cpu_info = """processor : 3
vendor_id : AuthenticAMD
cpu family : 21
model : 16
model name : AMD A10-5800K APU with Radeon(tm) HD Graphics
"""
vendor = utils_misc.get_cpu_vendor(cpu_info, False)
self.assertEqual(vendor, 'AuthenticAMD')
def test_vendor_unknown(self):
cpu_info = "this is an unknown cpu"
vendor = utils_misc.get_cpu_vendor(cpu_info, False)
self.assertEqual(vendor, 'unknown')
def test_get_archive_tarball_name(self):
tarball_name = utils_misc.get_archive_tarball_name('/tmp',
'tmp-archive',
'bz2')
self.assertEqual(tarball_name, 'tmp-archive.tar.bz2')
def test_get_archive_tarball_name_absolute(self):
tarball_name = utils_misc.get_archive_tarball_name('/tmp',
'/var/tmp/tmp',
'bz2')
self.assertEqual(tarball_name, '/var/tmp/tmp.tar.bz2')
def test_get_archive_tarball_name_from_dir(self):
tarball_name = utils_misc.get_archive_tarball_name('/tmp',
None,
'bz2')
self.assertEqual(tarball_name, 'tmp.tar.bz2')
def test_git_repo_param_helper(self):
config = """git_repo_foo_uri = git://git.foo.org/foo.git
git_repo_foo_branch = next
git_repo_foo_lbranch = local
git_repo_foo_commit = bc732ad8b2ed8be52160b893735417b43a1e91a8
"""
config_parser = cartesian_config.Parser()
config_parser.parse_string(config)
params = config_parser.get_dicts().next()
h = build_helper.GitRepoParamHelper(params, 'foo', '/tmp/foo')
self.assertEqual(h.name, 'foo')
self.assertEqual(h.branch, 'next')
self.assertEqual(h.lbranch, 'local')
self.assertEqual(h.commit, 'bc732ad8b2ed8be52160b893735417b43a1e91a8')
def test_normalize_data_size(self):
n1 = utils_misc.normalize_data_size("12M")
n2 = utils_misc.normalize_data_size("1024M", "G")
n3 = utils_misc.normalize_data_size("1024M", "T")
n4 = utils_misc.normalize_data_size("1000M", "G", 1000)
n5 = utils_misc.normalize_data_size("1T", "G", 1000)
n6 = utils_misc.normalize_data_size("1T", "M")
self.assertEqual(n1, "12.0")
self.assertEqual(n2, "1.0")
self.assertEqual(n3, "0.0009765625")
self.assertEqual(n4, "1.0")
self.assertEqual(n5, "1000.0")
self.assertEqual(n6, "1048576.0")
class FakeCmd(object):
def __init__(self, cmd):
self.fake_cmds = [
{"cmd": "numactl --hardware",
"stdout": """
available: 1 nodes (0)
node 0 cpus: 0 1 2 3 4 5 6 7
node 0 size: 18431 MB
node 0 free: 17186 MB
node distances:
node 0
0: 10
"""},
{"cmd": "ps -eLf | awk '{print $4}'",
"stdout": """
1230
1231
1232
1233
1234
1235
1236
1237
"""},
{"cmd": "taskset -cp 0 1230", "stdout": ""},
{"cmd": "taskset -cp 1 1231", "stdout": ""},
{"cmd": "taskset -cp 2 1232", "stdout": ""},
{"cmd": "taskset -cp 3 1233", "stdout": ""},
{"cmd": "taskset -cp 4 1234", "stdout": ""},
{"cmd": "taskset -cp 5 1235", "stdout": ""},
{"cmd": "taskset -cp 6 1236", "stdout": ""},
{"cmd": "taskset -cp 7 1237", "stdout": ""},
]
self.stdout = self.get_stdout(cmd)
def get_stdout(self, cmd):
for fake_cmd in self.fake_cmds:
if fake_cmd['cmd'] == cmd:
return fake_cmd['stdout']
raise ValueError("Could not locate locate '%s' on fake cmd db" % cmd)
def utils_run(cmd, shell=True):
return FakeCmd(cmd)
all_nodes_contents = "0\n"
online_nodes_contents = "0\n"
class TestNumaNode(unittest.TestCase):
def setUp(self):
self.god = mock.mock_god(ut=self)
self.god.stub_with(process, 'run', utils_run)
all_nodes = tempfile.NamedTemporaryFile(delete=False)
all_nodes.write(all_nodes_contents)
all_nodes.close()
online_nodes = tempfile.NamedTemporaryFile(delete=False)
online_nodes.write(online_nodes_contents)
online_nodes.close()
self.all_nodes_path = all_nodes.name
self.online_nodes_path = online_nodes.name
self.numa_node = utils_misc.NumaNode(-1,
self.all_nodes_path,
self.online_nodes_path)
def test_get_node_cpus(self):
self.assertEqual(self.numa_node.get_node_cpus(0), '0 1 2 3 4 5 6 7')
def test_pin_cpu(self):
self.assertEqual(self.numa_node.pin_cpu("1230"), "0")
self.assertEqual(self.numa_node.dict["0"], ["1230"])
self.assertEqual(self.numa_node.pin_cpu("1231"), "1")
self.assertEqual(self.numa_node.dict["1"], ["1231"])
self.assertEqual(self.numa_node.pin_cpu("1232"), "2")
self.assertEqual(self.numa_node.dict["2"], ["1232"])
self.assertEqual(self.numa_node.pin_cpu("1233"), "3")
self.assertEqual(self.numa_node.dict["3"], ["1233"])
self.assertEqual(self.numa_node.pin_cpu("1234"), "4")
self.assertEqual(self.numa_node.dict["4"], ["1234"])
self.assertEqual(self.numa_node.pin_cpu("1235"), "5")
self.assertEqual(self.numa_node.dict["5"], ["1235"])
self.assertEqual(self.numa_node.pin_cpu("1236"), "6")
self.assertEqual(self.numa_node.dict["6"], ["1236"])
self.assertEqual(self.numa_node.pin_cpu("1237"), "7")
self.assertEqual(self.numa_node.dict["7"], ["1237"])
self.assertTrue("free" not in self.numa_node.dict.values())
def test_free_cpu(self):
self.assertEqual(self.numa_node.pin_cpu("1230"), "0")
self.assertEqual(self.numa_node.dict["0"], ["1230"])
self.assertEqual(self.numa_node.pin_cpu("1231"), "1")
self.assertEqual(self.numa_node.dict["1"], ["1231"])
self.numa_node.free_cpu("0")
self.assertEqual(self.numa_node.dict["0"], [])
self.assertEqual(self.numa_node.dict["1"], ["1231"])
def test_bitlist_to_string(self):
string = 'foo'
bitlist = [0, 1, 1, 0, 0, 1, 1, 0, 0, 1,
1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1]
self.assertEqual(utils_misc.string_to_bitlist(string), bitlist)
def test_string_to_bitlist(self):
bitlist = [0, 1, 1, 0, 0, 0, 1, 0, 0, 1,
1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0]
string = 'bar'
self.assertEqual(utils_misc.bitlist_to_string(bitlist), string)
def tearDown(self):
self.god.unstub_all()
os.unlink(self.all_nodes_path)
os.unlink(self.online_nodes_path)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | 6,594,863,467,533,439,000 | 33.528634 | 78 | 0.567237 | false | 3.28362 | true | false | false |
CWSL/access-om | test/exp_test_helper.py | 1 | 13583 |
from __future__ import print_function
import subprocess as sp
import sys
import stat
import shutil
import re
import os
import sys
import glob
import time
import yaml
from util import wait_for_qsub, get_git_hash
class ExpTestHelper(object):
def __init__(self, exp_name, bin_path=None):
self.exp_name = exp_name
self.res = exp_name.split('deg')[0].split('_')[-1] + 'deg'
self.my_path = os.path.dirname(os.path.realpath(__file__))
self.lab_path = os.path.realpath(os.path.join(self.my_path, '../'))
if not bin_path:
self.bin_path = os.path.join(self.lab_path, 'bin')
else:
self.bin_path = bin_path
self.control_path = os.path.join(self.lab_path, 'control')
self.exp_path = os.path.join(self.control_path, exp_name)
self.payu_config = os.path.join(self.exp_path, 'config.yaml')
self.accessom2_config = os.path.join(self.exp_path, 'accessom2.nml')
self.ocean_config = os.path.join(self.exp_path, 'ocean', 'input.nml')
self.archive = os.path.join(self.lab_path, 'archive', exp_name)
self.output000 = os.path.join(self.archive, 'output000')
self.output001 = os.path.join(self.archive, 'output001')
self.accessom2_out_000 = os.path.join(self.output000, 'access-om2.out')
self.accessom2_out_001 = os.path.join(self.output001, 'access-om2.out')
self.src = os.path.join(self.lab_path, 'src')
self.libaccessom2_src = os.path.join(self.src, 'libaccessom2')
self.mom_src = os.path.join(self.src, 'mom')
self.cice_src = os.path.join(self.src, 'cice5')
self.yatm_exe = None
self.mom_exe = None
self.cice_exe = None
self.input_path = '/short/public/access-om2/input_rc'
self.mom_input = os.path.join(self.input_path, 'mom_' + self.res)
self.cice_input = os.path.join(self.input_path, 'cice_' + self.res)
if not os.path.exists(self.bin_path):
os.mkdir(self.bin_path)
def has_run(self):
"""
        See whether this experiment has been run.
"""
return os.path.exists(os.path.join(self.output000, 'access-om2.out'))
def make_paths(self, exp_name, run_num=0):
paths = {}
        run_num = str(run_num).zfill(3)
        # 'exp' and 'archive' must be filled in before the derived entries below;
        # they are built the same way as self.exp_path and self.archive in __init__.
        paths['exp'] = os.path.join(self.control_path, exp_name)
        paths['archive'] = os.path.join(self.lab_path, 'archive', exp_name)
        paths['archive_link'] = os.path.join(paths['exp'], 'archive')
paths['output'] = os.path.join(paths['archive'], 'output' + run_num)
paths['restart'] = os.path.join(paths['archive'], 'restart' + run_num)
paths['stdout'] = os.path.join(paths['output'], 'access.out')
paths['stderr'] = os.path.join(paths['output'], 'access.err')
paths['stdout_runtime'] = os.path.join(paths['exp'], 'access.out')
paths['stderr_runtime'] = os.path.join(paths['exp'], 'access.err')
return paths
def print_output(self, files):
for file in files:
if file is not None:
if os.path.exists(file):
with open(file, 'r') as f:
print(f.read())
def get_most_recent_run_num(self, archive_path):
"""
        Look in the archive directory to find the most recent run number.
"""
dirs = glob.glob(archive_path + '/output*')
dirs.sort()
return int(dirs[-1][-3:])
def setup_for_programmatic_run(self, exes):
"""
Various config.yaml settings need to be modified in order to run in the
test environment.
"""
yatm_exe, cice_exe, mom_exe = exes
with open(self.payu_config) as f:
doc = yaml.load(f)
doc['submodels'][0]['exe'] = yatm_exe
doc['submodels'][1]['exe'] = mom_exe
doc['submodels'][2]['exe'] = cice_exe
doc['runlog'] = False
with open(self.payu_config, 'w') as f:
yaml.dump(doc, f)
def do_basic_access_run(self, exp, model='cm'):
paths = self.make_paths(exp)
ret, qso, qse, qsub_files = self.run(paths['exp'], self.lab_path)
if ret != 0:
self.print_output([qso, qse,
paths['stdout_runtime'],
paths['stderr_runtime']])
fstring = 'Run {} failed with code {}.'
print(fstring.format(exp, ret), file=sys.stderr)
assert(ret == 0)
run_num = self.get_most_recent_run_num(paths['archive'])
paths = self.make_paths(exp, run_num)
# Model output should exist.
assert(os.path.exists(paths['output']))
assert(os.path.exists(paths['restart']))
assert(os.path.exists(paths['stdout']))
assert(os.path.exists(paths['stderr']))
with open(paths['stdout'], 'r') as f:
s = f.read()
assert('MOM4: --- completed ---' in s)
if model == 'om':
assert('********** End of MATM **********' in s)
def copy_to_bin(self, src_dir, wildcard, libaccessom2_src=None):
exes = glob.glob(wildcard)
if len(exes) != 1:
print("Error: copy_to_bin can't find one {}".format(wildcard), file=sys.stderr)
return None, 1
exe = exes[0]
ghash = get_git_hash(src_dir)
if libaccessom2_src:
libaccessom2_hash = get_git_hash(libaccessom2_src)
else:
libaccessom2_hash = None
eb = os.path.basename(exe)
if libaccessom2_hash:
new_name = '{}_{}_libaccessom2_{}.{}'.format(eb.split('.')[0], ghash,
libaccessom2_hash, eb.split('.')[1])
else:
new_name = '{}_{}.{}'.format(eb.split('.')[0], ghash,
eb.split('.')[1])
dest = os.path.join(self.bin_path, new_name)
if os.path.exists(dest):
os.remove(dest)
shutil.copy(exe, dest)
shutil.chown(dest, group='ik11')
perms = stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH | stat.S_IXUSR \
| stat.S_IXGRP | stat.S_IXOTH
os.chmod(dest, perms)
return dest, 0
def build_libaccessom2(self, clean=False):
"""
Note: the 'clean' arg does nothing.
"""
r1 = sp.call([os.path.join(self.libaccessom2_src, 'build_on_gadi.sh')])
exename, r2 = self.copy_to_bin(self.libaccessom2_src,
self.libaccessom2_src + '/build/bin/yatm.exe')
return exename, r1 + r2
def build_cice(self, clean=False):
os.environ['ACCESS_OM_DIR'] = os.path.join(self.lab_path)
os.environ['LIBACCESSOM2_ROOT'] = os.path.join(self.libaccessom2_src)
if clean:
r1 = sp.call(['make', '-C', self.cice_src, 'clean'])
r1 = sp.call(['make', '-C', self.cice_src, self.res])
if self.res == '025deg':
exe_res = '1440x1080'
elif self.res == '01deg':
exe_res = '3600x2700'
elif self.res == '1deg':
exe_res = '360x300'
else:
assert False
build_dir_wildcard = self.cice_src + '/build_*_' + exe_res + '_*p/*.exe'
exename, r2 = self.copy_to_bin(self.cice_src, build_dir_wildcard,
self.libaccessom2_src)
return exename, r1 + r2
def build_mom(self, clean=False):
"""
Note: the 'clean' arg does nothing.
"""
os.environ['ACCESS_OM_DIR'] = os.path.join(self.lab_path)
os.environ['LIBACCESSOM2_ROOT'] = os.path.join(self.libaccessom2_src)
mydir = os.getcwd()
os.chdir(os.path.join(self.mom_src, 'exp'))
r1 = sp.call(['./MOM_compile.csh', '--type', 'ACCESS-OM',
'--platform', 'nci', '--repro'])
os.chdir(mydir)
exename, r2 = self.copy_to_bin(self.mom_src,
self.mom_src + '/exec/nci/ACCESS-OM/*.x',
self.libaccessom2_src)
return exename, r1 + r2
def build(self, clean=False):
self.yatm_exe, r1 = self.build_libaccessom2(clean)
if r1 != 0:
print('YATM build failed for exp {}'.format(self.exp_name),
file=sys.stderr)
return r1
self.cice_exe, r2 = self.build_cice(clean)
if r2 != 0:
print('CICE build failed for exp {}'.format(self.exp_name),
file=sys.stderr)
self.mom_exe, r3 = self.build_mom(clean)
if r3 != 0:
print('MOM build failed for exp {}'.format(self.exp_name),
file=sys.stderr)
return [self.yatm_exe, self.cice_exe, self.mom_exe], r1 + r2 + r3
def run(self):
"""
Run the experiment using payu and check output.
Don't do any work if it has already run.
"""
if self.has_run():
return 0, None, None, None
else:
return self.force_run()
def force_qsub_run(self):
"""
Run using qsub
"""
# Change to experiment directory and run.
try:
os.chdir(self.exp_path)
sp.check_output(['payu', 'sweep', '--lab', self.lab_path])
run_id = sp.check_output(['payu', 'run', '--lab', self.lab_path])
run_id = run_id.decode().splitlines()[0]
os.chdir(self.my_path)
except sp.CalledProcessError as err:
os.chdir(self.my_path)
print('Error: call to payu run failed.', file=sys.stderr)
return 1, None, None, None
wait_for_qsub(run_id)
run_id = run_id.split('.')[0]
output_files = []
# Read qsub stdout file
stdout_filename = glob.glob(os.path.join(self.exp_path,
'*.o{}'.format(run_id)))
if len(stdout_filename) != 1:
print('Error: there are too many stdout files.', file=sys.stderr)
return 2, None, None, None
stdout_filename = stdout_filename[0]
output_files.append(stdout_filename)
stdout = ''
with open(stdout_filename, 'r') as f:
stdout = f.read()
# Read qsub stderr file
stderr_filename = glob.glob(os.path.join(self.exp_path,
'*.e{}'.format(run_id)))
stderr = ''
if len(stderr_filename) == 1:
stderr_filename = stderr_filename[0]
output_files.append(stderr_filename)
with open(stderr_filename, 'r') as f:
stderr = f.read()
# Read the qsub id of the collate job from the stdout.
# Payu puts this here.
m = re.search(r'(\d+.gadi-pbs)\n', stdout)
if m is None:
print('Error: qsub id of collate job.', file=sys.stderr)
return 3, stdout, stderr, output_files
# Wait for the collate to complete.
run_id = m.group(1)
wait_for_qsub(run_id)
# Return files created by qsub so caller can read or delete.
collate_files = os.path.join(self.exp_path, '*.[oe]{}'.format(run_id))
output_files += glob.glob(collate_files)
return 0, stdout, stderr, output_files
def force_interactive_run(self):
"""
Already in a PBS session, run interactively
"""
# Change to experiment directory and run.
try:
os.chdir(self.exp_path)
sp.check_output(['payu', 'sweep', '--lab', self.lab_path])
sp.check_output(['payu-run', '--lab', self.lab_path])
except sp.CalledProcessError as err:
os.chdir(self.my_path)
print('Error: call to payu run failed.', file=sys.stderr)
return 1, None, None, None
return 0, None, None, None
def force_run(self):
"""
Always try to run.
"""
try:
dont_care = os.environ['PBS_NODEFILE']
is_interactive = True
except:
is_interactive = False
# Check whether this is an interactive PBS session.
if is_interactive:
ret, stdout, stderr, output_files = self.force_interactive_run()
else:
ret, stdout, stderr, output_files = self.force_qsub_run()
return ret, stdout, stderr, output_files
def build_and_run(self):
exes, ret = self.build()
assert ret == 0
self.setup_for_programmatic_run(exes)
self.force_run()
def setup_exp_from_base(base_exp_name, new_exp_name):
"""
Create a new exp by copying the base config
"""
base_exp = ExpTestHelper(base_exp_name)
new_exp_path = os.path.join(base_exp.control_path, new_exp_name)
if os.path.exists(new_exp_path):
shutil.rmtree(new_exp_path)
shutil.copytree(base_exp.exp_path, new_exp_path, symlinks=True)
new_exp = ExpTestHelper(new_exp_name)
if os.path.exists(new_exp.archive):
shutil.rmtree(new_exp.archive)
try:
os.remove(os.path.join(new_exp.control_path, 'archive'))
except OSError:
pass
try:
os.remove(os.path.join(new_exp.control_path, 'work'))
except OSError:
pass
return new_exp
def run_exp(exp_name, force=False):
my_path = os.path.dirname(os.path.realpath(__file__))
helper = ExpTestHelper(exp_name)
exes, ret = helper.build()
assert ret == 0
helper.setup_for_programmatic_run(exes)
if force:
ret, qso, qse, qsub_files = helper.force_run()
else:
ret, qso, qse, qsub_files = helper.run()
assert ret == 0
return helper
| apache-2.0 | -427,938,558,936,310,200 | 32.455665 | 93 | 0.54421 | false | 3.427454 | true | false | false |
zzrcxb/Pugoo | DataManagement/gobase.py | 1 | 1028 | from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import *
TableBase = declarative_base()
class GoBase(TableBase):
__tablename__ = 'training'
filehash = Column(String(44), primary_key=True)
fileformat = Column(String(10), default=None)
filesource = Column(String(64), default=None)
rawfilepath = Column(String(256), nullable=False)
size = Column(Integer, nullable=False)
rule = Column(String(32), nullable=False)
komi = Column(Float, nullable=False)
result = Column(Float, nullable=False)
handicap = Column(Float, nullable=False)
def __repr__(self):
return '<GoBase(filehash = %s, fileformat = %s, filesource = %s, rawfilepath = %s, ' \
'size = %s, rule = %s, komi = %s, result = %s, handicap=%s)>' \
% \
               (self.filehash, self.fileformat, self.filesource,
self.rawfilepath, self.size, self.rule, self.komi, self.result, self.handicap)
def __str__(self):
return self.__repr__()
| gpl-3.0 | -4,680,477,335,273,773,000 | 38.538462 | 94 | 0.631323 | false | 3.438127 | false | false | false |
nguyenngochuy91/Ancestral-Blocks-Reconstruction | get_result.py | 1 | 3339 | #!/usr/bin/python
from homolog4 import Homolog
import os
import argparse
def parser_code():
parser = argparse.ArgumentParser()
parser.add_argument("--input","-i", default="./optimized_gene_block/",help="optimized gene block ")
parser.add_argument("--gene_name", "-g", default='gene_block_names_and_genes.txt',
help="the gene_block_names_and_genes that stores the name of the operon and its genes")
parser.add_argument("--output","-o", default="result/",
help="where the result be stored (result/)")
parser.add_argument("-a", "--accession", default='tree/accession_to_common.csv',
help="Filter file, default as potential file, if NONE then not doing the parse filter")
return parser.parse_args()
def get_accession(accession):
dict = {}
for line in open(accession,'r').readlines():
line = line.strip().split(',')
dict[line[0]]= line[1]
return dict
## parse the gene block names and genes txt file
def parse(operon_genes_dict):
result = {}
infile = open(operon_genes_dict,'r')
for line in infile.readlines():
line = line.strip().split()
result[line[0]] = line[1:]
return result
## Traverses the genome information directory
def traverseAll(path):
res=[]
for root,dirs,files in os.walk(path):
for f in files:
res.append(root+'/'+f)
return res
## given an operon file (astCADBE.txt), format the info into a format that is easier to read
def formatOperon(operon,output,operon_genes_dict,accession_dict):
alphabet = 'abcdefghijklmnop'
operon_name = operon.split('/')[-1].split('.')[0]
genes = sorted(operon_genes_dict[operon_name])
outfile = open(output+operon_name,'w')
for i in range(len(genes)):
outfile.write(genes[i]+','+alphabet[i]+'\t')
outfile.write('\n')
result = {}
for line in [i.strip() for i in open(operon).readlines()]:
hlog = Homolog.from_blast(line)
accession = hlog.accession()[:-2]
start = str(hlog.start())
end = str(hlog.stop())
strand = str(hlog.strand())
gene_name = hlog.blast_annotation()
if accession in result:
result[accession].append([gene_name, start, end, strand])
else:
result[accession]=[[gene_name, start, end, strand]]
for species in accession_dict:
outfile.write(species+':')
if species in result:
for item in result[species]:
outfile.write(','.join(item)+'\t')
outfile.write('\n')
outfile.close()
if __name__ == "__main__":
args = parser_code()
input = args.input
result = args.output
accession = args.accession
operon_genes_dict = args.gene_name
operon_genes_dict = parse(operon_genes_dict)
accession_dict = get_accession(accession)
try:
os.makedirs(result)
except:
print ("Result dic already exists")
# goes through all the file name in the optimized_gene_block dic
res = traverseAll(input)
for file in res:
formatOperon(file,result,operon_genes_dict,accession_dict)
| gpl-3.0 | -7,847,072,557,768,804,000 | 33.791667 | 115 | 0.583708 | false | 3.697674 | false | false | false |
astrobin/astrobin | astrobin_apps_images/migrations/0013_add_regular_large_fields.py | 1 | 1337 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-04-18 20:57
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('astrobin_apps_images', '0012_rename_regular_crop_anonymized_to_story_crop'),
]
operations = [
migrations.AddField(
model_name='thumbnailgroup',
name='regular_large',
field=models.CharField(blank=True, max_length=512, null=True),
),
migrations.AddField(
model_name='thumbnailgroup',
name='regular_large_anonymized',
field=models.CharField(blank=True, max_length=512, null=True),
),
migrations.AddField(
model_name='thumbnailgroup',
name='regular_large_inverted',
field=models.CharField(blank=True, max_length=512, null=True),
),
migrations.AddField(
model_name='thumbnailgroup',
name='regular_large_sharpened',
field=models.CharField(blank=True, max_length=512, null=True),
),
migrations.AddField(
model_name='thumbnailgroup',
name='regular_large_sharpened_inverted',
field=models.CharField(blank=True, max_length=512, null=True),
),
]
| agpl-3.0 | -6,564,447,764,618,918,000 | 32.425 | 86 | 0.598355 | false | 4.051515 | false | false | false |
lowinger42/ergotime | client/util.py | 1 | 2814 | #!/usr/bin/env python3
"""
Database utilities
Copyright (C) 2020 Anders Lowinger, [email protected]
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
import sys
import PyQt5.QtWidgets as QtWidgets
from logger import log
from settings import sett
import resource
import lib.db as db
def createQApplication():
app = QtWidgets.QApplication(sys.argv)
app.setQuitOnLastWindowClosed(False)
app.setOrganizationName("Abundo AB")
app.setOrganizationDomain("abundo.se")
app.setApplicationName("ErgoTime")
return app
def openLocalDatabase2(dbname=None):
dbconf = {"name": sett.localDatabaseName}
conn = db.Database(dbconf, driver="sqlite")
conn.connect()
log.info(f"Open local database {dbconf}")
sql = "CREATE TABLE IF NOT EXISTS report ("
sql += " _id INTEGER PRIMARY KEY, "
sql += " user_id INT NOT NULL default -1, "
sql += " activityid INT NOT NULL default -1, "
sql += " start TIMESTAMP NOT NULL, "
sql += " stop TIMESTAMP NOT NULL, "
sql += " comment TEXT NOT NULL default '', "
sql += " modified TIMESTAMP NOT NULL, "
sql += " seq INT NOT NULL default -1, "
sql += " deleted INT NOT NULL default 0, "
sql += " server_id INT NOT NULL default -1, "
sql += " updated INT NOT NULL default -1 "
sql += ");"
conn.execute(sql)
sql = "CREATE TABLE IF NOT EXISTS activity ("
sql += " _id INTEGER PRIMARY KEY, "
sql += " name TEXT NOT NULL default '', "
sql += " description TEXT NOT NULL default '', "
sql += " project_id INT NOT NULL default -1, "
sql += " active INT NOT NULL default 0, "
sql += " server_id INT NOT NULL default -1 "
sql += ");"
conn.execute(sql)
sql = "CREATE TABLE IF NOT EXISTS project ("
sql += " _id INTEGER PRIMARY KEY, "
sql += " activity_id INT NOT NULL default -1, "
sql += " name TEXT NOT NULL default '', "
sql += " costcenter TEXT NOT NULL default '', "
sql += " active INT NOT NULL default 0 "
sql += ");"
conn.execute(sql)
return conn
if __name__ == "__main__":
openLocalDatabase2("c:/temp/ergotime.db")
| mit | 5,375,349,524,726,260,000 | 30.617978 | 70 | 0.634328 | false | 3.722222 | false | false | false |
EuropeanSocialInnovationDatabase/ESID-main | TextMining/Classifiers/Trainers/RuleBasedInnovativeness.py | 1 | 21912 | from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn import metrics
from sklearn.pipeline import Pipeline
import numpy as np
import pandas as pd
import re
from os import listdir
from os.path import join,isdir
from sklearn.utils import resample
from sklearn.model_selection import cross_val_score
import pickle
from sklearn.utils import resample
class DataSet:
Annotators = []
def __init__(self):
self.Annotators = []
class Annotator:
files = []
documents = []
Name = ""
def __init__(self):
self.files = []
self.documents = []
self.Name = ""
class Document:
Lines = []
DocumentName = ""
DatabaseID = ""
Annotations = []
Text = ""
isSpam = False
Project_Mark_Objective_1A = 0
Project_Mark_Objective_1B = 0
Project_Mark_Objective_1C = 0
Project_Mark_Actors_2A = 0
Project_Mark_Actors_2B = 0
Project_Mark_Actors_2C = 0
Project_Mark_Outputs_3A = 0
Project_Mark_Innovativeness_3A = 0
isProjectObjectiveSatisfied = False
isProjectActorSatisfied = False
isProjectOutputSatisfied = False
isProjectInnovativenessSatisfied = False
isProjectObjectiveSatisfied_predicted = False
isProjectActorSatisfied_predicted = False
isProjectOutputSatisfied_predicted = False
isProjectInnovativenessSatisfied_predicted = False
def __init__(self):
self.Text = ""
self.Lines = []
self.DocumentName = ""
self.DatabaseID = ""
self.Annotations = []
self.isSpam = False
self.Project_Mark_Objective_1A = 0
self.Project_Mark_Objective_1B = 0
self.Project_Mark_Objective_1C = 0
self.Project_Mark_Actors_2A = 0
self.Project_Mark_Actors_2B = 0
self.Project_Mark_Actors_2C = 0
self.Project_Mark_Outputs_3A = 0
self.Project_Mark_Innovativeness_3A = 0
self.isProjectObjectiveSatisfied = False
self.isProjectActorSatisfied = False
self.isProjectOutputSatisfied = False
self.isProjectInnovativenessSatisfied = False
self.isProjectObjectiveSatisfied_predicted = False
self.isProjectActorSatisfied_predicted = False
self.isProjectOutputSatisfied_predicted = False
self.isProjectInnovativenessSatisfied_predicted = False
class Line:
StartSpan = 0
EndSpan = 0
Text = ""
Sentences = []
Tokens = []
Annotations = []
def __init__(self):
self.StartSpan = 0
self.EndSpan = 0
self.Text = ""
self.Sentences = []
self.Tokens = []
self.Annotations = []
class Sentence:
SentenceText = ""
StartSpan = -1
EndSpan = -1
Annotations = []
def __init__(self):
self.SentenceText = ""
self.StartSpan = -1
self.EndSpan = -1
self.Annotations = []
class Annotation:
FromFile = ""
FromAnnotator = ""
AnnotationText = ""
StartSpan = -1
EndSpan = -1
HighLevelClass = ""
LowLevelClass = ""
data_folder = "../../../Helpers/FullDataset_Alina/"
ds = DataSet()
total_num_spam = 0
sentences = []
total_num_files = 0
# job = aetros.backend.start_job('nikolamilosevic86/GloveModel')
annotators = [f for f in listdir(data_folder) if isdir(join(data_folder, f))]
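# Each annotator folder holds .txt documents plus matching .ann files, which appear to
# follow the brat standoff convention: "T<n>" lines carry text-bound annotations as
# "<label> <start> <end>\t<text>", other lines carry document-level marks with a numeric score.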
for ann in annotators:
folder = data_folder + "/" + ann
Annot = Annotator()
Annot.Name = ann
ds.Annotators.append(Annot)
onlyfiles = [f for f in listdir(folder) if (f.endswith(".txt"))]
for file in onlyfiles:
Annot.files.append(data_folder + "/" + ann + '/' + file)
doc = Document()
total_num_files = total_num_files + 1
doc.Lines = []
# doc.Annotations = []
doc.DocumentName = file
Annot.documents.append(doc)
if (file.startswith('a') or file.startswith('t')):
continue
print file
doc.DatabaseID = file.split("_")[1].split(".")[0]
fl = open(data_folder + "/" + ann + '/' + file, 'r')
content = fl.read()
doc.Text = content
lines = content.split('\n')
line_index = 0
for line in lines:
l = Line()
l.StartSpan = line_index
l.EndSpan = line_index + len(line)
l.Text = line
line_index = line_index + len(line) + 1
sentences.append(line)
doc.Lines.append(l)
an = open(data_folder + "/" + ann + '/' + file.replace(".txt", ".ann"), 'r')
annotations = an.readlines()
for a in annotations:
a = re.sub(r'\d+;\d+', '', a).replace(' ', ' ')
split_ann = a.split('\t')
if (split_ann[0].startswith("T")):
id = split_ann[0]
sp_split_ann = split_ann[1].split(' ')
low_level_ann = sp_split_ann[0]
if low_level_ann == "ProjectMark":
continue
span_start = sp_split_ann[1]
span_end = sp_split_ann[2]
ann_text = split_ann[2]
Ann = Annotation()
Ann.AnnotationText = ann_text
Ann.StartSpan = int(span_start)
Ann.EndSpan = int(span_end)
Ann.FromAnnotator = Annot.Name
Ann.FromFile = file
Ann.LowLevelClass = low_level_ann
if (low_level_ann == "SL_Outputs_3a"):
Ann.HighLevelClass = "Outputs"
if (
low_level_ann == "SL_Objective_1a" or low_level_ann == "SL_Objective_1b" or low_level_ann == "SL_Objective_1c"):
Ann.HighLevelClass = "Objectives"
if (
low_level_ann == "SL_Actors_2a" or low_level_ann == "SL_Actors_2b" or low_level_ann == "SL_Actors_2c"):
Ann.HighLevelClass = "Actors"
if (low_level_ann == "SL_Innovativeness_4a"):
Ann.HighLevelClass = "Innovativeness"
doc.Annotations.append(Ann)
for line in doc.Lines:
if line.StartSpan <= Ann.StartSpan and line.EndSpan >= Ann.EndSpan:
line.Annotations.append(Ann)
else:
id = split_ann[0]
sp_split_ann = split_ann[1].split(' ')
mark_name = sp_split_ann[0]
if (len(sp_split_ann) <= 2):
continue
mark = sp_split_ann[2].replace('\n', '')
if (mark_name == "DL_Outputs_3a"):
doc.Project_Mark_Outputs_3A = int(mark)
if int(mark) >= 1:
doc.isProjectOutputSatisfied = True
if (mark_name == "DL_Objective_1a"):
doc.Project_Mark_Objective_1A = int(mark)
if int(mark) >= 1:
doc.isProjectObjectiveSatisfied = True
if (mark_name == "DL_Objective_1b" or mark_name == "DL_Objective"):
doc.Project_Mark_Objective_1B = int(mark)
if int(mark) >= 1:
doc.isProjectObjectiveSatisfied = True
if (mark_name == "DL_Objective_1c"):
doc.Project_Mark_Objective_1C = int(mark)
if int(mark) >= 1:
doc.isProjectObjectiveSatisfied = True
if (mark_name == "DL_Innovativeness_4a" or mark_name=="DL_Innovativeness"):
doc.Project_Mark_Innovativeness_3A = int(mark)
if int(mark) >= 1:
doc.isProjectInnovativenessSatisfied = True
if (mark_name == "DL_Actors_2a" or mark_name=="DL_Actors"):
doc.Project_Mark_Actors_2A = int(mark)
if int(mark) >= 1:
doc.isProjectActorSatisfied = True
if (mark_name == "DL_Actors_2b"):
doc.Project_Mark_Actors_2B = int(mark)
if int(mark) >= 1:
doc.isProjectActorSatisfied = True
if (mark_name == "DL_Actors_2c"):
doc.Project_Mark_Actors_2C = int(mark)
if int(mark) >= 1:
doc.isProjectActorSatisfied = True
if (
doc.Project_Mark_Objective_1A == 0 and doc.Project_Mark_Objective_1B == 0 and doc.Project_Mark_Objective_1C == 0 and doc.Project_Mark_Actors_2A == 0
and doc.Project_Mark_Actors_2B == 0 and doc.Project_Mark_Actors_2C == 0 and doc.Project_Mark_Outputs_3A == 0
and doc.Project_Mark_Innovativeness_3A == 0):
doc.isSpam = True
total_num_spam = total_num_spam + 1
i = 0
j = i + 1
kappa_files = 0
done_documents = []
num_overlap_spam = 0
num_spam = 0
total_objectives = 0
total_outputs = 0
total_actors = 0
total_innovativeness = 0
ann1_annotations_objectives = []
ann2_annotations_objectives = []
ann1_annotations_actors = []
ann2_annotations_actors = []
ann1_annotations_outputs = []
ann2_annotations_outputs = []
ann1_annotations_innovativeness = []
ann2_annotations_innovativeness = []
match_objectives = 0
match_outputs = 0
match_actors = 0
match_innovativeness = 0
while i < len(ds.Annotators) - 1:
while j < len(ds.Annotators):
annotator1 = ds.Annotators[i]
annotator2 = ds.Annotators[j]
for doc1 in annotator1.documents:
for doc2 in annotator2.documents:
if doc1.DocumentName == doc2.DocumentName and doc1.DocumentName not in done_documents:
done_documents.append(doc1.DocumentName)
line_num = 0
ann1_objective = [0] * len(doc1.Lines)
ann2_objective = [0] * len(doc2.Lines)
ann1_output = [0] * len(doc1.Lines)
ann2_output = [0] * len(doc2.Lines)
ann1_actor = [0] * len(doc1.Lines)
ann2_actor = [0] * len(doc2.Lines)
ann1_innovativeness = [0] * len(doc1.Lines)
ann2_innovativeness = [0] * len(doc2.Lines)
while line_num < len(doc1.Lines):
if len(doc1.Lines[line_num].Annotations) > 0:
for a in doc1.Lines[line_num].Annotations:
if a.HighLevelClass == "Objectives":
ann1_objective[line_num] = 1
total_objectives = total_objectives + 1
if a.HighLevelClass == "Outputs":
ann1_output[line_num] = 1
total_outputs = total_outputs + 1
if a.HighLevelClass == "Actors":
ann1_actor[line_num] = 1
total_actors = total_actors + 1
if a.HighLevelClass == "Innovativeness":
ann1_innovativeness[line_num] = 1
total_innovativeness = total_innovativeness + 1
for a1 in doc2.Lines[line_num].Annotations:
if a1.HighLevelClass == a.HighLevelClass:
if a1.HighLevelClass == "Objectives":
match_objectives = match_objectives + 1
if a1.HighLevelClass == "Outputs":
match_outputs = match_outputs + 1
if a1.HighLevelClass == "Actors":
match_actors = match_actors + 1
if a1.HighLevelClass == "Innovativeness":
match_innovativeness = match_innovativeness + 1
if len(doc2.Lines[line_num].Annotations) > 0:
for a in doc2.Lines[line_num].Annotations:
if a.HighLevelClass == "Objectives":
ann2_objective[line_num] = 1
total_objectives = total_objectives + 1
if a.HighLevelClass == "Outputs":
ann2_output[line_num] = 1
total_outputs = total_outputs + 1
if a.HighLevelClass == "Actors":
ann2_actor[line_num] = 1
total_actors = total_actors + 1
if a.HighLevelClass == "Innovativeness":
ann2_innovativeness[line_num] = 1
total_innovativeness = total_innovativeness + 1
line_num = line_num + 1
ann1_annotations_outputs.extend(ann1_output)
ann2_annotations_outputs.extend(ann2_output)
ann1_annotations_objectives.extend(ann1_objective)
ann2_annotations_objectives.extend(ann2_objective)
ann1_annotations_actors.extend(ann1_actor)
ann2_annotations_actors.extend(ann2_actor)
ann1_annotations_innovativeness.extend(ann1_innovativeness)
ann2_annotations_innovativeness.extend(ann2_innovativeness)
print "Statistics for document:" + doc1.DocumentName
print "Annotators " + annotator1.Name + " and " + annotator2.Name
print "Spam by " + annotator1.Name + ":" + str(doc1.isSpam)
print "Spam by " + annotator2.Name + ":" + str(doc2.isSpam)
if (doc1.isSpam == doc2.isSpam):
num_overlap_spam = num_overlap_spam + 1
if doc1.isSpam:
num_spam = num_spam + 1
if doc2.isSpam:
num_spam = num_spam + 1
kappa_files = kappa_files + 1
j = j + 1
i = i + 1
j = i + 1
print annotators
doc_array = []
text_array = []
objectives = []
actors = []
outputs = []
innovativeness = []
for ann in ds.Annotators:
for doc in ann.documents:
doc_array.append(
[doc.Text, doc.isProjectObjectiveSatisfied, doc.isProjectActorSatisfied, doc.isProjectOutputSatisfied,
doc.isProjectInnovativenessSatisfied])
objectives.append(doc.isProjectObjectiveSatisfied)
actors.append(doc.isProjectActorSatisfied)
outputs.append(doc.isProjectOutputSatisfied)
innovativeness.append(doc.isProjectInnovativenessSatisfied)
text_array.append(doc.Text)
df = pd.DataFrame({'text':text_array,'classa':innovativeness})
df_majority = df[df.classa==0]
df_minority = df[df.classa==1]
df_minority_upsampled = resample(df_minority,
replace=True, # sample with replacement
n_samples=160, # to match majority class
random_state=83293) # reproducible results
df_upsampled = pd.concat([df_majority, df_minority_upsampled])
# Display new class counts
print df_upsampled.classa.value_counts()
TP = 0
FP = 0
FN = 0
classes = df_upsampled.classa
i = 0
innovative_1 = 0
innovative_2 = 0
innovative_3 = 0
for sample in doc_array:
if "innovation" in sample[0] or "innovative" in sample[0] or "novelty" in sample[0]:
innovative_1 = innovative_1 + 1
if sample[4] == True:
TP = TP+1
if sample[4] == False:
FP = FP+1
else:
if sample[4]==True:
FN = FN + 1
i = i + 1
precision = float(TP)/float(TP+FP)
recall = float(TP)/float(TP+FN)
f_score = 2*precision*recall/(precision+recall)
print "Innovation rule classifier"
print "False positives:"+str(FP)
print "False negatives:"+str(FN)
print "True positive:"+str(TP)
print "Precision: "+str(precision)
print "Recall: "+str(recall)
print "F1-score: "+str(f_score)
TP = 0
FP = 0
FN = 0
i = 0
for sample in doc_array:
if ("new" in sample[0] or "novel" in sample[0] or "alternative" in sample[0] or "improved" in sample[0] or "cutting edge" in sample[0] or "better" in sample[0])\
and ("method" in sample[0] or "product" in sample[0] or "service" in sample[0] or "application" in sample[0] or "technology" in sample[0] or "practice" in sample[0]):
innovative_2 = innovative_2 +1
if sample[4] == True:
TP = TP+1
if sample[4] == False:
FP = FP+1
else:
if sample[4]==True:
FN = FN + 1
i = i + 1
precision = float(TP)/float(TP+FP)
recall = float(TP)/float(TP+FN)
f_score = 2*precision*recall/(precision+recall)
print "Other rule classifier"
print "False positives:"+str(FP)
print "False negatives:"+str(FN)
print "True positive:"+str(TP)
print "Precision: "+str(precision)
print "Recall: "+str(recall)
print "F1-score: "+str(f_score)
TP = 0
FP = 0
FN = 0
i = 0
for sample in doc_array:
isInnovative = False
if ("method" in sample[0] or "product" in sample[0] or "service" in sample[0] or "application" in sample[0] or "technology" in sample[0] or "practice" in sample[0]):
list_items = ["method","product","service","application","technology","practice"]
index_list = []
for item in list_items:
indexes = [m.start() for m in re.finditer(item, sample[0])]
index_list.extend(indexes)
for index in index_list:
end = len(sample[0])
start = 0
if index - 500>0:
start = index - 500
if index + 500<len(sample[0]):
end = index + 500
substr = sample[0][start:end]
if ("new" in substr or "novel" in substr or "alternative" in substr or "improved" in substr or "cutting edge" in substr or "better" in substr):
isInnovative = True
if isInnovative:
innovative_3 = innovative_3 + 1
if sample[4] == True:
TP = TP+1
if sample[4] == False:
FP = FP+1
else:
if sample[4]==True:
FN = FN + 1
precision = float(TP)/float(TP+FP)
recall = float(TP)/float(TP+FN)
f_score = 2*precision*recall/(precision+recall)
print "Third rule classifier"
print "False positives:"+str(FP)
print "False negatives:"+str(FN)
print "True positive:"+str(TP)
print "Precision: "+str(precision)
print "Recall: "+str(recall)
print "F1-score: "+str(f_score)
TP = 0
FP = 0
FN = 0
i = 0
innovative_4 = 0
for sample in doc_array:
isInnovative = False
if "innovation" in sample[0] or "innovative" in sample[0] or "novelty" in sample[0]:
isInnovative = True
if ("method" in sample[0] or "product" in sample[0] or "service" in sample[0] or "application" in sample[0] or "technology" in sample[0] or "practice" in sample[0]):
list_items = ["method","product","service","application","technology","practice"]
index_list = []
for item in list_items:
indexes = [m.start() for m in re.finditer(item, sample[0])]
index_list.extend(indexes)
for index in index_list:
end = len(sample[0])
start = 0
if index - 500>0:
start = index - 500
if index + 500<len(sample[0]):
end = index + 500
substr = sample[0][start:end]
if ("new" in substr or "novel" in substr or "alternative" in substr or "improved" in substr or "cutting edge" in substr or "better" in substr):
isInnovative = True
if isInnovative:
innovative_4 = innovative_4 + 1
if sample[4] == True:
TP = TP+1
if sample[4] == False:
FP = FP+1
else:
if sample[4]==True:
FN = FN + 1
print ""
print "Innovative 1:"+str(innovative_1)
print "Innovative 2:"+str(innovative_2)
print "Innovative 3:"+str(innovative_3)
print "Innovative 4 (1+3):"+str(innovative_4)
#scores = cross_val_score(text_clf, df_upsampled.text, df_upsampled.classa, cv=10,scoring='f1')
# train = text_array[0:int(0.8*len(text_array))]
# train_Y = innovativeness[0:int(0.8*len(actors))]
#
# test = text_array[int(0.8*len(text_array)):]
# test_Y = innovativeness[int(0.8*len(actors)):]
#
# #categories = ['non actor', 'actor']
#
# text_clf = Pipeline([('vect', CountVectorizer()),
# ('tfidf', TfidfTransformer()),
# ('clf', MultinomialNB()),
# ])
#
# scores = cross_val_score(text_clf, df_upsampled.text, df_upsampled.classa, cv=10,scoring='f1')
# final = 0
# for score in scores:
# final = final + score
# print scores
# print "Final:" + str(final/10)
# text_clf.fit(train,train_Y)
#
# TP = 0
# FP = 0
# FN = 0
# i = 0
# outcome = text_clf.predict(test)
# for i in range(0,len(test)):
# if test_Y[i] == True and outcome[i] == True:
# TP = TP+1
# if test_Y[i] == False and outcome[i]==True:
# FP = FP+1
# if test_Y[i]==True and outputs[i]==False:
# FN = FN + 1
# i = i + 1
# precision = float(TP)/float(TP+FP)
# recall = float(TP)/float(TP+FN)
# f_score = 2*precision*recall/(precision+recall)
# print "ML based rule classifier"
# print "False positives:"+str(FP)
# print "False negatives:"+str(FN)
# print "True positive:"+str(TP)
# print "Precision: "+str(precision)
# print "Recall: "+str(recall)
# print "F1-score: "+str(f_score)
| gpl-3.0 | -1,968,229,541,548,031,700 | 37.240838 | 178 | 0.54404 | false | 3.525664 | true | false | false |
spaceone/pyjs | pygtkweb/demos/012-label.py | 6 | 3787 | #!/usr/bin/env python
# example label.py
import pygtk
pygtk.require('2.0')
import gtk
class Labels:
def __init__(self):
self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
self.window.connect("destroy", lambda w: gtk.main_quit())
self.window.set_title("Label")
vbox = gtk.VBox(False, 5)
hbox = gtk.HBox(False, 5)
self.window.add(hbox)
hbox.pack_start(vbox, False, False, 0)
self.window.set_border_width(5)
frame = gtk.Frame("Normal Label")
label = gtk.Label("This is a Normal label")
frame.add(label)
vbox.pack_start(frame, False, False, 0)
frame = gtk.Frame("Multi-line Label")
label = gtk.Label("This is a Multi-line label.\nSecond line\n"
"Third line")
frame.add(label)
vbox.pack_start(frame, False, False, 0)
frame = gtk.Frame("Left Justified Label")
label = gtk.Label("This is a Left-Justified\n"
"Multi-line label.\nThird line")
label.set_justify(gtk.JUSTIFY_LEFT)
frame.add(label)
vbox.pack_start(frame, False, False, 0)
frame = gtk.Frame("Right Justified Label")
label = gtk.Label("This is a Right-Justified\nMulti-line label.\n"
"Fourth line, (j/k)")
label.set_justify(gtk.JUSTIFY_RIGHT)
frame.add(label)
vbox.pack_start(frame, False, False, 0)
vbox = gtk.VBox(False, 5)
hbox.pack_start(vbox, False, False, 0)
frame = gtk.Frame("Line wrapped label")
label = gtk.Label("This is an example of a line-wrapped label. It "
"should not be taking up the entire "
"width allocated to it, but automatically "
"wraps the words to fit. "
"The time has come, for all good men, to come to "
"the aid of their party. "
"The sixth sheik's six sheep's sick.\n"
" It supports multiple paragraphs correctly, "
"and correctly adds "
"many extra spaces. ")
label.set_line_wrap(True)
frame.add(label)
vbox.pack_start(frame, False, False, 0)
frame = gtk.Frame("Filled, wrapped label")
label = gtk.Label("This is an example of a line-wrapped, filled label. "
"It should be taking "
"up the entire width allocated to it. "
"Here is a sentence to prove "
"my point. Here is another sentence. "
"Here comes the sun, do de do de do.\n"
" This is a new paragraph.\n"
" This is another newer, longer, better "
"paragraph. It is coming to an end, "
"unfortunately.")
label.set_justify(gtk.JUSTIFY_FILL)
label.set_line_wrap(True)
frame.add(label)
vbox.pack_start(frame, False, False, 0)
frame = gtk.Frame("Underlined label")
label = gtk.Label("This label is underlined!\n"
"This one is underlined in quite a funky fashion")
label.set_justify(gtk.JUSTIFY_LEFT)
label.set_pattern(
"_________________________ _ _________ _ ______ __ _______ ___")
frame.add(label)
vbox.pack_start(frame, False, False, 0)
self.window.show_all ()
def main():
gtk.main()
return 0
if __name__ == "__main__":
Labels()
main()
| apache-2.0 | 7,959,622,732,269,321,000 | 38.863158 | 82 | 0.496963 | false | 4.033014 | false | false | false |
IvanaXu/Test_Class_GOF | tPatterns/J2EEDesign_Patterns/test_Business_Delegate_Pattern.py | 1 | 1787 | # -*-coding:utf-8-*-
# @auth ivan
# @time 2016-10-26 19:01:45
# @goal test for Business Delegate Pattern
class BusinessService:
def __init__(self):
return
def doProcessing(self):
return
class EJBService(BusinessService):
def __init__(self):
BusinessService.__init__(self)
return
def doProcessing(self):
print("Processing task by invoking EJB Service")
class JMSService(BusinessService):
def __init__(self):
BusinessService.__init__(self)
return
def doProcessing(self):
print("Processing task by invoking JMS Service")
class BusinessLookUp:
def getBusinessService(self, serviceType):
if serviceType.upper() == 'EJB':
return EJBService()
else:
return JMSService()
class BusinessDelegate:
def __init__(self):
self.lookupService = BusinessLookUp()
self.businessService = None
self.serviceType = ''
def setServiceType(self, serviceType):
self.serviceType = serviceType
def doTask(self):
self.businessService = self.lookupService.getBusinessService(self.serviceType)
self.businessService.doProcessing()
class Client:
def __init__(self, businessService):
self.businessService = businessService
def doTask(self):
self.businessService.doTask()
class BusinessDelegatePatternDemo:
def __init__(self):
self.businessDelegate = None
def run(self):
self.businessDelegate = BusinessDelegate()
client = Client(self.businessDelegate)
self.businessDelegate.setServiceType("EJB")
client.doTask()
self.businessDelegate.setServiceType("JMS")
client.doTask()
B = BusinessDelegatePatternDemo()
B.run()
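# Expected console output of the demo above:
#   Processing task by invoking EJB Service
#   Processing task by invoking JMS Service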
| gpl-3.0 | -2,944,724,611,565,286,400 | 20.792683 | 86 | 0.646894 | false | 4.070615 | false | false | false |
mtasic85/routingtable | contact_list.py | 1 | 2632 | __all__ = ['ContactList']
import random
from contact import Contact
from print_colors import PrintColors
class ContactList(object):
def __init__(self):
self.items = []
self.items_id_map = {}
self.items_raddr_map = {}
def __len__(self):
return len(self.items)
def __iter__(self):
return iter(self.items)
def add(self, c):
if c.id is None and not c.bootstrap:
raise ValueError('Contact id cannot be None if it is not a bootstrap node')
if c.id is None and c.id in self.items_id_map:
raise ValueError('Bootstrap contact with id=None is already known')
self.items.append(c)
self.items_id_map[c.id] = c
self.items_raddr_map[c.remote_host, c.remote_port] = c
return c
def get(self, id_or_remote_address_or_idx):
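# Accepts three key types: a contact id (str/bytes), a (remote_host, remote_port)
# tuple or list, or an integer index into the list; returns None when nothing matches.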
c = None
if isinstance(id_or_remote_address_or_idx, (str, bytes)):
c_id = id_or_remote_address_or_idx
try:
c = self.items_id_map[c_id]
except KeyError as e:
pass
elif isinstance(id_or_remote_address_or_idx, (tuple, list)):
remote_host, remote_port = id_or_remote_address_or_idx
try:
c = self.items_raddr_map[remote_host, remote_port]
except KeyError as e:
pass
elif isinstance(id_or_remote_address_or_idx, int):
i = id_or_remote_address_or_idx
try:
c = self.items[i]
except IndexError as e:
pass
return c
def remove(self, c_or_id):
c = None
if isinstance(c_or_id, Contact):
c = c_or_id
self.items.remove(c)
del self.items_id_map[c.id]
del self.items_raddr_map[c.remote_host, c.remote_port]
else:
c_id = c_or_id
c = self.items_id_map.pop(c_id)
self.items.remove(c)
del self.items_raddr_map[c.remote_host, c.remote_port]
return c
def random(self, without_id=None):
if not len(self.items):
return None
# pick a random contact; reject it if it matches without_id
i = random.randint(0, len(self.items) - 1)
c = self.items[i]
if c.id == without_id:
return None
return c
def all(self, version=0, max_old=None):
contacts = []
for c in self.items:
if c.bootstrap:
contacts.append(c)
continue
# FIXME: use version and max_old
contacts.append(c)
return contacts
| mit | 3,009,064,387,989,034,000 | 25.585859 | 87 | 0.526216 | false | 3.675978 | false | false | false |
Clear-ICT/odoo-addons | account_petty_cash/wizard/issue_voucher.py | 1 | 3536 | # -*- coding: utf-8 -*-
#
#
# Copyright (c) 2016 Sucros Clear Information Technologies PLC.
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from datetime import datetime
from openerp import api, fields, models
from openerp.addons.decimal_precision import decimal_precision as dp
from openerp.tools.translate import _
class IssueVoucherWizard(models.TransientModel):
_name = 'account.pettycash.fund.voucher'
_description = 'Petty Cash Fund Issue Voucher Wizard'
@api.model
def _get_fund(self):
fund_id = self.env.context.get('active_id', False)
return fund_id
# Fields
#
fund = fields.Many2one('account.pettycash.fund', required=True,
default=_get_fund)
date = fields.Date(required=True, default=datetime.today().date())
partner = fields.Many2one('res.partner')
lines = fields.One2many('account.pettycash.fund.voucher.line', 'wizard')
voucher = fields.Many2one('account.voucher')
@api.multi
def create_voucher(self):
Vouchers = self.env['account.voucher']
for wiz in self:
lines = []
total_lines = 0.0
for line in wiz.lines:
line_vals = {
'name': line.memo,
'type': 'dr',
'account_id': line.expense_account.id,
'amount': line.amount,
}
lines.append((0, 0, line_vals))
total_lines += line.amount
voucher_vals = {
'name': _('Petty Cash Expenditure %s' % (wiz.date)),
'journal_id': wiz.fund.journal.id,
'account_id': wiz.fund.journal.default_credit_account_id.id,
'amount': total_lines,
'petty_cash_fund': wiz.fund.id,
'partner_id': wiz.partner.id,
'date': wiz.date,
'type': 'payment',
'audit': True,
}
onchange_res = Vouchers.onchange_journal(
wiz.fund.journal.id, [], False, wiz.partner.id, wiz.date,
total_lines, 'payment', False)
voucher_vals.update(onchange_res['value'])
voucher_vals.update({'line_dr_ids': lines})
wiz.voucher = Vouchers.create(voucher_vals)
return
class IssueVoucherWizardLine(models.TransientModel):
_name = 'account.pettycash.fund.voucher.line'
_description = 'Petty Cash Fund Issue Voucher Wizard Line'
# Fields
#
wizard = fields.Many2one('account.pettycash.fund.voucher')
expense_account = fields.Many2one(
'account.account', required=True,
domain=[('type', '=', 'other'), ('user_type.code', '=', 'expense')])
amount = fields.Float(digits_compute=dp.get_precision('Product Price'),
required=True)
memo = fields.Char()
| agpl-3.0 | -2,242,741,917,543,163,600 | 34.36 | 77 | 0.598133 | false | 3.877193 | false | false | false |
autosportlabs/kivy | kivy/base.py | 9 | 16223 | # pylint: disable=W0611
'''
Kivy Base
=========
This module contains core Kivy functionality and is not intended for end users.
Feel free to look though it, but calling any of these methods directly may well
result in unpredictable behavior.
Event loop management
---------------------
'''
__all__ = (
'EventLoop',
'EventLoopBase',
'ExceptionHandler',
'ExceptionManagerBase',
'ExceptionManager',
'runTouchApp',
'stopTouchApp',
)
import sys
from kivy.config import Config
from kivy.logger import Logger
from kivy.utils import platform
from kivy.clock import Clock
from kivy.event import EventDispatcher
from kivy.lang import Builder
from kivy.context import register_context
# private vars
EventLoop = None
class ExceptionHandler(object):
'''Base handler that catches exceptions in :func:`runTouchApp`.
You can subclass and extend it as follows::
class E(ExceptionHandler):
def handle_exception(self, inst):
Logger.exception('Exception caught by ExceptionHandler')
return ExceptionManager.PASS
ExceptionManager.add_handler(E())
All exceptions will be set to PASS, and logged to the console!
'''
def __init__(self):
pass
def handle_exception(self, exception):
'''Handle one exception, defaults to returning
ExceptionManager.RAISE.
'''
return ExceptionManager.RAISE
class ExceptionManagerBase:
'''ExceptionManager manages exceptions handlers.'''
RAISE = 0
PASS = 1
def __init__(self):
self.handlers = []
self.policy = ExceptionManagerBase.RAISE
def add_handler(self, cls):
'''Add a new exception handler to the stack.'''
if cls not in self.handlers:
self.handlers.append(cls)
def remove_handler(self, cls):
'''Remove a exception handler from the stack.'''
if cls in self.handlers:
self.handlers.remove(cls)
def handle_exception(self, inst):
'''Called when an exception occurred in the runTouchApp() main loop.'''
ret = self.policy
for handler in self.handlers:
r = handler.handle_exception(inst)
if r == ExceptionManagerBase.PASS:
ret = r
return ret
#: Instance of a :class:`ExceptionManagerBase` implementation.
ExceptionManager = register_context('ExceptionManager', ExceptionManagerBase)
class EventLoopBase(EventDispatcher):
'''Main event loop. This loop handles the updating of input and
dispatching events.
'''
__events__ = ('on_start', 'on_pause', 'on_stop')
def __init__(self):
super(EventLoopBase, self).__init__()
self.quit = False
self.input_events = []
self.postproc_modules = []
self.status = 'idle'
self.input_providers = []
self.input_providers_autoremove = []
self.event_listeners = []
self.window = None
self.me_list = []
@property
def touches(self):
'''Return the list of all touches currently in down or move states.
'''
return self.me_list
def ensure_window(self):
'''Ensure that we have a window.
'''
import kivy.core.window # NOQA
if not self.window:
Logger.critical('App: Unable to get a Window, abort.')
sys.exit(1)
def set_window(self, window):
'''Set the window used for the event loop.
'''
self.window = window
def add_input_provider(self, provider, auto_remove=False):
'''Add a new input provider to listen for touch events.
'''
if provider not in self.input_providers:
self.input_providers.append(provider)
if auto_remove:
self.input_providers_autoremove.append(provider)
def remove_input_provider(self, provider):
'''Remove an input provider.
'''
if provider in self.input_providers:
self.input_providers.remove(provider)
def add_event_listener(self, listener):
'''Add a new event listener for getting touch events.
'''
if listener not in self.event_listeners:
self.event_listeners.append(listener)
def remove_event_listener(self, listener):
'''Remove an event listener from the list.
'''
if listener in self.event_listeners:
self.event_listeners.remove(listener)
def start(self):
'''Must be called only once before run().
This starts all configured input providers.'''
self.status = 'started'
self.quit = False
for provider in self.input_providers:
provider.start()
self.dispatch('on_start')
def close(self):
'''Exit from the main loop and stop all configured
input providers.'''
self.quit = True
self.stop()
self.status = 'closed'
def stop(self):
'''Stop all input providers and call callbacks registered using
EventLoop.add_stop_callback().'''
# XXX stop in the reverse order that we started them!! (like push/pop),
# very important because e.g. wm_touch and WM_PEN both store the old
# window proc and then restore it; if the order is messed up, big
# problems happen, crashing badly without error
for provider in reversed(self.input_providers[:]):
provider.stop()
if provider in self.input_providers_autoremove:
self.input_providers_autoremove.remove(provider)
self.input_providers.remove(provider)
# ensure any restart will not break anything later.
self.input_events = []
self.status = 'stopped'
self.dispatch('on_stop')
def add_postproc_module(self, mod):
'''Add a postproc input module (DoubleTap, TripleTap, DeJitter
RetainTouch are defaults).'''
if mod not in self.postproc_modules:
self.postproc_modules.append(mod)
def remove_postproc_module(self, mod):
'''Remove a postproc module.'''
if mod in self.postproc_modules:
self.postproc_modules.remove(mod)
def remove_android_splash(self, *args):
'''Remove android presplash in SDL2 bootstrap.'''
try:
from android import remove_presplash
remove_presplash()
except ImportError:
Logger.error(
'Base: Failed to import "android" module. '
'Could not remove android presplash.')
return
def post_dispatch_input(self, etype, me):
'''This function is called by dispatch_input() when we want to dispatch
an input event. The event is dispatched to all listeners and if
grabbed, it's dispatched to grabbed widgets.
'''
# update available list
if etype == 'begin':
self.me_list.append(me)
elif etype == 'end':
if me in self.me_list:
self.me_list.remove(me)
# dispatch to listeners
if not me.grab_exclusive_class:
for listener in self.event_listeners:
listener.dispatch('on_motion', etype, me)
# dispatch grabbed touch
me.grab_state = True
for _wid in me.grab_list[:]:
# it's a weakref, call it!
wid = _wid()
if wid is None:
# object is gone, stop.
me.grab_list.remove(_wid)
continue
root_window = wid.get_root_window()
if wid != root_window and root_window is not None:
me.push()
w, h = root_window.system_size
if platform == 'ios' or root_window._density != 1:
w, h = root_window.size
kheight = root_window.keyboard_height
smode = root_window.softinput_mode
me.scale_for_screen(w, h, rotation=root_window.rotation,
smode=smode, kheight=kheight)
parent = wid.parent
# and do to_local until the widget
try:
if parent:
me.apply_transform_2d(parent.to_widget)
else:
me.apply_transform_2d(wid.to_widget)
me.apply_transform_2d(wid.to_parent)
except AttributeError:
# when using an inner window, an app may have grabbed the
# touch but then been removed. The touch can't access
# one of its parents (i.e. self.parent will be None)
# and BAM, the bug happens.
me.pop()
continue
me.grab_current = wid
wid._context.push()
if etype == 'begin':
# don't dispatch the touch again in on_touch_down;
# a down event is nearly unique here.
# wid.dispatch('on_touch_down', touch)
pass
elif etype == 'update':
if wid._context.sandbox:
with wid._context.sandbox:
wid.dispatch('on_touch_move', me)
else:
wid.dispatch('on_touch_move', me)
elif etype == 'end':
if wid._context.sandbox:
with wid._context.sandbox:
wid.dispatch('on_touch_up', me)
else:
wid.dispatch('on_touch_up', me)
wid._context.pop()
me.grab_current = None
if wid != root_window and root_window is not None:
me.pop()
me.grab_state = False
def _dispatch_input(self, *ev):
# remove the save event for the touch if exist
if ev in self.input_events:
self.input_events.remove(ev)
self.input_events.append(ev)
def dispatch_input(self):
'''Called by idle() to read events from input providers, pass events to
postproc, and dispatch final events.
'''
# first, acquire input events
for provider in self.input_providers:
provider.update(dispatch_fn=self._dispatch_input)
# execute post-processing modules
for mod in self.postproc_modules:
self.input_events = mod.process(events=self.input_events)
# real dispatch input
input_events = self.input_events
pop = input_events.pop
post_dispatch_input = self.post_dispatch_input
while input_events:
post_dispatch_input(*pop(0))
def idle(self):
'''This function is called after every frame. By default:
* it "ticks" the clock to the next frame.
* it reads all input and dispatches events.
* it dispatches `on_update`, `on_draw` and `on_flip` events to the
window.
'''
# update dt
Clock.tick()
# read and dispatch input from providers
self.dispatch_input()
# flush all the canvas operation
Builder.sync()
# tick before draw
Clock.tick_draw()
# flush all the canvas operation
Builder.sync()
window = self.window
if window and window.canvas.needs_redraw:
window.dispatch('on_draw')
window.dispatch('on_flip')
# don't loop if we don't have listeners !
if len(self.event_listeners) == 0:
Logger.error('Base: No event listeners have been created')
Logger.error('Base: Application will leave')
self.exit()
return False
return self.quit
def run(self):
'''Main loop'''
while not self.quit:
self.idle()
self.exit()
def exit(self):
'''Close the main loop and close the window.'''
self.close()
if self.window:
self.window.close()
def on_stop(self):
'''Event handler for `on_stop` events which will be fired right
after all input providers have been stopped.'''
pass
def on_pause(self):
'''Event handler for `on_pause` which will be fired when
the event loop is paused.'''
pass
def on_start(self):
'''Event handler for `on_start` which will be fired right
after all input providers have been started.'''
pass
#: EventLoop instance
EventLoop = EventLoopBase()
def _run_mainloop():
'''If no window has been created, this will be the executed mainloop.'''
while True:
try:
EventLoop.run()
stopTouchApp()
break
except BaseException as inst:
# use exception manager first
r = ExceptionManager.handle_exception(inst)
if r == ExceptionManager.RAISE:
stopTouchApp()
raise
else:
pass
def runTouchApp(widget=None, slave=False):
'''Static main function that starts the application loop.
You can access some magic via the following arguments:
:Parameters:
`<empty>`
To make dispatching work, you need at least one
input listener. If not, application will leave.
(MTWindow acts as an input listener)
`widget`
If you pass only a widget, a MTWindow will be created
and your widget will be added to the window as the root
widget.
`slave`
No event dispatching is done. This will be your job.
`widget + slave`
No event dispatching is done. This will be your job but
we try to get the window (must be created by you beforehand)
and add the widget to it. Very useful for embedding Kivy
in another toolkit. (like Qt, check kivy-designer)
'''
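# A minimal usage sketch (not part of this module; assumes kivy.uix.label is available):
#   from kivy.uix.label import Label
#   runTouchApp(Label(text='Hello World'))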
from kivy.input import MotionEventFactory, kivy_postproc_modules
# Ok, we got one widget, and we are not in slave mode,
# so the user didn't create the window; let's create it for them!
if widget:
EventLoop.ensure_window()
# Instance all configured input
for key, value in Config.items('input'):
Logger.debug('Base: Create provider from %s' % (str(value)))
# split value
args = str(value).split(',', 1)
if len(args) == 1:
args.append('')
provider_id, args = args
provider = MotionEventFactory.get(provider_id)
if provider is None:
Logger.warning('Base: Unknown <%s> provider' % str(provider_id))
continue
# create provider
p = provider(key, args)
if p:
EventLoop.add_input_provider(p, True)
# add postproc modules
for mod in list(kivy_postproc_modules.values()):
EventLoop.add_postproc_module(mod)
# add main widget
if widget and EventLoop.window:
if widget not in EventLoop.window.children:
EventLoop.window.add_widget(widget)
# start event loop
Logger.info('Base: Start application main loop')
EventLoop.start()
# remove presplash on the next frame
if platform == 'android':
Clock.schedule_once(EventLoop.remove_android_splash)
# we are in a slave mode, don't do dispatching.
if slave:
return
# in non-slave mode, there are 2 issues
#
# 1. if user created a window, call the mainloop from window.
# This is due to glut: it needs to be called with
# glutMainLoop(). Only FreeGLUT has a glutMainLoopEvent().
# So, we are executing the dispatching function inside
# a redisplay event.
#
# 2. if no window is created, we are dispatching event loop
# ourself (previous behavior.)
#
try:
if EventLoop.window is None:
_run_mainloop()
else:
EventLoop.window.mainloop()
finally:
stopTouchApp()
def stopTouchApp():
'''Stop the current application by leaving the main loop'''
if EventLoop is None:
return
if EventLoop.status != 'started':
return
Logger.info('Base: Leaving application in progress...')
EventLoop.close()
| mit | 6,906,481,851,962,687,000 | 30.439922 | 79 | 0.580102 | false | 4.375135 | true | false | false |
Learningtribes/edx-platform | lms/djangoapps/lt_analytics/models.py | 1 | 26722 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from collections import defaultdict
import logging
from course_blocks.api import get_course_blocks
from course_widget.grades import get_progress
from courseware import grades
from courseware.courses import get_course_by_id
from courseware.views.views import is_course_passed
from django.contrib.auth.models import User
from django.core.validators import MaxValueValidator
from django.db import models
from django.db.models import Sum, Count
from django.http import Http404
from django.utils import timezone
from django_countries.fields import CountryField
import lms.lib.comment_client as cc
from model_utils.models import TimeStampedModel
from opaque_keys.edx.keys import CourseKey
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from requests import ConnectionError
from student.models import CourseEnrollment
from track.backends.django import TrackingLog
from xmodule.modulestore.django import modulestore
from xmodule_django.models import CourseKeyField
IDLE_TIME = 900
ANALYTICS_ACCESS_GROUP = "Triboo Analytics Users"
log = logging.getLogger('lt_analytics')
DISPLAY_EXCLUDE = ['_state', 'modified']
class CourseStatus(object):
not_started = 0
in_progress = 1
finished = 2
failed = 3
verbose_names = ['Not Started', 'In Progress', 'Successful', 'Unsuccessful']
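# e.g. CourseStatus.verbose_names[CourseStatus.finished] == 'Successful'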
def get_day_limits(day=None, offset=0):
day = (day or timezone.now()) + timezone.timedelta(days=offset)
day_start = day.replace(hour=0, minute=0, second=0, microsecond=0)
day_end = day_start + timezone.timedelta(days=1)
return day_start, day_end
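# Example: get_day_limits(offset=-1) returns yesterday's [midnight, midnight) bounds,
# derived from timezone.now().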
def format_time_spent(seconds):
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
return "%d:%02d:%02d" % (h, m, s)
def format_badges(badges_earned, badges_possible):
return "{} / {}".format(badges_earned, badges_possible)
def get_badges(report_badges):
badges = report_badges.split(" / ")
if len(badges) == 2:
return int(badges[0]), int(badges[1])
return 0, 0
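# Round trip example: get_badges(format_badges(3, 5)) == (3, 5); malformed strings fall back to (0, 0).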
class UnicodeMixin(object):
def __unicode__(self):
result = {}
for k, v in self.__dict__.iteritems():
if k not in DISPLAY_EXCLUDE:
result[k] = v.strftime('%Y-%m-%d %H:%M:%S %Z') if isinstance(v, timezone.datetime) else v
return unicode(result)
class ReportMixin(object):
@classmethod
def filter_by_day(cls, day=None, **kwargs):
day_start, day_end = get_day_limits(day)
return cls.objects.filter(created__gte=day_start,
created__lt=day_end,
**kwargs)
@classmethod
def get_by_day(cls, day=None, **kwargs):
day_start, day_end = get_day_limits(day)
try:
return cls.objects.get(created__gte=day_start,
created__lt=day_end,
**kwargs)
except cls.DoesNotExist:
return None
def get_day_times_spent(day_start, day_end):
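# Returns {user_id: [(TrackingLog, seconds), ...]}: each event is credited the gap to the
# user's next event, capped at IDLE_TIME; the day's last (or only) event gets IDLE_TIME.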
tracking_logs = TrackingLog.objects.filter(
time__gte=day_start, time__lt=day_end).exclude(
user_id=None).only('event_type', 'time', 'user_id', 'agent')
user_logs = defaultdict(list)
for l in tracking_logs:
user_logs[l.user_id].append(l)
for user_id, logs in user_logs.iteritems():
user_logs[user_id] = sorted(logs, key=lambda v: v.time)
times_spent = {}
for user_id, logs in user_logs.iteritems():
user_times_spent = []
if len(logs) >= 2:
log_pairs = zip(logs[:-1], logs[1:])
for log1, log2 in log_pairs:
total_seconds = (log2.time - log1.time).total_seconds()
time_spent = total_seconds if total_seconds < IDLE_TIME else IDLE_TIME
user_times_spent.append((log1, time_spent))
user_times_spent.append((logs[-1], IDLE_TIME))
elif len(logs) == 1:
user_times_spent.append((logs[0], IDLE_TIME))
times_spent[user_id] = user_times_spent
return times_spent
def generate_today_reports():
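# Order matters: per-learner visit and course reports are generated first, then the
# aggregated learner / course / microsite / country reports are derived from them.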
LearnerVisitsDailyReport.generate_today_reports()
LearnerCourseDailyReport.generate_today_reports()
learner_course_reports = LearnerCourseDailyReport.filter_by_day().prefetch_related('user__profile')
LearnerDailyReport.generate_today_reports(learner_course_reports)
CourseDailyReport.generate_today_reports(learner_course_reports)
MicrositeDailyReport.generate_today_reports(learner_course_reports)
CountryDailyReport.generate_today_reports(learner_course_reports)
class ReportLog(UnicodeMixin, TimeStampedModel):
class Meta(object):
app_label = 'lt_analytics'
get_latest_by = 'created'
learner_visit = models.DateTimeField(default=None, null=True)
learner_course = models.DateTimeField(default=None, null=True)
learner = models.DateTimeField(default=None, null=True)
course = models.DateTimeField(default=None, null=True)
microsite = models.DateTimeField(default=None, null=True)
country = models.DateTimeField(default=None, null=True)
@classmethod
def get_latest_dt(cls):
try:
report = cls.objects.filter(learner_visit__isnull=False,
learner_course__isnull=False,
learner__isnull=False,
course__isnull=False,
microsite__isnull=False,
country__isnull=False).latest()
return report.created
except cls.DoesNotExist:
return None
@classmethod
def update_or_create(cls, **kwargs):
today_start, today_end = get_day_limits()
cls.objects.update_or_create(created__gte=today_start,
created__lt=today_end,
defaults=kwargs)
class LearnerVisitsDailyReport(UnicodeMixin, ReportMixin, TimeStampedModel):
class Meta(object):
app_label = 'lt_analytics'
get_latest_by = "created"
unique_together = ('created', 'user', 'course_id', 'device')
index_together = ['created', 'user', 'course_id', 'device']
user = models.ForeignKey(User, null=False)
course_id = CourseKeyField(max_length=255, null=True)
org = models.CharField(max_length=255, db_index=True, null=True, default=None)
device = models.CharField(max_length=255, null=False)
time_spent = models.PositiveIntegerField(default=0)
@classmethod
def generate_day_reports(cls, day=None):
previous_day_start, previous_day_end = get_day_limits(day=day, offset=-1)
previous_day_times_spent = get_day_times_spent(previous_day_start, previous_day_end)
for uid, one_user_times_spent in previous_day_times_spent.iteritems():
cls.update_or_create(uid, one_user_times_spent, day)
@classmethod
def generate_today_reports(cls):
cls.generate_day_reports()
ReportLog.update_or_create(learner_visit=timezone.now())
@classmethod
def update_or_create(cls, user_id, one_user_times_spent, day=None):
# group visit by course_id, device, accumulate time_spent
reports = defaultdict(lambda: 0)
for visit, time_spent in one_user_times_spent:
reports[(visit.course_id, visit.device)] += time_spent
today_start, today_end = get_day_limits(day)
for (course_id, device), time_spent in reports.iteritems():
org = course_id.org if course_id != CourseKeyField.Empty else None
if day:
cls.objects.update_or_create(user_id=user_id,
course_id=course_id,
org=org,
device=device,
created__gte=today_start,
created__lt=today_end,
created=day,
defaults={'time_spent': int(time_spent)})
else:
cls.objects.update_or_create(user_id=user_id,
course_id=course_id,
org=org,
device=device,
created__gte=today_start,
created__lt=today_end,
defaults={'time_spent': int(time_spent)})
class LearnerCourseDailyReport(UnicodeMixin, ReportMixin, TimeStampedModel):
class Meta(object):
app_label = 'lt_analytics'
get_latest_by = 'created'
unique_together = ('user', 'course_id', 'created')
index_together = ['user', 'course_id', 'created']
user = models.ForeignKey(User, null=False)
course_id = CourseKeyField(max_length=255, db_index=True, null=False)
org = models.CharField(max_length=255, db_index=True, null=False)
status = models.PositiveSmallIntegerField(help_text="not started: 0; in progress: 1; finished: 2; failed: 3; ",
default=0)
current_score = models.PositiveSmallIntegerField(default=0, validators=[MaxValueValidator(100)])
badges = models.CharField(max_length=20, default="0 / 0")
posts = models.IntegerField(default=0)
progress = models.PositiveSmallIntegerField(default=0, validators=[MaxValueValidator(100)])
total_time_spent = models.PositiveIntegerField(default=0)
enrollment_date = models.DateTimeField(default=None, null=True, blank=True)
completion_date = models.DateTimeField(default=None, null=True, blank=True)
@classmethod
def generate_today_reports(cls):
enrollments = CourseEnrollment.objects.filter(is_active=True).prefetch_related('user')
overviews = CourseOverview.objects.filter(start__lte=timezone.now()).only('id')
course_ids = set([o.id for o in overviews])
for enrollment in enrollments:
if enrollment.course_id in course_ids:
cls.update_or_create(enrollment)
ReportLog.update_or_create(learner_course=timezone.now())
@classmethod
def update_or_create(cls, enrollment):
"""
Create today's report for the given enrollment. Calling this multiple times with
the same parameters only creates (or updates) a single row per day.
Args:
enrollment: CourseEnrollment object
"""
course_id = enrollment.course_id
user = enrollment.user
today_start, today_end = get_day_limits()
with modulestore().bulk_operations(course_id):
if user.is_active:
try:
course = get_course_by_id(course_id, None)
except Http404:
return
total_time_spent = (LearnerVisitsDailyReport.objects.filter(
user=user, course_id=course_id).aggregate(
Sum('time_spent')).get('time_spent__sum') or 0)
course_structure = get_course_blocks(user, course.location)
courseware_summary = grades.progress_summary(user, course, course_structure)
grade_summary = grades.grade(user, course, course_structure=course_structure)
if (not courseware_summary) or (not grade_summary):
log.warning('course: {} does not have progress info, skip.'.format(course_id))
return
progress = get_progress(user, course, courseware_summary, grade_summary, False)
passed = is_course_passed(course, grade_summary)
if progress['progress'] == 100:
status = CourseStatus.finished if passed else CourseStatus.failed
else:
try:
last_report = cls.objects.exclude(created__gte=today_start, created__lt=today_end).filter(
course_id=course_id, user=user).latest()
except cls.DoesNotExist:
last_report = None
if last_report and last_report.status == CourseStatus.in_progress:
# if last time, it's already in progress, so keep it
status = last_report.status
else:
# if not, based on if he visit the course page to say if it started
if total_time_spent > 0 or progress['progress'] > 0:
status = CourseStatus.in_progress
else:
status = CourseStatus.not_started
try:
cc_user = cc.User(id=user.id, course_id=course.id).to_dict()
posts = cc_user.get('comments_count', 0) + cc_user.get('threads_count', 0)
except (cc.CommentClient500Error, cc.CommentClientRequestError, ConnectionError):
posts = 0
cls.objects.update_or_create(
user=user, course_id=course_id,
created__gte=today_start, created__lt=today_end,
defaults={'org': course_id.org,
'progress': progress['progress'],
'badges': format_badges(progress['nb_trophies_earned'], progress['nb_trophies_possible']),
'current_score': progress['current_score'],
'enrollment_date': enrollment.created,
'completion_date': enrollment.completed,
'status': status,
'total_time_spent': total_time_spent,
'posts': posts})
class LearnerDailyReport(UnicodeMixin, ReportMixin, TimeStampedModel):
class Meta(object):
app_label = 'lt_analytics'
get_latest_by = "created"
unique_together = ('created', 'user', 'org')
index_together = ['created', 'user', 'org']
user = models.ForeignKey(User, null=False)
org = models.CharField(max_length=255, db_index=True, null=False)
enrollments = models.PositiveSmallIntegerField(default=0)
average_final_score = models.PositiveSmallIntegerField(default=0)
badges = models.CharField(max_length=20, default="0 / 0")
posts = models.IntegerField(default=0)
finished = models.SmallIntegerField(default=0, verbose_name=CourseStatus.verbose_names[CourseStatus.finished])
failed = models.SmallIntegerField(default=0, verbose_name=CourseStatus.verbose_names[CourseStatus.failed])
not_started = models.SmallIntegerField(default=0, verbose_name=CourseStatus.verbose_names[CourseStatus.not_started])
in_progress = models.SmallIntegerField(default=0, verbose_name=CourseStatus.verbose_names[CourseStatus.in_progress])
country = models.CharField(default='', max_length=255)
total_time_spent = models.PositiveIntegerField(default=0)
@classmethod
def generate_today_reports(cls, learner_course_reports):
reports_by_user_org = defaultdict(list)
for report in learner_course_reports:
reports_by_user_org[(report.user_id, report.org)].append(report)
for (user_id, org), reports in reports_by_user_org.iteritems():
cls.update_or_create(user_id, org, reports)
ReportLog.update_or_create(learner=timezone.now())
@classmethod
def update_or_create(cls, user_id, org, learner_course_reports):
enrollments = len(learner_course_reports)
total_score = 0
nb_completed_courses = 0
posts = 0
badges_earned = 0
badges_possible = 0
finished = 0
failed = 0
in_progress = 0
not_started = 0
for report in learner_course_reports:
posts += report.posts
earned, possible = get_badges(report.badges)
badges_earned += earned
badges_possible += possible
if report.status == CourseStatus.finished:
finished += 1
elif report.status == CourseStatus.failed:
failed += 1
elif report.status == CourseStatus.in_progress:
in_progress += 1
elif report.status == CourseStatus.not_started:
not_started += 1
if report.status in [CourseStatus.finished, CourseStatus.failed]:
total_score += report.current_score
nb_completed_courses += 1
average_final_score = 0
if nb_completed_courses > 0:
average_final_score = total_score / nb_completed_courses
total_time_spent = (LearnerVisitsDailyReport.objects.filter(user_id=user_id).aggregate(
Sum('time_spent')).get('time_spent__sum') or 0)
today_start, today_end = get_day_limits()
cls.objects.update_or_create(
user_id=user_id,
org=org,
created__gte=today_start,
created__lt=today_end,
defaults={'enrollments': enrollments,
'average_final_score': average_final_score,
'total_time_spent': total_time_spent,
'posts': posts,
'badges': format_badges(badges_earned, badges_possible),
'finished': finished,
'failed': failed,
'in_progress': in_progress,
'not_started': not_started})
class CourseDailyReport(UnicodeMixin, ReportMixin, TimeStampedModel):
class Meta(object):
app_label = 'lt_analytics'
get_latest_by = "created"
unique_together = ('course_id', 'created')
index_together = ['course_id', 'created']
course_id = CourseKeyField(max_length=255, db_index=True, null=False)
enrollments = models.PositiveIntegerField(default=0)
average_final_score = models.PositiveSmallIntegerField(default=0)
posts = models.IntegerField(default=0)
finished = models.SmallIntegerField(default=0, verbose_name=CourseStatus.verbose_names[CourseStatus.finished])
failed = models.SmallIntegerField(default=0, verbose_name=CourseStatus.verbose_names[CourseStatus.failed])
in_progress = models.SmallIntegerField(default=0, verbose_name=CourseStatus.verbose_names[CourseStatus.in_progress])
not_started = models.SmallIntegerField(default=0, verbose_name=CourseStatus.verbose_names[CourseStatus.not_started])
average_complete_time = models.PositiveIntegerField(default=0)
@classmethod
def generate_today_reports(cls, learner_course_reports):
reports_by_course = defaultdict(list)
for report in learner_course_reports:
reports_by_course[report.course_id].append(report)
for course_id, reports in reports_by_course.iteritems():
cls.update_or_create(course_id, reports)
ReportLog.update_or_create(course=timezone.now())
@classmethod
def update_or_create(cls, course_id, learner_course_reports):
total_score = 0
nb_completed_courses = 0
posts = 0
finished = 0
failed = 0
in_progress = 0
not_started = 0
total_time = 0
for report in learner_course_reports:
posts += report.posts
status = report.status
if status == CourseStatus.finished:
finished += 1
elif status == CourseStatus.in_progress:
in_progress += 1
elif status == CourseStatus.not_started:
not_started += 1
elif status == CourseStatus.failed:
failed += 1
if status in [CourseStatus.finished, CourseStatus.failed]:
total_score += report.current_score
total_time += report.total_time_spent
nb_completed_courses += 1
average_final_score = average_complete_time = 0
if nb_completed_courses > 0:
average_final_score = total_score / nb_completed_courses
average_complete_time = total_time / nb_completed_courses
today_start, today_end = get_day_limits()
cls.objects.update_or_create(
course_id=course_id,
created__gte=today_start,
created__lt=today_end,
defaults={'enrollments': len(learner_course_reports),
'average_final_score': average_final_score,
'posts': posts,
'finished': finished,
'failed': failed,
'in_progress': in_progress,
'not_started': not_started,
'average_complete_time': average_complete_time})
class MicrositeDailyReport(UnicodeMixin, ReportMixin, TimeStampedModel):
class Meta:
app_label = 'lt_analytics'
get_latest_by = "created"
unique_together = ('created', 'org')
index_together = ['created', 'org']
org = models.CharField(max_length=255, null=False)
users = models.PositiveIntegerField(default=0)
courses = models.PositiveIntegerField(default=0)
finished = models.PositiveIntegerField(default=0)
unique_visitors = models.PositiveIntegerField(default=0)
average_time_spent = models.PositiveIntegerField(default=0)
total_time_spent_on_mobile = models.PositiveIntegerField(default=0)
total_time_spent_on_desktop = models.PositiveIntegerField(default=0)
@classmethod
def generate_today_reports(cls, learner_course_reports):
reports_by_org = defaultdict(list)
for report in learner_course_reports:
reports_by_org[report.org].append(report)
for org, reports in reports_by_org.iteritems():
cls.update_or_create(org, reports)
ReportLog.update_or_create(microsite=timezone.now())
@classmethod
def update_or_create(cls, org, learner_course_reports):
users = set()
courses = set()
finished = 0
total_time_spent = 0
time_count = 0
for report in learner_course_reports:
users.add(report.user_id)
courses.add(unicode(report.course_id))
if report.status == CourseStatus.finished:
finished += 1
total_time_spent += report.total_time_spent
time_count += 1
average_time_spent = total_time_spent / time_count if time_count else 0
total_time_spent_on_mobile = 0
total_time_spent_on_desktop = 0
for course_id in courses:
course_key = CourseKey.from_string(course_id)
total_time_spent_on_mobile += (LearnerVisitsDailyReport.objects.filter(
course_id=course_key, device="mobile").aggregate(
Sum('time_spent')).get('time_spent__sum') or 0)
total_time_spent_on_desktop += (LearnerVisitsDailyReport.objects.filter(
course_id=course_key, device="desktop").aggregate(
Sum('time_spent')).get('time_spent__sum') or 0)
unique_visitors = (LearnerVisitsDailyReport.filter_by_day(org=org).aggregate(
Count('user_id', distinct=True)).get('user_id__count') or 0)
today_start, today_end = get_day_limits()
cls.objects.update_or_create(
org=org,
created__gte=today_start,
created__lt=today_end,
defaults={'users': len(users),
'courses': len(courses),
'finished': finished,
'unique_visitors': unique_visitors,
'average_time_spent': average_time_spent,
'total_time_spent_on_mobile': total_time_spent_on_mobile,
'total_time_spent_on_desktop': total_time_spent_on_desktop})
@classmethod
def update_or_create_unique_visitors(cls, day, org):
day_start, day_end = get_day_limits(day)
unique_visitors = (LearnerVisitsDailyReport.filter_by_day(day=day, org=org).aggregate(
Count('user_id', distinct=True)).get('user_id__count') or 0)
print "day=%s, org=%s" % (day, org)
cls.objects.update_or_create(
org=org,
created__gte=day_start,
created__lt=day_end,
defaults={'created': day, 'unique_visitors': unique_visitors})
@classmethod
def get_unique_visitors_csv_data(cls, org):
unique_visitors_csv_data = ""
unique_visitors = cls.objects.filter(org=org).values('created', 'unique_visitors').order_by('created')
for uv in unique_visitors:
unique_visitors_csv_data += "%s,%d\\n" % (uv['created'].strftime('%d-%m-%Y'), uv['unique_visitors'])
return unique_visitors_csv_data
class CountryDailyReport(UnicodeMixin, ReportMixin, TimeStampedModel):
class Meta:
app_label = 'lt_analytics'
get_latest_by = "created"
unique_together = ('created', 'org', 'country')
index_together = ['created', 'org']
org = models.CharField(max_length=255, null=False)
country = CountryField(null=True)
nb_users = models.PositiveIntegerField(default=0)
@classmethod
def generate_today_reports(cls, learner_course_reports):
reports_by_org = defaultdict(list)
for report in learner_course_reports:
reports_by_org[report.org].append(report)
for org, reports in reports_by_org.iteritems():
cls.update_or_create(org, reports)
ReportLog.update_or_create(country=timezone.now())
@classmethod
def update_or_create(cls, org, learner_course_reports):
users_by_country = defaultdict(int)
users = []
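        # count each distinct user once, grouped by the country on their profile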
for report in learner_course_reports:
if report.user.id not in users:
users.append(report.user.id)
users_by_country[report.user.profile.country] += 1
for country, nb_users in users_by_country.iteritems():
today_start, today_end = get_day_limits()
cls.objects.update_or_create(
org=org,
country=country,
created__gte=today_start,
created__lt=today_end,
defaults={'nb_users': nb_users})
| agpl-3.0 | 6,291,872,158,171,765,000 | 41.08189 | 120 | 0.594791 | false | 4.149379 | false | false | false |
EinsteinCarrey/Shoppinglist | classes/user.py | 1 | 2895 | import global_functions
from classes.shopping_list import ShoppingList
class User(object):
def __init__(self, username, password, firstname, lastname):
"""
Attributes:
username (str): A unique name to identify user.
password (str): A secret phrase to authenticate a user.
firstname (str): The user's first name.
lastname (str): The user's last name.
:arg
username (str): A unique name to identify user.
password (str): A secret phrase to authenticate a user.
firstname (str): The user's first name.
lastname (str): The user's last name.
"""
self.username = username
self.password_hash = global_functions.sha1_hash(password)
self.firstname = firstname
self.lastname = lastname
self.shopping_lists = dict()
self.id = global_functions.get_random_id()
def create_shopping_list(self, title=None):
""" Creates a new ShoppingList object
:arg
title: The caption of the shoppinglist
:returns
str: id of the new shoppinglist that has been created
"""
if title is None or len(title) < 1:
return "shopping list must have a title"
if not isinstance(title, str):
return "shopping list title must be a string"
for shoppinglist in self.shopping_lists.values():
if title.lower() == shoppinglist.title.lower():
return "Shopping list `" + title + "` already exists"
        new_list = ShoppingList(title)
        # add the new shopping list object to the dict of shoppinglists
        # owned by the current user, keyed by the list's id
        self.shopping_lists[str(new_list.id)] = new_list
        return new_list.id
def remove_shopping_list(self, shopping_list_id):
""" Deletes the selected shoppinglist object from memory
:arg
                shopping_list_id (int): The id of the shoppinglist to delete
            :returns
                True if the shoppinglist has been deleted successfully,
                otherwise an error message
"""
if not isinstance(shopping_list_id, int):
return "Shopping list id should be an Integer"
        # shopping lists are stored in a dict keyed by their id as a string
        if str(shopping_list_id) in self.shopping_lists:
            del self.shopping_lists[str(shopping_list_id)]
            return True
return "Shopping list does not exist"
def list_shopping_lists(self):
"""
:returns
list: Returns a list of all the shoppinglists
owned by current user
"""
list_names = []
for shoppinglist in self.shopping_lists.values():
list_names.append(shoppinglist.title)
return list_names
| mit | -8,741,231,990,077,959,000 | 31.166667 | 72 | 0.572366 | false | 4.602544 | false | false | false |
operepo/ope | libs/gluon/packages/dal/pydal/adapters/sapdb.py | 11 | 3967 | # -*- coding: utf-8 -*-
import re
from .._globals import IDENTITY
from .base import BaseAdapter
class SAPDBAdapter(BaseAdapter):
drivers = ('sapdb',)
support_distributed_transaction = False
types = {
'boolean': 'CHAR(1)',
'string': 'VARCHAR(%(length)s)',
'text': 'LONG',
'json': 'LONG',
'password': 'VARCHAR(%(length)s)',
'blob': 'LONG',
'upload': 'VARCHAR(%(length)s)',
'integer': 'INT',
'bigint': 'BIGINT',
'float': 'FLOAT',
'double': 'DOUBLE PRECISION',
'decimal': 'FIXED(%(precision)s,%(scale)s)',
'date': 'DATE',
'time': 'TIME',
'datetime': 'TIMESTAMP',
'id': 'INT PRIMARY KEY',
'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'list:integer': 'LONG',
'list:string': 'LONG',
'list:reference': 'LONG',
'big-id': 'BIGINT PRIMARY KEY',
'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
}
def sequence_name(self,table):
return (self.QUOTE_TEMPLATE + '_id_Seq') % table
def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
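        # Emulate limitby (offset/limit) using nested selects over SAP DB's ROWNO pseudo-column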
if limitby:
(lmin, lmax) = limitby
if len(sql_w) > 1:
sql_w_row = sql_w + ' AND w_row > %i' % lmin
else:
sql_w_row = 'WHERE w_row > %i' % lmin
return '%s %s FROM (SELECT w_tmp.*, ROWNO w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNO=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o)
return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
def create_sequence_and_triggers(self, query, table, **args):
# following lines should only be executed if table._sequence_name does not exist
self.execute('CREATE SEQUENCE %s;' % table._sequence_name)
self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \
% (table._tablename, table._id.name, table._sequence_name))
self.execute(query)
REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>\[[^/]+\]|[^\:@]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$')
def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
self.db = db
self.dbengine = "sapdb"
self.uri = uri
if do_connect: self.find_driver(adapter_args,uri)
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
self.find_or_make_work_folder()
ruri = uri.split('://',1)[1]
m = self.REGEX_URI.match(ruri)
if not m:
raise SyntaxError("Invalid URI string in DAL")
user = credential_decoder(m.group('user'))
if not user:
raise SyntaxError('User required')
password = credential_decoder(m.group('password'))
if not password:
password = ''
host = m.group('host')
if not host:
raise SyntaxError('Host name required')
db = m.group('db')
if not db:
raise SyntaxError('Database name required')
def connector(user=user, password=password, database=db,
host=host, driver_args=driver_args):
return self.driver.Connection(user, password, database,
host, **driver_args)
self.connector = connector
if do_connect: self.reconnect()
def lastrowid(self,table):
self.execute("select %s.NEXTVAL from dual" % table._sequence_name)
return long(self.cursor.fetchone()[0])
| mit | -6,862,974,411,384,298,000 | 39.896907 | 197 | 0.546005 | false | 3.452567 | false | false | false |
alexander-rakhlin/CNN-for-Sentence-Classification-in-Keras | data_helpers.py | 1 | 4232 | import numpy as np
import re
import itertools
from collections import Counter
"""
Original taken from https://github.com/dennybritz/cnn-text-classification-tf
"""
def clean_str(string):
"""
Tokenization/string cleaning for all datasets except for SST.
Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
"""
string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
string = re.sub(r"\'s", " \'s", string)
string = re.sub(r"\'ve", " \'ve", string)
string = re.sub(r"n\'t", " n\'t", string)
string = re.sub(r"\'re", " \'re", string)
string = re.sub(r"\'d", " \'d", string)
string = re.sub(r"\'ll", " \'ll", string)
string = re.sub(r",", " , ", string)
string = re.sub(r"!", " ! ", string)
string = re.sub(r"\(", " \( ", string)
string = re.sub(r"\)", " \) ", string)
string = re.sub(r"\?", " \? ", string)
string = re.sub(r"\s{2,}", " ", string)
return string.strip().lower()
def load_data_and_labels():
"""
Loads MR polarity data from files, splits the data into words and generates labels.
Returns split sentences and labels.
"""
# Load data from files
positive_examples = list(open("./data/rt-polarity.pos").readlines())
positive_examples = [s.strip() for s in positive_examples]
negative_examples = list(open("./data/rt-polarity.neg").readlines())
negative_examples = [s.strip() for s in negative_examples]
# Split by words
x_text = positive_examples + negative_examples
x_text = [clean_str(sent) for sent in x_text]
x_text = [s.split(" ") for s in x_text]
# Generate labels
positive_labels = [[0, 1] for _ in positive_examples]
negative_labels = [[1, 0] for _ in negative_examples]
y = np.concatenate([positive_labels, negative_labels], 0)
return [x_text, y]
def pad_sentences(sentences, padding_word="<PAD/>"):
"""
Pads all sentences to the same length. The length is defined by the longest sentence.
Returns padded sentences.
"""
sequence_length = max(len(x) for x in sentences)
padded_sentences = []
for i in range(len(sentences)):
sentence = sentences[i]
num_padding = sequence_length - len(sentence)
new_sentence = sentence + [padding_word] * num_padding
padded_sentences.append(new_sentence)
return padded_sentences
def build_vocab(sentences):
"""
Builds a vocabulary mapping from word to index based on the sentences.
Returns vocabulary mapping and inverse vocabulary mapping.
"""
# Build vocabulary
word_counts = Counter(itertools.chain(*sentences))
# Mapping from index to word
vocabulary_inv = [x[0] for x in word_counts.most_common()]
# Mapping from word to index
vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}
return [vocabulary, vocabulary_inv]
def build_input_data(sentences, labels, vocabulary):
"""
    Maps sentences and labels to vectors based on a vocabulary.
"""
x = np.array([[vocabulary[word] for word in sentence] for sentence in sentences])
y = np.array(labels)
return [x, y]
def load_data():
"""
Loads and preprocessed data for the MR dataset.
Returns input vectors, labels, vocabulary, and inverse vocabulary.
"""
# Load and preprocess data
sentences, labels = load_data_and_labels()
sentences_padded = pad_sentences(sentences)
vocabulary, vocabulary_inv = build_vocab(sentences_padded)
x, y = build_input_data(sentences_padded, labels, vocabulary)
return [x, y, vocabulary, vocabulary_inv]
def batch_iter(data, batch_size, num_epochs):
"""
Generates a batch iterator for a dataset.
"""
data = np.array(data)
data_size = len(data)
num_batches_per_epoch = int(len(data) / batch_size) + 1
for epoch in range(num_epochs):
# Shuffle the data at each epoch
shuffle_indices = np.random.permutation(np.arange(data_size))
shuffled_data = data[shuffle_indices]
for batch_num in range(num_batches_per_epoch):
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, data_size)
yield shuffled_data[start_index:end_index]
| mit | -5,858,940,968,319,273,000 | 34.864407 | 91 | 0.639178 | false | 3.550336 | false | false | false |
TianpeiLuke/GParML | gd.py | 2 | 4805 |
import numpy as np
from numpy.linalg.linalg import LinAlgError
import sys
import traceback
import gd_local_MapReduce as local_MapReduce
def print_out(len_maxiters, display, fnow, current_grad, beta, iteration):
if display:
print '\r',
print '{0:>0{mi}g} {1:> 12e} {2:> 12e} {3:> 12e}'.format(iteration, float(fnow), float(beta), float(current_grad), mi=len_maxiters), # print 'Iteration:', iteration, ' Objective:', fnow, ' Scale:', beta, '\r',
sys.stdout.flush()
_fail_count = 0
_allowed_failures = 100
def safe_f_and_grad_f(f_and_gradf, x, iteration=0, step_size=0, *optargs):
'''
Calls f and gradf and returns inf for f in case of warnings / assertion errors and so on.
The returned gradf in that case is 0, which screws up SCG's momentum, so a re-start should be done
'''
global _fail_count, _allowed_failures
try:
[f, gradf] = f_and_gradf(x, iteration, step_size, *optargs)
_fail_count = 0
except (LinAlgError, ZeroDivisionError, ValueError, Warning, AssertionError) as e:
if _fail_count >= _allowed_failures:
print 'Too many errors...'
raise e
_fail_count += 1
print
_,_,tb = sys.exc_info()
tbInfo = traceback.extract_tb(tb)
filename,line,func,text = tbInfo[-1]
print ('An error occurred on line ' + str(line) + ' in filename ' + filename)
print 'Increasing failed count (' + str(_fail_count) + ') and returning nlml inf'
f = np.inf
gradf = np.ones(x.shape)
return f, gradf
def GD(f_and_gradf, x, tmp_folder, fixed_embeddings=False, optargs=(), maxiters=500, max_f_eval=500, display=True, xtol=None, ftol=None, gtol=None):
"""
Optimisation through Gradient Descent
f: the objective function
gradf : the gradient function (should return a 1D np.ndarray)
x : the initial condition
Returns
x the optimal value for x
flog : a list of all the objective values
function_eval number of fn evaluations
status: string describing convergence status
"""
if xtol is None:
xtol = 1e-16
if ftol is None:
ftol = 1e-6
if gtol is None:
gtol = 1e-6
len_maxiters = len(str(maxiters))
step_size = 0.01
mom_size = 0.0
f_gradf = safe_f_and_grad_f(f_and_gradf, x, iteration=0, step_size=0, *optargs)
fnow = f_gradf[0]
flog = [fnow]
gradnow = f_gradf[1]
direction = - gradnow
if not fixed_embeddings:
local_MapReduce.embeddings_set_grads(tmp_folder)
iteration = 0
while iteration < maxiters:
xprop = x + step_size * direction
f_gradf = safe_f_and_grad_f(f_and_gradf, xprop, iteration=iteration, step_size=step_size, *optargs)
fproposed = f_gradf[0]
        if (np.abs(fnow - fproposed) < ftol):
            print 'converged due to ftol'
            break
        if (np.abs(step_size) < xtol):
            print 'converged due to xtol'
            break
if (fproposed <= fnow):
fnow = fproposed
flog += [fnow]
gradnow = f_gradf[1]
if not fixed_embeddings:
local_MapReduce.embeddings_set_grads_update_grad_now(tmp_folder)
x = xprop
if not fixed_embeddings:
local_MapReduce.embeddings_set_grads_update_X(tmp_folder, step_size)
direction = - (gradnow + mom_size * step_size * direction)
#direction = - (gradnow - mom_size * step_size * direction)
if not fixed_embeddings:
local_MapReduce.embeddings_set_grads_update_d(tmp_folder, mom_size * step_size)
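            # proposal accepted: double the step size for the next iteration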
step_size *= 2.0
iteration += 1
max_abs_gradnow = np.max(np.abs(gradnow))
if not fixed_embeddings:
max_abs_gradnow = max(max_abs_gradnow, local_MapReduce.embeddings_get_grads_max_gradnow(tmp_folder))
            if (max_abs_gradnow < gtol):
                print 'converged due to grad'
                break
else:
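            # proposal rejected: halve the step size and retry from the same point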
step_size /= 2.0
if display:
print ' {0:{mi}s} {1:11s} {2:11s} {3:11s}'.format("I", "F", "Scale", "|g|", mi=len_maxiters)
current_grad = np.sum(np.abs(gradnow))
if not fixed_embeddings:
current_grad += local_MapReduce.embeddings_get_grads_current_grad(tmp_folder)
print_out(len_maxiters, display, fnow, current_grad, step_size, iteration)
if display:
current_grad = np.sum(np.abs(gradnow))
if not fixed_embeddings:
current_grad += local_MapReduce.embeddings_get_grads_current_grad(tmp_folder)
print_out(len_maxiters, display, fnow, current_grad, step_size, iteration)
print ""
return x, flog, None, 'converged... NOT'
| bsd-3-clause | -4,125,403,642,333,400,600 | 36.834646 | 221 | 0.597294 | false | 3.398161 | false | false | false |
avislab/sensorstest | MPU-6050/pyplay_accel.py | 1 | 2633 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import random, pygame, sys, thread, time
from pygame.locals import *
from mpu6050 import *
y_offset = 0
rotation = 0
def mpu6050_read():
global y_offset
global rotation
mpu = MPU6050()
mpu.initialize()
# Set calibration data
mpu.gyro_offs = {'x': -178, 'y': 259, 'z': -104}
mpu.accel_offs = {'y': -354, 'x': 389, 'z': -1482}
accel_data = mpu.get_accel()
x_rotation = mpu.get_x_rotation(accel_data)
y_rotation = mpu.get_y_rotation(accel_data)
while True:
accel_data = mpu.get_accel()
x_rotation = mpu.get_x_rotation(accel_data)
y_rotation = mpu.get_y_rotation(accel_data)
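        # map the accelerometer angles onto a screen offset and a rotation for the display loop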
y_offset = y_rotation * 2
rotation = x_rotation
time.sleep(0.001)
FPS = 100
WINDOWWIDTH = 640
WINDOWHEIGHT = 480
CELLSIZE = 20
CELLWIDTH = int(WINDOWWIDTH / CELLSIZE)
CELLHEIGHT = int(WINDOWHEIGHT / CELLSIZE)
# R G B
WHITE = (255, 255, 255)
BLACK = ( 0, 0, 0)
RED = (255, 0, 0)
GREEN = ( 0, 255, 0)
DARKGREEN = ( 0, 155, 0)
DARKGRAY = ( 40, 40, 40)
BGCOLOR = BLACK
def main():
global FPSCLOCK, DISPLAYSURF, BASICFONT
pygame.init()
FPSCLOCK = pygame.time.Clock()
DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))
BASICFONT = pygame.font.Font('freesansbold.ttf', 18)
pygame.display.set_caption('MPU-6050')
thread.start_new_thread(mpu6050_read,())
while True:
runGame()
def runGame():
global y_offset
global rotation
titleFont = pygame.font.Font('freesansbold.ttf', 50)
titleSurf1 = titleFont.render('MPU-6050', True, WHITE)
while True: # main game loop
for event in pygame.event.get(): # event handling loop
if event.type == QUIT:
terminate()
elif event.type == KEYDOWN:
if event.key == K_ESCAPE:
terminate()
DISPLAYSURF.fill(BGCOLOR)
drawGrid()
rotatedSurf1 = pygame.transform.rotate(titleSurf1, rotation)
rotatedRect1 = rotatedSurf1.get_rect()
rotatedRect1.center = (WINDOWWIDTH/2, WINDOWHEIGHT/2 + y_offset)
DISPLAYSURF.blit(rotatedSurf1, rotatedRect1)
pygame.display.update()
FPSCLOCK.tick(FPS)
def terminate():
pygame.quit()
sys.exit()
def drawGrid():
for x in range(0, WINDOWWIDTH, CELLSIZE): # draw vertical lines
pygame.draw.line(DISPLAYSURF, DARKGRAY, (x, 0), (x, WINDOWHEIGHT))
for y in range(0, WINDOWHEIGHT, CELLSIZE): # draw horizontal lines
pygame.draw.line(DISPLAYSURF, DARKGRAY, (0, y), (WINDOWWIDTH, y))
if __name__ == '__main__':
main()
| gpl-2.0 | 422,374,709,986,391,360 | 22.096491 | 74 | 0.621724 | false | 2.858849 | false | false | false |
lokik/sfepy | examples/homogenization/nonlinear_homogenization.py | 1 | 8053 | # -*- coding: utf-8 -*-
import numpy as nm
from sfepy.homogenization.utils import define_box_regions
import sfepy.homogenization.coefs_base as cb
import sfepy.discrete.fem.periodic as per
from sfepy.base.base import Struct
from sfepy.terms.terms_hyperelastic_ul import\
HyperElasticULFamilyData, NeoHookeanULTerm, BulkPenaltyULTerm
from sfepy.terms.extmods.terms import sym2nonsym
from sfepy.discrete.functions import ConstantFunctionByRegion
from sfepy import data_dir
import sfepy.linalg as la
def recovery_hook(pb, ncoors, region, ts,
naming_scheme='step_iel', recovery_file_tag=''):
from sfepy.base.ioutils import get_print_info
from sfepy.homogenization.recovery import get_output_suffix
import os.path as op
for ii, icell in enumerate(region.cells):
out = {}
pb.set_mesh_coors(ncoors[ii], update_fields=True,
clear_all=False, actual=True)
stress = pb.evaluate('ev_integrate_mat.3.Y(mat_he.S, u)',
mode='el_avg')
out['cauchy_stress'] = Struct(name='output_data',
mode='cell',
data=stress,
dofs=None)
strain = pb.evaluate('ev_integrate_mat.3.Y(mat_he.E, u)',
mode='el_avg')
out['green_strain'] = Struct(name='output_data',
mode='cell',
data=strain,
dofs=None)
out['displacement'] = Struct(name='output_data',
mode='vertex',
data=ncoors[ii] - pb.get_mesh_coors(),
dofs=None)
output_dir = pb.conf.options.get('output_dir', '.')
format = get_print_info(pb.domain.mesh.n_el, fill='0')[1]
suffix = get_output_suffix(icell, ts, naming_scheme, format,
pb.output_format)
micro_name = pb.get_output_name(extra='recovered_'
+ recovery_file_tag + suffix)
filename = op.join(output_dir, op.basename(micro_name))
fpv = pb.conf.options.get('file_per_var', False)
pb.save_state(filename, out=out, file_per_var=fpv)
def def_mat(ts, mode, coors, term, pb):
if not (mode == 'qp'):
return
if not hasattr(pb, 'family_data'):
pb.family_data = HyperElasticULFamilyData()
update_var = pb.conf.options.mesh_update_variable
if pb.equations is None:
state_u = pb.create_variables([update_var])[update_var]
else:
state_u = pb.get_variables()[update_var]
if state_u.data[0] is None:
state_u.init_data()
state_u.set_data(
pb.domain.get_mesh_coors(actual=True) - pb.domain.get_mesh_coors())
state_u.field.clear_mappings()
family_data = pb.family_data(state_u, term.region,
term.integral, term.integration)
if len(state_u.field.mappings0) == 0:
state_u.field.save_mappings()
n_el, n_qp, dim, n_en, n_c = state_u.get_data_shape(term.integral,
term.integration,
term.region.name)
conf_mat = pb.conf.materials
solid_key = [key for key in conf_mat.keys() if 'solid' in key][0]
solid_mat = conf_mat[solid_key].values
mat = {}
for mat_key in ['mu', 'K']:
if isinstance(solid_mat[mat_key], dict):
mat_fun = ConstantFunctionByRegion({mat_key: solid_mat[mat_key]})
mat[mat_key] = mat_fun.function(ts=ts, coors=coors, mode='qp',
term=term, problem=pb)[mat_key].reshape((n_el, n_qp, 1, 1))
else:
mat[mat_key] = nm.ones((n_el, n_qp, 1, 1)) * solid_mat[mat_key]
shape = family_data.green_strain.shape[:2]
sym = family_data.green_strain.shape[-2]
dim2 = dim**2
fargs = [family_data.get(name)
for name in NeoHookeanULTerm.family_data_names]
stress = nm.empty(shape + (sym, 1), dtype=nm.float64)
tanmod = nm.empty(shape + (sym, sym), dtype=nm.float64)
NeoHookeanULTerm.stress_function(stress, mat['mu'], *fargs)
NeoHookeanULTerm.tan_mod_function(tanmod, mat['mu'], *fargs)
fargs = [family_data.get(name)
for name in BulkPenaltyULTerm.family_data_names]
stress_p = nm.empty(shape + (sym, 1), dtype=nm.float64)
tanmod_p = nm.empty(shape + (sym, sym), dtype=nm.float64)
BulkPenaltyULTerm.stress_function(stress_p, mat['K'], *fargs)
BulkPenaltyULTerm.tan_mod_function(tanmod_p, mat['K'], *fargs)
stress_ns = nm.zeros(shape + (dim2, dim2), dtype=nm.float64)
tanmod_ns = nm.zeros(shape + (dim2, dim2), dtype=nm.float64)
sym2nonsym(stress_ns, stress + stress_p)
sym2nonsym(tanmod_ns, tanmod + tanmod_p)
npts = nm.prod(shape)
J = family_data.det_f
mtx_f = family_data.mtx_f.reshape((npts, dim, dim))
out = {
'E': 0.5 * (la.dot_sequences(mtx_f, mtx_f, 'ATB') - nm.eye(dim)),
'A': ((tanmod_ns + stress_ns) / J).reshape((npts, dim2, dim2)),
'S': ((stress + stress_p) / J).reshape((npts, sym, 1)),
}
return out
filename_mesh = data_dir + '/meshes/2d/special/circle_in_square_small.mesh'
dim = 2
options = {
'coefs': 'coefs',
'requirements': 'requirements',
'volume': {'expression': 'd_volume.5.Y(u)'},
'output_dir': './output',
'coefs_filename': 'coefs_hyper_homog',
'multiprocessing': True,
'chunks_per_worker': 2,
'mesh_update_variable': 'u',
'mesh_update_corrector': 'corrs_rs',
'recovery_hook': 'recovery_hook',
'store_micro_idxs': [49, 81],
}
fields = {
'displacement': ('real', 'vector', 'Y', 1),
}
functions = {
'match_x_plane': (per.match_x_plane,),
'match_y_plane': (per.match_y_plane,),
'mat_fce': (lambda ts, coors, mode=None, term=None, problem=None, **kwargs:
def_mat(ts, mode, coors, term, problem),),
}
materials = {
'mat_he': 'mat_fce',
'solid': ({'K': 1000,
'mu': {'Ym': 100, 'Yc': 10},
},),
}
variables = {
'u': ('unknown field', 'displacement'),
'v': ('test field', 'displacement', 'u'),
'Pi': ('parameter field', 'displacement', 'u'),
'Pi1u': ('parameter field', 'displacement', '(set-to-None)'),
'Pi2u': ('parameter field', 'displacement', '(set-to-None)'),
}
regions = {
'Y': 'all',
'Ym': 'cells of group 1',
'Yc': 'cells of group 2',
}
regions.update(define_box_regions(dim, (0., 0.), (1., 1.)))
ebcs = {
'fixed_u': ('Corners', {'u.all': 0.0}),
}
epbcs = {
'periodic_ux': (['Left', 'Right'], {'u.all': 'u.all'}, 'match_x_plane'),
'periodic_uy': (['Bottom', 'Top'], {'u.all': 'u.all'}, 'match_y_plane'),
}
coefs = {
'A': {
'requires': ['pis', 'corrs_rs'],
'expression': 'dw_nonsym_elastic.3.Y(mat_he.A, Pi1u, Pi2u)',
'set_variables': [('Pi1u', ('pis', 'corrs_rs'), 'u'),
('Pi2u', ('pis', 'corrs_rs'), 'u')],
'class': cb.CoefNonSymNonSym,
},
'S': {
'expression': 'ev_integrate_mat.3.Y(mat_he.S, u)',
'set_variables': [],
'class': cb.CoefOne,
}
}
requirements = {
'pis': {
'variables': ['u'],
'class': cb.ShapeDimDim,
},
'corrs_rs': {
'requires': ['pis'],
'ebcs': ['fixed_u'],
'epbcs': ['periodic_ux', 'periodic_uy'],
'equations': {
'balance_of_forces':
"""dw_nonsym_elastic.3.Y(mat_he.A, v, u)
= - dw_nonsym_elastic.3.Y(mat_he.A, v, Pi)"""
},
'set_variables': [('Pi', 'pis', 'u')],
'class': cb.CorrDimDim,
'save_name': 'corrs_hyper_homog',
'dump_variables': ['u'],
},
}
solvers = {
'ls': ('ls.scipy_direct', {}),
'newton': ('nls.newton', {
'i_max': 1,
'eps_a': 1e-4,
'problem': 'nonlinear',
}),
}
| bsd-3-clause | -5,512,007,324,990,879,000 | 32.836134 | 79 | 0.539178 | false | 3.090177 | false | false | false |
mikehulluk/morphforge | src/morphforgecontrib/simulation/synapse_templates/exponential_form/exp2synnmda/neuron.py | 1 | 8557 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
from .core import PostSynapticMech_Exp2SynNMDA_Base
from morphforge.simulation.neuron.simulationdatacontainers.mhocfile import MHocFileData
from morphforge.simulation.neuron.simulationdatacontainers.mhocfile import MHOCSections
from morphforge.simulation.neuron.core.neuronsimulationenvironment import NEURONEnvironment
from morphforge.simulation.neuron.biophysics.modfile import ModFile
from morphforge.simulation.neuron.objects.neuronrecordable import NEURONRecordable
from morphforge.simulation.neuron.hocmodbuilders.hocmodutils import HocModUtils
from Cheetah.Template import Template
from morphforge.simulation.neuron.networks import NEURONPostSynapticMechTemplateForwardToTemplate
from morphforge.stdimports import MFRandom
from morphforge.units import parse_unit_str
from morphforge.stdimports import StandardTags
from morphforgecontrib.simulation.synapse_templates.exponential_form.neuron_records import Neuron_PSM_Std_CurrentRecord
from morphforgecontrib.simulation.synapse_templates.exponential_form.neuron_records import Neuron_PSM_Std_ConductanceRecord
from morphforge.simulation.neuron.networks import NEURONSynapse
class Neuron_PSM_Std_NMDAVoltageDependanceRecord(NEURONRecordable):
def __init__(self, neuron_syn_post, **kwargs):
super(Neuron_PSM_Std_NMDAVoltageDependanceRecord, self).__init__(**kwargs)
self.neuron_syn_post = neuron_syn_post
self._description="MyDesc!!"
def get_unit(self):
return parse_unit_str('')
def get_std_tags(self):
return [StandardTags.NMDAVoltageDependancy]
def build_hoc(self, hocfile_obj):
assert len(self.neuron_syn_post.synapses) == 1
obj_name_hoc = hocfile_obj[MHocFileData.Synapses][self.neuron_syn_post]["synnamepost"]
HocModUtils.create_record_from_object(hocfile_obj=hocfile_obj, vecname="RecVec%s" % self.name, objname=obj_name_hoc, objvar="voltage_dependancy", recordobj=self)
def build_mod(self, modfile_set):
pass
class Neuron_PSM_Std_NMDAConductanceWithVoltageDependanceRecord(NEURONRecordable):
def __init__(self, neuron_syn_post, **kwargs):
super(Neuron_PSM_Std_NMDAConductanceWithVoltageDependanceRecord, self).__init__(**kwargs)
self.neuron_syn_post = neuron_syn_post
def get_unit(self):
return parse_unit_str('uS')
def get_std_tags(self):
return [StandardTags.NMDAConductanceWithVDep]
def build_hoc(self, hocfile_obj):
assert len(self.neuron_syn_post.synapses) == 1
obj_name_hoc = hocfile_obj[MHocFileData.Synapses][self.neuron_syn_post]["synnamepost"]
HocModUtils.create_record_from_object(hocfile_obj=hocfile_obj, vecname="RecVec%s" % self.name, objname=obj_name_hoc, objvar="gtot", recordobj=self)
def build_mod(self, modfile_set):
pass
exp2HOCTmpl = """
// Post-Synapse [$synnamepost]
objref $synnamepost
${cellname}.internalsections[$sectionindex] $synnamepost = new Exp2SynNMDAMorphforge ($sectionpos)
${synnamepost}.tau1 = $tau_open.rescale("ms").magnitude
${synnamepost}.tau2 = $tau_close.rescale("ms").magnitude
${synnamepost}.e = $e_rev.rescale("mV").magnitude
${synnamepost}.popening = $pOpening
${synnamepost}.is_vdep_on = $is_vdep_on
${synnamepost}.peak_conductance = $peak_conductance.rescale('uS').magnitude
${synnamepost}.is_conductance_limited_on = $is_conductance_limited_on
${synnamepost}.conductance_limit = $conductance_limit
${synnamepost}.gamma = $gamma.rescale('per_mV').magnitude
${synnamepost}.eta = $eta.rescale('per_mM').magnitude
${synnamepost}.mg2conc = $mg2conc.rescale('mM').magnitude
"""
class NEURONPostSynapticMechTemplate_Exp2SynNMDA(PostSynapticMech_Exp2SynNMDA_Base, NEURONPostSynapticMechTemplateForwardToTemplate):
def __init__(self, **kwargs):
super(NEURONPostSynapticMechTemplate_Exp2SynNMDA, self).__init__(**kwargs)
def build_hoc_for_instance(self, instance, hocfile_obj):
params = instance.get_resolved_parameters()
tau_open = params['tau_open']
tau_close = params['tau_close']
e_rev = params['e_rev']
popening = params['popening']
vdep = params['vdep']
limit_conductance = params['limit_conductance']
peak_conductance = params['peak_conductance']
gamma = params['gamma']
eta = params['eta']
mg2conc = params['mg2conc']
cell = instance.cell_location.cell
section = instance.cell_location.morphlocation.section
syn_name_post = instance.name + 'Post'
hoc_data_cell = hocfile_obj[MHocFileData.Cells][cell]
data = {
'synnamepost': syn_name_post,
'cell': cell,
'cellname': hoc_data_cell['cell_name'],
'sectionindex': hoc_data_cell['section_indexer'][section],
'sectionpos': instance.cell_location.morphlocation.sectionpos,
'tau_open': tau_open,
'tau_close': tau_close,
'e_rev': e_rev,
'pOpening': popening,
'random_seed': MFRandom.get_seed(),
'is_vdep_on': (1.0 if vdep else 0.0),
'is_conductance_limited_on': (1.0 if limit_conductance not in [None,False] else 0.0),
'conductance_limit': (limit_conductance if limit_conductance not in [None,False] else -1.0),
'peak_conductance': peak_conductance,
'gamma':gamma,
'eta':eta,
'mg2conc':mg2conc,
}
hocfile_obj.add_to_section(MHOCSections.InitSynapsesChemPost,
Template(exp2HOCTmpl, data).respond())
assert not instance in hocfile_obj[MHocFileData.Synapses]
hocfile_obj[MHocFileData.Synapses][instance] = data
def template_build_mod(self, modfile_set):
import postsynaptic_mechanisms_exp2syn_nmda_modfile_new
modfile = ModFile(modtxt=postsynaptic_mechanisms_exp2syn_nmda_modfile_new.get_exp2_syn_nmda_modfile(), name='UnusedParameterXXXExpSyn2', strict_modlunit=True)
modfile_set.append(modfile)
def get_record_for_instance(self, instance, what, **kwargs):
if what == NEURONSynapse.Recordables.SynapticCurrent:
return Neuron_PSM_Std_CurrentRecord(neuron_syn_post=instance, **kwargs)
if what == NEURONSynapse.Recordables.SynapticConductance:
return Neuron_PSM_Std_ConductanceRecord(neuron_syn_post=instance, **kwargs)
if what == StandardTags.NMDAVoltageDependancy:
return Neuron_PSM_Std_NMDAVoltageDependanceRecord(neuron_syn_post=instance, **kwargs)
if what == StandardTags.NMDAConductanceWithVDep:
return Neuron_PSM_Std_NMDAConductanceWithVoltageDependanceRecord(neuron_syn_post=instance, **kwargs)
assert False
NEURONEnvironment.synapse_psm_template_type.register_plugin(PostSynapticMech_Exp2SynNMDA_Base, NEURONPostSynapticMechTemplate_Exp2SynNMDA)
| bsd-2-clause | -4,668,474,014,525,667,000 | 42.658163 | 169 | 0.707374 | false | 3.450403 | false | false | false |
SequencingDOTcom/oAuth2-demo | python-django/oauth2demo/oauth/core/oauthclient.py | 4 | 4128 | import urllib
import sched
import time
from threading import Thread
from token import Token
from ..utils.http import do_basic_secure_post
from ..exceptions.exceptions import BasicAuthenticationFailedException
class DefaultSequencingOAuth2Client(object):
# Attribute for value of redirect url
ATTR_REDIRECT_URL = "redirect_uri"
# Attribute for value of response type
ATTR_RESPONSE_TYPE = "response_type"
# Attribute for value state
ATTR_STATE = "state"
# Attribute for value client id
ATTR_CLIENT_ID = "client_id"
# Attribute for value scope
ATTR_SCOPE = "scope"
# Attribute for value code
ATTR_CODE = "code"
# Attribute for value refresh token
ATTR_REFRESH_TOKEN = "refresh_token"
# Attribute for access token
ATTR_ACCESS_TOKEN = "access_token"
# Attribute for value grant type
ATTR_GRANT_TYPE = "grant_type"
# Attribute for value expires in
ATTR_EXPIRES_IN = "expires_in"
def __init__(self, auth_parameters):
self.auth_parameters = auth_parameters
self.token = None
self._token_refresher = None
def http_redirect_parameters(self):
attributes = {
self.ATTR_REDIRECT_URL: self.auth_parameters.redirect_uri,
self.ATTR_RESPONSE_TYPE: self.auth_parameters.response_type,
self.ATTR_STATE: self.auth_parameters.state,
self.ATTR_CLIENT_ID: self.auth_parameters.client_id,
self.ATTR_SCOPE: self.auth_parameters.scope
}
return attributes
def login_redirect_url(self):
params = urllib.urlencode(self.http_redirect_parameters())
return '%s?%s' % (self.auth_parameters.oauth_authorization_uri, params)
def authorize(self, response_code, response_state):
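        # Exchange the authorization code for access/refresh tokens (OAuth2 authorization code grant)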
if response_state != self.auth_parameters.state:
raise ValueError("Invalid state parameter")
uri = self.auth_parameters.oauth_token_uri
params = {
self.ATTR_GRANT_TYPE: self.auth_parameters.grant_type,
self.ATTR_CODE: response_code,
self.ATTR_REDIRECT_URL: self.auth_parameters.redirect_uri
}
result = do_basic_secure_post(uri, self.auth_parameters, params)
if result is None:
raise BasicAuthenticationFailedException("Failure authentication.")
access_token = result[self.ATTR_ACCESS_TOKEN]
refresh_token = result[self.ATTR_REFRESH_TOKEN]
timelife = int(result[self.ATTR_EXPIRES_IN])
self.token = Token(access_token, refresh_token, timelife)
self._token_refresher = self.__TokenRefresher(self, timelife - 60)
self._token_refresher.start()
return self.token
def is_authorized(self):
return (self.token is not None) and (self.token.lifetime != 0)
def _refresh_token(self):
uri = self.auth_parameters.oauth_token_refresh_uri
params = {
self.ATTR_GRANT_TYPE: self.auth_parameters.grant_type_refresh_token,
self.ATTR_REFRESH_TOKEN: self.token.refresh_token
}
result = do_basic_secure_post(uri, self.auth_parameters, params)
if result is None:
raise BasicAuthenticationFailedException("Authentication against backend failed. " +
"Server replied with: " + result)
access_token = result[self.ATTR_ACCESS_TOKEN]
refresh_token = self.token.refresh_token
        timelife = int(result[self.ATTR_EXPIRES_IN])
self.token = Token(access_token, refresh_token, timelife)
class __TokenRefresher(Thread):
def __init__(self, outer, frequency):
Thread.__init__(self)
self.outer = outer
self.frequency = frequency
self.scheduler = sched.scheduler(time.time, time.sleep)
def run(self):
self.scheduler.enter(self.frequency, 1, self.__run_refresh_token, ())
self.scheduler.run()
def __run_refresh_token(self):
self.outer._refresh_token()
self.scheduler.enter(self.frequency, 1, self.__run_refresh_token, ())
| mit | 4,327,431,929,401,514,500 | 33.4 | 96 | 0.641957 | false | 4.066995 | false | false | false |
nijel/weblate | weblate/checks/tests/test_models.py | 1 | 5050 | #
# Copyright © 2012 - 2021 Michal Čihař <[email protected]>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
"""Tests for unitdata models."""
from django.urls import reverse
from weblate.checks.models import Check
from weblate.checks.tasks import batch_update_checks
from weblate.trans.models import Unit
from weblate.trans.tasks import auto_translate
from weblate.trans.tests.test_views import FixtureTestCase, ViewTestCase
class CheckModelTestCase(FixtureTestCase):
def create_check(self, name):
return Check.objects.create(unit=self.get_unit(), check=name)
def test_check(self):
check = self.create_check("same")
self.assertEqual(
str(check.get_description()), "Source and translation are identical"
)
self.assertTrue(check.get_doc_url().endswith("user/checks.html#check-same"))
self.assertEqual(str(check), "Unchanged translation")
def test_check_nonexisting(self):
check = self.create_check("-invalid-")
self.assertEqual(check.get_description(), "-invalid-")
self.assertEqual(check.get_doc_url(), "")
def test_check_render(self):
unit = self.get_unit()
unit.source_unit.extra_flags = "max-size:1:1"
unit.source_unit.save()
check = self.create_check("max-size")
url = reverse(
"render-check", kwargs={"check_id": check.check, "unit_id": unit.id}
)
self.assertEqual(
str(check.get_description()),
'<a href="{0}?pos=0" class="thumbnail">'
'<img class="img-responsive" src="{0}?pos=0" /></a>'.format(url),
)
self.assert_png(self.client.get(url))
class BatchUpdateTest(ViewTestCase):
"""Test for complex manipulating translation."""
def setUp(self):
super().setUp()
self.translation = self.get_translation()
def do_base(self):
# Single unit should have no consistency check
self.edit_unit("Hello, world!\n", "Nazdar svete!\n")
unit = self.get_unit()
self.assertEqual(unit.all_checks_names, set())
# Add linked project
other = self.create_link_existing()
# Now the inconsistent check should be there
unit = self.get_unit()
self.assertEqual(unit.all_checks_names, {"inconsistent"})
return other
def test_autotranslate(self):
other = self.do_base()
auto_translate(
None,
other.translation_set.get(language_code="cs").pk,
"translate",
"todo",
"others",
self.component.pk,
[],
99,
)
unit = self.get_unit()
self.assertEqual(unit.all_checks_names, set())
def test_noop(self):
other = self.do_base()
# The batch update should not remove it
batch_update_checks(self.component.id, ["inconsistent"])
batch_update_checks(other.id, ["inconsistent"])
unit = self.get_unit()
self.assertEqual(unit.all_checks_names, {"inconsistent"})
def test_toggle(self):
other = self.do_base()
one_unit = self.get_unit()
other_unit = Unit.objects.get(
translation__language_code=one_unit.translation.language_code,
translation__component=other,
id_hash=one_unit.id_hash,
)
translated = one_unit.target
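        # Each entry: (target in this component, target in the linked component, expected check names)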
combinations = (
(translated, "", {"inconsistent"}),
("", translated, {"inconsistent"}),
("", "", set()),
(translated, translated, set()),
("", translated, {"inconsistent"}),
)
for update_one, update_other, expected in combinations:
Unit.objects.filter(pk=one_unit.pk).update(target=update_one)
Unit.objects.filter(pk=other_unit.pk).update(target=update_other)
batch_update_checks(self.component.id, ["inconsistent"])
unit = self.get_unit()
self.assertEqual(unit.all_checks_names, expected)
for update_one, update_other, expected in combinations:
Unit.objects.filter(pk=one_unit.pk).update(target=update_one)
Unit.objects.filter(pk=other_unit.pk).update(target=update_other)
batch_update_checks(other.id, ["inconsistent"])
unit = self.get_unit()
self.assertEqual(unit.all_checks_names, expected)
| gpl-3.0 | 7,613,588,041,203,271,000 | 35.309353 | 84 | 0.623935 | false | 3.900309 | true | false | false |
sloria/sepal | sepal/datasets/tasks.py | 1 | 8545 | import os
from django.conf import settings
import yaafelib as yf
import wave
import contextlib
from celery import task
from sepal.datasets.models import *
from sepal.datasets.utils import filter_by_key, find_dict_by_item
@task()
def handle_uploaded_file(f):
'''Saves an uploaded data source to MEDIA_ROOT/data_sources
'''
with open(os.path.join(settings.MEDIA_ROOT, 'data_sources', f.name), 'wb+') as destination:
for chunk in f.chunks():
destination.write(chunk)
return destination
@task()
def extract_features(dataset_id, instance_id, audiofile_path):
dataset = Dataset.objects.get(pk=dataset_id)
inst = Instance.objects.get(pk=instance_id)
n_frames, sample_rate, duration = 0, 0, 0
# Calculate the sample rate and duration
with contextlib.closing(wave.open(audiofile_path, 'r')) as audiofile:
n_frames = audiofile.getnframes()
sample_rate = audiofile.getframerate()
duration = n_frames / float(sample_rate)
# Format - {'Display name': 'name: Definition'}
FEATURES = [
{'display_name': 'Spectral Shape Characteristics',
'yaafe_name': 'sss',
'yaafe_definition': 'SpectralShapeStatistics',
'subfeatures': ['Spectral centroid', 'Spectral spread', 'Spectral kurtosis', 'Spectral skewness']
},
{'display_name': 'Temporal Shape Characteristics',
'yaafe_name': 'tss',
'yaafe_definition': 'TemporalShapeStatistics',
'subfeatures': ['Temporal centroid', 'Temporal spread', 'Temporal kurtosis', 'Temporal skewness']
},
{'display_name': 'ZCR',
'yaafe_name': 'zcr',
'yaafe_definition': 'ZCR',
'unit': 'Hz'
},
{'display_name': 'Energy',
'yaafe_name': 'energy',
'yaafe_definition': 'Energy',
},
{'display_name': 'Loudness',
'yaafe_name': 'loudness',
'yaafe_definition': 'Loudness',
},
{'display_name': 'Spectral rolloff',
'yaafe_name': 'spectral_rolloff',
'yaafe_definition': 'SpectralRolloff',
},
{'display_name': 'Perceptual sharpness',
'yaafe_name': 'perceptual_sharpness',
'yaafe_definition': 'PerceptualSharpness',
},
{'display_name': 'Perceptual spread',
'yaafe_name': 'perceptual_spread',
'yaafe_definition': 'PerceptualSpread',
},
{'display_name': 'Duration',
'unit': 's',
},
{'display_name': 'Sample rate',
'unit': 'Hz',
},
{'display_name': 'Spectral decrease',
'yaafe_name': 'spectral_decrease',
'yaafe_definition': 'SpectralDecrease',
},
{'display_name': "Spectral flatness",
'yaafe_name': 'spectral_flatness',
'yaafe_definition': 'SpectralFlatness',
},
# {'display_name': "Spectral flux",
# 'yaafe_name': 'spectral_flux',
# 'yaafe_definition': 'SpectralFlux',
# },
{'display_name': "Spectral slope",
'yaafe_name': 'spectral_slope',
'yaafe_definition': 'SpectralSlope',
},
# {'display_name': "Spectral variation",
# 'yaafe_name': 'spectral_variation',
# 'yaafe_definition': 'SpectralVariation',
# }
]
# Add features to extract
feature_plan = yf.FeaturePlan(sample_rate=sample_rate, resample=False)
for feature in FEATURES:
if 'yaafe_definition' in feature:
# YAAFE feature plans take definitions of the form: 'zcr: ZCR'
full_definition = feature['yaafe_name'] + ': ' + feature['yaafe_definition']
# Add the feature to the feature plan to be extracted
feature_plan.addFeature(full_definition)
# Configure an Engine
engine = yf.Engine()
engine.load(feature_plan.getDataFlow())
# Extract features
afp = yf.AudioFileProcessor()
afp.processFile(engine, audiofile_path)
# outputs dict format - {'Spectral centroid': [[2.33], [4.34],...[2.55]]}
outputs = {}
# Read and store output arrays to outputs dict
for feature in FEATURES:
if 'yaafe_definition' in feature: # Exclude duration and sample rate
output_name = feature['yaafe_name']
# If the feature has subfeatures, e.g. Spec shape stats
if 'subfeatures' in feature:
full_output = engine.readOutput(output_name)
for i, subfeature_display_name in enumerate(feature['subfeatures']):
outputs[subfeature_display_name] = full_output[:, i]
# If the feature has only 1 dimension(1 X T array)
else:
display_name = feature['display_name']
a = engine.readOutput(output_name) # 2D array
# Transpose data to make it a 1D array
outputs[display_name] = a.transpose()[0]
# Create YAAFE feature objects
feature_obj_list = []
for display_name in outputs.keys():
feature = find_dict_by_item(('display_name', display_name), FEATURES)
f, created = Feature.objects.get_or_create(
name=display_name.lower(),
display_name=display_name
)
if feature and ('unit' in feature):
f.unit = feature['unit']
f.save()
feature_obj_list.append(f)
# Create Sample rate and Duration objects
rate_obj, created = Feature.objects.get_or_create(name='sample rate')
if not rate_obj.unit:
rate_obj.unit = 'Hz'
rate_obj.save()
feature_obj_list.append(rate_obj)
duration_obj, created = Feature.objects.get_or_create(name='duration')
if not duration_obj.unit:
duration_obj.unit = 's'
duration_obj.save()
feature_obj_list.append(duration_obj)
# Associate features with instance
# for feature in feature_obj_list:
# inst.features.add(feature)
# If dataset has labels
if dataset.labels():
# NOTE: This assumes there's only one label name per dataset.
# Just indexes the first label name
label_name = dataset.labels()[0]
else:
# attach a placeholder LabelName called 'variable'
filtered = LabelName.objects.filter(name='variable')
# make sure that 'get' doesn't return an error if there are more than 1
# LabelName called 'variable'
if len(filtered) <= 1:
label_name, c = LabelName.objects.get_or_create(name='variable')
else:
label_name = filtered[0]
# Add a placeholder label value called "none" to instance
# This is necessary in order for plotting to work
filtered = LabelValue.objects.filter(value="none", label_name=label_name)
if len(filtered) <= 1:
no_label, c = LabelValue.objects.get_or_create(value="none",
label_name=label_name)
else:
no_label = filtered[0]
inst.label_values.add(no_label)
inst.save()
# Save output data and associate it with inst
for display_name, output in outputs.iteritems():
if output.size > 0: # Avoid empty data
for i in range(output[0].size):
output_mean = output[i].mean()
FeatureValue.objects.create(value=output_mean,
feature=Feature.objects.get(name__iexact=display_name.lower()),
instance=inst)
# Save sample_rate and duration data
FeatureValue.objects.create(value=sample_rate,
feature=Feature.objects.get(name='sample rate'),
instance=inst)
FeatureValue.objects.create(value=duration,
feature=Feature.objects.get(name='duration'),
instance=inst)
| bsd-3-clause | 8,606,148,952,394,087,000 | 36.977778 | 118 | 0.542071 | false | 4.150073 | false | false | false |
Blackfin/rclone | make_manual.py | 2 | 2008 | #!/usr/bin/python
"""
Make single page versions of the documentation for release and
conversion into man pages etc.
"""
import os
import re
from datetime import datetime
docpath = "docs/content"
outfile = "MANUAL.md"
# Order to add docs segments to make outfile
docs = [
"about.md",
"install.md",
"docs.md",
"overview.md",
"drive.md",
"s3.md",
"swift.md",
"dropbox.md",
"googlecloudstorage.md",
"amazonclouddrive.md",
"local.md",
"changelog.md",
"bugs.md",
"faq.md",
"licence.md",
"authors.md",
"contact.md",
]
# Docs which aren't made into outfile
ignore_docs = [
"downloads.md",
"privacy.md",
"donate.md",
]
def read_doc(doc):
"""Read file as a string"""
path = os.path.join(docpath, doc)
with open(path) as fd:
contents = fd.read()
parts = contents.split("---\n", 2)
if len(parts) != 3:
raise ValueError("Couldn't find --- markers: found %d parts" % len(parts))
contents = parts[2].strip()+"\n\n"
# Remove icons
contents = re.sub(r'<i class="fa.*?</i>\s*', "", contents)
# Make [...](/links/) absolute
contents = re.sub(r'\((\/.*?\/)\)', r"(http://rclone.org\1)", contents)
return contents
def check_docs(docpath):
"""Check all the docs are in docpath"""
files = set(f for f in os.listdir(docpath) if f.endswith(".md"))
files -= set(ignore_docs)
docs_set = set(docs)
if files == docs_set:
return
print "Files on disk but not in docs variable: %s" % ", ".join(files - docs_set)
print "Files in docs variable but not on disk: %s" % ", ".join(docs_set - files)
raise ValueError("Missing files")
def main():
check_docs(docpath)
with open(outfile, "w") as out:
out.write("""\
%% rclone(1) User Manual
%% Nick Craig-Wood
%% %s
""" % datetime.now().strftime("%b %d, %Y"))
for doc in docs:
out.write(read_doc(doc))
print "Written '%s'" % outfile
if __name__ == "__main__":
main()
| mit | 329,188,811,106,438,460 | 23.487805 | 84 | 0.581175 | false | 3.162205 | false | false | false |
goodcrypto/goodcrypto-libs | reinhardt/utils.py | 1 | 3336 | '''
Utility classes and functions.
Copyright 2009-2016 GoodCrypto
Last modified: 2016-05-18
This file is open source, licensed under GPLv3 <http://www.gnu.org/licenses/>.
'''
from __future__ import unicode_literals
import os
import sys
from datetime import datetime
from traceback import format_exc
from django.conf import settings
from django.utils.encoding import force_text, smart_text, DjangoUnicodeDecodeError
from syr.log import get_log
log = get_log()
def to_unicode(s):
''' Converts string to unicode. If can't convert, returns u''.
See
    django.utils.encoding.smart_str() and smart_unicode() for a better implementation.
http://www.saltycrane.com/blog/2008/11/python-unicodeencodeerror-ascii-codec-cant-encode-character/
http://wiki.python.org/moin/UnicodeEncodeError'''
try:
unicode_s = force_text(s)
str(unicode_s)
except Exception as e:
try:
unicode_s = force_text(s, encoding=syr.prefered_encoding)
except:
try:
# \u0000 through \u00FF, inclusive
unicode_s = force_text(s, encoding='iso-8859-1')
except Exception as e:
log('Unable to convert %r to unicode: %r' % (s, e))
unicode_s = force_text('')
return unicode_s
def is_secure_connection(request):
''' Check if connection is secure. '''
secure = False
try:
if 'HTTP_X_SCHEME' in request.META:
secure = 'https' == request.META['HTTP_X_SCHEME']
elif 'wsgi.url_scheme' in request.META:
secure = 'https' == request.META['wsgi.url_scheme']
except:
log(format_exc())
return secure
def django_error_page_response(request, error=None):
''' Return a response with Django's error page.
If settings.DEBUG is True, Django automatically shows a useful
error page for exceptions in views. But sometimes an exception
isn't propogated out of the view, such as when the exception
occurs in a separate thread. This shows the Django error page
for any exception.
If error is not present or is None, returns an error page for the
last exception.
Example:
error = None
...
# in separate thread
error = sys.exc_info()
...
# in parent thread
show_django_error_page(error)
'''
from django.views.debug import technical_500_response
# error should be sys.exc_info() from an earlier except block
if not error:
error = sys.exc_info()
exc_type, exc_value, tb = error
    response = technical_500_response(request, exc_type, exc_value, tb)
    return response
def is_django_error_page(html):
''' Returns True if this html contains a Django error page,
else returns False.'''
django_error_1 = "You're seeing this error because you have"
django_error_2 = 'display a standard 500 page'
try:
smart_html = smart_text(html)
except DjangoUnicodeDecodeError:
# definitely not a django html error page
result = False
else:
result = (django_error_1 in smart_html) and (django_error_2 in smart_html)
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| gpl-3.0 | 7,627,728,768,087,755,000 | 27.033613 | 111 | 0.623501 | false | 4.063337 | false | false | false |
activityworkshop/Murmeli | murmeli/postservice.py | 1 | 14009 | '''Post service, dealing with outgoing post'''
import threading
import time
import socks
from murmeli.system import System, Component
from murmeli.signals import Timer
from murmeli.message import StatusNotifyMessage, Message, RelayMessage
from murmeli import dbutils
from murmeli import imageutils
from murmeli import guinotification
class DefaultMessageTransport:
'''Class which the outgoing postman usually uses to send messages.
May be substituted by another object for use in unit tests.'''
@staticmethod
def send_message(msg_bytes, whoto):
'''Try to send the given message over the default mechanism'''
try:
sock = socks.socksocket()
sock.setproxy(socks.PROXY_TYPE_SOCKS4, "localhost", 11109)
sock.connect((whoto + ".onion", 11009))
num_sent = sock.send(msg_bytes)
sock.close()
if num_sent != len(msg_bytes):
print("Num bytes sent:", num_sent, "but message has length:", len(msg_bytes))
else:
return PostService.RC_MESSAGE_SENT
except Exception as exc:
print("Socks send threw something:", exc)
return PostService.RC_MESSAGE_FAILED
class PostService(Component):
'''System component for managing the outgoing post'''
# Return codes
RC_MESSAGE_SENT = 1
RC_MESSAGE_IGNORED = 2
RC_MESSAGE_FAILED = 3
RC_MESSAGE_INVALID = 4
def __init__(self, parent, transport=None):
Component.__init__(self, parent, System.COMPNAME_POSTSERVICE)
self.work_lock = threading.Lock()
self.flush_timer = None
self.need_to_flush = True
self.step_counter = -1
self.running = False
self.flush_interval = 30 # By default, flush every 30 seconds
self.transport = transport or DefaultMessageTransport()
self.should_broadcast = True
def set_timer_interval(self, timer_secs):
'''Set the interval to a non-default value (especially for tests)'''
self.flush_interval = timer_secs
self.step_counter = 0
def checked_start(self):
'''Start the separate threads'''
self.running = True
if self.flush_interval:
self.flush_timer = Timer(self.flush_interval, self._flush)
return True
def stop(self):
'''Stop this component'''
self.running = False
if self.flush_timer:
self.flush_timer.stop()
def request_broadcast(self):
'''Request a broadcast in a separate thread'''
self.step_counter = -1
self.request_flush()
def request_flush(self):
'''Request a flush in a separate thread'''
self.need_to_flush = True
if self.flush_interval == 0:
Timer(1, self._flush, repeated=False)
def _flush(self):
'''Flush the outbox'''
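        # Every tenth flush cycle also triggers a broadcast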
self.step_counter = (self.step_counter + 1) % 10
if not self.step_counter:
self._broadcast()
if not self.need_to_flush:
return
if self.work_lock.acquire(timeout=2):
print("Flush")
self.need_to_flush = False
self.call_component(System.COMPNAME_GUI, "notify_gui",
notify_type=guinotification.NOTIFY_OUTBOX_FLUSHING)
# Look in the outbox for messages
database = self.get_component(System.COMPNAME_DATABASE)
messages_found = 0
messages_sent = 0
failed_recpts = set()
# Loop twice over all messages, firstly dealing with priority messages
for flush_iter in range(2):
print("Flush iter %d" % flush_iter)
for msg in database.get_outbox():
if not msg:
continue # message already deleted
if not self.running:
break # flushing stopped from outside
if flush_iter == 0:
messages_found += 1
recipient = msg.get('recipient')
if not self.call_component(System.COMPNAME_CONTACTS,
"is_online", tor_id=recipient):
continue # not a priority for the first iter
msg_sent, should_delete = self.deal_with_outbox_msg(msg, failed_recpts)
if msg_sent:
messages_sent += 1
self.call_component(System.COMPNAME_GUI, "notify_gui",
notify_type=guinotification.NOTIFY_MSG_SENT)
if should_delete \
and not database.delete_from_outbox(index=msg.get("_id")):
print("Failed to delete from outbox:", msg)
                    # Wait between sends to avoid overloading the network
time.sleep(3)
print("From %d messages, I managed to send %d" % (messages_found, messages_sent))
# We tried to send a message to these recipients but failed - set them to be offline
for recpt in failed_recpts:
self.call_component(System.COMPNAME_CONTACTS, "gone_offline",
tor_id=recpt)
print("Finished flush, releasing lock")
self.work_lock.release()
def deal_with_outbox_msg(self, msg, failed_recpts):
'''Deal with a message in the outbox, trying to send if possible'''
# send_timestamp = msg.get('timestamp', None) # not used yet
# TODO: if timestamp is too old, either delete the message or move to inbox
# Some messages have a single recipient, others only have a recipientList
recipient = msg.get('recipient')
if recipient:
return self.deal_with_single_recipient(msg, recipient, failed_recpts)
if msg.get('recipientList'):
return self.deal_with_relayed_message(msg, failed_recpts)
print("msg in outbox had neither recipient nor recipientList?", msg)
msg_sent = False
should_delete = False
return (msg_sent, should_delete)
def deal_with_single_recipient(self, msg, recipient, failed_recpts):
'''Try to send the given message to the specified recipient'''
print("Dealing with single recipient:", recipient)
msg_bytes = None
msg_sent = False
should_delete = False
send_result = self.RC_MESSAGE_IGNORED
database = self.get_component(System.COMPNAME_DATABASE)
# Check recipient status, if it's deleted then delete message also
if dbutils.get_status(database, recipient) in [None, 'deleted']:
send_result = self.RC_MESSAGE_IGNORED
elif recipient in failed_recpts:
print("Not even bothering to try to send to '%s', previously failed" % recipient)
send_result = self.RC_MESSAGE_FAILED
else:
msg_bytes = imageutils.string_to_bytes(msg['message'])
send_result = self._send_message(msg_bytes, msg.get('encType'), recipient)
msg_sent = (send_result == self.RC_MESSAGE_SENT)
if msg_sent:
# The recipient and I are both online
self.call_component(System.COMPNAME_CONTACTS, "come_online", tor_id=recipient)
own_tor_id = dbutils.get_own_tor_id(database)
self.call_component(System.COMPNAME_CONTACTS, "come_online", tor_id=own_tor_id)
self.call_component(System.COMPNAME_LOGGING, "log",
logstr="Sent '%s' to '%s'" % (msg.get('msgType'), recipient))
if send_result in [self.RC_MESSAGE_IGNORED, self.RC_MESSAGE_SENT, self.RC_MESSAGE_INVALID]:
# either recipient was blocked or message was sent, either way delete it
should_delete = True
else:
failed_recpts.add(recipient)
if not msg.get('queue'):
print("Failed to send a message but it shouldn't be queued, deleting it")
should_delete = True
elif msg.get('relays'):
print("Failed to send but I can try to relay it")
signed_blob = self._get_blob_to_relay(msg, database)
# Loop over each relay in the list and try to send to each one
failed_relays = set()
for relay in msg.get('relays'):
if relay not in failed_recpts and \
self._send_message(signed_blob, Message.ENCTYPE_RELAY,
relay) == self.RC_MESSAGE_SENT:
print("Sent message to relay '%s'" % relay)
self.call_component(System.COMPNAME_LOGGING, "log",
logstr="Relayed '%s'" % msg.get('msgType'))
else:
# Send failed, so add this relay to the list of failed ones
failed_relays.add(relay)
failed_recpts.add(relay)
# here we update the list even if it hasn't changed
database.update_outbox_message(index=msg["_id"],
props={"relays":list(failed_relays)})
return (msg_sent, should_delete)
def _get_blob_to_relay(self, msg, database):
'''Get a signed blob so the message can be relayed'''
if msg.get('relayMessage'):
return bytes(msg.get('relayMessage'))
print("No signed blob in message, need to create one")
msg_bytes = imageutils.string_to_bytes(msg['message'])
signed_blob = RelayMessage.wrap_outgoing_message(self._sign_message(msg_bytes))
database.update_outbox_message(index=msg["_id"],
props={"relayMessage":list(signed_blob)})
return signed_blob
def _sign_message(self, msg_bytes):
'''Sign the given bytes with our own key id'''
database = self.get_component(System.COMPNAME_DATABASE)
own_key_id = dbutils.get_own_key_id(database)
crypto = self.get_component(System.COMPNAME_CRYPTO)
if not own_key_id or not crypto:
print("Failed to sign message using own key '%s'" % own_key_id)
return None
return crypto.sign_data(msg_bytes, own_key_id)
def deal_with_relayed_message(self, msg, failed_recpts):
'''Try to send the given relay message to a recipient list'''
msg_sent = False
should_delete = False
msg_bytes = imageutils.string_to_bytes(msg['message'])
failed_recpts_for_message = set()
database = self.get_component(System.COMPNAME_DATABASE)
own_tor_id = dbutils.get_own_tor_id(database)
for recpt in msg.get('recipientList'):
if recpt in failed_recpts:
failed_recpts_for_message.add(recpt)
else:
send_result = self._send_message(msg_bytes, msg.get('encType'), recpt)
if send_result == self.RC_MESSAGE_SENT:
msg_sent = True
self.call_component(System.COMPNAME_CONTACTS, "come_online", tor_id=recpt)
self.call_component(System.COMPNAME_CONTACTS, "come_online", tor_id=own_tor_id)
elif send_result == self.RC_MESSAGE_FAILED:
# Couldn't send to this relay recipient
failed_recpts_for_message.add(recpt)
failed_recpts.add(recpt)
if failed_recpts_for_message:
# update msg with the new recipientList
relays = list(failed_recpts_for_message)
database.update_outbox_message(index=msg["_id"],
props={"recipientList":relays})
print("Failed to send a relay to:", failed_recpts_for_message)
else:
print("Relayed everything, now deleting relay message")
should_delete = True
return (msg_sent, should_delete)
def _send_message(self, msg_bytes, enctype, whoto):
'''Send the given message to the specified recipient'''
if not msg_bytes:
return self.RC_MESSAGE_INVALID
print("Send_message (%d bytes) to '%s'" % (len(msg_bytes), whoto))
if not whoto or not isinstance(whoto, str) or len(whoto) < 16:
print("whoto no good, returning invalid")
return self.RC_MESSAGE_INVALID
database = self.get_component(System.COMPNAME_DATABASE)
profile = database.get_profile(torid=whoto)
status = profile.get('status') if profile else None
if enctype == Message.ENCTYPE_NONE:
status = 'allowed'
if not status or status in ['deleted', 'blocked']:
# recipient not found or unsuitable status
print("status no good, returning ignored")
return self.RC_MESSAGE_IGNORED
# Use configured transport object to send
if self.transport:
print("passing on to self.transport")
return self.transport.send_message(msg_bytes, whoto)
print("no transport available, so failed")
return self.RC_MESSAGE_FAILED
def _broadcast(self):
'''Broadcast our online status by adding to the outbox'''
database = self.get_component(System.COMPNAME_DATABASE)
if not database or not self.should_broadcast:
return
if self.work_lock.acquire(timeout=2):
print("Broadcast")
profile_list = database.get_profiles_with_status(["trusted", "robot"])
if profile_list:
crypto = self.get_component(System.COMPNAME_CRYPTO)
msg = StatusNotifyMessage()
msg.recipients = [c['torid'] for c in profile_list]
dbutils.add_message_to_outbox(msg, crypto, database)
self.work_lock.release()
self.need_to_flush = True
| gpl-2.0 | 6,563,628,373,632,077,000 | 45.696667 | 99 | 0.579984 | false | 4.254176 | false | false | false |
rspavel/spack | var/spack/repos/builtin/packages/flink/package.py | 5 | 1283 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Flink(Package):
"""
Apache Flink is an open source stream processing framework with
powerful stream- and batch-processing capabilities.
"""
homepage = "https://flink.apache.org/"
url = "http://archive.apache.org/dist/flink/flink-1.9.1/flink-1.9.1-bin-scala_2.11.tgz"
version('1.9.1', sha256='f69de344cd593e92f8261e19ae8a47b3910e9a70a7cd1ccfb1ecd1ff000b93ea')
version('1.9.0', sha256='a2245f68309e94ed54d86a680232a518aed9c5ea030bcc0b298bc8f27165eeb7')
version('1.8.3', sha256='1ba90e99f70ad7e2583d48d1404d1c09e327e8fb8fa716b1823e427464cc8dc0')
version('1.8.2', sha256='1a315f4f1fab9d651702d177b1741439ac98e6d06e9e13f9d410b34441eeda1c')
version('1.8.1', sha256='4fc0d0f163174ec43e160fdf21a91674979b978793e60361e2fce5dddba4ddfa')
depends_on('java@8:', type='run')
def url_for_version(self, version):
url = "http://archive.apache.org/dist/flink/flink-{0}/flink-{0}-bin-scala_2.11.tgz"
return url.format(version)
def install(self, spec, prefix):
install_tree('.', prefix)
| lgpl-2.1 | 5,248,184,728,426,844,000 | 40.387097 | 96 | 0.734217 | false | 2.607724 | false | false | false |
SiLab-Bonn/fe65_p2 | fe65p2/scans/timewalk_scan.py | 1 | 17087 | from fe65p2.scan_base import ScanBase
import fe65p2.plotting as plotting
import fe65p2.analysis as analysis
import time
import numpy as np
import bitarray
import tables as tb
from bokeh.charts import output_file, save, show
from bokeh.models.layouts import Column, Row
import yaml
from basil.dut import Dut
import logging
import os
import itertools
logging.basicConfig(level=logging.INFO,
format="%(asctime)s - %(name)s - [%(levelname)-8s] (%(threadName)-10s) %(message)s")
row = range(1,65,1)
all_pixels = []
for (r,c) in itertools.product(row,row):
all_pixels.append((r,c))
local_configuration = {
"mask_steps": 1,
"repeat_command": 101,
"scan_range": [0.005, 0.2, 0.005],#[0.05, 0.55, 0.01], #[0.005, 0.30, 0.01], # [0.01, 0.2, 0.01],# [0.01, 0.20, 0.01], #[0.1, 0.4, 0.05],
"columns": [True] * 2 + [False] * 14,
"mask_filename": '/media/topcoup/TB/Prmp36_vthA255_vthB0_PreCmp110/col1/output_data/20170119_163931_tu_threshold_scan.h5',
"pix_list": [(2,6),(3,3)],
#DAC parameters
"PrmpVbpDac": 36,
"vthin1Dac": 255,
"vthin2Dac": 0,
"vffDac" : 24,
"PrmpVbnFolDac" : 51,
"vbnLccDac" : 1,
"compVbnDac":25,
"preCompVbnDac" : 110
}
class TimewalkScan(ScanBase):
scan_id = "timewalk_scan"
def scan(self, mask_steps=4, repeat_command=101, columns=[True] * 16, pix_list=[], scan_range=[], mask_filename='', **kwargs):
'''Scan loop
        This scan measures time walk. The charge injection can be driven by the GPAC or by an external device;
        in the latter case the device is an Agilent 33250a connected through a serial port.
        Time walk and TOT are measured by a TDC module in the FPGA.
        The output is an .h5 file (data) and an .html file with plots.
        To perform a proper timewalk scan, a mask_filename, i.e. the output of the tuned threshold scan, has to be provided.
'''
def load_vthin1Dac(mask):
if os.path.exists(mask):
in_file = tb.open_file(mask, 'r')
dac_status = yaml.load(in_file.root.meta_data.attrs.dac_status)
vthrs1 = dac_status['vthin1Dac']
logging.info("Loaded vth1 from noise scan: %s", str(vthrs1))
return int(vthrs1)
else: return 29
vth1 = load_vthin1Dac(mask_filename)
inj_factor = 1.0
INJ_LO = 0.0
try:
dut = Dut(ScanBase.get_basil_dir(self) + '/examples/lab_devices/agilent33250a_pyserial.yaml')
dut.init()
logging.info('Connected to ' + str(dut['Pulser'].get_info()))
except RuntimeError:
INJ_LO = 0.2
inj_factor = 2.0
logging.info('External injector not connected. Switch to internal one')
self.dut['INJ_LO'].set_voltage(INJ_LO, unit='V')
self.dut['global_conf']['PrmpVbpDac'] = int(kwargs.get('PrmpVbpDac', 36))
#self.dut['global_conf']['vthin1Dac'] = int(vth1)
self.dut['global_conf']['vthin2Dac'] = int(kwargs.get('vthin2Dac', 0))
self.dut['global_conf']['preCompVbnDac'] = int(kwargs.get('preCompVbnDac', 110))
self.dut['global_conf']['vffDac'] = int(kwargs.get('vffDac', 42))
self.dut['global_conf']['PrmpVbnFolDac'] = int(kwargs.get('PrmpVbnFolDac', 51))
self.dut['global_conf']['vbnLccDac'] = int(kwargs.get('vbnLccDac',1))
self.dut['global_conf']['compVbnDac'] = int(kwargs.get('compVbnDac',25))
self.dut.write_global()
self.dut['control']['RESET'] = 0b01
self.dut['control']['DISABLE_LD'] = 0
self.dut['control']['PIX_D_CONF'] = 0
self.dut['control'].write()
self.dut['control']['CLK_OUT_GATE'] = 1
self.dut['control']['CLK_BX_GATE'] = 1
self.dut['control'].write()
time.sleep(0.1)
self.dut['control']['RESET'] = 0b11
self.dut['control'].write()
self.dut['global_conf']['OneSr'] = 1
self.dut['global_conf']['TestHit'] = 0
self.dut['global_conf']['SignLd'] = 0
self.dut['global_conf']['InjEnLd'] = 0
self.dut['global_conf']['TDacLd'] = 0
self.dut['global_conf']['PixConfLd'] = 0
self.dut.write_global()
self.dut['global_conf']['ColEn'][:] = bitarray.bitarray([True] * 16) # (columns)
self.dut['global_conf']['ColSrEn'][:] = bitarray.bitarray([True] * 16)
self.dut.write_global()
self.dut['pixel_conf'].setall(False)
self.dut.write_pixel()
self.dut['global_conf']['InjEnLd'] = 1
self.dut.write_global()
self.dut['global_conf']['InjEnLd'] = 0
mask_en = np.full([64, 64], False, dtype=np.bool)
mask_tdac = np.full([64, 64], 16, dtype=np.uint8)
for inx, col in enumerate(columns):
if col:
mask_en[inx * 4:(inx + 1) * 4, :] = True
if mask_filename:
logging.info('Using pixel mask from file: %s', mask_filename)
with tb.open_file(mask_filename, 'r') as in_file_h5:
mask_tdac = in_file_h5.root.scan_results.tdac_mask[:]
mask_en = in_file_h5.root.scan_results.en_mask[:]
self.dut.write_en_mask(mask_en)
self.dut.write_tune_mask(mask_tdac)
self.dut['global_conf']['OneSr'] = 1
self.dut.write_global()
self.dut['inj'].set_delay(50000) # 1 zero more
self.dut['inj'].set_width(1000)
self.dut['inj'].set_repeat(repeat_command)
self.dut['inj'].set_en(False)
self.dut['trigger'].set_delay(400-4)
self.dut['trigger'].set_width(16)
self.dut['trigger'].set_repeat(1)
self.dut['trigger'].set_en(False)
logging.debug('Enable TDC')
self.dut['tdc']['RESET'] = True
self.dut['tdc']['EN_TRIGGER_DIST'] = True
self.dut['tdc']['ENABLE_EXTERN'] = False
self.dut['tdc']['EN_ARMING'] = False
self.dut['tdc']['EN_INVERT_TRIGGER'] = False
self.dut['tdc']['EN_INVERT_TDC'] = False
self.dut['tdc']['EN_WRITE_TIMESTAMP'] = True
scan_range = np.arange(scan_range[0], scan_range[1], scan_range[2]) / inj_factor
scan_range = np.append(scan_range, 0.3 / inj_factor)
scan_range = np.append(scan_range, 0.5 / inj_factor)
#scan_range = np.append(scan_range, 0.7 / inj_factor)
self.pixel_list = pix_list
p_counter = 0
for pix in pix_list:
mask_en = np.full([64, 64], False, dtype=np.bool)
mask_en[pix[0], pix[1]] = True
self.dut.write_en_mask(mask_en)
self.dut.write_inj_mask(mask_en)
self.inj_charge = []
for idx, k in enumerate(scan_range):
dut['Pulser'].set_voltage(INJ_LO, float(INJ_LO + k), unit='V')
self.dut['INJ_HI'].set_voltage(float(INJ_LO + k), unit='V')
self.inj_charge.append(float(k) * 1000.0 * analysis.cap_fac())
time.sleep(0.5)
with self.readout(scan_param_id=idx + p_counter * len(scan_range)):
logging.info('Scan Parameter: %f (%d of %d)', k, idx + 1, len(scan_range))
self.dut['tdc']['ENABLE'] = True
self.dut['global_conf']['vthin1Dac'] = int(vth1)
self.dut['global_conf']['vthin2Dac'] = int(kwargs.get('vthin2Dac', 0))
self.dut['global_conf']['PrmpVbpDac'] = int(kwargs.get('PrmpVbpDac', 36))
self.dut['global_conf']['preCompVbnDac'] = int(kwargs.get('preCompVbnDac', 110))
self.dut.write_global()
time.sleep(0.1)
self.dut['global_conf']['vthin1Dac'] = int(vth1)
self.dut['global_conf']['vthin2Dac'] = int(kwargs.get('vthin2Dac', 0))
self.dut['global_conf']['PrmpVbpDac'] = int(kwargs.get('PrmpVbpDac', 36))
self.dut['global_conf']['preCompVbnDac'] = int(kwargs.get('preCompVbnDac', 110))
self.dut.write_global()
time.sleep(0.1)
#self.dut['global_conf']['PrmpVbnFolDac'] = kwargs['PrmpVbnFolDac']
#self.dut['global_conf']['vbnLccDac'] = kwargs['vbnLccDac']
#self.dut['global_conf']['compVbnDac'] = kwargs['compVbnDac']
#self.dut['global_conf']['preCompVbnDac'] = kwargs['preCompVbnDac']
#self.dut.write_global()
#time.sleep(0.1)
#self.dut.write_global()
#time.sleep(0.1)
self.dut['inj'].start()
while not self.dut['inj'].is_done():
#time.sleep(0.05)
pass
while not self.dut['trigger'].is_done():
#time.sleep(0.05)
pass
self.dut['tdc'].ENABLE = 0
p_counter += 1
def tdc_table(self, scanrange):
h5_filename = self.output_filename + '.h5'
with tb.open_file(h5_filename, 'r+') as in_file_h5:
raw_data = in_file_h5.root.raw_data[:]
meta_data = in_file_h5.root.meta_data[:]
if (meta_data.shape[0] == 0):
print 'empty output'
return
repeat_command = in_file_h5.root.meta_data.attrs.kwargs
a = repeat_command.rfind("repeat_command: ")
repeat_command = repeat_command[a + len("repeat_command: "):a + len("repeat_command: ") + 7]
a = repeat_command.rfind("\n")
repeat_command = int(repeat_command[0:a])
param, index = np.unique(meta_data['scan_param_id'], return_index=True)
pxl_list = []
for p in param:
pix_no = int(p) / int(len(self.inj_charge))
pxl_list.append(self.pixel_list[pix_no][0] * 64 + self.pixel_list[pix_no][1])
index = index[1:]
index = np.append(index, meta_data.shape[0])
index = index - 1
stops = meta_data['index_stop'][index]
split = np.split(raw_data, stops)
avg_tdc = []
avg_tdc_err = []
avg_del = []
avg_del_err = []
hits = []
deletelist = ()
for i in range(len(split[:-1])): # loop on pulses
rwa_data_param = split[i]
tdc_data = rwa_data_param & 0xFFF # take last 12 bit
tdc_delay = (rwa_data_param & 0x0FF00000) >> 20
counter = 0.0
TOT_sum = 0.0
DEL_sum = 0.0
if (tdc_data.shape[0] == 0 or tdc_data.shape[0] == 1):
counter = 1.0
for j in range(tdc_data.shape[0]): # loop on repeats
if (j > 0):
counter += 1
TOT_sum += tdc_data[j]
DEL_sum += tdc_delay[j]
if (counter > 1):
hits.append(counter)
avg_tdc.append((float(TOT_sum) / float(counter)) * 1.5625)
avg_tdc_err.append(1.5625 / (np.sqrt(12.0 * counter)))
avg_del.append((float(DEL_sum) / float(counter)) * 1.5625)
avg_del_err.append(1.5625 / (np.sqrt(12.0 * counter)))
else:
deletelist = np.append(deletelist, i)
pxl_list = np.delete(pxl_list, deletelist)
newpix = [0]
pix_no_old = pxl_list[0]
runparam = 0
for p in pxl_list:
if p != pix_no_old:
newpix = np.append(newpix, runparam)
pix_no_old = p
runparam = runparam + 1
addedvalues = 0
for pixels in range(len(newpix)):
missingvalues = 0
if newpix[pixels] == newpix[-1]:
missingvalues = scanrange - abs(newpix[pixels] + addedvalues - len(hits))
else:
if abs(newpix[pixels] - newpix[pixels + 1]) < scanrange:
missingvalues = scanrange - abs(newpix[pixels] - newpix[pixels + 1])
if missingvalues != 0:
hits = np.insert(hits, newpix[pixels] + addedvalues, np.zeros(missingvalues))
avg_tdc = np.insert(avg_tdc, newpix[pixels] + addedvalues, np.zeros(missingvalues))
avg_tdc_err = np.insert(avg_tdc_err, newpix[pixels] + addedvalues, np.zeros(missingvalues))
avg_del = np.insert(avg_del, newpix[pixels] + addedvalues, np.zeros(missingvalues))
avg_del_err = np.insert(avg_del_err, newpix[pixels] + addedvalues, np.zeros(missingvalues))
pxl_list = np.insert(pxl_list, newpix[pixels] + addedvalues,
(pxl_list[newpix[pixels] + addedvalues]) * np.ones(missingvalues))
addedvalues = addedvalues + missingvalues
injections = []
for pixels in range(int(len(pxl_list) / len(self.inj_charge))):
for i in range(len(self.inj_charge)):
injections = np.append(injections, self.inj_charge[i])
pix, stop = np.unique(pxl_list, return_index=True)
stop = np.sort(stop)
stop = list(stop)
stop.append(len(avg_tdc))
repeat_command_dic={}
repeat_command_dic['repeat_command']=repeat_command
avg_tab = np.rec.fromarrays([injections, pxl_list, hits, avg_tdc, avg_tdc_err, avg_del, avg_del_err],
dtype=[('charge', float), ('pixel_no', int), ('hits', int),
('tot_ns', float), ('err_tot_ns', float), ('delay_ns', float),
('err_delay_ns', float)])
tdc_table=in_file_h5.create_table(in_file_h5.root, 'tdc_data', avg_tab, filters=self.filter_tables)
tdc_table.attrs.repeat_command = repeat_command_dic
thresholds = ()
expfit0 = ()
expfit1 = ()
expfit2 = ()
expfit3 = ()
pixels = ()
for i in range(len(stop) - 1):
s1 = int(stop[i])
s2 = int(stop[i + 1])
A, mu, sigma = analysis.fit_scurve(hits[s1:s2], injections[s1:s2],repeat_command)
if np.max(hits[s1:s2]) > (repeat_command + 200): # or mu > 3000:
thresholds = np.append(thresholds, 0)
expfit0 = np.append(expfit0, 0)
expfit1 = np.append(expfit1, 0)
expfit2 = np.append(expfit2, 0)
expfit3 = np.append(expfit3, 0)
pixels = np.append(pixels, pxl_list[s1])
continue
for values in range(s1, s2):
if injections[values] >= 5 / 4 * mu:
s1 = values
break
numberer = 0
hitvaluesold = hits[-1]
for hitvalues in hits[s1:s2]:
if abs(hitvalues - hitvaluesold) <= 1 and hitvalues != 0:
break
numberer = numberer + 1
hitvaluesold = hitvalues
if numberer == len(avg_del[s1:s2]):
numberer = 0
expfit = analysis.fit_exp(injections[s1:s2], avg_del[s1:s2], mu, abs(numberer))
startexp = -expfit[0] * np.log((25.0 + np.min(avg_del[s1:s2]) - expfit[3]) / expfit[2]) - expfit[1]
if np.isnan(startexp) or startexp >= 2000:
startexp = 0
thresholds = np.append(thresholds, startexp)
expfit0 = np.append(expfit0, expfit[0])
expfit1 = np.append(expfit1, expfit[1])
expfit2 = np.append(expfit2, expfit[2])
expfit3 = np.append(expfit3, expfit[3])
pixels = np.append(pixels, pxl_list[s1])
thresh = np.rec.fromarrays([pixels, thresholds, expfit0, expfit1, expfit2, expfit3],
dtype=[('pixel_no', int), ('td_threshold', float),
('expfit0', float), ('expfit1', float), ('expfit2', float),
('expfit3', float)])
in_file_h5.create_table(in_file_h5.root, 'td_threshold', thresh, filters=self.filter_tables)
p1, p2, single_scan = plotting.plot_timewalk(h5_filename)
output_file(self.output_filename + '.html', title=self.run_name)
status = plotting.plot_status(h5_filename)
save(Row(Column(p1, p2, status), single_scan))
#show(p1)
if __name__ == "__main__":
Timescan = TimewalkScan()
Timescan.start(**local_configuration)
scanrange = local_configuration['scan_range']
Timescan.tdc_table(len(np.arange(scanrange[0], scanrange[1], scanrange[2]))+2)
| gpl-2.0 | -1,280,557,465,826,803,200 | 44.687166 | 141 | 0.52022 | false | 3.353022 | false | false | false |
goerz/clusterjob | docs/source/conf.py | 1 | 11667 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# clusterjob documentation build configuration file, created by
# sphinx-quickstart on Sun Dec 27 17:35:53 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# We import the clusterjob just to ensure that it installed in the same
# environment as sphinx, so that autdoc works
import clusterjob
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'sphinx.ext.intersphinx',
]
intersphinx_mapping = {
'python': ('https://docs.python.org/3.4', None),
'ipyparallel': ('http://ipyparallel.readthedocs.org/en/latest/', None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'clusterjob'
copyright = '2015, Michael Goerz'
author = 'Michael Goerz'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.0'
# The full version, including alpha/beta/rc tags.
release = '2.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# autodoc settings
autoclass_content = 'both'
autodoc_member_order = 'bysource'
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = True
napoleon_use_param = True
napoleon_use_rtype = True
# -- Extensions to the Napoleon GoogleDocstring class ---------------------
from sphinx.ext.napoleon.docstring import GoogleDocstring
# first, we define new methods for any new sections and add them to the class
def parse_keys_section(self, section):
return self._format_fields('Keys', self._consume_fields())
GoogleDocstring._parse_keys_section = parse_keys_section
def parse_attributes_section(self, section):
return self._format_fields('Attributes', self._consume_fields())
GoogleDocstring._parse_attributes_section = parse_attributes_section
def parse_class_attributes_section(self, section):
return self._format_fields('Class Attributes', self._consume_fields())
GoogleDocstring._parse_class_attributes_section = parse_class_attributes_section
# we now patch the parse method to guarantee that the the above methods are
# assigned to the _section dict
def patched_parse(self):
self._sections['keys'] = self._parse_keys_section
self._sections['class attributes'] = self._parse_class_attributes_section
self._unpatched_parse()
GoogleDocstring._unpatched_parse = GoogleDocstring._parse
GoogleDocstring._parse = patched_parse
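# Illustrative example only (not required by the build): with the patch above,
# Google-style docstrings processed by autodoc may use the extra "Keys:" and
# "Class Attributes:" sections.  The hypothetical docstring below shows the
# intended usage; the function name and fields are invented for illustration.
#
#     def job_record(job):
#         """Return a dict describing a submitted job.
#
#         Keys:
#             job_id (str): backend identifier of the job.
#             status (str): one of 'PENDING', 'RUNNING', 'COMPLETE'.
#         """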
# -- Options for HTML output ----------------------------------------------
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from
# docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
#html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'clusterjobdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'clusterjob.tex', 'clusterjob Documentation',
'Michael Goerz', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'clusterjob', 'clusterjob Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'clusterjob', 'clusterjob Documentation',
author, 'clusterjob', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit | -8,506,664,327,711,867,000 | 32.144886 | 80 | 0.715351 | false | 3.65393 | true | false | false |
vholland/schemaorg | scripts/inputconvert.py | 1 | 3381 | #!/usr/bin/env python2.7
import unittest
import os
from os import path, getenv
from os.path import expanduser
import logging # https://docs.python.org/2/library/logging.html#logging-levels
import glob
import argparse
import sys
import csv
sys.path.append( os.getcwd() )
sys.path.insert( 1, 'lib' ) #Pickup libs, rdflib etc., from shipped lib directory
sys.path.insert( 1, 'sdopythonapp' ) #Pickup sdopythonapp functionality
sys.path.insert( 1, 'sdopythonapp/lib' ) #Pickup sdopythonapp libs, rdflib etc., from shipped lib directory
sys.path.insert( 1, 'sdopythonapp/site' ) #Pickup sdopythonapp from shipped site
# Ensure that the google.appengine.* packages are available
# in tests as well as all bundled third-party packages.
sdk_path = getenv('APP_ENGINE', expanduser("~") + '/google-cloud-sdk/platform/google_appengine/')
sys.path.insert(0, sdk_path) # add AppEngine SDK to path
import rdflib
from rdflib.term import URIRef, Literal
from rdflib.parser import Parser
from rdflib.serializer import Serializer
from rdflib.plugins.sparql import prepareQuery, processUpdate
from rdflib.compare import graph_diff
from rdflib.namespace import RDFS, RDF
import pyRdfa
rdflib.plugin.register("jsonld", Parser, "rdflib_jsonld.parser", "JsonLDParser")
rdflib.plugin.register("rdfa", Parser, "pyRdfa.rdflibparsers", "RDFaParser")
rdflib.plugin.register("jsonld", Serializer, "rdflib_jsonld.serializer", "JsonLDSerializer")
OUTTYPES = {'jsonld': 'jsonld','xml':'xml','nq':'nquads','rdf':'xml','ttl':'turtle'}
parser = argparse.ArgumentParser()
parser.add_argument("-i","--input" , action='append',nargs='*', help="Input file(s)")
parser.add_argument("-o","--outputdir", help="Output directory (Default = .)")
parser.add_argument("-f","--format", default='ttl', help="Output format ['xml', 'rdf', 'nquads','nt','jsonld','ttl']")
parser.add_argument("-c","--combinefile", default=None, help="Combine outputs into file")
parser.add_argument("-d","--defaultns", help="Default output namespace")
args = parser.parse_args()
print("%s: Arguments: %s" % (sys.argv[0],args))
if args.format not in OUTTYPES:
parser.print_help()
sys.exit(1)
format = args.format
combine = args.combinefile
SPARQL1 = """
PREFIX dc: <http://purl.org/dc/terms/>
PREFIX schema: <http://schema.org/>
DELETE { ?s dc:source ?o }
INSERT { ?s schema:source ?o }
WHERE {
?s dc:source ?o .
}
"""
def out(filename):
graph.update(SPARQL1)
graph.bind('',URIRef('http://schema.org/'),override=True, replace=True)
if args.outputdir:
outfile = "%s/%s" % (args.outputdir,filename)
else:
outfile = filename
print("Writing %s triples to %s" % (len(graph),outfile))
f = open(outfile,'w')
f.write(graph.serialize(format=OUTTYPES.get(format),auto_compact=True))
files = args.input[0]
graph = rdflib.ConjunctiveGraph()
for fullfilename in files:
if not combine:
graph = rdflib.ConjunctiveGraph()
if args.outputdir:
filename = os.path.basename(fullfilename)
else:
filename = fullfilename
filestub, ext = os.path.splitext(filename)
ext = ext[1:]
graph.parse(fullfilename,format = ext)
print("Loaded %s triples from %s" % (len(graph), filename))
if not combine:
out(filename="%s.%s" % (filestub,format))
if combine:
print("Outputting ")
out(filename=combine)
| apache-2.0 | 1,162,541,376,466,549,000 | 31.2 | 118 | 0.695948 | false | 3.244722 | false | false | false |
crs4/omero.biobank | bl/vl/app/importer/study.py | 1 | 5639 | # BEGIN_COPYRIGHT
# END_COPYRIGHT
"""
Import study
============
A study represents a general context. It is characterized by the
following fields::
  label        description
  ASTUDY       A textual description of ASTUDY, no tabs please.
The description column is optional. The study sub-operation reads
in a tsv file with the above information and outputs the VIDs of the
newly created study objects.
"""
import os, csv, copy
import core
DEFAULT_DESCRIPTION = 'No description provided'
class Recorder(core.Core):
def __init__(self, out_stream=None, report_stream=None,
host=None, user=None, passwd=None, keep_tokens=1,
batch_size=1000, operator='Alfred E. Neumann', logger=None):
super(Recorder, self).__init__(host, user, passwd, keep_tokens=keep_tokens,
study_label=None, logger=logger)
self.out_stream = out_stream
if self.out_stream:
self.out_stream.writeheader()
self.report_stream = report_stream
if self.report_stream:
self.report_stream.writeheader()
self.batch_size = batch_size
self.operator = operator
def record(self, records, blocking_validation):
def records_by_chunk(batch_size, records):
offset = 0
while len(records[offset:]) > 0:
yield records[offset:offset+batch_size]
offset += batch_size
if not records:
msg = 'No records are going to be imported'
self.logger.critical(msg)
raise core.ImporterValidationError(msg)
self.preload_studies()
records, bad_records = self.do_consistency_checks(records)
for br in bad_records:
self.report_stream.writerow(br)
if blocking_validation and len(bad_records) >= 1:
raise core.ImporterValidationError('%d invalid records' % len(bad_records))
for i, c in enumerate(records_by_chunk(self.batch_size, records)):
self.logger.info('start processing chunk %d' % i)
self.process_chunk(c)
self.logger.info('done processing chunk %d' % i)
def preload_studies(self):
self.logger.info('start prefetching studies')
self.known_studies = {}
studies = self.kb.get_objects(self.kb.Study)
for s in studies:
self.known_studies[s.label] = s
self.logger.info('there are %d Study(s) in the kb'
% (len(self.known_studies)))
def do_consistency_checks(self, records):
self.logger.info('start consistency checks')
k_map = {}
good_records = []
bad_records = []
mandatory_fields = ['label']
for i, r in enumerate(records):
reject = ' Rejecting import of record %d: ' % i
if self.missing_fields(mandatory_fields, r):
f = 'missing mandatory field'
self.logger.error(reject + f)
bad_rec = copy.deepcopy(r)
bad_rec['error'] = f
bad_records.append(bad_rec)
continue
if r['label'] in self.known_studies:
f = 'there is a pre-existing study with label %s' % r['label']
self.logger.error(reject + f)
bad_rec = copy.deepcopy(r)
bad_rec['error'] = f
bad_records.append(bad_rec)
continue
if r['label'] in k_map:
f = 'there is a pre-existing study with label %s in this batch' % r['label']
self.logger.error(reject + f)
bad_rec = copy.deepcopy(r)
bad_rec['error'] = f
bad_records.append(bad_rec)
continue
      k_map[r['label']] = r  # track labels already seen in this batch
good_records.append(r)
self.logger.info('done with consistency checks')
return good_records, bad_records
def process_chunk(self, chunk):
studies = []
for r in chunk:
conf = {'label': r['label'], 'description': r['description']}
studies.append(self.kb.factory.create(self.kb.Study, conf))
self.kb.save_array(studies)
for d in studies:
self.logger.info('saved %s[%s] as %s.' % (d.label, d.description, d.id))
self.out_stream.writerow({
'study': 'None',
'label': d.label,
'type': 'Study',
'vid': d.id,
})
help_doc = """
import new Study definitions into the KB.
"""
def make_parser(parser):
parser.add_argument('--label', metavar="STRING",
help="overrides the label column value")
class RecordCanonizer(core.RecordCanonizer):
def canonize(self, r):
super(RecordCanonizer, self).canonize(r)
r.setdefault('description', DEFAULT_DESCRIPTION)
def implementation(logger, host, user, passwd, args, close_handles):
f = csv.DictReader(args.ifile, delimiter='\t')
logger.info('start processing file %s' % args.ifile.name)
records = [r for r in f]
if not records:
logger.info('empty file')
return
canonizer = RecordCanonizer(['label'], args)
canonizer.canonize_list(records)
o = csv.DictWriter(args.ofile, fieldnames=['study', 'label', 'type', 'vid'],
delimiter='\t', lineterminator=os.linesep)
report_fnames = f.fieldnames
report_fnames.append('error')
report = csv.DictWriter(args.report_file, report_fnames,
delimiter='\t', lineterminator=os.linesep,
extrasaction='ignore')
recorder = Recorder(o, report, host=host, user=user, passwd=passwd,
keep_tokens=args.keep_tokens, logger=logger)
try:
recorder.record(records, args.blocking_validator)
except core.ImporterValidationError as ve:
logger.critical(ve.message)
raise
finally:
close_handles(args)
logger.info('done processing file %s' % args.ifile.name)
def do_register(registration_list):
registration_list.append(('study', help_doc, make_parser,
implementation))
| gpl-2.0 | -3,478,938,050,830,232,600 | 32.170588 | 84 | 0.636992 | false | 3.56673 | false | false | false |
GrahamDumpleton/autowrapt | src/__startup__/sitecustomize.py | 1 | 5685 | '''Provides a custom 'sitecustomize' module which will be used when the
'autowrapt' wrapper script is used when launching a Python program. This
custom 'sitecustomize' module will find any existing 'sitecustomize'
module which may have been overridden and ensures that that is imported
as well. Once that is done then the monkey patches for ensuring any
bootstrapping is done for registering post import hook callback
functions after the 'usercustomize' module is loaded will be applied. If
however 'usercustomize' support is not enabled, then the registration
will be forced immediately.
'''
import os
import sys
import site
import time
_debug = os.environ.get('AUTOWRAPT_DEBUG',
'off').lower() in ('on', 'true', '1')
def log_message(text, *args):
if _debug:
text = text % args
timestamp = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
print('AUTOWRAPT: %s (%d) - %s' % (timestamp, os.getpid(), text))
log_message('autowrapt - sitecustomize (%s)', __file__)
log_message('working_directory = %r', os.getcwd())
log_message('sys.prefix = %r', os.path.normpath(sys.prefix))
try:
log_message('sys.real_prefix = %r', sys.real_prefix)
except AttributeError:
pass
log_message('sys.version_info = %r', sys.version_info)
log_message('sys.executable = %r', sys.executable)
if hasattr(sys, 'flags'):
log_message('sys.flags = %r', sys.flags)
log_message('sys.path = %r', sys.path)
# This 'sitecustomize' module will override any which may already have
# existed, be it one supplied by the user or one which has been placed
# in the 'site-packages' directory of the Python installation. We need
# to ensure that the existing 'sitecustomize' module is still loaded. To
# do that we remove the special startup directory containing this module
# from 'sys.path' and use the 'imp' module to find any original
# 'sitecustomize' module and load it.
import imp
boot_directory = os.path.dirname(__file__)
pkgs_directory = os.path.dirname(os.path.dirname(boot_directory))
log_message('pkgs_directory = %r', pkgs_directory)
log_message('boot_directory = %r', boot_directory)
path = list(sys.path)
try:
path.remove(boot_directory)
except ValueError:
pass
try:
(file, pathname, description) = imp.find_module('sitecustomize', path)
except ImportError:
pass
else:
log_message('sitecustomize = %r', (file, pathname, description))
imp.load_module('sitecustomize', file, pathname, description)
# Before we try and setup or trigger the bootstrapping for the
# registration of the post import hook callback functions, we need to
# make sure that we are still executing in the context of the same
# Python installation as the 'autowrapt' script was installed in. This
# is necessary because if it isn't and we were now running out of a
# different Python installation, then it may not have the 'autowrapt'
# package installed and so our attempts to import it will fail causing
# startup of the Python interpreter to fail in an obscure way.
expected_python_prefix = os.environ.get('AUTOWRAPT_PYTHON_PREFIX')
actual_python_prefix = os.path.realpath(os.path.normpath(sys.prefix))
expected_python_version = os.environ.get('AUTOWRAPT_PYTHON_VERSION')
actual_python_version = '.'.join(map(str, sys.version_info[:2]))
python_prefix_matches = expected_python_prefix == actual_python_prefix
python_version_matches = expected_python_version == actual_python_version
log_message('python_prefix_matches = %r', python_prefix_matches)
log_message('python_version_matches = %r', python_version_matches)
if python_prefix_matches and python_version_matches:
bootstrap_packages = os.environ.get('AUTOWRAPT_BOOTSTRAP')
log_message('bootstrap_packages = %r', bootstrap_packages)
if bootstrap_packages:
# When the 'autowrapt' script is run from out of a Python egg
# directory under 'buildout', then the path to the egg directory
# will not actually be listed in 'sys.path' as yet. This is
# because 'buildout' sets up any scripts so that 'sys.path' is
# specified only within the script. So that we can find the
# 'autowrapt' package, we need to ensure that in this case the
# egg directory for 'autowrapt' is manually added to 'sys.path'
# before we can import it.
pkgs_directory_missing = pkgs_directory not in sys.path
if pkgs_directory_missing:
sys.path.insert(0, pkgs_directory)
from autowrapt.bootstrap import bootstrap
from autowrapt.bootstrap import register_bootstrap_functions
# If we had to add the egg directory above corresponding to the
# 'autowrapt' package, now remove it to ensure the presence of
# the directory doesn't cause any later problems. It is quite
# possible that the directory will be added back in by scripts
# run under 'buildout' but that would be the normal behaviour
# and better off letting it do it how it wants to rather than
# leave the directory in place.
if pkgs_directory_missing:
try:
sys.path.remove(pkgs_directory)
except ValueError:
pass
# Trigger the application of the monkey patches to the 'site'
# module so that actual registration of the post import hook
# callback functions is only run after any 'usercustomize'
# module has been imported. If 'usercustomize' module support
# is disabled, as it will be in a Python virtual environment,
# then trigger the registration immediately.
bootstrap()
if not site.ENABLE_USER_SITE:
register_bootstrap_functions()
| bsd-2-clause | 6,302,914,326,998,667,000 | 37.938356 | 74 | 0.707828 | false | 3.883197 | false | false | false |
tknapen/reward_np_analysis | preprocessing.py | 1 | 7273 | # from nipype import config
# config.enable_debug_mode()
# Importing necessary packages
import os
import os.path as op
import glob
import json
import nipype
from nipype import config, logging
import matplotlib.pyplot as plt
import nipype.interfaces.fsl as fsl
import nipype.pipeline.engine as pe
import nipype.interfaces.utility as util
import nipype.interfaces.io as nio
from nipype.utils.filemanip import copyfile
import nibabel as nib
from IPython.display import Image
from nipype.interfaces.utility import Function, Merge, IdentityInterface
from nipype.interfaces.io import SelectFiles, DataSink
from IPython.display import Image
from IPython import embed as shell
from workflows.preprocessing_pipeline import create_all_calcarine_reward_preprocessing_workflow
from workflows.nii_to_h5 import create_all_calcarine_reward_2_h5_workflow
from workflows.whole_brain_GLM import create_whole_brain_GLM_workflow
# we will create a workflow from a BIDS formatted input, at first for the specific use case
# of a 7T PRF experiment's preprocessing.
# a project directory that we assume has already been created.
raw_data_dir = '/home/raw_data/-2014/reward/human_reward/data/'
preprocessed_data_dir = '/home/shared/-2014/reward/new/'
FS_subject_dir = os.path.join(raw_data_dir, 'FS_SJID')
preprocess = False
GLM = True
mask = True
for si in range(1,7): #
sub_id, FS_ID = 'sub-00%i'%si, 'sub-00%i'%si
sess_id = 'ses-*'
# now we set up the folders and logging there.
opd = op.join(preprocessed_data_dir, sub_id)
try:
os.makedirs(op.join(opd, 'log'))
except OSError:
pass
config.update_config({ 'logging': {
'log_directory': op.join(opd, 'log'),
'log_to_file': True,
'workflow_level': 'INFO',
'interface_level': 'INFO'
},
'execution': {
'stop_on_first_crash': False
}
})
logging.update_logging(config)
# load the sequence parameters from json file
with open(os.path.join(raw_data_dir, 'acquisition_parameters.json')) as f:
json_s = f.read()
acquisition_parameters = json.loads(json_s)
# load the analysis parameters from json file
with open(os.path.join(raw_data_dir, 'analysis_parameters.json')) as f:
json_s = f.read()
analysis_info = json.loads(json_s)
# load the analysis/experimental parameters for this subject from json file
with open(os.path.join(raw_data_dir, sub_id ,'experimental_parameters.json')) as f:
json_s = f.read()
experimental_parameters = json.loads(json_s)
analysis_info.update(experimental_parameters)
if not op.isdir(os.path.join(preprocessed_data_dir, sub_id)):
try:
os.makedirs(os.path.join(preprocessed_data_dir, sub_id))
except OSError:
pass
# copy json files to preprocessed data folder
# this allows these parameters to be updated and synced across subjects by changing only the raw data files.
copyfile(os.path.join(raw_data_dir, 'acquisition_parameters.json'), os.path.join(preprocessed_data_dir, 'acquisition_parameters.json'), copy = True)
copyfile(os.path.join(raw_data_dir, 'analysis_parameters.json'), os.path.join(preprocessed_data_dir, 'analysis_parameters.json'), copy = True)
copyfile(os.path.join(raw_data_dir, sub_id ,'experimental_parameters.json'), os.path.join(preprocessed_data_dir, sub_id ,'experimental_parameters.json'), copy = True)
if preprocess:
# the actual workflow
all_calcarine_reward_workflow = create_all_calcarine_reward_preprocessing_workflow(analysis_info, name = 'all_calcarine_reward')
# standard in/output variables
all_calcarine_reward_workflow.inputs.inputspec.raw_directory = raw_data_dir
all_calcarine_reward_workflow.inputs.inputspec.sub_id = sub_id
all_calcarine_reward_workflow.inputs.inputspec.sess_id = sess_id
all_calcarine_reward_workflow.inputs.inputspec.output_directory = opd
all_calcarine_reward_workflow.inputs.inputspec.psc_func = analysis_info['psc_func']
# to what file do we motion correct?
all_calcarine_reward_workflow.inputs.inputspec.which_file_is_EPI_space = analysis_info['which_file_is_EPI_space']
# registration details
all_calcarine_reward_workflow.inputs.inputspec.FS_ID = FS_ID
all_calcarine_reward_workflow.inputs.inputspec.FS_subject_dir = FS_subject_dir
all_calcarine_reward_workflow.inputs.inputspec.standard_file = op.join(os.environ['FSL_DIR'], 'data/standard/MNI152_T1_1mm_brain.nii.gz')
# all the input variables for retroicor functionality
# the key 'retroicor_order_or_timing' determines whether slice timing
# or order is used for regressor creation
all_calcarine_reward_workflow.inputs.inputspec.MB_factor = acquisition_parameters['MultiBandFactor']
all_calcarine_reward_workflow.inputs.inputspec.nr_dummies = acquisition_parameters['NumberDummyScans']
all_calcarine_reward_workflow.inputs.inputspec.tr = acquisition_parameters['RepetitionTime']
all_calcarine_reward_workflow.inputs.inputspec.slice_direction = acquisition_parameters['SliceDirection']
all_calcarine_reward_workflow.inputs.inputspec.phys_sample_rate = acquisition_parameters['PhysiologySampleRate']
all_calcarine_reward_workflow.inputs.inputspec.slice_timing = acquisition_parameters['SliceTiming']
all_calcarine_reward_workflow.inputs.inputspec.slice_order = acquisition_parameters['SliceOrder']
all_calcarine_reward_workflow.inputs.inputspec.acceleration = acquisition_parameters['SenseFactor']
all_calcarine_reward_workflow.inputs.inputspec.epi_factor = acquisition_parameters['EpiFactor']
all_calcarine_reward_workflow.inputs.inputspec.wfs = acquisition_parameters['WaterFatShift']
all_calcarine_reward_workflow.inputs.inputspec.te_diff = acquisition_parameters['EchoTimeDifference']
# write out the graph and run
all_calcarine_reward_workflow.write_graph(opd + '.svg', format='svg', graph2use='colored', simple_form=False)
all_calcarine_reward_workflow.run('MultiProc', plugin_args={'n_procs': 24})
# all_calcarine_reward_workflow.run()
if GLM:
glm_wf = create_whole_brain_GLM_workflow(analysis_info)
glm_wf.inputs.inputspec.sub_id = sub_id
glm_wf.inputs.inputspec.preprocessed_directory = preprocessed_data_dir
glm_wf.write_graph(opd + '_GLM.svg', format='svg', graph2use='colored', simple_form=False)
glm_wf.run('MultiProc', plugin_args={'n_procs': 6})
if mask:
n2h = create_all_calcarine_reward_2_h5_workflow(analysis_info, name='all_calcarine_reward_nii_2_h5')
# standard in/output variables
n2h.inputs.inputspec.preprocessed_data_dir = preprocessed_data_dir
n2h.inputs.inputspec.sub_id = sub_id
n2h.write_graph(opd + '_h5.svg', format='svg', graph2use='colored', simple_form=False)
n2h.run()
| mit | -1,604,775,566,669,153,300 | 47.486667 | 170 | 0.688299 | false | 3.540896 | true | false | false |
treyhunner/databundles | databundles/geo/util.py | 1 | 6216 | '''
Created on Feb 15, 2013
@author: eric
'''
from collections import namedtuple
import random
from osgeo import gdal, ogr
BoundingBox = namedtuple('BoundingBox', ['min_x', 'min_y','max_x', 'max_y'])
def extents(database, table_name, where=None, lat_col='_db_lat', lon_col='_db_lon'):
'''Return the bounding box for a table in the database. The partition must specify
a table
'''
# Find the extents of the data and figure out the offsets for the array.
e= database.connection.execute
if where:
where = "WHERE "+where
else:
where = ''
r = e("""SELECT min({lon}) as min_x, min({lat}) as min_y,
max({lon}) as max_x, max({lat}) as max_y from {table} {where}"""
.format(lat=lat_col, lon=lon_col, table=table_name, where=where)
).first()
# Convert to a regular tuple
o = BoundingBox(r[0], r[1],r[2],r[3])
return o
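# Illustrative usage (hypothetical database/table names, not from this module):
#   bb = extents(partition.database, 'geocoded_table', where="state = 'CA'")
#   print bb.min_x, bb.min_y, bb.max_x, bb.max_y  # BoundingBox fields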
#From http://danieljlewis.org/files/2010/06/Jenks.pdf
#
# !!!! Use pysal instead!
# !!!! http://pysal.geodacenter.org/1.2/library/esda/mapclassify.html#pysal.esda.mapclassify.Natural_Breaks
#
def jenks_breaks(dataList, numClass):
dataList.sort()
print "A"
mat1 = []
for i in range(0, len(dataList) + 1):
temp = []
for j in range(0, numClass + 1):
temp.append(0)
mat1.append(temp)
print "B"
mat2 = []
for i in range(0, len(dataList) + 1):
temp = []
for j in range(0, numClass + 1):
temp.append(0)
mat2.append(temp)
print "C"
for i in range(1, numClass + 1):
mat1[1][i] = 1
mat2[1][i] = 0
for j in range(2, len(dataList) + 1):
mat2[j][i] = float('inf')
print "D"
v = 0.0
# # iterations = datalist * .5*datalist * Numclass
for l in range(2, len(dataList) + 1):
s1 = 0.0
s2 = 0.0
w = 0.0
for m in range(1, l + 1):
i3 = l - m + 1
val = float(dataList[i3 - 1])
s2 += val * val
s1 += val
w += 1
v = s2 - (s1 * s1) / w
i4 = i3 - 1
if i4 != 0:
for j in range(2, numClass + 1):
if mat2[l][j] >= (v + mat2[i4][j - 1]):
mat1[l][j] = i3
mat2[l][j] = v + mat2[i4][j - 1]
mat1[l][1] = 1
mat2[l][1] = v
k = len(dataList)
kclass = []
print "E"
for i in range(0, numClass + 1):
kclass.append(0)
kclass[numClass] = float(dataList[len(dataList) - 1])
countNum = numClass
print 'F'
while countNum >= 2:
#print "rank = " + str(mat1[k][countNum])
id_ = int((mat1[k][countNum]) - 2)
#print "val = " + str(dataList[id])
kclass[countNum - 1] = dataList[id_]
k = int((mat1[k][countNum] - 1))
countNum -= 1
return kclass
def getGVF( dataList, numClass ):
""" The Goodness of Variance Fit (GVF) is found by taking the
difference between the squared deviations from the array mean (SDAM)
and the squared deviations from the class means (SDCM), and dividing by the SDAM
"""
breaks = jenks_breaks(dataList, numClass)
dataList.sort()
listMean = sum(dataList)/len(dataList)
print listMean
SDAM = 0.0
for i in range(0,len(dataList)):
sqDev = (dataList[i] - listMean)**2
SDAM += sqDev
SDCM = 0.0
for i in range(0,numClass):
if breaks[i] == 0:
classStart = 0
else:
classStart = dataList.index(breaks[i])
classStart += 1
classEnd = dataList.index(breaks[i+1])
classList = dataList[classStart:classEnd+1]
classMean = sum(classList)/len(classList)
print classMean
preSDCM = 0.0
for j in range(0,len(classList)):
sqDev2 = (classList[j] - classMean)**2
preSDCM += sqDev2
SDCM += preSDCM
return (SDAM - SDCM)/SDAM
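# Illustrative worked example (added for clarity, not part of the original module):
# for dataList = [1, 2, 3, 10, 11, 12] and numClass = 2 the global mean is 6.5, so
# SDAM = 2*(5.5**2 + 4.5**2 + 3.5**2) = 125.5; with classes [1, 2, 3] and [10, 11, 12]
# (means 2 and 11) SDCM = 2 + 2 = 4, giving GVF = (125.5 - 4)/125.5 ~= 0.97,
# i.e. a near-perfect fit for well-separated data.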
def rasterize(pixel_size=25):
# Open the data source
RASTERIZE_COLOR_FIELD = "__color__"
orig_data_source = ogr.Open("test.shp")
# Make a copy of the layer's data source because we'll need to
# modify its attributes table
source_ds = ogr.GetDriverByName("Memory").CopyDataSource(orig_data_source, "")
source_layer = source_ds.GetLayer(0)
source_srs = source_layer.GetSpatialRef()
x_min, x_max, y_min, y_max = source_layer.GetExtent()
# Create a field in the source layer to hold the features colors
field_def = ogr.FieldDefn(RASTERIZE_COLOR_FIELD, ogr.OFTReal)
source_layer.CreateField(field_def)
source_layer_def = source_layer.GetLayerDefn()
field_index = source_layer_def.GetFieldIndex(RASTERIZE_COLOR_FIELD)
# Generate random values for the color field (it's here that the value
# of the attribute should be used, but you get the idea)
for feature in source_layer:
feature.SetField(field_index, random.randint(0, 255))
source_layer.SetFeature(feature)
# Create the destination data source
x_res = int((x_max - x_min) / pixel_size)
y_res = int((y_max - y_min) / pixel_size)
target_ds = gdal.GetDriverByName('GTiff').Create('test.tif', x_res,
y_res, 3, gdal.GDT_Byte)
target_ds.SetGeoTransform(( x_min, pixel_size, 0, y_max, 0, -pixel_size,))
if source_srs:
# Make the target raster have the same projection as the source
target_ds.SetProjection(source_srs.ExportToWkt())
else:
# Source has no projection (needs GDAL >= 1.7.0 to work)
target_ds.SetProjection('LOCAL_CS["arbitrary"]')
# Rasterize
err = gdal.RasterizeLayer(target_ds, (3, 2, 1), source_layer,
burn_values=(0, 0, 0),
options=["ATTRIBUTE=%s" % RASTERIZE_COLOR_FIELD])
if err != 0:
raise Exception("error rasterizing layer: %s" % err)
| bsd-3-clause | 8,801,215,917,953,914,000 | 29.772277 | 107 | 0.542471 | false | 3.302869 | false | false | false |
dikshyam/icsisumm | icsisumm-primary-sys34_v1/nltk/nltk-0.9.2/nltk_contrib/classifier/knn.py | 9 | 1784 | # Natural Language Toolkit - K nearest neighbour classifier
#
# Author: Sumukh Ghodke <sumukh dot ghodke at gmail dot com>
#
# URL: <http://nltk.sf.net>
# This software is distributed under GPL, for license information see LICENSE.TXT
from nltk_contrib.classifier import instances as ins, Classifier, distancemetric as dm
from nltk import probability as prob
class IB1(Classifier):
def __init__(self, training, attributes, klass):
Classifier.__init__(self, training, attributes, klass)
def classify(self, instances):
for each_test in instances:
id = InstanceDistances()
for each_training in self.training:
dist = dm.euclidean_distance(each_test, each_training, self.attributes)
id.distance(dist, each_training)
each_test.classified_klass = id.klass(majority_klass_vote)
@classmethod
def can_handle_continuous_attributes(self):
return True
def is_trained(self):
return True
class InstanceDistances:
"""
Maps instances to the distance they are from a common test_instance
"""
def __init__(self):
self.distances = {}
def distance(self, value, instance):
if value in self.distances:
self.distances[value].append(instance)
else:
self.distances[value] = [instance]
def minimum_distance_instances(self):
keys = self.distances.keys()
keys.sort()
return self.distances[keys[0]]
def klass(self, strategy):
return strategy(self.minimum_distance_instances())
def majority_klass_vote(instances):
fd = prob.FreqDist()
for each in instances:
fd.inc(each.klass_value)
return fd.max()
| gpl-3.0 | -4,724,643,084,960,081,000 | 30.875 | 87 | 0.63509 | false | 4.139211 | false | false | false |
trawick/edurepo | src/edurepo/repo/remove.py | 1 | 1232 | import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", 'edurepo.settings')
import sys
sys.path.append('.')
import django
from repo.models import Course, LearningObjective
def delete_learning_objective(lo, delete, noisy=True):
if noisy:
print 'Deleting objective %s...' % lo
if delete:
lo.delete()
return 0
def delete_course(course, delete=False, noisy=True):
courses = Course.objects.filter(id=course)
assert courses, "Course %s is not in the system" % course
if noisy:
print 'Deleting course %s...' % courses[0]
learning_objectives = LearningObjective.objects.filter(course=courses[0])
for lo in learning_objectives:
rc = delete_learning_objective(lo, delete, noisy=noisy)
if rc:
return rc
if delete:
courses[0].delete()
return 0
def process(args):
if len(args) != 2:
print >> sys.stderr, "Usage: %s course check-or-delete" % sys.argv[0]
return 1
course = args[0]
mode = args[1]
assert mode == 'check' or mode == 'delete'
delete = mode == 'delete'
return delete_course(course, delete=delete)
if __name__ == '__main__':
django.setup()
sys.exit(process(sys.argv[1:]))
| apache-2.0 | -8,927,304,199,601,935,000 | 23.156863 | 77 | 0.633117 | false | 3.530086 | false | false | false |
AKSW/QuitStore | quit/provenance.py | 1 | 3220 | #!/usr/bin/env python3
import functools as ft
from rdflib import BNode
from quit.namespace import FOAF, PROV, QUIT
class Blame(object):
"""
Reusable Blame object for web client
"""
def __init__(self, quit):
self.quit = quit
def _generate_values(self, quads):
result = list()
for quad in quads:
(s, p, o, c) = quad
c.rewrite = True
# Todo: BNodes in VALUES are not supported by specification? Using UNDEF for now
_s = 'UNDEF' if isinstance(s, BNode) else s.n3()
_p = 'UNDEF' if isinstance(p, BNode) else p.n3()
_o = 'UNDEF' if isinstance(o, BNode) else o.n3()
_c = 'UNDEF' if isinstance(c, BNode) else c.identifier.n3()
c.rewrite = False
result.append((_s, _p, _o, _c))
return result
def run(self, quads=None, branch_or_ref='master'):
"""
        Annotates every quad with the respective author.
        Args:
            quads: Optional list of quads; the current implementation re-reads the
                quads of the resolved revision from the store.
            branch_or_ref: Branch name or git reference to annotate, defaults to 'master'.
Returns:
The SPARQL result set
"""
commit = self.quit.repository.revision(branch_or_ref)
g, commitid = self.quit.instance(branch_or_ref)
quads = [x for x in g.store.quads((None, None, None))]
if len(quads) == 0:
return []
values = self._generate_values(quads)
values_string = ft.reduce(lambda acc, quad: acc + '( %s %s %s %s )\n' % quad, values, '')
q = """
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX aksw: <http://aksw.org/>
PREFIX quit: <http://quit.aksw.org/vocab/>
PREFIX prov: <http://www.w3.org/ns/prov#>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
SELECT ?s ?p ?o ?context ?hex ?name ?email ?date WHERE {
?commit quit:preceedingCommit* ?c .
?c prov:endedAtTime ?date ;
prov:qualifiedAssociation ?qa ;
quit:updates ?update ;
quit:hex ?hex .
?qa prov:agent ?user ;
prov:hadRole quit:Author .
?user foaf:mbox ?email ;
rdfs:label ?name .
?update quit:graph ?context ;
quit:additions ?additions .
GRAPH ?additions {
?s ?p ?o
}
FILTER NOT EXISTS {
?y quit:preceedingCommit+ ?z .
?z quit:updates ?update2 .
?update2 quit:graph ?g ;
quit:removals ?removals .
GRAPH ?removals {
?s ?p ?o
}
}
VALUES (?s ?p ?o ?context) {
%s
}
}
""" % values_string
return self.quit.store.store.query(
q,
initNs={'foaf': FOAF, 'prov': PROV, 'quit': QUIT},
initBindings={'commit': QUIT['commit-' + commit.id]}
)
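# Minimal usage sketch (illustrative, not part of the original module); assumes a
# configured quit application object as expected by __init__:
#   blame = Blame(quit_app)
#   for row in blame.run(branch_or_ref='master'):
#       print(row)  # one row per quad: s, p, o, context, hex, name, email, date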
| gpl-3.0 | -7,105,127,579,763,082,000 | 31.857143 | 97 | 0.473602 | false | 3.815166 | false | false | false |
wheldom01/privacyidea | privacyidea/lib/event.py | 2 | 8819 | # -*- coding: utf-8 -*-
#
# 2016-05-04 Cornelius Kölbel <[email protected]>
# Initial writup
#
# License: AGPLv3
# (c) 2016. Cornelius Kölbel
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from privacyidea.models import EventHandler, EventHandlerOption, db
from privacyidea.lib.error import ParameterError
from privacyidea.lib.audit import getAudit
import functools
import logging
log = logging.getLogger(__name__)
AVAILABLE_EVENTS = []
class event(object):
"""
This is the event decorator that calls the event handler in the handler
module. This event decorator can be used at any API call
"""
def __init__(self, eventname, request, g):
self.eventname = eventname
if not eventname in AVAILABLE_EVENTS:
AVAILABLE_EVENTS.append(eventname)
self.request = request
self.g = g
def __call__(self, func):
"""
Returns a wrapper that wraps func.
The wrapper will evaluate the event handling definitions and call the
defined action.
:param func: The function that is decorated
:return: function
"""
@functools.wraps(func)
def event_wrapper(*args, **kwds):
# here we have to evaluate the event configuration from the
# DB table eventhandler and based on the self.eventname etc...
# TODO: do Pre-Event Handling
f_result = func(*args, **kwds)
# Post-Event Handling
e_handles = self.g.event_config.get_handled_events(self.eventname)
for e_handler_def in e_handles:
log.debug("Handling event {eventname} with "
"{eventDef}".format(eventname=self.eventname,
eventDef=e_handler_def))
event_handler_name = e_handler_def.get("handlermodule")
event_handler = get_handler_object(event_handler_name)
# The "action is determined by the event configuration
# In the options we can pass the mailserver configuration
options = {"request": self.request,
"g": self.g,
"response": f_result,
"handler_def": e_handler_def}
if event_handler.check_condition(options=options):
log.debug("Handling event {eventname} with options"
"{options}".format(eventname=self.eventname,
options=options))
# create a new audit object
event_audit = getAudit(self.g.audit_object.config)
                    # copy all values from the original audit entry
event_audit_data = dict(self.g.audit_object.audit_data)
event_audit_data["action"] = "EVENT {trigger}>>" \
"{handler}:{action}".format(
trigger=self.eventname,
handler=e_handler_def.get("handlermodule"),
action=e_handler_def.get("action"))
event_audit_data["action_detail"] = "{0!s}".format(
e_handler_def.get("options"))
event_audit_data["info"] = e_handler_def.get("name")
event_audit.log(event_audit_data)
event_handler.do(e_handler_def.get("action"),
options=options)
# set audit object to success
event_audit.log({"success": True})
event_audit.finalize_log()
return f_result
return event_wrapper
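# Usage sketch (illustrative, not part of the original module); the event name and the
# Flask request/g proxies are assumptions about the calling API code:
#   @event("token_init", request, g)
#   def init_token_api():
#       ...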
def get_handler_object(handlername):
"""
Return an event handler object based on the Name of the event handler class
:param handlername: The identifier of the Handler Class
    :type handlername: basestring
:return:
"""
# TODO: beautify and make this work with several different handlers
from privacyidea.lib.eventhandler.usernotification import \
UserNotificationEventHandler
from privacyidea.lib.eventhandler.tokenhandler import TokenEventHandler
from privacyidea.lib.eventhandler.scripthandler import ScriptEventHandler
h_obj = None
if handlername == "UserNotification":
h_obj = UserNotificationEventHandler()
if handlername == "Token":
h_obj = TokenEventHandler()
if handlername == "Script":
h_obj = ScriptEventHandler()
return h_obj
def enable_event(event_id, enable=True):
"""
    Enable or disable an event
:param event_id: ID of the event
:return:
"""
ev = EventHandler.query.filter_by(id=event_id).first()
if not ev:
raise ParameterError("The event with id '{0!s}' does not "
"exist".format(event_id))
# Update the event
ev.active = enable
r = ev.save()
return r
def set_event(name, event, handlermodule, action, conditions=None,
ordering=0, options=None, id=None, active=True):
"""
Set an event handling configuration. This writes an entry to the
database eventhandler.
:param name: The name of the event definition
:param event: The name of the event to react on. Can be a single event or
a comma separated list.
:type event: basestring
:param handlermodule: The identifier of the event handler module. This is
an identifier string like "UserNotification"
:type handlermodule: basestring
:param action: The action to perform. This is an action defined by the
handler module
:type action: basestring
:param conditions: A condition. Only if this condition is met, the action is
performed.
:type conditions: dict
:param ordering: An optional ordering of the event definitions.
:type ordering: integer
:param options: Additional options, that are needed as parameters for the
action
:type options: dict
:param id: The DB id of the event. If the id is given, the event is
updated. Otherwise a new entry is generated.
:type id: int
:return: The id of the event.
"""
conditions = conditions or {}
if id:
id = int(id)
event = EventHandler(name, event, handlermodule, action,
conditions=conditions, ordering=ordering,
options=options, id=id, active=active)
return event.id
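# Illustrative call (not part of the original module); all values are made up:
#   set_event("notify admin", event="token_init", handlermodule="UserNotification",
#             action="sendmail", conditions={}, options={"emailconfig": "myserver"})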
def delete_event(event_id):
"""
Delete the event configuration with this given ID.
:param event_id: The database ID of the event.
:type event_id: int
:return:
"""
event_id = int(event_id)
ev = EventHandler.query.filter_by(id=event_id).first()
r = ev.delete()
return r
class EventConfiguration(object):
"""
This class is supposed to contain the event handling configuration during
the Request. It can be read initially (in the init method) an can be
accessed later during the request.
"""
def __init__(self):
self.eventlist = []
self._read_events()
@property
def events(self):
return self.eventlist
def get_handled_events(self, eventname):
"""
Return a list of the event handling definitions for the given eventname
:param eventname:
:return:
"""
eventlist = [e for e in self.eventlist if (
eventname in e.get("event") and e.get("active"))]
return eventlist
def get_event(self, eventid):
"""
Return the reduced list with the given eventid. This list should only
have one element.
:param eventid: id of the event
:type eventid: int
:return: list with one element
"""
if eventid is not None:
eventid = int(eventid)
eventlist = [e for e in self.eventlist if e.get("id") == eventid]
return eventlist
else:
return self.eventlist
def _read_events(self):
q = EventHandler.query.order_by(EventHandler.ordering)
for e in q:
self.eventlist.append(e.get())
| agpl-3.0 | 752,872,875,086,803,500 | 35.433884 | 80 | 0.606215 | false | 4.305176 | true | false | false |
Julian/Great | great/views/music.py | 1 | 4539 | from datetime import datetime
from uuid import UUID
import json
from minion.renderers import JSON
from minion.request import Response
from minion.traversal import LeafResource, TreeResource
from sqlalchemy import String, select
from sqlalchemy.sql.expression import cast
import attr
from great.models import music
from great.models.core import ModelManager, NotFound
def _uuid_to_str(obj):
if isinstance(obj, UUID):
return obj.hex
raise TypeError("{!r} is not JSON serializable".format(obj))
@attr.s
class ModelResource(object):
manager = attr.ib()
from_detail_json = attr.ib(default=json.load)
for_detail_json = attr.ib(default=lambda model: model)
renderer = JSON(default=_uuid_to_str)
def get_child(self, name, request):
if not name:
return self
elif name == "tracked":
# FIXME
query = self.manager.db.execute(
select(self.manager._basic_fields).where(
self.manager.table.c.tracked,
),
)
return LeafResource(
render=lambda request: self.renderer.render(
jsonable=[dict(each) for each in query.fetchall()],
request=request,
),
)
id = int(name)
def render_detail(request):
try:
content = self.for_detail_json(self.manager.detail(id=id))
except NotFound:
return Response(code=404)
return self.renderer.render(jsonable=content, request=request)
return LeafResource(render=render_detail)
def render(self, request):
if request.method == b"GET":
fields = [
field
for raw in request.url.get(b"fields")
for field in raw.rstrip(b",").split(b",")
]
content = self.manager.list(
fields=fields,
)
elif request.method == b"POST":
try:
new = self.from_detail_json(request.content)
except ValueError:
return Response(code=400)
content = self.for_detail_json(self.manager.create(**new))
elif request.method == b"DELETE":
self.manager.delete(id=json.load(request.content)[u"id"])
return Response(code=204)
else:
return Response(code=405)
return self.renderer.render(jsonable=content, request=request)
def init_app(bin, root):
music_resource = TreeResource()
db = bin.provide("engine").connect()
for table, detail_columns, from_detail_json, for_detail_json in (
(
music.albums,
[
music.albums.c.comments,
music.albums.c.compilation,
music.albums.c.live,
cast(music.albums.c.mbid, String).label("mbid"),
music.albums.c.pinned,
music.albums.c.rating,
music.albums.c.release_date,
music.albums.c.type,
],
_album_from_json,
_album_for_json,
),
(
music.artists,
[
music.artists.c.comments,
cast(music.artists.c.created_at, String).label("created_at"),
cast(music.artists.c.mbid, String).label("mbid"),
cast(music.artists.c.modified_at, String).label("modified_at"),
music.artists.c.pinned,
music.artists.c.rating,
],
json.load,
lambda artist: artist,
),
):
music_resource.set_child(
name=table.name,
resource=ModelResource(
from_detail_json=from_detail_json,
for_detail_json=for_detail_json,
manager=ModelManager(
db=db,
table=table,
detail_columns=detail_columns,
),
)
)
root.set_child("music", music_resource)
def _album_from_json(detail):
album = json.load(detail)
release_date = album.get(u"release_date")
if release_date is not None:
album[u"release_date"] = datetime.strptime(
release_date, "%Y-%m-%d"
).date()
return album
def _album_for_json(album):
release_date = album.get(u"release_date")
if release_date is not None:
album[u"release_date"] = release_date.strftime("%Y-%m-%d")
return album
| mit | 6,941,405,291,674,199,000 | 29.463087 | 79 | 0.546376 | false | 4.100271 | false | false | false |
Passtechsoft/TPEAlpGen | blender/release/scripts/addons/rigify/rigs/basic/copy.py | 6 | 4587 | #====================== BEGIN GPL LICENSE BLOCK ======================
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#======================= END GPL LICENSE BLOCK ========================
# <pep8 compliant>
import bpy
from ...utils import copy_bone
from ...utils import strip_org, make_deformer_name
from ...utils import create_bone_widget
class Rig:
""" A "copy" rig. All it does is duplicate the original bone and
constrain it.
This is a control and deformation rig.
"""
def __init__(self, obj, bone, params):
""" Gather and validate data about the rig.
"""
self.obj = obj
self.org_bone = bone
self.org_name = strip_org(bone)
self.params = params
self.make_control = params.make_control
self.make_deform = params.make_deform
def generate(self):
""" Generate the rig.
Do NOT modify any of the original bones, except for adding constraints.
The main armature should be selected and active before this is called.
"""
bpy.ops.object.mode_set(mode='EDIT')
# Make a control bone (copy of original).
if self.make_control:
bone = copy_bone(self.obj, self.org_bone, self.org_name)
# Make a deformation bone (copy of original, child of original).
if self.make_deform:
def_bone = copy_bone(self.obj, self.org_bone, make_deformer_name(self.org_name))
# Get edit bones
eb = self.obj.data.edit_bones
# UNUSED
# if self.make_control:
# bone_e = eb[bone]
if self.make_deform:
def_bone_e = eb[def_bone]
# Parent
if self.make_deform:
def_bone_e.use_connect = False
def_bone_e.parent = eb[self.org_bone]
bpy.ops.object.mode_set(mode='OBJECT')
pb = self.obj.pose.bones
if self.make_control:
# Constrain the original bone.
con = pb[self.org_bone].constraints.new('COPY_TRANSFORMS')
con.name = "copy_transforms"
con.target = self.obj
con.subtarget = bone
# Create control widget
create_bone_widget(self.obj, bone)
def add_parameters(params):
""" Add the parameters of this rig type to the
RigifyParameters PropertyGroup
"""
params.make_control = bpy.props.BoolProperty(name="Control", default=True, description="Create a control bone for the copy")
params.make_deform = bpy.props.BoolProperty(name="Deform", default=True, description="Create a deform bone for the copy")
def parameters_ui(layout, params):
""" Create the ui for the rig parameters.
"""
r = layout.row()
r.prop(params, "make_control")
r = layout.row()
r.prop(params, "make_deform")
def create_sample(obj):
""" Create a sample metarig for this rig type.
"""
# generated by rigify.utils.write_metarig
bpy.ops.object.mode_set(mode='EDIT')
arm = obj.data
bones = {}
bone = arm.edit_bones.new('Bone')
bone.head[:] = 0.0000, 0.0000, 0.0000
bone.tail[:] = 0.0000, 0.0000, 0.2000
bone.roll = 0.0000
bone.use_connect = False
bones['Bone'] = bone.name
bpy.ops.object.mode_set(mode='OBJECT')
pbone = obj.pose.bones[bones['Bone']]
pbone.rigify_type = 'basic.copy'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
bpy.ops.object.mode_set(mode='EDIT')
for bone in arm.edit_bones:
bone.select = False
bone.select_head = False
bone.select_tail = False
for b in bones:
bone = arm.edit_bones[bones[b]]
bone.select = True
bone.select_head = True
bone.select_tail = True
arm.edit_bones.active = bone
| gpl-3.0 | 8,010,847,085,069,767,000 | 32 | 128 | 0.618051 | false | 3.611811 | false | false | false |
paihu/moebox | settings.py | 1 | 3166 | """
Django settings for moebox project.
Generated by 'django-admin startproject' using Django 1.10.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
from .config import *
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'g#ouo6a&jt%lyl*=2rhu7ajzezl)rtff7ha!%s84^f@#^=&!d'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'moebox',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'moebox.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'hello.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = 'd:/xampp/htdocs_root'
| mit | -6,121,905,040,672,723,000 | 24.95082 | 91 | 0.685407 | false | 3.471491 | false | false | false |
XertroV/nodeup-xk-io | nodeup-server/wallet.py | 1 | 1417 | from binascii import hexlify
import logging
from pycoin.tx import Tx
from models import unprocessed_txs, txs, all_addresses, addr_to_uid, Account, known_txs, exchange_rate, nodes_recently_updated
from constants import COIN
from digitalocean_custom import calc_node_minutes
def hash_to_hex(h):
return hexlify(h[::-1])
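# Note (added for clarity, not in the original source): reversing the hash bytes with
# [::-1] before hexlifying converts the internal (little-endian) hash order into the
# conventional txid display order, e.g. hexlify(b'\x01\x02'[::-1]) == b'0201'.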
def process_tx_initial(tx_obj: Tx):
found_relevant_address = False
for out in tx_obj.txs_out:
address = out.bitcoin_address()
if address in all_addresses:
found_relevant_address = True
break
if not found_relevant_address:
logging.info('Found irrelevant tx %s' % hash_to_hex(tx_obj.hash()))
return
tx_hash = tx_obj.hash()
txid = hash_to_hex(tx_hash).decode()
if tx_hash in known_txs:
return
known_txs.add(tx_hash)
txs[tx_hash] = tx_obj.as_hex()
for out in tx_obj.txs_out:
address = out.bitcoin_address()
if address in all_addresses and address is not None:
unprocessed_txs.add(tx_hash)
uid = addr_to_uid[address]
account = Account(uid)
account.txs.add(tx_hash)
account.unconf_minutes.incr(calc_node_minutes(satoshi_amount=out.coin_value, exchange_rate=exchange_rate.get()))
account.add_msg('Found tx for %.08f, %s' % (out.coin_value / COIN, txid))
nodes_recently_updated.append(account.uid)
| mit | 4,699,442,673,061,066,000 | 34.425 | 126 | 0.645025 | false | 3.381862 | false | false | false |
saullocastro/pyNastran | pyNastran/bdf/mesh_utils/bdf_merge.py | 1 | 5578 | from __future__ import print_function
import os
import numpy as np
from six import string_types, iteritems
from pyNastran.bdf.mesh_utils.bdf_renumber import bdf_renumber
from pyNastran.bdf.bdf import BDF
def bdf_merge(bdf_filenames, bdf_filename_out=None, renumber=True, encoding=None,
size=8, is_double=False, cards_to_skip=None, log=None):
"""
Merges multiple BDF into one file
Parameters
----------
bdf_filenames : List[str]
list of bdf filenames
bdf_filename_out : str / None
the output bdf filename (default=None; None -> no writing)
renumber : bool
should the bdf be renumbered (default=True)
encoding : str
the unicode encoding (default=None; system default)
size : int; {8, 16}; default=8
the bdf write precision
is_double : bool; default=False
the field precision to write
cards_to_skip : List[str]; (default=None -> don't skip any cards)
There are edge cases (e.g. FLUTTER analysis) where things can break due to
uncross-referenced cards. You need to disable entire classes of cards in
that case (e.g. all aero cards).
Supports
--------
nodes: GRID
coords: CORDx
elements: CQUAD4, CTRIA3, CTETRA, CPENTA, CHEXA, CELASx, CBAR, CBEAM
CONM1, CONM2, CMASS
properties: PSHELL, PCOMP, PSOLID, PMASS
materials: MAT1, MAT8
.. todo:: doesn't support SPOINTs/EPOINTs
.. warning:: still very preliminary
"""
if not isinstance(bdf_filenames, (list, tuple)):
raise TypeError('bdf_filenames is not a list/tuple...%s' % str(bdf_filenames))
if not len(bdf_filenames) > 1:
raise RuntimeError("You can't merge one BDF...bdf_filenames=%s" % str(bdf_filenames))
for bdf_filename in bdf_filenames:
if not isinstance(bdf_filename, string_types):
raise TypeError('bdf_filenames is not a string...%s' % bdf_filename)
#bdf_filenames = [bdf_filenames]
#starting_id_dict_default = {
#'cid' : max(model.coords.keys()),
#'nid' : max(model.nodes.keys()),
#'eid' : max([
#max(model.elements.keys()),
#max(model.masses.keys()),
#]),
#'pid' : max([
#max(model.properties.keys()),
#max(model.properties_mass.keys()),
#]),
#'mid' : max(model.material_ids),
#}
model = BDF(debug=False, log=log)
model.disable_cards(cards_to_skip)
bdf_filename0 = bdf_filenames[0]
model.read_bdf(bdf_filename0, encoding=encoding)
model.log.info('primary=%s' % bdf_filename0)
data_members = [
'coords', 'nodes', 'elements', 'masses', 'properties', 'properties_mass',
'materials',
]
for bdf_filename in bdf_filenames[1:]:
#model.log.info('model.masses = %s' % model.masses)
starting_id_dict = {
'cid' : max(model.coords.keys()) + 1,
'nid' : max(model.nodes.keys()) + 1,
'eid' : max([
max(model.elements.keys()),
0 if len(model.masses) == 0 else max(model.masses.keys()),
]) + 1,
'pid' : max([
max(model.properties.keys()),
0 if len(model.properties_mass) == 0 else max(model.properties_mass.keys()),
]) + 1,
'mid' : max(model.material_ids) + 1,
}
#for param, val in sorted(iteritems(starting_id_dict)):
#print(' %-3s %s' % (param, val))
model.log.info('secondary=%s' % bdf_filename)
model2 = BDF(debug=False)
model2.disable_cards(cards_to_skip)
bdf_dump = 'bdf_merge_temp.bdf'
#model2.read_bdf(bdf_filename, xref=False)
bdf_renumber(bdf_filename, bdf_dump, starting_id_dict=starting_id_dict,
size=size, is_double=is_double, cards_to_skip=cards_to_skip)
model2 = BDF(debug=False)
model2.disable_cards(cards_to_skip)
model2.read_bdf(bdf_dump)
os.remove(bdf_dump)
#model.log.info('model2.node_ids = %s' % np.array(model2.node_ids))
for data_member in data_members:
data1 = getattr(model, data_member)
data2 = getattr(model2, data_member)
if isinstance(data1, dict):
#model.log.info(' working on %s' % (data_member))
for key, value in iteritems(data2):
if data_member in 'coords' and key == 0:
continue
if isinstance(value, list):
raise NotImplementedError(type(value))
else:
assert key not in data1, key
data1[key] = value
#print(' %s' % key)
else:
raise NotImplementedError(type(data1))
#if bdf_filenames_out:
#model.write_bdf(bdf_filenames_out, size=size)
if renumber:
model.log.info('final renumber...')
starting_id_dict = {
'cid' : 1,
'nid' : 1,
'eid' : 1,
'pid' : 1,
'mid' : 1,
}
bdf_renumber(model, bdf_filename_out, starting_id_dict=starting_id_dict,
size=size, is_double=is_double, cards_to_skip=cards_to_skip)
elif bdf_filename_out:
model.write_bdf(out_filename=bdf_filename_out, encoding=None,
size=size, is_double=is_double,
interspersed=True,
enddata=None)
return model
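# Usage sketch (illustrative, not part of the original module); file names are hypothetical:
#   model = bdf_merge(['wing.bdf', 'fuselage.bdf'], bdf_filename_out='merged.bdf',
#                     renumber=True)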
| lgpl-3.0 | -7,953,364,121,187,741,000 | 37.468966 | 93 | 0.558085 | false | 3.505971 | false | false | false |
lucastheis/isa | code/tools/logsumexp.py | 1 | 1347 | """
A numerically stable implementation of the logarithm of sums of exponentials.
"""
__license__ = 'MIT License <http://www.opensource.org/licenses/mit-license.php>'
__author__ = 'Lucas Theis <[email protected]>'
__docformat__ = 'epytext'
from numpy import log, sum, exp, zeros, max, asarray, vectorize, inf, nan, squeeze, reshape
def logsumexp(x, ax=None):
"""
Computes the log of the sum of the exp of the entries in x in a numerically
stable way.
@type x: array_like
@param x: a list, array or matrix of numbers
@type ax: integer
@param ax: axis along which the sum is applied
@rtype: array
@return: an array containing the results
"""
if ax is None:
x_max = max(x, ax) - 1.
return x_max + log(sum(exp(x - x_max)))
else:
x_max_shape = list(x.shape)
x_max_shape[ax] = 1
x_max = asarray(max(x, ax)) - 1.
return x_max + log(sum(exp(x - x_max.reshape(x_max_shape)), ax))
def logmeanexp(x, ax=None):
"""
Computes the log of the mean of the exp of the entries in x in a numerically
stable way. Uses logsumexp.
@type x: array_like
@param x: a list, array or matrix of numbers
@type ax: integer
@param ax: axis along which the values are averaged
@rtype: array
@return: an array containing the results
"""
x = asarray(x)
n = x.size if ax is None else x.shape[ax]
return logsumexp(x, ax) - log(n)
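# Illustrative check (not part of the original module): for x = asarray([1000., 1000.]),
# a naive log(sum(exp(x))) overflows to inf, while logsumexp(x) returns
# 1000. + log(2.) because the maximum is factored out before exponentiating.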
| mit | 2,026,095,489,688,845,800 | 22.631579 | 91 | 0.673348 | false | 2.80625 | false | false | false |
jobs2phones/j2p | jobs2phones/user_manage.py | 1 | 3780 | import load
import send
import imaplib
import email
import yaml
import re
from bs4 import BeautifulSoup
with open('../config.yaml') as f:
cf = yaml.safe_load(f)
def parse_text_message_from_email(msg):
'''
Gets the actual text sent to the
email address by parsing it out of the email body
'''
text = {}
text['sender'] = msg['from']
msg_body = ''
if msg.is_multipart():
for i,part in enumerate(msg.walk()):
if part.get_content_type() =='text/plain':
msg_body = part.get_payload(decode=True)
elif part.get_content_type() =='text/html':
msg_soup = BeautifulSoup(part.get_payload(decode=True))
else:
msg_body = msg.get_payload(decode=True)
if len(msg_body) == 0:
msg_body = msg_soup.find('body').text
msg_body.replace('\r','').replace('\n','')
text['message']=msg_body
return text
def parse_choices(choices_made):
'''
Takes a numbered list of choices and maps them
to the relevant search criteria.
'''
search_criteria='';
for choice in choices_made:
if choice == '1':
search_criteria='dishwasher&20philadelphia ' + search_criteria
if choice == '2':
search_criteria='warehouse&20philadelphia ' + search_criteria
if choice == '3':
search_criteria='cook&20philadelphia ' + search_criteria
return search_criteria
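# Example (illustrative, not in the original source): parse_choices(['1', '3']) returns
# 'cook&20philadelphia dishwasher&20philadelphia ' -- each recognised digit prepends
# its job keyword to the criteria string.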
def read_mailbox_and_edit_users(M):
"""
    Processes mail in order to add, edit, and remove users
"""
Session = load.bind_to_database(cf['postgres_username'],cf['postgres_password'],
cf['postgres_db'])
rv, data_num = M.search(None, "ALL")
if rv != 'OK':
print "No messages found!"
return
messages=[]
print str(len(data_num[0].split())) + " new messages found"
for num in data_num[0].split():
rv, data = M.fetch(num, '(RFC822)')
if rv != 'OK':
print "ERROR getting message", num
return
email_data = email.message_from_string(data[0][1])
text = parse_text_message_from_email(email_data)
choices_made = re.findall(r'\d+',text['message'])
if 'stop' in text['message'].lower():
if load.check_user(Session,text['sender']):
load.delete_user(Session,text['sender'])
send.send_text(cf['fromaddr'],cf['username'],cf['password'],text['sender'],
cf['stop_message'])
M.store(num , '+FLAGS', '\\Deleted') #This archives the message.
elif 'start' in text['message'].lower() or 'list' in text['message'].lower():
send.send_text(cf['fromaddr'],cf['username'],cf['password'],text['sender'],
cf['start_message'])
M.store(num , '+FLAGS', '\\Deleted') #This archives the message.
elif 'demo' in text['message'].lower() or 'list' in text['message'].lower():
send.send_text(cf['fromaddr'],cf['username'],cf['password'],text['sender'],
cf['demo_message'])
M.store(num , '+FLAGS', '\\Deleted') #This archives the message.
elif len(choices_made) > 0:
search_criteria = parse_choices(choices_made)
if len(search_criteria) > 0:
if load.check_user(Session,text['sender']):
load.edit_user(Session,text['sender'],search_criteria)
else:
load.insert_user(Session,'',text['sender'],'',search_criteria)
send.send_text(cf['fromaddr'],cf['username'],cf['password'],text['sender'],
str(choices_made) + '. ' + cf['chosen_message'])
M.store(num , '+FLAGS', '\\Deleted') #This archives the message.
| mit | -1,107,737,909,954,239,700 | 37.181818 | 91 | 0.567196 | false | 3.825911 | false | false | false |
akesandgren/easybuild-easyblocks | easybuild/easyblocks/b/boost.py | 1 | 16449 | ##
# Copyright 2009-2021 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for Boost, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
@author: Ward Poelmans (Ghent University)
@author: Petar Forai (IMP/IMBA)
@author: Luca Marsella (CSCS)
@author: Guilherme Peretti-Pezzi (CSCS)
@author: Joachim Hein (Lund University)
@author: Michele Dolfi (ETH Zurich)
@author: Simon Branford (University of Birmingham)
"""
from distutils.version import LooseVersion
import fileinput
import glob
import os
import re
import sys
import easybuild.tools.toolchain as toolchain
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.config import ERROR
from easybuild.tools.filetools import apply_regex_substitutions, copy, mkdir, symlink, which, write_file
from easybuild.tools.modules import get_software_root, get_software_version
from easybuild.tools.run import run_cmd
from easybuild.tools.systemtools import AARCH64, POWER, UNKNOWN
from easybuild.tools.systemtools import get_cpu_architecture, get_glibc_version, get_shared_lib_ext
class EB_Boost(EasyBlock):
"""Support for building Boost."""
def __init__(self, *args, **kwargs):
"""Initialize Boost-specific variables."""
super(EB_Boost, self).__init__(*args, **kwargs)
self.objdir = None
self.pyvers = []
if LooseVersion(self.version) >= LooseVersion("1.71.0"):
self.bjamcmd = 'b2'
else:
self.bjamcmd = 'bjam'
@staticmethod
def extra_options():
"""Add extra easyconfig parameters for Boost."""
extra_vars = {
'boost_mpi': [False, "Build mpi boost module", CUSTOM],
'boost_multi_thread': [None, "Build boost with multi-thread option (DEPRECATED)", CUSTOM],
'toolset': [None, "Toolset to use for Boost configuration ('--with-toolset' for bootstrap.sh)", CUSTOM],
'build_toolset': [None, "Toolset to use for Boost compilation "
"('toolset' for b2, default calculated from toolset)", CUSTOM],
'mpi_launcher': [None, "Launcher to use when running MPI regression tests", CUSTOM],
'only_python_bindings': [False, "Only install Boost.Python library providing Python bindings", CUSTOM],
'use_glibcxx11_abi': [None, "Use the GLIBCXX11 ABI", CUSTOM],
}
return EasyBlock.extra_options(extra_vars)
def patch_step(self):
"""Patch Boost source code before building."""
super(EB_Boost, self).patch_step()
# TIME_UTC is also defined in recent glibc versions, so we need to rename it for old Boost versions (<= 1.49)
glibc_version = get_glibc_version()
old_glibc = glibc_version is not UNKNOWN and LooseVersion(glibc_version) > LooseVersion("2.15")
if old_glibc and LooseVersion(self.version) <= LooseVersion("1.49.0"):
self.log.info("Patching because the glibc version is too new")
files_to_patch = ["boost/thread/xtime.hpp"] + glob.glob("libs/interprocess/test/*.hpp")
files_to_patch += glob.glob("libs/spirit/classic/test/*.cpp") + glob.glob("libs/spirit/classic/test/*.inl")
for patchfile in files_to_patch:
try:
for line in fileinput.input("%s" % patchfile, inplace=1, backup='.orig'):
line = re.sub(r"TIME_UTC", r"TIME_UTC_", line)
sys.stdout.write(line)
except IOError as err:
raise EasyBuildError("Failed to patch %s: %s", patchfile, err)
def prepare_step(self, *args, **kwargs):
"""Prepare build environment."""
super(EB_Boost, self).prepare_step(*args, **kwargs)
# keep track of Python version(s) used during installation,
# so we can perform a complete sanity check
if get_software_root('Python'):
self.pyvers.append(get_software_version('Python'))
def configure_step(self):
"""Configure Boost build using custom tools"""
# mpi sanity check
if self.cfg['boost_mpi'] and not self.toolchain.options.get('usempi', None):
raise EasyBuildError("When enabling building boost_mpi, also enable the 'usempi' toolchain option.")
# create build directory (Boost doesn't like being built in source dir)
self.objdir = os.path.join(self.builddir, 'obj')
mkdir(self.objdir)
# generate config depending on compiler used
toolset = self.cfg['toolset']
if toolset is None:
if self.toolchain.comp_family() == toolchain.INTELCOMP:
toolset = 'intel-linux'
elif self.toolchain.comp_family() == toolchain.GCC:
toolset = 'gcc'
else:
raise EasyBuildError("Unknown compiler used, don't know what to specify to --with-toolset, aborting.")
cmd = "%s ./bootstrap.sh --with-toolset=%s --prefix=%s %s"
tup = (self.cfg['preconfigopts'], toolset, self.objdir, self.cfg['configopts'])
run_cmd(cmd % tup, log_all=True, simple=True)
# Use build_toolset if specified or the bootstrap toolset without the OS suffix
self.toolset = self.cfg['build_toolset'] or re.sub('-linux$', '', toolset)
user_config = []
# Explicitely set the compiler path to avoid B2 checking some standard paths like /opt
cxx = os.getenv('CXX')
if cxx:
cxx = which(cxx, on_error=ERROR)
# Remove default toolset config which may lead to duplicate toolsets (e.g. for intel-linux)
apply_regex_substitutions('project-config.jam', [('using %s ;' % toolset, '')])
# Add our toolset config with no version and full path to compiler
user_config.append("using %s : : %s ;" % (self.toolset, cxx))
if self.cfg['boost_mpi']:
# configure the boost mpi module
# http://www.boost.org/doc/libs/1_47_0/doc/html/mpi/getting_started.html
# let Boost.Build know to look here for the config file
# Check if using a Cray toolchain and configure MPI accordingly
if self.toolchain.toolchain_family() == toolchain.CRAYPE:
if self.toolchain.PRGENV_MODULE_NAME_SUFFIX == 'gnu':
craympichdir = os.getenv('CRAY_MPICH2_DIR')
craygccversion = os.getenv('GCC_VERSION')
# We configure the gcc toolchain below, so make sure the EC doesn't use another toolset
if self.toolset != 'gcc':
raise EasyBuildError("For the cray toolchain the 'gcc' toolset must be used.")
# Remove the previous "using gcc" line add above (via self.toolset) if present
user_config = [x for x in user_config if not x.startswith('using gcc :')]
user_config.extend([
'local CRAY_MPICH2_DIR = %s ;' % craympichdir,
'using gcc ',
': %s' % craygccversion,
': CC ',
': <compileflags>-I$(CRAY_MPICH2_DIR)/include ',
r' <linkflags>-L$(CRAY_MPICH2_DIR)/lib \ ',
'; ',
'using mpi ',
': CC ',
': <find-shared-library>mpich ',
': %s' % self.cfg['mpi_launcher'],
';',
'',
])
else:
raise EasyBuildError("Bailing out: only PrgEnv-gnu supported for now")
else:
user_config.append("using mpi : %s ;" % os.getenv("MPICXX"))
write_file('user-config.jam', '\n'.join(user_config), append=True)
def build_boost_variant(self, bjamoptions, paracmd):
"""Build Boost library with specified options for bjam."""
# build with specified options
cmd = "%s ./%s %s %s %s" % (self.cfg['prebuildopts'], self.bjamcmd, bjamoptions, paracmd, self.cfg['buildopts'])
run_cmd(cmd, log_all=True, simple=True)
# install built Boost library
cmd = "%s ./%s %s install %s %s" % (
self.cfg['preinstallopts'], self.bjamcmd, bjamoptions, paracmd, self.cfg['installopts'])
run_cmd(cmd, log_all=True, simple=True)
# clean up before proceeding with next build
run_cmd("./%s %s --clean-all" % (self.bjamcmd, bjamoptions), log_all=True, simple=True)
def build_step(self):
"""Build Boost with bjam tool."""
self.bjamoptions = " --prefix=%s --user-config=user-config.jam" % self.objdir
if 'toolset=' not in self.cfg['buildopts']:
self.bjamoptions += " toolset=" + self.toolset
cxxflags = os.getenv('CXXFLAGS')
# only disable -D_GLIBCXX_USE_CXX11_ABI if use_glibcxx11_abi was explicitly set to False
# None value is the default, which corresponds to default setting (=1 since GCC 5.x)
if self.cfg['use_glibcxx11_abi'] is not None:
cxxflags += ' -D_GLIBCXX_USE_CXX11_ABI='
if self.cfg['use_glibcxx11_abi']:
cxxflags += '1'
else:
cxxflags += '0'
if cxxflags is not None:
self.bjamoptions += " cxxflags='%s'" % cxxflags
ldflags = os.getenv('LDFLAGS')
if ldflags is not None:
self.bjamoptions += " linkflags='%s'" % ldflags
# specify path for bzip2/zlib if module is loaded
for lib in ["bzip2", "zlib"]:
libroot = get_software_root(lib)
if libroot:
self.bjamoptions += " -s%s_INCLUDE=%s/include" % (lib.upper(), libroot)
self.bjamoptions += " -s%s_LIBPATH=%s/lib" % (lib.upper(), libroot)
self.paracmd = ''
if self.cfg['parallel']:
self.paracmd = "-j %s" % self.cfg['parallel']
if self.cfg['only_python_bindings']:
# magic incantation to only install Boost Python bindings is... --with-python
# see http://boostorg.github.io/python/doc/html/building/installing_boost_python_on_your_.html
self.bjamoptions += " --with-python"
self.log.info("Building boost with single and multi threading")
self.bjamoptions += " threading=single,multi --layout=tagged"
if self.cfg['boost_mpi']:
self.log.info("Building boost_mpi library")
mpi_bjamoptions = " --with-mpi"
self.build_boost_variant(self.bjamoptions + mpi_bjamoptions, self.paracmd)
self.log.info("Building boost libraries")
# build with specified options
cmd = "%s ./%s %s %s %s" % (self.cfg['prebuildopts'], self.bjamcmd, self.bjamoptions, self.paracmd, self.cfg['buildopts'])
run_cmd(cmd, log_all=True, simple=True)
def install_step(self):
"""Install Boost by copying files to install dir."""
# install boost libraries
self.log.info("Installing boost libraries")
cmd = "%s ./%s %s install %s %s" % (
self.cfg['preinstallopts'], self.bjamcmd, self.bjamoptions, self.paracmd, self.cfg['installopts'])
run_cmd(cmd, log_all=True, simple=True)
self.log.info("Copying %s to installation dir %s", self.objdir, self.installdir)
if self.cfg['only_python_bindings'] and 'Python' in self.cfg['multi_deps'] and self.iter_idx > 0:
self.log.info("Main installation should already exist, only copying over missing Python libraries.")
copy(glob.glob(os.path.join(self.objdir, 'lib', 'libboost_python*')), os.path.join(self.installdir, 'lib'), symlinks=True)
else:
copy(glob.glob(os.path.join(self.objdir, '*')), self.installdir, symlinks=True)
# Link tagged multi threaded libs as the default libs
lib_mt_suffix = '-mt'
if LooseVersion(self.version) >= LooseVersion("1.69.0"):
if get_cpu_architecture() == AARCH64:
lib_mt_suffix += '-a64'
elif get_cpu_architecture() == POWER:
lib_mt_suffix += '-p64'
else:
lib_mt_suffix += '-x64'
shlib_ext = get_shared_lib_ext()
for source_shared_lib in glob.glob(os.path.join(self.installdir, 'lib', 'lib*%s.%s.%s' % (lib_mt_suffix, shlib_ext, self.version))):
target_shared_lib = source_shared_lib.replace('%s.%s' % (lib_mt_suffix, shlib_ext), '.%s' % shlib_ext)
source_static_lib = source_shared_lib.replace('%s.%s.%s' % (lib_mt_suffix, shlib_ext, self.version), '%s.a' % lib_mt_suffix)
target_static_lib = source_static_lib.replace('%s.a' % lib_mt_suffix, '.a')
symlink(os.path.basename(source_shared_lib), target_shared_lib, use_abspath_source=False)
symlink(os.path.basename(target_shared_lib), target_shared_lib.replace('.%s' % self.version, ''), use_abspath_source=False)
symlink(os.path.basename(source_static_lib), target_static_lib, use_abspath_source=False)
def sanity_check_step(self):
"""Custom sanity check for Boost."""
shlib_ext = get_shared_lib_ext()
custom_paths = {
'files': [],
'dirs': ['include/boost']
}
if self.cfg['only_python_bindings']:
for pyver in self.pyvers:
pymajorver = pyver.split('.')[0]
pyminorver = pyver.split('.')[1]
if LooseVersion(self.version) >= LooseVersion("1.67.0"):
suffix = '%s%s' % (pymajorver, pyminorver)
elif int(pymajorver) >= 3:
suffix = pymajorver
else:
suffix = ''
custom_paths['files'].append(os.path.join('lib', 'libboost_python%s.%s' % (suffix, shlib_ext)))
else:
custom_paths['files'].append(os.path.join('lib', 'libboost_system.%s' % shlib_ext))
lib_mt_suffix = '-mt'
# MT libraries gained an extra suffix from v1.69.0 onwards
if LooseVersion(self.version) >= LooseVersion("1.69.0"):
if get_cpu_architecture() == AARCH64:
lib_mt_suffix += '-a64'
elif get_cpu_architecture() == POWER:
lib_mt_suffix += '-p64'
else:
lib_mt_suffix += '-x64'
custom_paths['files'].append(os.path.join('lib', 'libboost_thread%s.%s' % (lib_mt_suffix, shlib_ext)))
if self.cfg['boost_mpi']:
custom_paths['files'].append(os.path.join('lib', 'libboost_mpi.%s' % shlib_ext))
custom_paths['files'].append(os.path.join('lib', 'libboost_mpi%s.%s' % (lib_mt_suffix, shlib_ext)))
super(EB_Boost, self).sanity_check_step(custom_paths=custom_paths)
def make_module_extra(self):
"""Set up a BOOST_ROOT environment variable to e.g. ease Boost handling by cmake"""
txt = super(EB_Boost, self).make_module_extra()
if not self.cfg['only_python_bindings']:
txt += self.module_generator.set_environment('BOOST_ROOT', self.installdir)
return txt
| gpl-2.0 | -4,422,243,499,139,912,700 | 47.096491 | 140 | 0.600888 | false | 3.706399 | true | false | false |
derwolfe/teiler | teiler/peerdiscovery.py | 1 | 7702 | """
peerdiscovery
The process is simple.
1) Start up the client and broadcast a UDP datagram on a defined interval.
2) Listen for other datagrams
3) When another datagram is heard, pull it into the list of the peers.
But, if the peer is already in the list, do nothing.
4) On disconnect, the client sends an exit message, letting the other
users know that they are no longer online; making it safe for the
client to disconnect
"""
import json
from twisted.internet import task
from twisted.internet.protocol import DatagramProtocol
from twisted.python import log
HEARTBEAT = "HEARTBEAT"
EXIT = "EXIT"
class PeerList(object):
"""
A simple structure meant to manage the other peers. Supports a limited
set of operations, such as add, remove, exists, and count.
"""
def __init__(self):
self._peers = {}
def add(self, peer):
self._peers[peer.peerId] = peer
def get(self, peerId):
return self._peers.get(peerId)
def remove(self, peerId):
del self._peers[peerId]
def exists(self, peerId):
return self._peers.get(peerId) is not None
def count(self):
return len(self._peers.keys())
def all(self):
""" return an iterable """
return self._peers.itervalues()
class PeerDiscoveryMessage(object):
"""
Contains basic location information for clients to use to initiate a
    connection with this peer. Basically, just who the user is, what IP they
    are using, and what port to connect on.
"""
def __init__(self, message, name, address, port):
if isinstance(message, str):
message = unicode(message, "utf-8")
if isinstance(name, str):
name = unicode(name, "utf-8")
        if isinstance(address, str):
address = unicode(address, "utf-8")
self.message = message
self.name = name
self.address = address
self.port = port
def serialize(self):
return json.dumps({
"message": self.message.encode("utf-8"),
"name": self.name.encode("utf-8"),
"address": self.address.encode("utf-8"),
"port": self.port
})
@classmethod
def parseDatagram(klass, datagram):
"""
Given a datagram formatted using JSON, return a new message object.
"""
msg = json.loads(datagram)
peerMsg = msg["message"]
peerName = msg["name"]
peerAddress = msg["address"]
peerPort = msg["port"]
return klass(peerMsg, peerName, peerAddress, peerPort)
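# Example datagram (illustrative, not in the original source), matching serialize():
#   '{"message": "HEARTBEAT", "name": "alice", "address": "192.168.1.5", "port": 8250}'
# parseDatagram() on such a string yields a PeerDiscoveryMessage with those four fields.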
class Peer(object):
"""
    A peer is another user located on a different system. Maintains the user's
peerId, username, IP address, and port.
"""
def __init__(self, name, address, port):
self.peerId = makePeerId(name, address, port)
if isinstance(name, str):
name = unicode(name, "utf-8")
if isinstance(address, str):
address = unicode(address, "utf-8")
self.name = name
self.address = address
self.port = port
def serialize(self):
return json.dumps({
"peerId": self.peerId.encode("utf-8"),
"name": self.name.encode("utf-8"),
"address": self.address.encode("utf-8"),
"port": self.port
})
def __eq__(self, other):
return self.peerId == other.peerId
def makePeerId(name, address, port):
"""
Create a unique peerId for a peer.
:param name: the name of a peer
:param address: the ip address of a peer
:param port: the port being used
    :returns string: a peerId
"""
if isinstance(name, str):
name = unicode(name, "utf-8")
    if isinstance(address, str):
        address = unicode(address, "utf-8")
return u"{0}_{1}_{2}".format(name, address, port)
class PeerDiscoveryProtocol(DatagramProtocol):
"""
UDP protocol used to find others running the same program.
The protocol will do several things, on program start, a connection
message will be sent; basically announcing itself as a node to the network.
Then the protocol will regularly send a heartbeat message at a defined
interval.
Once the peer has decided to disconnect, it will send an exit message to
alert the other nodes of its demise.
:param reactor: the reactor being used.
:param peers: a data structure in which peers can be stored, implements
IPeerList
    :param name: the username you'd like to broadcast.
:param multiCastAddress: the multicast address to broadcast.
:param multiCastPort: the port on which to broadcast.
:param address: the IP address to broadcast. This is for the current user.
:param port: the Port to broadcast where other users can connect.
"""
def __init__(self, reactor, peerList, name, multiCastAddress,
multiCastPort, address, port):
"""
Set up an instance of the PeerDiscovery protocol by creating
the message information needed to broadcast other instances
of the protocol running on the same network.
"""
self._peers = peerList
self.peerId = makePeerId(name, address, port)
self.name = name
self.reactor = reactor
# these need to be strings
self.multiCastAddress = multiCastAddress
self.multiCastPort = multiCastPort
self.address = address
self.port = port
self.loop = None
def sendMessage(self, message):
self.transport.write(message,
(self.multiCastAddress, self.multiCastPort))
def startProtocol(self):
self.transport.setTTL(5)
self.transport.joinGroup(self.multiCastAddress)
self.loop = task.LoopingCall(self.sendHeartBeat)
self.loop.start(5)
def sendHeartBeat(self):
"""
Sends message alerting other peers to your presence.
"""
message = PeerDiscoveryMessage(HEARTBEAT,
self.name,
self.address,
self.port).serialize()
self.sendMessage(message)
log.msg("Sent ", message)
def stopProtocol(self):
"""
Gracefully tell peers to remove you.
"""
# XXX this needs to occur at shut down!
message = PeerDiscoveryMessage(EXIT,
self.name,
self.address,
self.port).serialize()
self.sendMessage(message)
if self.loop is not None:
self.loop.stop()
log.msg("Exit ", message)
def datagramReceived(self, datagram, address):
"""
Handles how datagrams are read when they are received. Here, as this
is a json serialised message, we are pulling out the peer information
and placing it in a list.
"""
parsed = PeerDiscoveryMessage.parseDatagram(datagram)
peerId = makePeerId(parsed.name, parsed.address, parsed.port)
# ignore those messages from yourself
if parsed.address == self.address:
return
log.msg("Decoding:{0} from {1}", datagram, address)
if parsed.message == EXIT:
if self._peers.exists(peerId):
self._peers.remove(peerId)
log.msg("dropping peer:", address)
elif parsed.message == HEARTBEAT:
if not self._peers.exists(peerId):
newPeer = Peer(parsed.name, parsed.address, parsed.port)
self._peers.add(newPeer)
log.msg("new Peer: address: {0}", parsed.name)
| mit | 3,262,398,523,487,847,400 | 32.633188 | 79 | 0.604778 | false | 4.201855 | false | false | false |
zenefits/sentry | src/sentry/status_checks/base.py | 6 | 1898 | from __future__ import absolute_import
import six
from functools import total_ordering
from sentry.utils.compat import implements_to_string
@implements_to_string
@total_ordering
class Problem(object):
# Used for issues that may render the system inoperable or have effects on
# data integrity (e.g. issues in the processing pipeline.)
SEVERITY_CRITICAL = 'critical'
# Used for issues that may cause the system to operate in a degraded (but
# still operational) state, as well as configuration options that are set
# in unexpected ways or deprecated in future versions.
SEVERITY_WARNING = 'warning'
# Mapping of severity level to a priority score, where the greater the
# score, the more critical the issue. (The numeric values should only be
# used for comparison purposes, and are subject to change as levels are
# modified.)
SEVERITY_LEVELS = {
SEVERITY_CRITICAL: 2,
SEVERITY_WARNING: 1,
}
def __init__(self, message, severity=SEVERITY_CRITICAL, url=None):
assert severity in self.SEVERITY_LEVELS
self.message = six.text_type(message)
self.severity = severity
self.url = url
def __eq__(self, other):
return self.SEVERITY_LEVELS[self.severity] == self.SEVERITY_LEVELS[other.severity]
def __lt__(self, other):
return self.SEVERITY_LEVELS[self.severity] < self.SEVERITY_LEVELS[other.severity]
def __str__(self):
return self.message
@classmethod
def threshold(cls, severity):
threshold = cls.SEVERITY_LEVELS[severity]
def predicate(problem):
return cls.SEVERITY_LEVELS[problem.severity] >= threshold
return predicate
class StatusCheck(object):
def check(self):
"""
Perform required checks and return a list of ``Problem`` instances.
"""
raise NotImplementedError
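# Illustrative sketch (hypothetical): a minimal StatusCheck subclass showing the
# expected contract -- check() returns a list of Problem instances, empty when
# everything is healthy. The queue-depth numbers are made up for the example.
class ExampleQueueCheck(StatusCheck):
    def __init__(self, pending_jobs, warn_threshold=100):
        self.pending_jobs = pending_jobs
        self.warn_threshold = warn_threshold
    def check(self):
        problems = []
        if self.pending_jobs > self.warn_threshold:
            problems.append(Problem(
                u'Background queue is backed up (%d pending jobs)' % self.pending_jobs,
                severity=Problem.SEVERITY_WARNING,
            ))
        return problems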
| bsd-3-clause | 6,380,959,031,924,183,000 | 29.612903 | 90 | 0.679663 | false | 4.021186 | false | false | false |
denfromufa/comtypes | comtypes/tools/typedesc_base.py | 6 | 5414 | # typedesc.py - classes representing C type descriptions
try:
set
except NameError:
from sets import Set as set
class Argument(object):
"a Parameter in the argument list of a callable (Function, Method, ...)"
def __init__(self, atype, name):
self.atype = atype
self.name = name
class _HasArgs(object):
def __init__(self):
self.arguments = []
def add_argument(self, arg):
assert isinstance(arg, Argument)
self.arguments.append(arg)
def iterArgTypes(self):
for a in self.arguments:
yield a.atype
def iterArgNames(self):
for a in self.arguments:
yield a.name
def fixup_argtypes(self, typemap):
for a in self.arguments:
a.atype = typemap[a.atype]
################
class Alias(object):
# a C preprocessor alias, like #define A B
def __init__(self, name, alias, typ=None):
self.name = name
self.alias = alias
self.typ = typ
class Macro(object):
# a C preprocessor definition with arguments
def __init__(self, name, args, body):
# all arguments are strings, args is the literal argument list
# *with* the parens around it:
# Example: Macro("CD_INDRIVE", "(status)", "((int)status > 0)")
self.name = name
self.args = args
self.body = body
class File(object):
def __init__(self, name):
self.name = name
class Function(_HasArgs):
location = None
def __init__(self, name, returns, attributes, extern):
_HasArgs.__init__(self)
self.name = name
self.returns = returns
self.attributes = attributes # dllimport, __stdcall__, __cdecl__
self.extern = extern
class Constructor(_HasArgs):
location = None
def __init__(self, name):
_HasArgs.__init__(self)
self.name = name
class OperatorFunction(_HasArgs):
location = None
def __init__(self, name, returns):
_HasArgs.__init__(self)
self.name = name
self.returns = returns
class FunctionType(_HasArgs):
location = None
def __init__(self, returns, attributes):
_HasArgs.__init__(self)
self.returns = returns
self.attributes = attributes
class Method(_HasArgs):
location = None
def __init__(self, name, returns):
_HasArgs.__init__(self)
self.name = name
self.returns = returns
class FundamentalType(object):
location = None
def __init__(self, name, size, align):
self.name = name
if name != "void":
self.size = int(size)
self.align = int(align)
class PointerType(object):
location = None
def __init__(self, typ, size, align):
self.typ = typ
self.size = int(size)
self.align = int(align)
class Typedef(object):
location = None
def __init__(self, name, typ):
self.name = name
self.typ = typ
class ArrayType(object):
location = None
def __init__(self, typ, min, max):
self.typ = typ
self.min = min
self.max = max
class StructureHead(object):
location = None
def __init__(self, struct):
self.struct = struct
class StructureBody(object):
location = None
def __init__(self, struct):
self.struct = struct
class _Struct_Union_Base(object):
location = None
def get_body(self):
return self.struct_body
def get_head(self):
return self.struct_head
class Structure(_Struct_Union_Base):
def __init__(self, name, align, members, bases, size, artificial=None):
self.name = name
self.align = int(align)
self.members = members
self.bases = bases
self.artificial = artificial
if size is not None:
self.size = int(size)
else:
self.size = None
self.struct_body = StructureBody(self)
self.struct_head = StructureHead(self)
class Union(_Struct_Union_Base):
def __init__(self, name, align, members, bases, size, artificial=None):
self.name = name
self.align = int(align)
self.members = members
self.bases = bases
self.artificial = artificial
if size is not None:
self.size = int(size)
else:
self.size = None
self.struct_body = StructureBody(self)
self.struct_head = StructureHead(self)
class Field(object):
def __init__(self, name, typ, bits, offset):
self.name = name
self.typ = typ
self.bits = bits
self.offset = int(offset)
class CvQualifiedType(object):
def __init__(self, typ, const, volatile):
self.typ = typ
self.const = const
self.volatile = volatile
class Enumeration(object):
location = None
def __init__(self, name, size, align):
self.name = name
self.size = int(size)
self.align = int(align)
self.values = []
def add_value(self, v):
self.values.append(v)
class EnumValue(object):
def __init__(self, name, value, enumeration):
self.name = name
self.value = value
self.enumeration = enumeration
class Variable(object):
location = None
def __init__(self, name, typ, init=None):
self.name = name
self.typ = typ
self.init = init
################################################################
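# Illustrative sketch (hypothetical): the classes above are plain containers
# filled in by the comtypes code-generation tooling. A hand-built description
# of ``struct POINT { long x; long y; };`` could look like the following
# (treating sizes/alignments as bit counts is an assumption of this example):
#
#     LONG = FundamentalType("long int", 32, 32)
#     point = Structure("POINT", align=32, members=[], bases=[], size=64)
#     point.members = [Field("x", LONG, None, 0),
#                      Field("y", LONG, None, 32)]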
| mit | -2,682,827,743,539,516,000 | 25.409756 | 76 | 0.575175 | false | 3.906205 | false | false | false |
DenysGurin/solnce | dgcrm/views.py | 1 | 51890 | import random
import re
import json
import operator
from itertools import chain
from datetime import datetime, timedelta, date, time
import calendar
from django.urls import reverse
from django.shortcuts import render, redirect, get_object_or_404, render_to_response
from django.http import HttpResponse, HttpResponseRedirect, Http404, JsonResponse
from django.views import View
from django.views.generic.detail import DetailView
from django.conf import settings
from django.utils import timezone
from django.utils.decorators import method_decorator
from django.utils.datastructures import MultiValueDictKeyError
from django.core.mail import send_mail
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth import authenticate, login, logout
from django.conf import settings
from .forms import *
from main.models import ServiceCategory, Service, DetailedService
from .models import *
from django.db.models import F, Q
from .Day import Day
def dayPeriods(hour=9, minute=0, second=0):
    # Return the start-of-day time; ``time`` from datetime is the correct constructor here.
    start_day = time(hour, minute, second)
    return start_day
class Search(object):
def __init__(self, search_str):
# super(Search, self).__init__(search_feadback)
self.to_find_fn_and_ln = re.match(u"(?P<first_name>[\u0400-\u0500]+) (?P<last_name>[\u0400-\u0500]+)",
search_str, re.U)
self.to_find_fn_or_ln = re.match(u"^(?P<some_name>[\u0400-\u0500]+)$|^([\u0400-\u0500]+[\s]+)$",
search_str, re.U)
self.to_find_tel = re.match(r"^(?:([+]\d{1,2}))?[\s.-]?(\d{3})?[\s.-]?(\d{3})?[\s.-]?(\d{2})?[\s.-]?(\d{2})$",
search_str, re.U)
self.to_find_email = re.match(r'(?:[a-z0-9!#$%&*+/=?^_`{|}~-]+'
r'(?:\.[a-z0-9!#$%&*+/=?^_`{|}~-]+)*|'
r'"(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21\x23-\x5b\x5d-\x7f]|'
r'\\[\x01-\x09\x0b\x0c\x0e-\x7f])*")@(?:(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+'
r'[a-z0-9](?:[a-z0-9-]*[a-z0-9])?|\[(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}'
r'(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?|[a-z0-9-]*[a-z0-9]:'
r'(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21-\x5a\x53-\x7f]|'
r'\\[\x01-\x09\x0b\x0c\x0e-\x7f])+)\])', search_str, re.U)
def serchFeadback(self):
if self.to_find_fn_and_ln:
# print(to_find_fn_and_ln.group('first_name'))
q_first = Q(first=self.to_find_fn_and_ln.group('first_name'))
q_last = Q(last=self.to_find_fn_and_ln.group('last_name'))
feadback_list = Feadback.objects.filter(q_first, q_last).order_by('id')
elif self.to_find_fn_or_ln:
some_name = re.findall(u'[\u0400-\u0500]+', self.to_find_fn_or_ln.group(0), re.U)[0]
# print(some_name)
q_some1 = Q(first__contains=some_name)
q_some2 = Q(last__contains=some_name)
feadback_list = Feadback.objects.filter(q_some1 | q_some2).order_by('id')
elif self.to_find_tel:
q_tel = Q(tel=self.to_find_tel.group())
feadback_list = Feadback.objects.filter(q_tel).order_by('id')
else:
feadback_list = [None]
return feadback_list
def serchClient(self):
if self.to_find_fn_and_ln:
# print(to_find_fn_and_ln.group('first_name'))
q_first = Q(first=self.to_find_fn_and_ln.group('first_name'))
q_last = Q(last=self.to_find_fn_and_ln.group('last_name'))
client_list = Client.objects.filter(q_first, q_last).order_by('id')
elif self.to_find_fn_or_ln:
some_name = re.findall(u'[\u0400-\u0500]+', self.to_find_fn_or_ln.group(0), re.U)[0]
# print(some_name)
q_some1 = Q(first__contains=some_name)
q_some2 = Q(last__contains=some_name)
client_list = Client.objects.filter(q_some1 | q_some2).order_by('id')
elif self.to_find_tel:
q_tel = Q(tel=self.to_find_tel.group())
client_list = Client.objects.filter(q_tel).order_by('id')
elif self.to_find_email:
q_email = Q(email=self.to_find_tel.group())
client_list = Client.objects.filter(q_email).order_by('id')
else:
client_list = [None]
return client_list
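# Illustrative usage (hypothetical values): Search parses one free-text query and
# can then be matched against feedbacks or clients, e.g.
#     Search(u"Иван Петров").serchFeadback()      # first + last name
#     Search(u"+38 067 123 45 67").serchClient()  # phone number
# Both methods fall back to a single-element [None] list when the query does not
# look like a name, phone number or e-mail address.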
def searchByNameTel(search_feadback):
to_find_fn_and_ln = re.match(u"(?P<first_name>[\u0400-\u0500]+) (?P<last_name>[\u0400-\u0500]+)", search_feadback,
re.U)
# print(to_find_fn_and_ln)
to_find_fn_or_ln = re.match(u"^(?P<some_name>[\u0400-\u0500]+)$|^([\u0400-\u0500]+[\s]+)$", search_feadback, re.U)
# print(to_find_fn_or_ln)
to_find_tel = re.match(r"^(?:([+]\d{1,2}))?[\s.-]?(\d{3})?[\s.-]?(\d{3})?[\s.-]?(\d{2})?[\s.-]?(\d{2})$",
search_feadback, re.U)
# print(to_find_tel)
if to_find_fn_and_ln:
# print(to_find_fn_and_ln.group('first_name'))
q_first = Q(first=to_find_fn_and_ln.group('first_name'))
q_last = Q(last=to_find_fn_and_ln.group('last_name'))
feadback_list = Feadback.objects.filter(q_first, q_last).order_by('id')
elif to_find_fn_or_ln:
some_name = re.findall(u'[\u0400-\u0500]+', to_find_fn_or_ln.group(0), re.U)[0]
# print(some_name)
q_some1 = Q(first__contains=some_name)
q_some2 = Q(last__contains=some_name)
feadback_list = Feadback.objects.filter(q_some1 | q_some2).order_by('id')
elif to_find_tel:
q_tel = Q(tel=to_find_tel.group())
feadback_list = Feadback.objects.filter(q_tel).order_by('id')
else:
feadback_list = [None]
return feadback_list
class QueryByPeriod(object):
@classmethod
def byDay(cls, min_datetime=None):
if min_datetime:
min_date = min_datetime.date()
else:
min_date = datetime.date(timezone.now())
max_date = min_date + timedelta(days=1)
q_object = Q()
q_object &= Q(date__gte=min_date)
q_object &= Q(date__lt=max_date)
return q_object
@classmethod
def byWeek(cls, min_datetime=None):
if min_datetime:
min_date = datetime.date(min_datetime - timedelta(days=datetime.weekday(min_datetime)))
else:
min_date = datetime.date(
datetime.now(timezone.utc) - timedelta(days=datetime.weekday(datetime.now(timezone.utc))))
max_date = min_date+timedelta(days=7)
q_object = Q()
        q_object &= Q(date__gte=min_date)
q_object &= Q(date__lt=max_date)
return q_object
@classmethod
def byMonth(cls, min_datetime=None):
if min_datetime:
min_date = datetime.date(min_datetime - timedelta(days=min_datetime.day - 1))
else:
min_date = datetime.date(datetime.now(timezone.utc) - timedelta(days=datetime.now(timezone.utc).day - 1))
        max_date = date(min_date.year + min_date.month // 12, min_date.month % 12 + 1, 1)
q_object = Q()
q_object &= Q(date__gte=min_date)
q_object &= Q(date__lt=max_date)
return q_object
@staticmethod
def queryOnday(query_obj, min_datetime = None):
print(query_obj)
query = query_obj.filter(QueryByPeriod.byDay(min_datetime))
print(query)
if query.count() < 1:
return None
return query.order_by('date')
@staticmethod
def queryOnweek(query_obj, min_datetime = None):
query = query_obj.filter(QueryByPeriod.byWeek(min_datetime))
if query.count() < 1:
return None
return query.order_by('date')
@staticmethod
def queryOnmonth(query_obj, min_datetime = None):
query = query_obj.filter(QueryByPeriod.byMonth(min_datetime))
if query.count() < 1:
return None
return query.order_by('date')
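# Illustrative usage (hypothetical): the classmethods above only build Q objects
# against a ``date`` field; the static helpers apply them to a queryset and
# return None when nothing matches, e.g.
#     open_tasks_today = QueryByPeriod.queryOnday(Task.objects.filter(done=False))
#     all_tasks_this_month = QueryByPeriod.queryOnmonth(Task.objects.all())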
class EventList(object):
@classmethod
def byDay(cls, min_datetime):
if not min_datetime:
min_datetime = datetime.date(timezone.now())
else:
min_datetime = min_datetime.date()
max_datetime = min_datetime + timedelta(days=1)
q_object = Q()
q_object &= ~Q(status="failed")
q_object &= Q(date_time__gt=min_datetime)
q_object &= Q(date_time__lt=max_datetime)
return q_object
@classmethod
def byWeek(cls, min_datetime):
if not min_datetime:
min_datetime = datetime.date(datetime.now(timezone.utc)-timedelta(days=datetime.weekday(datetime.now(timezone.utc))))
else:
min_datetime = datetime.date(min_datetime-timedelta(days=datetime.weekday(min_datetime)))
max_datetime = min_datetime+timedelta(days=7)
q_object = Q()
q_object &= ~Q(status="failed")
q_object &= Q(date_time__gt=min_datetime)
q_object &= Q(date_time__lt=max_datetime)
return q_object
@classmethod
def byMonth(cls, min_datetime):
if not min_datetime:
min_datetime = datetime.date(datetime.now(timezone.utc)-timedelta(days=datetime.now(timezone.utc).day-1))
else:
min_datetime = datetime.date(min_datetime-timedelta(days=min_datetime.day-1))
        max_datetime = date(min_datetime.year + min_datetime.month // 12, min_datetime.month % 12 + 1, 1)
q_object = Q()
q_object &= ~Q(status="failed")
q_object &= Q(date_time__gte=min_datetime)
q_object &= Q(date_time__lt=max_datetime)
return q_object
@staticmethod
def eventsOnday(min_datetime = None):
events = Event.objects.all().filter(EventList.byDay(min_datetime))
if events.count() < 1:
return None
return events.order_by('date_time')
@staticmethod
def eventsOnweek(min_datetime = None):
events = Event.objects.all().filter(EventList.byWeek(min_datetime))
if events.count() < 1:
return None
return events.order_by('date_time')
@staticmethod
def eventsOnmonth(min_datetime = None):
events = Event.objects.all().filter(EventList.byMonth(min_datetime))
if events.count() < 1:
return None
return events.order_by('date_time')
class EventPeriod(object):
def isInPast(self):
if self.event_start < timezone.now():
return True
return False
def __init__(self, event_obj):
self.event_start = event_obj.date_time
self.event_end = event_obj.date_time + timedelta(hours=event_obj.duration.hour, minutes=event_obj.duration.minute)
self.is_in_past = EventPeriod.isInPast(self)
def __str__(self):
return "%s %s"%(str(self.event_start), str(self.event_end))
class Period(object):
def __init__(self, start_period, length_period = timedelta(minutes=30)):
self.period_datetime = str(start_period)
self.render = "{0:%H:%M}".format(start_period.time())
self.start_period = start_period
self.length_period = length_period
self.end_period = start_period + length_period
self.start_event = None
self.event = None
# self.contain_event = None
self.in_past = None
def __str__(self):
return "%s %s"%(self.start_period, self.event)
def isPeriodInPast(self):
if self.start_period < timezone.now():
self.in_past = True
return True
return False
def isEventStartInPeriod(self, event_obj=None):
event_period_obj = EventPeriod(event_obj)
if event_period_obj == None or event_obj == None:
return False
eve_st_gte_per_st = event_period_obj.event_start >= self.start_period
eve_st_lt_per_en = event_period_obj.event_start < self.end_period
if eve_st_gte_per_st and eve_st_lt_per_en:
# Period.extendPeriod(self, event_period_obj)
# while self.end_period < event_period_obj.event_end:
# self.end_period += self.length_period
self.start_event = True
return True
return False
def isEventEndInPeriod(self, event_obj=None):
event_period_obj = EventPeriod(event_obj)
if event_period_obj == None or event_obj == None:
return False
eve_en_gt_per_st = event_period_obj.event_end > self.start_period
eve_en_lte_per_en = event_period_obj.event_end <= self.end_period
if eve_en_gt_per_st and eve_en_lte_per_en:
# Period.extendPeriod(self, event_period_obj)
# while self.end_period < event_period_obj.event_end:
# self.end_period += self.length_period
# self.event = event_obj
return True
return False
def extendPeriod(self, event_obj):
event_period_obj = EventPeriod(event_obj)
while self.end_period < event_period_obj.event_end:
self.end_period += self.length_period
def isEventInPeriod(self, event_obj=None):
event_period_obj = EventPeriod(event_obj)
if event_period_obj == None:
return False
# self.contain_event = False
eve_st_gte_per_st = event_period_obj.event_start >= self.start_period
eve_st_lt_per_en = event_period_obj.event_start < self.end_period
eve_st_lt_per_st = event_period_obj.event_start < self.start_period
eve_en_gt_per_st = event_period_obj.event_end > self.start_period
eve_en_lte_per_en = event_period_obj.event_end <= self.end_period
eve_en_gt_per_en = event_period_obj.event_end > self.end_period
# if (eve_st_gte_per_st and eve_st_lt_per_en) or (eve_en_gte_per_st and eve_en_lt_per_en):
if (eve_st_gte_per_st and eve_st_lt_per_en) or \
(eve_st_lt_per_st and eve_en_gt_per_en) or \
(eve_en_gt_per_en and eve_en_lte_per_en) or \
(eve_en_gt_per_st and eve_en_lte_per_en):
# self.contain_event = True
self.event = event_obj
return True
return False
class Day(object):
def timePeriods(self):
period = Period(self.start_day)
period_list = []
if self.event_list:
event_list = list(self.event_list)
else:
event_list = []
stop = 20
while period.end_period <= self.end_day:# or stop > 0:
stop -= 1
# print(period)
period.isPeriodInPast()
if len(event_list) > 0:
event_obj = event_list[0]
# event_period_obj = EventPeriod(event_obj)
# print(event_period_obj)
# print(period.isEventStartInPeriod(event_period_obj, event_obj))
if period.isEventStartInPeriod(event_obj):
period.isEventInPeriod(event_obj)
# period.event = event_obj
period.extendPeriod(event_obj)
event_list.pop(0)
period_list.append(period)
period = Period(period_list[-1].end_period)
return period_list
def __init__(self, event_list=None, day_date=datetime.date(timezone.now()), start_day=time(9, 0, 0), end_day=time(20, 0, 0)):
weekdays_name = {1:"Понедельник",
2:"Вторник",
3:"Среда",
4:"Четверг",
5:"Пятница",
6:"Суббота",
7:"Воскресенье"}
self.event_list = event_list
self.day_date = day_date
self.start_day = datetime.combine(self.day_date, start_day)
self.end_day = datetime.combine(self.day_date, end_day)
# print(self.start_day, self.end_day)
self.calendar_data = self.day_date.isocalendar()
self.day_of_week = weekdays_name[self.day_date.isoweekday()]
self.time_periods = Day.timePeriods(self)
self.sorted_time_periods = sorted(self.time_periods, key=lambda x: x.start_period)
# self.sorted_time_periods = sorted(self.time_periods.items(), key=operator.itemgetter(0))
def __str__(self):
return "{0:%d} {0:%m} {0:%Y}".format(self.day_date)
def __repr__(self):
return "{0:%d} {0:%m} {0:%Y}".format(self.day_date)
def __iter__(self):
return iter(self.sorted_time_periods)
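# Illustrative usage (hypothetical): a Day object slices one working day
# (9:00-20:00 by default) into Period slots and attaches events to them, e.g.
#     day = Day(event_list=EventList.eventsOnday())
#     for period in day:                      # iterates sorted_time_periods
#         print(period.render, period.event, period.in_past)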
class WeekDay(Day):
def timePeriods(self):
period = Period(self.start_day)
period_list = []
if self.event_list:
event_list = list(self.event_list)
else:
event_list = []
stop = 20
while period.end_period <= self.end_day:# or stop > 0:
stop -= 1
# print(period)
period.isPeriodInPast()
if len(event_list) > 0:
event_obj = event_list[0]
# event_period_obj = EventPeriod(event_obj)
# print(event_period_obj)
# print(period.isEventStartInPeriod(event_obj))
# print(period.isEventEndInPeriod(event_obj))
# if period.isEventInPeriod(event_obj):
# period.contain_event = True
# if period.isEventStartInPeriod(event_obj):
# period.event = event_obj
period.isEventInPeriod(event_obj)
period.isEventStartInPeriod(event_obj)
if period.isEventEndInPeriod(event_obj):
event_list.pop(0)
period_list.append(period)
period = Period(period_list[-1].end_period)
return period_list
def __init__(self, event_list=None, day_date=datetime.date(datetime.now(timezone.utc)), start_day=time(9, 0, 0), end_day=time(20, 0, 0)):
Day.__init__(self, event_list, day_date, start_day, end_day)
weekdays_name = {1:"Понедельник",
2:"Вторник",
3:"Среда",
4:"Четверг",
5:"Пятница",
6:"Суббота",
7:"Воскресенье"}
self.calendar_data = self.day_date.isocalendar()
self.day_of_week = weekdays_name[self.day_date.isoweekday()]
self.time_periods = WeekDay.timePeriods(self)
self.sorted_time_periods = sorted(self.time_periods, key=lambda x: x.start_period)
def __str__(self):
return "{0:%d} {0:%m} {0:%Y}".format(self.day_date)
def __iter__(self):
return iter(self.sorted_time_periods)
class Week(object):
def weekDays(self):
days_list = []
current_day = self.start_week
next_day = self.start_week + timedelta(days=1)
for day in range(7):
q_start_day = Q(date_time__gte=current_day)
q_end_day = Q(date_time__lt=next_day)
day_event_list = self.event_list
if self.event_list:
day_event_list = self.event_list.filter(q_start_day, q_end_day)
# print(day_event_list)
day = WeekDay(day_event_list, current_day)
# print(day)
days_list.append(day)
# print(current_day)
current_day += timedelta(days=1)
next_day += timedelta(days=1)
return days_list
def __init__(self, event_list=None, date_time=datetime.now(timezone.utc), start_day=time(9, 0, 0), end_day=time(20, 0, 0)):
self.start_week = datetime.date(date_time-timedelta(days=datetime.weekday(date_time)))
self.end_week = self.start_week + timedelta(days=7)
self.event_list = event_list
self.day_date = datetime.date(datetime.now(timezone.utc))
self.start_day = datetime.combine(self.day_date, start_day)
self.end_day = datetime.combine(self.day_date, end_day)
self.week_days = Week.weekDays(self)
self.start_date = str(self.start_week)
def __str__(self):
return str(self.week_days)
def __iter__(self):
return iter(self.week_days)
class Login(View):
def get(self, request):
login_form = MyAuthenticationForm()
return render(request, "login.html", {"login_form": login_form})
def post(self, request):
if request.POST.get("submit") == "login":
login_form = MyAuthenticationForm(None, data=request.POST)
# return HttpResponse(login_form)
if login_form.is_valid():
# return HttpResponse(123)
client_data = login_form.cleaned_data
user = authenticate(request, **login_form.cleaned_data)
if user is not None:
login(request, user)
return redirect('/crm/')
return HttpResponse(user)
# return HttpResponse("isn't valid")
elif request.POST.get("submit") == "logout":
logout(request)
return redirect('/crm/login/')
class LoginRequiredView(LoginRequiredMixin, View):
login_url = '/crm/login/'
redirect_field_name = '/crm/login/'
class CrmMain(LoginRequiredView):
# @classmethod
# def addClientToEvent(self):
# clients = Client.objects.all()
# events = Event.objects.all()
# for event in events:
# if not event.client:
# tel = event.feadback.tel
# for client in clients:
# if client.tel == tel:
# event.client = client
# event.save()
def get(self, request):
# CrmMain.addClientToEvent(self)
context = {"user": request.user}
serch_form = SearchForm()#initial={'search': '%sпоиск'%search_icon})
serch_feadback_form = SearchFeadbackForm()
feadback_list = Feadback.objects.all()
feadback_list_inwork = feadback_list.filter(has_event=False)
if feadback_list_inwork:
feadback_list_inwork.order_by('date')
feadback_list_done = feadback_list.filter(has_event=True)
if feadback_list_done:
feadback_list_done.order_by('-date')
feadback_list = list(feadback_list_inwork)+list(feadback_list_done)
# print(Task.objects.filter(done=False))
task_list = Task.objects.all().order_by("-date")
context["task_list"] = task_list
periods = Day(event_list=EventList.eventsOnday())
# return HttpResponse(EventList.eventsOnday(self))
context["serch_form"] = serch_form
context["serch_feadback_form"] = serch_feadback_form
context["feadback_list"] = feadback_list
context["periods"] = periods
return render(request, "crm_main.html", context)
def post(self, request):
context = {}
search_feadback = u"%s"%str(request.POST.get("search_feadback"))
# print(search_feadback)
serch_all = re.findall(u'[\u0400-\u0500]+', search_feadback, re.U)
# print(serch_all)
if request.is_ajax() and search_feadback:
print(Search(search_feadback).serchFeadback())
context["feadback_list"] = Search(search_feadback).serchFeadback()
# context["feadback_list"] = searchByNameTel(search_feadback)
return render(request, "crm_main/feadback_list_ajax.html", context)
class CrmCalendar(LoginRequiredView):
def get(self, request):
context = {"user": request.user}
periods = Day(event_list=EventList.eventsOnday())
week_periods = Week(event_list=EventList.eventsOnweek())
# return HttpResponse(EventList.eventsOnday(self))
context["periods"] = periods
context["week_periods"] = week_periods
return render(request, "crm_calendar.html", context)
def post(self, request):
pass
class CrmClients(LoginRequiredView):
def get(self, request):
context = {}
serch_client_form = SearchClientForm()
client_list = Client.objects.all()
context["serch_client_form"] = serch_client_form
context["client_list"] = client_list
return render(request, "crm_clients.html", context)
def post(self, request):
context = {}
search_client = u"%s" % str(request.POST.get("search_client"))
# print(search_client)
serch_all = re.findall(u'[\u0400-\u0500]+', search_client, re.U)
# print(serch_all)
if request.is_ajax() and search_client:
# print(Search(search_feadback).serchFeadback())
context["client_list"] = Search(search_client).serchClient()
# context["feadback_list"] = searchByNameTel(search_feadback)
return render(request, "crm_clients/clients_list_ajax.html", context)
class QByPeriod(object):
@classmethod
def DaysInMonths(cls, from_year_month, to_year_month):
### from_year_month == (year, month)
### to_year_month == (year, month)
days_sum = 0
if from_year_month[0] < to_year_month[0] or \
(from_year_month[0] == to_year_month[0] and from_year_month[1] < to_year_month[1]):
for year in range(from_year_month[0], to_year_month[0]+1):
print(year)
if year == to_year_month[0]:
print(range(from_year_month[1], to_year_month[1]+1))
for month in range(from_year_month[1], to_year_month[1]+1):
print(month)
days_sum += calendar.monthrange(year, month)[1]
else:
print(range(from_year_month[1], 13))
for month in range(from_year_month[1], 13):
days_sum += calendar.monthrange(year, month)[1]
from_year_month[1] = 1
print(days_sum)
return days_sum
else:
raise "from_month has to be less than to_month"
@classmethod
def byMonth(cls, field_name, min_datetime=None):
current = datetime.now(timezone.utc)
if min_datetime:
min_date = datetime.date(min_datetime - timedelta(days=min_datetime.day - 1))
else:
min_date = datetime.date(current - timedelta(days=current.day - 1))
        max_date = date(min_date.year + min_date.month // 12, min_date.month % 12 + 1, 1)
print(min_date, max_date)
filter__gte = field_name + '__' + 'gte'
filter__lt = field_name + '__' + 'lt'
q_object = Q()
q_object &= Q(**{filter__gte: min_date})
q_object &= Q(**{filter__lt: max_date})
return q_object
@classmethod
def byThreeMonths(cls, field_name, min_datetime=None):
current = datetime.now(timezone.utc)
        # use lists so DaysInMonths can mutate them, and add (not subtract) the
        # negative month offset so the month number stays in the 1-12 range
        to_month = [current.year, current.month - 1]
        if (current.month - 2) > 0:
            from_month = [current.year, current.month - 2]
        else:
            from_month = [current.year - 1, 12 + (current.month - 2)]
days = QByPeriod.DaysInMonths(from_month,to_month)
if min_datetime:
min_date = datetime.date(min_datetime - timedelta(days=min_datetime.day - 1 + days))
else:
min_date = datetime.date(
current - timedelta(days=current.day - 1 + days))
        max_month = min_date.month + 3
        max_date = date(min_date.year + (max_month - 1) // 12, (max_month - 1) % 12 + 1, 1)
filter__gte = field_name + '__' + 'gte'
filter__lt = field_name + '__' + 'lt'
q_object = Q()
q_object &= Q(**{filter__gte: min_date})
q_object &= Q(**{filter__lt: max_date})
return q_object
@classmethod
def byTwelveMonths(cls, field_name, min_datetime=None):
current = datetime.now(timezone.utc)
to_month = [current.year, current.month - 1]
if (current.month - 12) > 0:
from_month = [current.year, current.month - 12]
else:
from_month = [current.year - 1, 12 + (current.month - 12)]
print(from_month, to_month)
days = QByPeriod.DaysInMonths(from_month, to_month)
if min_datetime:
min_date = datetime.date(min_datetime - timedelta(days=min_datetime.day - 1 + days))
else:
min_date = datetime.date(
current - timedelta(days=current.day - 1 + days))
max_date = min_date + timedelta(days=days+calendar.monthrange(current.year, current.month)[1])
filter__gte = field_name + '__' + 'gte'
filter__lt = field_name + '__' + 'lt'
q_object = Q()
q_object &= Q(**{filter__gte: min_date})
q_object &= Q(**{filter__lt: max_date})
return q_object
class QuerySetByPeriod(QByPeriod):
def __init__(self, Query_set, field_name, min_datetime=None):
self.Query_set = Query_set
self.field_name = field_name
self.min_datetime = min_datetime
def getByMounth(self):
return self.Query_set.filter(self.byMonth(self.field_name, self.min_datetime))
def getThreeMonths(self):
return self.Query_set.filter(self.byThreeMonths(self.field_name, self.min_datetime))
def getByTwelveMonths(self):
return self.Query_set.filter(self.byTwelveMonths(self.field_name, self.min_datetime))
def __str__(self):
return str(self.Query_set)
class DataSets(object):
@staticmethod
def RatingByEventFrequency(query_set):
from operator import itemgetter
services_raiting = {}
services_list = []
for event in query_set:
if event.detailed_service in services_list:
services_raiting[event.detailed_service] += 1
else:
services_list.append(event.detailed_service)
services_raiting[event.detailed_service] = 0
return sorted(services_raiting.items(), key=itemgetter(1), reverse=True)
@staticmethod
def RatingByDaysLoad(query_set):
services_raiting = {}
for day in range(1,8):
services_raiting[day] = []
for event in query_set:
day = event.date_time.isoweekday()
services_raiting[day].append(event.detailed_service)
return sorted(services_raiting.items(), key=lambda item: len(item[1]), reverse=True)
class CrmStatistic(LoginRequiredView):
def get(self, request):
context = {}
clients = QuerySetByPeriod(Client.objects.all(), "registration")
print(clients)
events = QuerySetByPeriod(Event.objects.all(), "date_time")
print(clients)
new_clients_by_month = clients.getByMounth()
context["new_clients_by_month"] = new_clients_by_month
print(new_clients_by_month)
new_clients_by_three_month = clients.getThreeMonths()
context["new_clients_by_three_month"] = new_clients_by_three_month
print(new_clients_by_three_month)
new_clients_by_twelve_month = clients.getByTwelveMonths()
context["new_clients_by_twelve_month"] = new_clients_by_twelve_month
print(new_clients_by_twelve_month)
new_events_by_month = events.getByMounth()
context["new_events_by_month"] = new_events_by_month
print("#################")
print(new_events_by_month)
new_events_by_three_month = events.getThreeMonths()
context["new_events_by_three_month"] = new_events_by_three_month
print(new_events_by_three_month)
new_events_by_twelve_month = events.getByTwelveMonths()
context["new_events_by_twelve_month"] = new_events_by_twelve_month
print(new_events_by_twelve_month)
raiting_by_event_frequency_sorted = DataSets.RatingByEventFrequency(new_events_by_month)
context["raiting_by_event_frequency_sorted"] = raiting_by_event_frequency_sorted
print(raiting_by_event_frequency_sorted)
raiting_by_days_load_sorted = DataSets.RatingByDaysLoad(new_events_by_month)
context["raiting_by_days_load_sorted"] = raiting_by_days_load_sorted
print(raiting_by_days_load_sorted)
return render(request, "crm_statistic.html", context)
class CrmFinance(LoginRequiredView):
pass
class DetailedFeadback(LoginRequiredView):
@classmethod
def getModelInstanceData(cls, instance):
data = {}
for f in instance._meta.get_fields():
if not f.auto_created:
data[f.name] = getattr(instance, f.name)
# print(type(f))
return data
def get(self, request, feadback_id):
feadback = get_object_or_404(Feadback, pk=feadback_id)
event_form = EventForm(initial={'feadback': feadback})
context = {}
context["feadback"] = feadback
context["event_form"] = event_form
return render(request, "detailed_feadback.html", context)
def post(self, request, feadback_id):
feadback = get_object_or_404(Feadback, pk=feadback_id)
# print(DetailedFeadback.getModelInstanceData(self, feadback))
client_data = DetailedFeadback.getModelInstanceData(feadback)
del client_data['wish']
del client_data['date']
del client_data['has_event']
client_obj, created = Client.objects.get_or_create(tel=feadback.tel, defaults=client_data)
event_form = EventForm(request.POST)
if event_form.is_valid():
event_data = event_form.cleaned_data
event_data["client"] = client_obj
event_update_obj, event_create_bool = Event.objects.update_or_create(feadback=feadback, defaults=event_data)
feadback.has_event = True
feadback.save()
# if event_create_bool:
# event_form["feadback"].client.services.add(event_update_obj.detailed_service)
# else:
# event_form["feadback"].client.services.
return redirect('/crm/')
class ClientCard(LoginRequiredView):
@classmethod
def updateModelInstanceData(cls, model_inst, data_dict):
for key in data_dict.keys():
setattr(model_inst, key, data_dict[key])
model_inst.save()
@classmethod
def getPrice(cls, event_obj):
try:
return getattr(event_obj, "price")
except AttributeError:
return None
def get(self, request, event_id=None, client_id=None):
print(event_id, client_id)
context = {}
if event_id and not client_id:
print("event")
event = get_object_or_404(Event, pk=event_id)
event_period = EventPeriod(event)
client = event.client
event_list = Event.objects.filter(client=client).order_by("-date_time")
event_price = self.getPrice(event)
print(event_price)
client_form = ClientForm(initial=client.__dict__)
price_form = PriceForm(initial=event_price.__dict__)
result_form = ResultForm(initial={
'date': (event.date_time + timedelta(hours=event.duration.hour, minutes=event.duration.minute))})
pay_form = PayForm(initial={
'date_time': (event.date_time + timedelta(hours=event.duration.hour, minutes=event.duration.minute))})
detailed_event_form = DetailedEventForm(initial={})
context["event_id"] = int(event_id)
context["event"] = event
context["event_period"] = event_period
elif client_id and not event_id:
print("client")
client = get_object_or_404(Client, pk=client_id)
event_list = Event.objects.filter(client=client).order_by("-date_time")
client_form = ClientForm(initial=client.__dict__)
price_form = PriceForm(initial={})
result_form = ResultForm(initial={'date': timezone.now()})
pay_form = PayForm(initial={'date_time': timezone.now()})
detailed_event_form = DetailedEventForm(initial={})
context["client"] = client
context["event_list"] = event_list
context["client_form"] = client_form
context["price_form"] = price_form
context["result_form"] = result_form
context["pay_form"] = pay_form
context["detailed_event_form"] = detailed_event_form
if event_id and not client_id:
return render(request, "detailed_event.html", context)
elif client_id and not event_id:
return render(request, "detailed_client.html", context)
def post(self, request, event_id=None, client_id=None):
result_form = ResultForm(request.POST, request.FILES)
pay_form = PayForm(request.POST)
detailed_event_form = DetailedEventForm(request.POST)
client_form = ClientForm(request.POST)
price_form = PriceForm(request.POST)
event = get_object_or_404(Event, pk=request.POST.get("event_id"))
if result_form.is_valid() and request.POST.get("submit") == "add_result":
result_data = result_form.cleaned_data
result_data["client"] = event.client
result_data["detailed_service"] = event.detailed_service
result = Result.objects.create(**result_data)
event.results.add(result)
# return render(request, "create_event.html", {})
if pay_form.is_valid() and request.POST.get("submit") == "add_pay":
pay_data = pay_form.cleaned_data
pay_data["client"] = event.client
pay_data["detailed_service"] = event.detailed_service
# pay_update_obj, pay_create_bool = Pay.objects.update_or_create(event=event, defaults=pay_data)
pay = Pay.objects.create(**pay_data)
event.pays.add(pay)
# return render(request, "create_event.html", {})
if client_form.is_valid() and request.POST.get("submit") == "edit_client":
client_data = client_form.cleaned_data
# print(client_data)
self.updateModelInstanceData(event.client, client_data)
# event.status = detailed_event_form.cleaned_data["status"]
event.save()
print(price_form.is_valid())
if price_form.is_valid() and request.POST.get("submit") == "edit_price":
price_data = price_form.cleaned_data
# print(price_data)
self.updateModelInstanceData(event.price, price_data)
# event.status = detailed_event_form.cleaned_data["status"]
event.save()
if detailed_event_form.is_valid() and request.POST.get("submit") == None:
detailed_event_data = detailed_event_form.cleaned_data
event.status = detailed_event_form.cleaned_data["status"]
# print(detailed_event_form.cleaned_data["status"])
event.save()
# return render(request, "create_event.html", {})
return redirect("/crm/event/%s" % event_id)
class CreateTask(LoginRequiredView):
def get(self, request):
task_form = TaskForm()#initial={'date_time': datetime, 'duration': duration})
context = {}
context["task_form"] = task_form
return render(request, "create_task.html", context)
def post(self, request):
task_form = TaskForm(request.POST)
if task_form.is_valid():
task_data = task_form.cleaned_data
Task.objects.create(**task_data)
return redirect("/crm")
return HttpResponse(task_form.is_valid())
class TaskActions(LoginRequiredView):
def post(self, request):
if request.method == "POST" and request.is_ajax():
context = {}
task_id = request.POST.get("task_id")
action_flag = request.POST.get("action_flag")
print(action_flag)
if action_flag == "done":
event = get_object_or_404(Task, pk=task_id)
event.done = True
event.save()
context["task_list"] = Task.objects.all()
return render(request, "crm_main/task_list_ajax.html", context)
class CreateEvent(LoginRequiredView):
def get(self, request):
# feadback = get_object_or_404(Feadback, pk=feadback_id)
datetime = request.GET.get("datetime")
duration = time(1, 0)
feadback_form = FeadbackForm(initial={})
event_form = EventForm(initial={'date_time': datetime, 'duration': duration})
price_form = PriceForm(initial={'discount': 0})
context = {}
context["feadback_form"] = feadback_form
context["event_form"] = event_form
context["price_form"] = price_form
return render(request, "create_event.html", context)
def post(self, request):
feadback_form = FeadbackForm(request.POST)
event_form = EventForm(request.POST)
price_form = PriceForm(request.POST)
print(price_form)
if event_form.is_valid() and feadback_form.is_valid():
feadback_data = feadback_form.cleaned_data
price_data = price_form.cleaned_data
client_data = feadback_form.cleaned_data.copy()
del client_data['wish']
client_obj, created = Client.objects.get_or_create(tel=client_data['tel'], defaults=client_data)
feadback_data["has_event"] = True
event_data = event_form.cleaned_data
feadback = Feadback.objects.create(**feadback_data)
price = Price.objects.create(**price_data)
event_data["feadback"] = feadback
event_data["price"] = price
event_data["client"] = client_obj
q1 = Q(date_time__gte=event_data["date_time"])
q2 = Q(date_time__lt=(event_data["date_time"] + timedelta(hours=event_data["duration"].hour, minutes=event_data["duration"].minute)))
if Event.objects.filter( q1 & q2).count() < 1:
event = Event.objects.create(**event_data)
return redirect('/crm/')
# event_update_obj, event_create_bool = Event.objects.update_or_create(feadback=feadback, defaults=event_data)
# feadback.has_event = True
# feadback.save()
# if event_create_bool:
# event_form["feadback"].client.services.add(event_update_obj.detailed_service)
# else:
# event_form["feadback"].client.services.
else:
return HttpResponse('недостаточно времени')
        return HttpResponse('feadback_form {} event_form {}'.format(event_form.is_valid(), feadback_form.is_valid()))
class TransferEvent(CrmMain):
def get(self, request, event_id):
context = {"event_id": event_id}
week_periods = Week(event_list=EventList.eventsOnweek())
context["week_periods"] = week_periods
return render(request, "transfer_event_calendar.html", context)
class DeleteEvent(LoginRequiredView):
def get(self, request, event_id):
event = get_object_or_404(Event, pk=event_id)
context = {"event": event}
return render(request, "delete_event.html", context)
def post(self, request, event_id):
event = get_object_or_404(Event, pk=event_id)
if request.POST.get("submit") == "yes":
event.delete()
return redirect('/crm/')
elif request.POST.get("submit") == "no":
return redirect('/crm/event/%s/'%event_id)
# data = {}
# for f in Event._meta.get_fields():
# if not f.auto_created:
# data[f.name] = getattr(event, f.name)
# # print(type(data["feadback"]))
# # return HttpResponse(type(data["feadback"]))
# # CanceledEvent.objects.create()
# # obj, created = CanceledEvent.objects.update_or_create(defaults=data)
# event.delete()
# # return HttpResponse(obj)
return redirect('/crm/')
class DeleteResult(LoginRequiredView):
def get(self, request, event_id, result_id):
result = get_object_or_404(Result, pk=result_id)
context = {"result": result}
return render(request, "delete_result.html", context)
def post(self, request, event_id, result_id):
result = get_object_or_404(Result, pk=result_id)
if request.POST.get("submit") == "yes":
result.delete()
return redirect('/crm/event/%s/'%event_id)
def transferEvent(request, event_id):
datetime = request.GET.get("datetime")
event = get_object_or_404(Event, pk=event_id)
data = {"date_time" : datetime}
obj, created = Event.objects.update_or_create(pk=event_id, defaults=data)
return redirect('/crm/')
def searchFeadback(request):
pass
def feadbackBar(request):
if request.method== "POST" and request.is_ajax():
context = {}
if request.POST.get("filter_type") == "all":
feadback_list_inwork = Feadback.objects.filter(has_event=False).order_by('date')
feadback_list_done = Feadback.objects.filter(has_event=True).order_by('-date')
feadback_list = list(feadback_list_inwork)+list(feadback_list_done)
context["feadback_list"] = feadback_list
return render(request, "crm_main/feadback_list_ajax.html", context)
elif request.POST.get("filter_type") == "to_work":
feadback_list = Feadback.objects.filter(has_event=False).order_by('date')
context["feadback_list"] = feadback_list
return render(request, "crm_main/feadback_list_ajax.html", context)
elif request.POST.get("filter_type") == "processed":
feadback_list = Feadback.objects.filter(has_event=True).order_by('-date')
context["feadback_list"] = feadback_list
return render(request, "crm_main/feadback_list_ajax.html", context)
def taskBar(request):
if request.method == "POST" and request.is_ajax():
context = {}
all_tasks = Task.objects.all()
print("all tasks")
print(all_tasks)
if request.POST.get("filter_type") == "all":
task_list_inwork = all_tasks.filter(done=False)
print(task_list_inwork)
if task_list_inwork:
task_list_inwork = task_list_inwork.order_by('date')
else:
task_list_inwork = []
task_list_done = all_tasks.filter(done=True)
print(task_list_done)
if task_list_done:
task_list_done = task_list_done.order_by('-date')
else:
task_list_done = []
task_list = list(task_list_inwork) + list(task_list_done)
print(task_list)
context["task_list"] = task_list
return render(request, "crm_main/task_list_ajax.html", context)
elif request.POST.get("filter_type") == "on_day":
task_list_inwork = QueryByPeriod.queryOnday(all_tasks.filter(done=False))
print(task_list_inwork)
if task_list_inwork:
task_list_inwork = task_list_inwork.order_by('date')
else:
task_list_inwork = []
task_list_done = QueryByPeriod.queryOnday(all_tasks.filter(done=True))
print(task_list_done)
if task_list_done:
task_list_done = task_list_done.order_by('-date')
else:
task_list_done = []
task_list = list(task_list_inwork) + list(task_list_done)
context["task_list"] = task_list
return render(request, "crm_main/task_list_ajax.html", context)
elif request.POST.get("filter_type") == "on_week":
task_list_inwork = QueryByPeriod.queryOnweek(all_tasks.filter(done=False))
print(task_list_inwork)
if task_list_inwork:
task_list_inwork = task_list_inwork.order_by('date')
else:
task_list_inwork = []
task_list_done = QueryByPeriod.queryOnweek(all_tasks.filter(done=True))
print(task_list_done)
if task_list_done:
task_list_done = task_list_done.order_by('-date')
else:
task_list_done = []
task_list = list(task_list_inwork) + list(task_list_done)
context["task_list"] = task_list
return render(request, "crm_main/task_list_ajax.html", context)
elif request.POST.get("filter_type") == "on_month":
task_list_inwork = QueryByPeriod.queryOnmonth(all_tasks.filter(done=False))
print(task_list_inwork)
if task_list_inwork:
task_list_inwork = task_list_inwork.order_by('date')
else:
task_list_inwork = []
task_list_done = QueryByPeriod.queryOnmonth(all_tasks.filter(done=True))
print(task_list_done)
if task_list_done:
task_list_done = task_list_done.order_by('-date')
else:
task_list_done = []
task_list = list(task_list_inwork) + list(task_list_done)
context["task_list"] = task_list
return render(request, "crm_main/task_list_ajax.html", context)
def changeWeekCalendar(request):
context = {}
if request.method== "POST" and request.is_ajax():
if request.POST.get("go_to_week") == "prev":
print(request.POST.get("start_date"))
start_date = request.POST.get("start_date")
start_date = start_date.split("-")
min_datetime = datetime(int(start_date[0]), int(start_date[1]), int(start_date[2]))-timedelta(days=7)
week_periods = Week(event_list=EventList.eventsOnweek(min_datetime), date_time=min_datetime)
context["week_periods"] = week_periods
print(week_periods)
if request.POST.get("transfer_event"):
context["event_id"] = request.POST.get("event_id")
return render(request, "transfer_event_week_calendar.html", context)
return render(request, "crm_calendar/week_calendar.html", context)
elif request.POST.get("go_to_week") == "next":
print(request.POST.get("start_date"))
start_date = request.POST.get("start_date")
start_date = start_date.split("-")
min_datetime = datetime(int(start_date[0]), int(start_date[1]), int(start_date[2]))+timedelta(days=7)
week_periods = Week(event_list=EventList.eventsOnweek(min_datetime), date_time=min_datetime)
context["week_periods"] = week_periods
print(week_periods)
# print(request.POST.get("transfer_event"))
if request.POST.get("transfer_event"):
context["event_id"] = request.POST.get("event_id")
return render(request, "transfer_event_week_calendar.html", context)
return render(request, "crm_calendar/week_calendar.html", context)
def timeView(request):
min_datetime = datetime.date(datetime.now(timezone.utc)-timedelta(days=datetime.weekday(datetime.now(timezone.utc))))
max_datetime = min_datetime+timedelta(days=7)
# return HttpResponse(max_d)
return HttpResponse(datetime.combine(min_datetime, datetime.min.time()))
def updateTotalPaid(request):
clients = Client.objects.all()
pays = Pay.objects.all()
for client in clients:
client_pays = pays.filter(client=client)
client.total_paid =sum((pay.pay for pay in client_pays))
client.save()
return redirect("/crm/") | mit | -1,042,679,687,851,345,500 | 36.53227 | 145 | 0.583507 | false | 3.493015 | false | false | false |
merenlab/anvio | anvio/mcgclassifier.py | 2 | 35383 | # -*- coding: utf-8
# pylint: disable=line-too-long
"""
Classes to classify genes based on coverages across metagenomes.
anvi-mcg-classifier is the default client using this module
"""
import os
import anvio
import numpy as np
import pandas as pd
import matplotlib
# TODO: according to the warning, this call to set the backend is meaningless.
# I need to experiment to see what happens if I delete it.
matplotlib.use('pdf')
import anvio.utils as utils
import matplotlib.pyplot as plt
import anvio.terminal as terminal
import anvio.filesnpaths as filesnpaths
from scipy import odr as odr
from anvio.mcgops import MCGPlots
from anvio.errors import ConfigError, FilesNPathsError
from matplotlib.backends.backend_pdf import PdfPages
__author__ = "Developers of anvi'o (see AUTHORS.txt)"
__copyright__ = "Copyleft 2015-2018, the Meren Lab (http://merenlab.org/)"
__credits__ = []
__license__ = "GPL 3.0"
__version__ = anvio.__version__
__maintainer__ = "Alon Shaiber"
__email__ = "[email protected]"
run = terminal.Run()
progress = terminal.Progress()
pp = terminal.pretty_print
columns_for_samples_coverage_stats_dict = ['non_outlier_mean_coverage', 'non_outlier_coverage_std']
class MetagenomeCentricGeneClassifier:
def __init__(self, args, run=run, progress=progress):
self.run = run
self.progress = progress
A = lambda x: args.__dict__[x] if x in args.__dict__ else None
self.output_file_prefix = A('output_file_prefix')
self.alpha = A('alpha')
self.collection_name = A('collection_name')
self.bin_id = A('bin_id')
self.bin_ids_file_path = A('bin_ids_file')
self.exclude_samples = A('exclude_samples')
self.include_samples = A('include_samples')
self.outliers_threshold = A('outliers_threshold')
self.zeros_are_outliers = A('zeros_are_outliers')
self.gen_figures = A('gen_figures')
self.overwrite_output_destinations = A('overwrite_output_destinations')
self.split_coverage_values_per_nt_dict = None
self.gene_level_coverage_stats_dict = None
self.gene_level_coverage_stats_dict_of_dataframes = None
self.profile_db = {}
self.coverage_values_per_nt = None
self.gene_coverages = {}
self.gene_detections = None
self.samples = None
self.positive_samples = []
self.number_of_positive_samples = None
self.negative_samples = {}
self.number_of_negative_samples = None
self.gene_class_df = {}
self.samples_detection_information = {}
self.gene_presence_absence_in_samples_initiated = False
self.gene_presence_absence_in_samples = None
self.additional_description = ''
self.total_length = None
self.samples_coverage_stats_dicts_was_initiated = False
self.samples_coverage_stats_dicts = {}
self.non_outlier_indices = {}
self.gene_coverage_consistency_dict = {}
self.gene_coverage_consistency_dict_initiated = False
self.samples_to_exclude = set([])
self.samples_to_include = set([])
self.write_output_to_files = None
if self.exclude_samples:
# check that there is a file like this
filesnpaths.is_file_exists(self.exclude_samples)
self.samples_to_exclude = set([l.split('\t')[0].strip() for l in open(self.exclude_samples, 'rU').readlines()])
if not self.samples_to_exclude:
raise ConfigError("You asked to exclude samples, but provided an empty list.")
run.info('Excluding Samples', 'The following samples will be excluded: %s' % self.samples_to_exclude,)
if self.include_samples:
# check that there is a file like this
filesnpaths.is_file_exists(self.include_samples)
self.samples_to_include = set([l.split('\t')[0].strip() for l in open(self.include_samples, 'rU').readlines()])
if not self.samples_to_include:
raise ConfigError("You provided an empty list of samples to include.")
run.info('Including Samples', 'The following samples will be included: %s' % self.samples_to_include,)
# run sanity check on all input arguments
self.sanity_check()
def init(self, gene_level_coverage_stats_dict=None, split_coverage_values_per_nt_dict=None, additional_description=None):
""" setting the dictionaries for gene coverage stats and for split coverage per nucleotide"""
if gene_level_coverage_stats_dict is None and split_coverage_values_per_nt_dict is None:
raise ConfigError("MCGC needs at least one of the following in order to work: "
"gene_level_coverage_stats_dict or/and split_coverage_values_per_nt_dict")
# We want to make sure these are empty in case we use "init" multiple times for different bins
self.coverage_values_per_nt = None
self.gene_class_df = {}
self.samples_detection_information = {}
self.gene_presence_absence_in_samples_initiated = False
self.gene_presence_absence_in_samples = None
self.samples_coverage_stats_dicts_was_initiated = False
self.samples_coverage_stats_dicts = {}
self.non_outlier_indices = {}
self.gene_coverage_consistency_dict = {}
self.gene_coverage_consistency_dict_initiated = False
self.gene_level_coverage_stats_dict = gene_level_coverage_stats_dict
self.split_coverage_values_per_nt_dict = split_coverage_values_per_nt_dict
if additional_description:
self.additional_description = '-' + additional_description
try:
samples = next(iter(self.gene_level_coverage_stats_dict.values())).keys()
except:
samples = next(iter(self.split_coverage_values_per_nt_dict.values())).keys()
self.init_samples(samples)
def sanity_check(self):
"""Basic sanity check for class inputs"""
if self.output_file_prefix:
filesnpaths.is_output_file_writable(self.output_file_prefix + '-additional-layers.txt', ok_if_exists=self.overwrite_output_destinations)
try:
if self.gen_figures:
plot_dir = self.output_file_prefix + '-nucleotide-coverage-distribution-plots'
os.makedirs(plot_dir, exist_ok=self.overwrite_output_destinations)
except FileExistsError as e:
raise FilesNPathsError("%s already exists, if you would like to overwrite it, then use -W (see help menu)." % plot_dir)
# checking alpha
if not isinstance(self.alpha, float):
raise ConfigError("alpha value must be a type float.")
# alpha must be a min of 0 and smaller than 0.5
if self.alpha < 0 or self.alpha >= 0.5:
raise ConfigError("alpha must be a minimum of 0 and smaller than 0.5")
if self.exclude_samples and self.include_samples:
raise ConfigError("You cannot use both --include-samples and --exclude-samples! Please choose one.")
def init_samples(self, samples_list):
""" Create the set of samples according to user input and store it in self.samples"""
# remove the samples that should be excluded
samples = set(samples_list) - self.samples_to_exclude
if self.include_samples:
samples_to_include_that_are_not_there = self.samples_to_include - samples
if samples_to_include_that_are_not_there:
raise ConfigError("You requested to include some samples that are not in the profile database. Here are the samples in the profile database: %s. "
"And here are the samples you requested, and that are not there: %s" % (samples, samples_to_include_that_are_not_there))
samples = self.samples_to_include
self.samples = samples
def init_gene_level_coverage_stats_dict_of_dataframes(self):
""" converts the dictionaries of gene_level_coverage_stats_dict to dataframes"""
self.gene_level_coverage_stats_dict_of_dataframes = {}
for key in ['mean_coverage', 'detection', 'non_outlier_mean_coverage', 'non_outlier_coverage_std']:
# Only include samples that the user want
gene_stat = utils.get_values_of_gene_level_coverage_stats_as_dict(self.gene_level_coverage_stats_dict, key, as_pandas=True, samples_of_interest=self.samples)
self.gene_level_coverage_stats_dict_of_dataframes[key] = gene_stat
for key in ['gene_coverage_values_per_nt', 'non_outlier_positions']:
gene_stat = utils.get_values_of_gene_level_coverage_stats_as_dict(self.gene_level_coverage_stats_dict, key, as_pandas=False, samples_of_interest=self.samples)
self.gene_level_coverage_stats_dict_of_dataframes[key] = gene_stat
def init_samples_coverage_stats_dict(self):
""" populate the samples_coverage_stats_dict, and determine positive, negative, and ambiguous samples with the genome detection information
(--alpha, --genome-detection-uncertainty)
The samples_coverage_stats_dict dataframe is used to calculate the gene consistency information.
It is also used for plotting purposes (both for the nucleotide-coverage-distribution plots and the gene-consistency plots).
The coverage_values_per_nt is used to calculate the detection value (portion of nucleotides
covered) for a sample. Then, a cutoff for detection values is used to determine the presence
or absence of the genome in each sample.
"""
if self.coverage_values_per_nt is None:
self.coverage_values_per_nt = get_coverage_values_per_nucleotide(self.split_coverage_values_per_nt_dict, samples=self.samples)
total_length = len(next(iter(self.coverage_values_per_nt.values())))
MCG_samples_information_table_structure = ['samples', 'presence', 'detection', 'number_of_taxon_specific_core_detected']
# create an empty dataframe
samples_information = pd.DataFrame(index=self.samples, columns=MCG_samples_information_table_structure[1:])
positive_samples = []
negative_samples = []
self.progress.new("Finding nucleotide positions in samples with outlier coverage values")
progress.update('...')
num_samples, counter = len(self.samples), 1
detection = {}
total_length = len(next(iter(self.coverage_values_per_nt.values())))
self.samples_coverage_stats_dicts = pd.DataFrame(index=self.samples, columns=columns_for_samples_coverage_stats_dict)
for sample in self.samples:
if num_samples > 100 and counter % 100 == 0:
self.progress.update('%d of %d samples...' % (counter, num_samples))
# get the non-outlier information
non_outlier_indices, self.samples_coverage_stats_dicts.loc[sample,] = get_non_outliers_information(self.coverage_values_per_nt[sample], MAD_threshold=self.outliers_threshold, zeros_are_outliers=self.zeros_are_outliers)
self.non_outlier_indices[sample] = non_outlier_indices
number_of_non_outliers = len(self.non_outlier_indices[sample])
if anvio.DEBUG:
self.run.info_single('The mean and std of non-outliers in sample %s are: %s, %s respectively' % (sample, self.samples_coverage_stats_dicts['non_outlier_mean_coverage'][sample], self.samples_coverage_stats_dicts['non_outlier_coverage_std'][sample]))
self.run.info_single('The number of non-outliers is %s of %s (%.2f%%)' % (number_of_non_outliers, total_length, 100.0 * number_of_non_outliers / total_length))
detection[sample] = np.count_nonzero(self.coverage_values_per_nt[sample]) / total_length
samples_information['presence'][sample] = get_presence_absence_information(number_of_non_outliers/total_length, self.alpha)
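# in addition to the outlier-based call above, samples with detection at or below 0.5 are forced to negative below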
if detection[sample] <= 0.5:
samples_information['presence'][sample] = False
if samples_information['presence'][sample]:
positive_samples.append(sample)
elif samples_information['presence'][sample] == False:
negative_samples.append(sample)
samples_information['detection'][sample] = detection[sample]
counter += 1
self.positive_samples = positive_samples
self.number_of_positive_samples = len(self.positive_samples)
self.negative_samples = negative_samples
self.samples_detection_information = samples_information
self.run.warning('The number of positive samples is %s' % self.number_of_positive_samples)
self.run.warning('The number of negative samples is %s' % len(self.negative_samples))
self.samples_coverage_stats_dicts_was_initiated = True
self.progress.end()
def plot_nucleotide_coverage_distribution(self):
""" Creates a pdf file with the following plots for each sample the sorted nucleotide coverages \
(with the outliers in red and non-outliers in blue), and a histogram of coverages for the non-outliers"""
# Creating a dircetory for the plots. If running on bins, each bin would be in a separate sub-directory
if not self.samples_coverage_stats_dicts_was_initiated:
self.init_samples_coverage_stats_dict()
plot_dir = self.output_file_prefix + '-nucleotide-coverage-distribution-plots' + '/'
self.progress.new('Saving figures of taxon specific distributions to pdf')
progress.update('...')
number_of_finished = 0
for sample in self.positive_samples:
coverages_pdf_output = plot_dir + sample + self.additional_description + '-coverages.pdf'
pdf_output_file = PdfPages(coverages_pdf_output)
v = self.coverage_values_per_nt[sample]
# Using argsort so we can use the non_outlier indices
sorting_indices = np.argsort(v)
# we would need the reverse of the sorting of the indices to create the x axis for the non-outliers
reverse_sorted_indices = np.zeros(len(sorting_indices))
reverse_sorted_indices[sorting_indices] = range(len(reverse_sorted_indices))
# plotting the ordered coverage values (per nucleotide)
# the non-outliers are plotted in blue
# the outlier values are plotted in red
fig = plt.figure()
ax = fig.add_subplot(111, rasterized=True)
ax.set_xlabel('Nucleotide Number (ordered)')
ax.set_ylabel(r'$Nucleotide Coverage^2$')
x1 = range(len(v)) # FIXME: this shouldn't be in the loop (only here because I need to fix the mock data)
x2 = reverse_sorted_indices[self.non_outlier_indices[sample]]
#y2 = v[self.non_outlier_indices[sample]]
# plot all in red
ax.semilogy(x1,v[sorting_indices],'r.', rasterized=True)
# plot on top the non-outliers in blue
ax.semilogy(x2,v[self.non_outlier_indices[sample]],'b.', rasterized=True)
fig.suptitle("%s - sorted coverage values with outliers" % sample)
plt.savefig(pdf_output_file, format='pdf')
plt.close()
# plotting a histogram of the non-outliers
# This allows us to see whether they resemble a normal distribution
hist_range = (min(v[self.non_outlier_indices[sample]]),max(v[self.non_outlier_indices[sample]]))
# computing the number of bins so that the width of a bin is ~1/4 of the standard deviation
# FIXME: need to make it so the bins are only of integers (so the smallest bin is of width 1
# and that bins are integers)
number_of_hist_bins = np.ceil((hist_range[1] - hist_range[0]) / (self.samples_coverage_stats_dicts['non_outlier_coverage_std'][sample]/4)).astype(int) # setting the histogram bins to be of the width of a quarter of std
fig = plt.figure()
ax = fig.add_subplot(111, rasterized=True)
ax.set_xlabel('Coverage')
ax.hist(v[self.non_outlier_indices[sample]], number_of_hist_bins,hist_range, rasterized=True)
fig.suptitle("%s - histogram of non-outliers" % sample)
# adding the mean and std of the non-outliers as text to the plot
text_for_hist = u'$\mu = %d$\n $\sigma = %d$' %\
(self.samples_coverage_stats_dicts['non_outlier_mean_coverage'][sample],\
self.samples_coverage_stats_dicts['non_outlier_coverage_std'][sample])
ax.text(0.8, 0.9, text_for_hist, ha='center', va='center', transform=ax.transAxes)
plt.savefig(pdf_output_file, format='pdf')
plt.close()
# close the pdf file
pdf_output_file.close()
number_of_finished += 1
self.progress.update("Finished %d of %d" % (number_of_finished, self.number_of_positive_samples))
self.progress.end()
def init_gene_presence_absence_in_samples(self):
""" Determining presence and absence of genes in samples according to gene detection values."""
if not self.gene_level_coverage_stats_dict:
raise ConfigError("gene presence/absence in samples cannot be determined without a gene_level_coverage_stats_dict,\
but it seems that you don't have one. maybe you should run init()?")
if self.gene_level_coverage_stats_dict_of_dataframes is None:
self.init_gene_level_coverage_stats_dict_of_dataframes()
gene_callers_id = self.gene_level_coverage_stats_dict_of_dataframes['detection'].index
self.gene_presence_absence_in_samples = pd.DataFrame(index=gene_callers_id, columns=self.samples)
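# T turns a gene's per-nucleotide non-outlier vector into the fraction of non-outlier positions and applies the alpha cutoff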
T = lambda x: get_presence_absence_information(sum(x)/len(x), self.alpha)
self.progress.new('Computing gene presence/absence in samples')
progress.update('...')
genes_above_outlier_threshold = pd.DataFrame.from_dict(self.gene_level_coverage_stats_dict_of_dataframes['non_outlier_positions'], orient='index').applymap(T)
genes_with_detection_above_half = self.gene_level_coverage_stats_dict_of_dataframes['detection'].applymap(lambda x: x > 0.5)
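# a gene is called present in a sample only if both criteria hold (element-wise AND below)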
self.gene_presence_absence_in_samples = genes_above_outlier_threshold & genes_with_detection_above_half
self.gene_presence_absence_in_samples_initiated = True
self.progress.end()
def init_gene_coverage_consistency_information(self):
""" Perform orthogonal distance regression for each gene to determine coverage consistency.
The question that we are trying to ask is:
Does the non-outlier nt coverage of the gene in samples correlate to the non-outlier
nt coverage of the genome in samples?
The regression is performed only for positive samples.
For each gene, the regression is performed only according to samples in which
the gene is present (according to the detection criteria).
"""
if not self.samples_coverage_stats_dicts_was_initiated:
self.init_samples_coverage_stats_dict()
if not self.gene_presence_absence_in_samples_initiated:
self.init_gene_presence_absence_in_samples()
self.progress.new("Computing coverage consistency for all genes.")
progress.update('...')
gene_ids = self.gene_level_coverage_stats_dict_of_dataframes['mean_coverage'].index
num_genes, counter = len(gene_ids), 1
for gene_id in gene_ids:
if num_genes > 100 and counter % 100 == 0:
self.progress.update('%d of %d genes...' % (counter, num_genes))
# samples in which the gene is present
_samples = self.gene_presence_absence_in_samples.loc[gene_id,self.gene_presence_absence_in_samples.loc[gene_id,]==True].index
# mean and std of non-outlier nt in each sample
x = list(self.samples_coverage_stats_dicts.loc[_samples,'non_outlier_mean_coverage'].values)
if "non_outlier_coverage_std" in self.samples_coverage_stats_dicts:
# we only expect to have the sample coverage std in "full" mode
std_x = list(self.samples_coverage_stats_dicts.loc[_samples,'non_outlier_coverage_std'].values)
else:
std_x = None
if len(_samples) > 1:
# mean and std of non-outlier nt in the gene (in each sample)
y = self.gene_level_coverage_stats_dict_of_dataframes['non_outlier_mean_coverage'].loc[gene_id, _samples].values
std_y = self.gene_level_coverage_stats_dict_of_dataframes['non_outlier_coverage_std'].loc[gene_id, _samples].values
# performing the regression using ODR
_data = odr.RealData(x, y, std_x, std_y)
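# fit a line through the origin: gene coverage = slope * genome coverage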
_model = lambda B, c: B[0] * c
_odr = odr.ODR(_data, odr.Model(_model), beta0=[3])
odr_output = _odr.run()
# store results
self.gene_coverage_consistency_dict[gene_id] = {}
self.gene_coverage_consistency_dict[gene_id]['slope'] = odr_output.beta[0]
self.gene_coverage_consistency_dict[gene_id]['slope_std'] = odr_output.sd_beta[0]
self.gene_coverage_consistency_dict[gene_id]['slope_precision'] = odr_output.sd_beta[0] / odr_output.beta[0]
# compute R squared
f = lambda b: lambda _x: b*_x
R_squered = 1 - sum((np.apply_along_axis(f(odr_output.beta[0]),0,x)-y)**2) / sum((y-np.mean(y))**2)
# Check if converged
self.gene_coverage_consistency_dict[gene_id]['R_squered'] = R_squered
if odr_output.stopreason[0] == 'Sum of squares convergence':
self.gene_coverage_consistency_dict[gene_id]['converged'] = True
else:
self.gene_coverage_consistency_dict[gene_id]['converged'] = False
self.gene_coverage_consistency_dict_initiated = True
self.progress.end()
def get_gene_specificity(self, gene_id):
""" return True for gene if it occurs in positive samples and doesn't occur in negative samples.
Ambiguous occurrences are not counted as anything. This means that if a gene is ambiguously
occurring in a negative sample it could still be counted as "specific". It also means that
if a gene is only ambiguously occurring in positive samples then it would be considered
as "non-specific".
"""
if self.gene_class_df.loc[gene_id, 'occurence_in_positive_samples'] > 1 and self.gene_class_df.loc[gene_id, 'occurence_in_negative_samples'] == 0:
return True
else:
return False
# TODO: if there are no occurences of the gene at all, then we should maybe return None instead of False
def get_gene_coverage_consistency(self, gene_id):
""" return true if the gene's coverage is consistent accross positive samples, False otherwise."""
# TODO: make sure coverage_consistency_dict has been initiated
if self.gene_class_df.loc[gene_id, 'occurence_in_positive_samples'] == 0:
# if the gene doesn't occur in positive samples then there is no classification
return None
elif self.gene_class_df.loc[gene_id, 'occurence_in_positive_samples'] == 1:
# if the gene occurs only in one positive sample then return True.
# XXX: we might prefer to return None, we should consider this in the future.
return True
elif self.gene_coverage_consistency_dict[gene_id]['converged']:
# FIXME: this is where we use an arbitrary threshold again :-(
# if the slope precision is smaller than the threshold then the regression
# fit is considered accurate enough and the gene coverage is considered consistent.
return self.gene_coverage_consistency_dict[gene_id]['slope_precision'] < 0.5
else:
# The regression didn't converge so the coverage is probably not consistent.
return False
def determine_if_gene_is_core(self, gene_id, gene_specificity):
""" return True for core gene, False for accessory gene
If the gene is specific to positive samples, then core would be considered if it
occurs in all positive samples. Otherwise it would be considered core if it
occurs in all positive AND all negative samples.
Ambiguous occurrences of a gene are not considered (i.e. they are the same as absence).
"""
if gene_specificity:
# return True if the gene occurs in all positive samples.
return self.gene_class_df.loc[gene_id, 'occurence_in_positive_samples'] == len(self.positive_samples)
else:
# return True if the gene occurs in all positive AND all negative samples
return self.gene_class_df.loc[gene_id, 'occurence_in_positive_and_negative_samples'] == len(self.positive_samples) + len(self.negative_samples)
def init_gene_class_df(self):
""" generate dictionary with the class information per gene.
This dictionary can later be used to produce an additional-layer
text file for visualization.
"""
# TODO: make sure gene presence absence was calculated
if not self.gene_coverage_consistency_dict_initiated:
self.init_gene_coverage_consistency_information()
# XXX: only negative and positive samples are used here
# ambiguous samples are ignored as if they were never
# there. This is not ideal, but is easy to do.
gene_ids = self.gene_level_coverage_stats_dict_of_dataframes['mean_coverage'].index
self.gene_class_df = pd.DataFrame(index=gene_ids)
for gene_id in gene_ids:
# determine the number of occurrences in positive samples
self.gene_class_df.loc[gene_id, 'occurence_in_positive_samples'] = len([s for s in self.positive_samples if self.gene_presence_absence_in_samples.loc[gene_id,s] == True])
# determine the number of occurrences in negative samples
self.gene_class_df.loc[gene_id, 'occurence_in_negative_samples'] = len([s for s in self.negative_samples if self.gene_presence_absence_in_samples.loc[gene_id,s] == True])
# set the occurence_in_positive_and_negative_samples
self.gene_class_df.loc[gene_id, 'occurence_in_positive_and_negative_samples'] = self.gene_class_df.loc[gene_id, 'occurence_in_positive_samples'] + self.gene_class_df.loc[gene_id, 'occurence_in_negative_samples']
gene_specificity = self.get_gene_specificity(gene_id)
gene_coverage_consistency = self.get_gene_coverage_consistency(gene_id)
# determine core accessory
gene_is_core = self.determine_if_gene_is_core(gene_id, gene_specificity)
self.gene_class_df.loc[gene_id, 'specificity'] = gene_specificity
self.gene_class_df.loc[gene_id, 'coverage_consistency'] = gene_coverage_consistency
self.gene_class_df.loc[gene_id, 'core'] = gene_is_core
self.gene_class_df.loc[gene_id, 'MCG_class'] = get_class_string(gene_specificity, gene_coverage_consistency, gene_is_core)
def update_samples_information_from_gene_class_df(self):
# after running classification we sum up some information regarding
# the results of the classifier per sample
for sample in self.samples_detection_information:
TSC = [g for g in self.gene_class_df.index if (self.gene_class_df.loc[g,'coverage_consistency'] and \
self.gene_class_df.loc[g,'core'])]
self.samples_detection_information['number_of_taxon_specific_core_detected'] = len(TSC)
def gen_gene_consistency_plots(self):
""" generate and save the gene consistency plots for each gene."""
if not self.gene_coverage_consistency_dict_initiated:
self.init_gene_coverage_consistency_information()
gene_ids = self.gene_level_coverage_stats_dict_of_dataframes['mean_coverage'].index
num_genes, counter = len(gene_ids), 1
progress.new('Plotting gene consistency information')
progress.update('...')
for gene_id in gene_ids:
if num_genes > 100 and counter % 100 == 0:
self.progress.update('%d of %d genes...' % (counter, num_genes))
p = MCGPlots(self, gene_id, run=run, progress=progress)
p.plot()
progress.end()
def save_gene_class_information_in_additional_layers(self):
output_file_path = self.output_file_prefix + self.additional_description + '-additional-layers.txt'
self.gene_class_df.to_csv(output_file_path, sep='\t', index_label='gene_callers_id')
def save_samples_information(self):
samples_information_file_name = self.output_file_prefix + self.additional_description + '-samples-information.txt'
samples_information = pd.concat([self.samples_detection_information, self.samples_coverage_stats_dicts], axis=1, sort=True)
samples_information.to_csv(samples_information_file_name, sep='\t', index_label='samples')
def classify(self):
self.init_gene_class_df()
self.update_samples_information_from_gene_class_df()
if self.write_output_to_files:
self.save_gene_class_information_in_additional_layers()
self.save_samples_information()
if self.gen_figures:
# Create the plots for nucleotide-level coverage data per sample.
self.plot_nucleotide_coverage_distribution()
# generate plots for coverage consistency information for each gene.
self.gen_gene_consistency_plots()
def get_coverage_values_per_nucleotide(split_coverage_values_per_nt_dict, samples=None):
""" Helper function that accepts a split_coverage_values_per_nt_dict and returns a dictionary with
samples as keys and the concatenated coverage values for all splits as one array
"""
if not split_coverage_values_per_nt_dict:
raise ConfigError("You did not provide a split_coverage_values_per_nt_dict, and we need it...")
progress.new('Merging coverage values across splits')
progress.update('...')
d = {}
if samples is None:
samples = next(iter(split_coverage_values_per_nt_dict.values())).keys()
number_of_samples = len(samples)
number_of_finished = 0
# find the combined length of all contigs first
total_length = 0
for split in split_coverage_values_per_nt_dict:
total_length += len(split_coverage_values_per_nt_dict[split][next(iter(samples))])
for sample in samples:
# create an array of zero with the total length
# this is much faster than appending the vectors of splits
d[sample] = np.zeros(total_length)
pos = 0
for split in split_coverage_values_per_nt_dict:
split_values = split_coverage_values_per_nt_dict[split][sample]
split_len = len(split_values)
d[sample][pos:pos+split_len] = split_values
pos += split_len
#d[sample] = np.array(d[sample])
number_of_finished += 1
progress.update("Finished sample %d of %d" % (number_of_finished,number_of_samples))
progress.end()
return d
def get_non_outliers_information(v, MAD_threshold=2.5, zeros_are_outliers=False):
""" returns the non-outliers for the input pandas series using MAD"""
d = pd.Series(index=columns_for_samples_coverage_stats_dict)
outliers = utils.get_list_of_outliers(v, threshold=MAD_threshold, zeros_are_outliers=zeros_are_outliers)
non_outliers = np.logical_not(outliers)
non_outlier_indices = np.where(non_outliers)[0]
if not(len(non_outlier_indices)):
non_outlier_indices = np.array([])
d['non_outlier_mean_coverage'] = 0.0
d['non_outlier_coverage_std'] = 0.0
else:
d['non_outlier_mean_coverage'] = np.mean(v[non_outlier_indices])
d['non_outlier_coverage_std'] = np.std(v[non_outlier_indices])
return non_outlier_indices, d
# The order of the strings is very important since it is used in get_class_string
class_short_names = ['NNA', 'SNA', 'NCA',\
'SCA', 'NNC', 'SNC',\
'NCC', 'SCC']
class_long_names = ['Non-specific_Non-consistent_Accessory', 'Specific_Non-consistent_Accessory', 'Non-specific_Consistent_Accessory',\
'Specific_Consistent_Accessory', 'Non-specific_Non-consistent_Core', 'Specific_Non-consistent_Core',\
'Non-specific_Consistent_Core', 'Specific_Consistent_Core']
class_short_name_long_name_dict = dict(zip(class_short_names,class_long_names))
def get_class_long_name_from_short_name(short_name):
return class_short_name_long_name_dict[short_name]
def get_class_string(gene_specificity, gene_coverage_consistency, gene_is_core):
""" Takes the values of the three categories and returns a string to represent the class."""
value_list = [gene_specificity, gene_coverage_consistency, gene_is_core]
if None in value_list:
return 'NA'
# converting the list of booleans to a number
# this solution was taken from here: https://stackoverflow.com/a/4066807/7115450
index = sum(1<<i for i, b in enumerate(value_list) if b)
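# the resulting index (0-7) picks the matching entry from class_short_names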
return class_short_names[index]
def get_presence_absence_information(number_of_non_outliers, alpha):
""" Helper function to determine presence/absence according to a threshold."""
##### WHAT WE SHOULD DO IN THE FUTURE #####
# Arbitrary cut-offs are terrible.
# If we assume there are no accessory genes (we will get back to this later),
# then if the genome is present, we expect ALL of it to be present. Thus,
# if we had an unlimited number of reads, then we expect detection to be 1.
# as the number of reads gets smaller, the expected detection value is smaller.
# for a given genome size, a given read length, and the number of reads mapped to
# the genome, we can compute the following value: "what is the probability that
# the detection value will be greater than the actual detection value", if that
# probability is high, then that is a good sign that the genome is not present
# in the sample, and that any reads that we got are due to non-specific coverage.
# the same thing could be calculated for a given gene.
# we can create a measure for agreement between the mean coverage of a gene
# and the detection of the gene. It would simply be the probability that the
# coverage of the gene would exist with a detection that is higher than the
# actual detection of the gene. All we need for that is the read length,
# gene/genome length, and the expected genomic portion shared by two genomes that
# belong to the population in question.
if number_of_non_outliers >= 0.5 + alpha:
return True
elif np.sum(number_of_non_outliers) <= 0.5 - alpha:
return False
else:
return None
| gpl-3.0 | -6,849,942,037,077,747,000 | 51.575037 | 264 | 0.655795 | false | 3.819821 | false | false | false |
Venturi/cms | env/lib/python2.7/site-packages/cms/wizards/views.py | 2 | 5841 | # -*- coding: utf-8 -*-
import os
from django.forms import Form
from django.conf import settings
from django.core.files.storage import FileSystemStorage
from django.core.urlresolvers import NoReverseMatch
from django.db import transaction
from django.template.response import SimpleTemplateResponse
from django.utils.translation import get_language_from_request
try:
# This try/except block can be removed when we stop supporting Django 1.6
from django.contrib.formtools.wizard.views import SessionWizardView
except ImportError: # pragma: no cover
# This is fine from Django 1.7
from formtools.wizard.views import SessionWizardView
from cms.models import Page
from .wizard_pool import wizard_pool
from .forms import (
WizardStep1Form,
WizardStep2BaseForm,
step2_form_factory,
)
class WizardViewMixin(object):
language_code = None
@transaction.atomic()
def dispatch(self, request, *args, **kwargs):
self.language_code = get_language_from_request(request, check_path=True)
response = super(WizardViewMixin, self).dispatch(
request, *args, **kwargs)
return response
def get_form_kwargs(self):
kwargs = super(WizardViewMixin, self).get_form_kwargs()
kwargs.update({'wizard_language': self.language_code})
return kwargs
class WizardCreateView(WizardViewMixin, SessionWizardView):
template_name = 'cms/wizards/start.html'
file_storage = FileSystemStorage(
location=os.path.join(settings.MEDIA_ROOT, 'wizard_tmp_files'))
form_list = [
('0', WizardStep1Form),
# Form is used as a placeholder form.
# the real form will be loaded after step 0
('1', Form),
]
def get_current_step(self):
"""Returns the current step, if possible, else None."""
try:
return self.steps.current
except AttributeError:
return None
def is_first_step(self, step=None):
step = step or self.get_current_step()
return step == '0'
def is_second_step(self, step=None):
step = step or self.get_current_step()
return step == '1'
def get_context_data(self, **kwargs):
context = super(WizardCreateView, self).get_context_data(**kwargs)
if self.is_second_step():
context['wizard_entry'] = self.get_selected_entry()
return context
def get_form(self, step=None, data=None, files=None):
if step is None:
step = self.steps.current
# We need to grab the page from pre-validated data so that the wizard
# has it to prepare the list of valid entries.
if data:
page_key = "{0}-page".format(step)
self.page_pk = data.get(page_key, None)
else:
self.page_pk = None
if self.is_second_step(step):
self.form_list[step] = self.get_step_2_form(step, data, files)
return super(WizardCreateView, self).get_form(step, data, files)
def get_form_kwargs(self, step=None):
"""This is called by self.get_form()"""
kwargs = super(WizardCreateView, self).get_form_kwargs()
kwargs['wizard_user'] = self.request.user
if self.is_second_step(step):
kwargs['wizard_page'] = self.get_origin_page()
else:
page_pk = self.page_pk or self.request.GET.get('page', None)
if page_pk and page_pk != 'None':
kwargs['wizard_page'] = Page.objects.filter(pk=page_pk).first()
else:
kwargs['wizard_page'] = None
return kwargs
def get_form_initial(self, step):
"""This is called by self.get_form()"""
initial = super(WizardCreateView, self).get_form_initial(step)
if self.is_first_step(step):
initial['page'] = self.request.GET.get('page')
return initial
def get_step_2_form(self, step=None, data=None, files=None):
entry_form_class = self.get_selected_entry().form
step_2_base_form = self.get_step_2_base_form()
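# combine the generic step-2 base form with the form class declared by the selected wizard entry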
form = step2_form_factory(
mixin_cls=step_2_base_form,
entry_form_class=entry_form_class,
)
return form
def get_step_2_base_form(self):
"""
Returns the base form to be used for step 2.
This form is subclassed dynamically by the form defined per module.
"""
return WizardStep2BaseForm
def get_template_names(self):
if self.is_first_step():
template_name = self.template_name
else:
template_name = self.get_selected_entry().template_name
return template_name
def done(self, form_list, **kwargs):
"""
This step only runs if all forms are valid. Simply emits a simple
template that uses JS to redirect to the newly created object.
"""
form_two = list(form_list)[1]
instance = form_two.save()
url = self.get_success_url(instance)
if not url:
page = self.get_origin_page()
if page:
try:
url = page.get_absolute_url(self.language_code)
except NoReverseMatch:
url = '/'
else:
url = '/'
return SimpleTemplateResponse("cms/wizards/done.html", {"url": url})
def get_selected_entry(self):
data = self.get_cleaned_data_for_step('0')
return wizard_pool.get_entry(data['entry'])
def get_origin_page(self):
data = self.get_cleaned_data_for_step('0')
return data.get('page')
def get_success_url(self, instance):
entry = self.get_selected_entry()
success_url = entry.get_success_url(
obj=instance,
language=self.language_code
)
return success_url
| gpl-2.0 | 7,294,099,598,501,229,000 | 32.1875 | 80 | 0.612395 | false | 3.878486 | false | false | false |
rtevans/tacc_stats_old | analyze/process_pickles/tspl.py | 1 | 3978 | import cPickle as pickle
import numpy
import glob, os, stat, time, datetime
import re
class TSPLException(Exception):
def __init__(self,arg):
self.value=arg
print self.value
class TSPLBase:
def __init__(self,file,k1,k2):
self.f=open(file)
self.j=pickle.load(self.f)
self.f.close()
try:
self.wayness=int(re.findall('\d+',self.j.acct['granted_pe'])[0])
except KeyError:
try:
self.wayness=self.j.acct['cores']/self.j.acct['nodes']
except ZeroDivisionError:
print "Read zero nodes, assuming 16 way job"
self.wayness=16
try:
self.owner=self.j.acct['owner']
except KeyError:
self.owner=self.j.acct['uid']
self.numhosts=len(self.j.hosts.keys())
if self.numhosts == 0:
raise TSPLException('No hosts')
elif 'amd64_core' in self.j.hosts.values()[0].stats:
self.pmc_type='amd64'
elif 'intel_pmc3' in self.j.hosts.values()[0].stats:
self.pmc_type='intel'
else:
raise TSPLException('No PMC data for: ' + self.j.id)
if self.pmc_type in k1:
self.k1=k1[self.pmc_type]
self.k2=k2[self.pmc_type]
else:
self.k1=k1
self.k2=k2
self.t=(self.j.times-self.j.times[0])
if len(k1) != len(k2):
raise TSPLException('Lengths don\'t match')
self.index=[ self.j.get_schema(self.k1[i])[self.k2[i]].index
for i in range(len(self.k1))]
g=self.j.hosts[self.j.hosts.keys()[0]]
self.size=len(g.stats[self.k1[0]].values()[0])
d=datetime.datetime.fromtimestamp(self.j.acct['end_time'])
self.end_date=d.strftime('%Y-%m-%d %H:%M:%S')
self.title='ID: %(ID)s, u: %(u)s, N: %(name)s, D: %(date)s, NH: %(nh)d' % \
{ 'ID' : self.j.id,'u': self.owner,
'name': self.j.acct['name'], 'nh' : self.numhosts,
'date': self.end_date }
# Create an array of dictionaries of lists initialized and constructed using
# derived class methods for the keys of interest.
# self.index embeds the location of self.k2 in the schema
self.data=[]
for i in range(len(self.k1)):
self.data.append({})
for k in self.j.hosts.keys():
h=self.j.hosts[k]
self.data[i][k]=self.data_init()
for s in h.stats[self.k1[i]].values():
self.data_assign(self.data[i][k],s[:,self.index[i]])
# Initialize to an empty array and accumulate with appending
def data_init(self):
return []
def data_assign(self,d,v):
d.append(v)
# Generate a label for title strings
def label(self,k1,k2,mod=1.):
u=''
if mod==1e9 or mod == 1024.**3:
u='G'
elif mod==1e6 or mod == 1024.**2:
u='M'
l=k1 + ' ' + k2
s=self.j.get_schema(k1)[k2]
if not s.unit is None:
l+=' ' + u + s.unit
if len(l) > 10:
l=k1 + '\n' + k2
s=self.j.get_schema(k1)[k2]
if not s.unit is None:
l+=' ' + u + s.unit
return l
# These iterator functions iterate linearly over the array of dictionaries. We
# should probably create a sorted version, but this works for now.
def __iter__(self):
self.ind=-1
self.a=len(self.data)
self.b=len(self.data[0].keys())
self.c=len(self.data[0][self.data[0].keys()[0]])
return(self)
def next(self):
if self.ind == self.a*self.b*self.c-1:
raise StopIteration
self.ind += 1
inds=numpy.unravel_index(self.ind,(self.a,self.b,self.c))
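# unravel the flat counter into (key pair, host, per-host entry) indices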
k=self.data[inds[0]].keys()[inds[1]]
return self.data[inds[0]][k][inds[2]]
# Load a job file and sum a socket-based or core-based counter into
# time-dependent arrays for each key pair. Takes a tacc stats pickle file and
# two lists of keys.
class TSPLSum(TSPLBase):
def __init__(self,file,k1,k2):
TSPLBase.__init__(self,file,k1,k2)
# Initialize with a zero array and accumulate to the first list element with
# a sum
def data_init(self):
return [numpy.zeros(self.size)]
def data_assign(self,d,v):
d[0]+=v
| lgpl-2.1 | 5,560,491,141,564,937,000 | 27.618705 | 80 | 0.599799 | false | 2.895197 | false | false | false |
wgsyd/wgtf | src/core/testing/plg_python27_unit_test/resources/Scripts/python27_test/__init__.py | 2 | 7087 | # Send print to logger
import scriptoutputwriter
# Access C++ module from Python
import reflectiontest
'''
Type testing
https://docs.python.org/2/library/types.html
Types not tested yet:
types.CodeType
types.MethodType
types.UnboundMethodType
types.BuiltinFunctionType
types.BuiltinMethodType
types.ModuleType
types.FileType
types.XRangeType
types.SliceType
types.EllipsisType
types.TracebackType
types.FrameType
types.BufferType
types.DictProxyType
types.NotImplementedType
types.GetSetDescriptorType
types.MemberDescriptorType
types.StringTypes
'''
class OldCallableClassTest:
def __call__( self, value ):
return "Callable class test " + value
class NewCallableClassTest( object ):
def __call__( self, value ):
return "Callable class test " + value
class DescriptorTest( object ):
def __init__( self, value ):
self.value = value
def __get__( self, obj, objtype ):
return self.value
def __set__( self, obj, value ):
self.value = value
def firstn(n):
'''Generator test'''
num = 0
while num < n:
yield num
num += 1
class ValueObjectTest( object ):
'''
Test object for reflected property paths.
The reflection system can get a path for "childTest.tupleTest[0]" only if
the value type is a Python object.
Basic types like int and string do not have path info stored on them.
'''
def __init__( self, value ):
self.value = value
class ChildObjectTest( object ):
def __init__( self ):
self.stringTest = "Child"
self.tupleTest = (ValueObjectTest( 0 ),
ValueObjectTest( 1 ),
ValueObjectTest( 2 ),
ValueObjectTest( 3 ) )
self.listTest = [ValueObjectTest( 0 ),
ValueObjectTest( 1 ),
ValueObjectTest( 2 ),
ValueObjectTest( 3 )]
self.dictTest = {ValueObjectTest( 'Bacon' ) : ValueObjectTest( 0 )}
class BadComparison( object ):
def __cmp__( self, other ):
raise Exception( "Bad comparison" )
class OldClassTest:
'''Test of old-style classes'''
'''
Properties exposed to GUI.
In the format "attribute name" : "meta data name"
'''
_metaData = {
"floatTest" : "MetaSlider",
}
# Enable for testing
#def __setattr__( self, name, value ):
# '''
# Hook for notifying the GUI
# '''
# print "setattr", self, name
# self.__dict__[ name ] = value
# Enable for testing
#def __delattr__( self, name ):
# '''
# Hook for notifying the GUI
# '''
# print "delattr", self, name
# del object.name
classIntTest = 1
def __init__( self ):
self.noneTest = None
self.boolTest = True
self.intTest = 1
self.longTest = 1L
self.floatTest = 1.0
#self.complexTest = 1.0j
self.stringTest = "Spam"
self.unicodeTest = u"Spam"
self.childTest = ChildObjectTest()
self.tupleTest = (1, 2, 3, "Spam")
self.listTest = [0, 1, 2, 3]
self.dictTest = {'Bacon': 1, 'Ham': 0}
self.functionTest1 = \
lambda testString: "Function test " + testString
self.functionTest2 = OldCallableClassTest()
self.functionTest3 = NewCallableClassTest()
#self.generatorTest = firstn
self.badComparison = BadComparison()
# Old-style classes only
self.typeTest1 = type( OldClassTest )
self.typeTest2 = type( self.typeTest1 )
self.classTest1 = OldClassTest
self.classTest2 = self.__class__
self.instanceTest = type( self )
def methodTest( self, testString ):
return "Method test " + testString
@classmethod
def classMethodTest( cls, testString ):
return "Class method test " + testString
@staticmethod
def staticMethodTest( testString ):
return "Static method test " + testString
class ConstructorTest1:
def __init__( self, value ):
self.constructorTest = "Constructor class test " + value
class ConstructorTest2:
pass
def updateValues( self ):
OldClassTest.classIntTest = OldClassTest.classIntTest + 1
self.noneTest = None
self.boolTest = not self.boolTest
self.intTest = self.intTest + 1
self.longTest = self.longTest + 1
self.floatTest = self.floatTest + 1.0
self.stringTest = "Spam" + repr( self.intTest )
self.unicodeTest = u"Spam" + repr( self.intTest )
class NewClassTest( object ):
'''Test of new-style classes'''
'''
Properties exposed to GUI.
In the format "attribute name" : "meta data name"
'''
_metaData = {
"floatTest" : "MetaSlider",
"readOnlyPropertyTest1" : "MetaReadOnly",
"readOnlyPropertyTest2" : "MetaReadOnly",
}
# Enable for testing
#def __setattr__( self, name, value ):
# '''
# Hook for notifying the GUI
# Note: descriptors will not be caught by this hook.
# '''
# print "setattr", self, name
# super( NewClassTest, self ).__setattr__( name, value )
# Enable for testing
#def __delattr__( self, name ):
# '''
# Hook for notifying the GUI
# Note: descriptors will not be caught by this hook.
# '''
# print "delattr", self, name
# del object.name
classIntTest = 1
def __init__( self ):
self.noneTest = None
self.boolTest = True
self.intTest = 1
self.longTest = 1L
self.floatTest = 1.0
#self.complexTest = 1.0j
self.stringTest = "Spam"
self.unicodeTest = u"Spam"
self.childTest = ChildObjectTest()
self.tupleTest = (1, 2, 3, "Spam")
self.listTest = [0, 1, 2, 3]
self.dictTest = {'Bacon': 1, 'Ham': 0}
self.functionTest1 = \
lambda testString: "Function test " + testString
self.functionTest2 = OldCallableClassTest()
self.functionTest3 = NewCallableClassTest()
#self.generatorTest = firstn
self.badComparison = BadComparison()
# New-style classes only
self.typeTest1 = type( NewClassTest )
self.typeTest2 = type( self.typeTest1 )
self.classTest1 = NewClassTest
self.classTest2 = self.__class__
self.instanceTest = type( self )
self.propertyTest1_ = "Read-only Property"
self.propertyTest2_ = "Read-only Property"
self.descriptorTest = DescriptorTest( "Descriptor property" )
def methodTest( self, testString ):
return "Method test " + testString
def getReadOnlyPropertyTest1( self ):
'''Only works for new-style classes'''
return self.propertyTest1_
readOnlyPropertyTest1 = property( getReadOnlyPropertyTest1 )
@property
def readOnlyPropertyTest2( self ):
'''Only works for new-style classes'''
return self.propertyTest2_
@classmethod
def classMethodTest( cls, testString ):
return "Class method test " + testString
@staticmethod
def staticMethodTest( testString ):
return "Static method test " + testString
class ConstructorTest1( object ):
def __init__( self, value ):
self.constructorTest = "Constructor class test " + value
class ConstructorTest2( object ):
pass
def updateValues( self ):
NewClassTest.classIntTest = NewClassTest.classIntTest + 1
self.noneTest = None
self.boolTest = not self.boolTest
self.intTest = self.intTest + 1
self.longTest = self.longTest + 1
self.floatTest = self.floatTest + 1.0
self.stringTest = "Spam" + repr( self.intTest )
self.unicodeTest = u"Spam" + repr( self.intTest )
def run():
print "~~ Begin test"
print "~~ Python to C++"
oldClassTest = OldClassTest()
reflectiontest.oldStyleConversionTest( oldClassTest )
newClassTest = NewClassTest()
reflectiontest.newStyleConversionTest( object=newClassTest )
print "~~ Passed"
print "~~ End test"
| bsd-3-clause | 4,597,229,874,851,165,000 | 24.220641 | 74 | 0.698462 | false | 3.101532 | true | false | false |
dutchbot/FoscamBackupper | foscambackup/config.py | 1 | 1124 | """ Holds configuration class """
from foscambackup.util import helper
from foscambackup.constant import Constant
import foscambackup.util.file_helper as file_helper
class Config:
""" Hold the config options for use in program """
host = ""
port = 0
username = ""
password = ""
model = ""
currently_recording = False
def __str__(self):
return str(self.__dict__)
def __eq__(self, other):
return self.__dict__ == other.__dict__
def get_model_serial(self, read_file):
""" get the model serial from file """
data = read_file.readlines()
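# replace the last line of the settings file with the current model serial, then write everything back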
data[len(data)-1] = 'model_serial:' + self.model
file_helper.open_write_file(Constant.settings_file, self.write_model_serial, data)
def write_model_serial(self, write_file, args):
""" Write the data to file """
write_file.writelines(args['data'])
def write_model_to_conf(self, model):
""" Retrieves the model_serial folder name and writes to conf """
self.model = model
file_helper.open_readonly_file(Constant.settings_file, self.get_model_serial)
| gpl-3.0 | -8,347,966,591,396,616,000 | 32.058824 | 90 | 0.630783 | false | 3.836177 | false | false | false |
ac001/moe | project/app/moe/paste/handlers.py | 1 | 3999 | # -*- coding: utf-8 -*-
"""
moe.paste.handlers
~~~~~~~~~~~~~~~~~~
Handlers for a really simple pastebin.
:copyright: 2010 by tipfy.org.
:license: BSD, see LICENSE.txt for more details.
"""
from tipfy import NotFound, request, Response, url_for, redirect_to
from tipfy.ext.i18n import _
from moe.base.handlers import AreaRequestHandler
from moe.paste.models import Paste, PasteForm
from moe.paste.highlighting import highlight
class PasteBaseHandler(AreaRequestHandler):
"""Base class for the pastebin."""
def __init__(self, app, request):
AreaRequestHandler.__init__(self, app, request)
# Set a flag in context for menus.
self.request.context['current_app'] = 'paste'
# Initialize list of breadcrumbs.
self.breadcrumbs = []
def get_breadcrumb(self, endpoint, text, **kwargs):
return (url_for(endpoint, area_name=self.area.name, **kwargs),
text)
def add_breadcrumb(self, endpoint, text, **kwargs):
self.breadcrumbs.append(self.get_breadcrumb(endpoint, text, **kwargs))
def render_response(self, filename, **values):
self.request.context['breadcrumbs'] = [
self.get_breadcrumb('home/index', _('Home')),
self.get_breadcrumb('paste/index', _('Paste'))] + self.breadcrumbs
return super(PasteBaseHandler, self).render_response(filename, **values)
class PasteNewHandler(PasteBaseHandler):
"""Displays a paste form and saves a new paste."""
form = None
def get(self, **kwargs):
context = {
'form': self.form or PasteForm(language=kwargs.pop('language',
'python')),
}
return self.render_response('paste/new.html', **context)
def post(self, **kwargs):
self.form = PasteForm(request.form)
if self.form.validate():
if self.current_user:
user_key = str(self.current_user.key())
else:
user_key = None
language_code = request.form.get('language')
code_raw = request.form.get('code', u'')
code = highlight(code_raw, language_code)
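# keep both the raw source and the highlighted markup; the raw view handler serves code_raw as plain text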
values = {
'area_key': str(self.area.key()),
'user_key': user_key,
'code_raw': code_raw,
'code': code,
'language': language_code,
}
paste = Paste(**values)
paste.put()
self.set_message('success', _('The paste was saved.'), flash=True)
return redirect_to('paste/view', paste_id=paste.id,
area_name=self.area.name)
else:
self.set_form_error(_('Ooops, code is empty! Please post '
'some lines.'))
return self.get()
class PasteViewHandler(PasteBaseHandler):
"""Displays a paste."""
def get(self, **kwargs):
paste_id = kwargs.pop('paste_id', None)
if not paste_id:
raise NotFound()
paste = Paste.get_by_id(paste_id)
if not paste:
raise NotFound()
self.add_breadcrumb('paste/view',
_('Paste #%(paste_id)s', paste_id=paste.id),
paste_id=paste.id)
form = PasteForm(code=paste.code_raw, language=paste.language)
context = {
'paste': paste,
'form': form,
}
return self.render_response('paste/view.html', **context)
class PasteViewRawHandler(PasteBaseHandler):
"""Displays a paste in raw mode, as text."""
def get(self, **kwargs):
paste_id = kwargs.pop('paste_id', None)
if not paste_id:
raise NotFound()
paste = Paste.get_by_id(paste_id)
if not paste:
raise NotFound()
return Response(paste.code_raw)
class PasteListHandler(PasteBaseHandler):
"""Not implemented."""
def get(self, **kwargs):
context = {
}
return self.render_response('paste/new.html', **context)
| bsd-3-clause | -6,296,441,456,934,692,000 | 29.295455 | 80 | 0.572143 | false | 3.834132 | false | false | false |
Hashi4/vmdgadgets | sample/sine_curve/circle.py | 1 | 1684 | import sys
sys.path.append('../../vmdgadgets')
import vmdutil
from vmdutil import vmddef
def replace_controlpoints(cp_all, cp, index):
for i in range(4):
cp_all[i][index] = cp[i]
return cp_all
sine1 = vmdutil.SINE1_CONTROLPOINTS # sin, [1x, 1y, 2x, 2y]
sine2 = vmdutil.SINE2_CONTROLPOINTS # 1 - cos
cp_all = vmddef.BONE_LERP_CONTROLPOINTS # [1x[X,Y,Z,R], 1y[],2x[],2y[]]
replace_controlpoints(cp_all, sine1, 2) # Z: sin
replace_controlpoints(cp_all, sine2, 0) # X: (1 - cos)
interpolation1 = vmddef.bone_controlpoints_to_vmdformat(cp_all)
cp_all = vmddef.BONE_LERP_CONTROLPOINTS
replace_controlpoints(cp_all, sine1, 0) # X: sin
replace_controlpoints(cp_all, sine2, 2) # Z: (1 - cos)
interpolation2 = vmddef.bone_controlpoints_to_vmdformat(cp_all)
bone = vmddef.BONE_SAMPLE
bone_frames = []
initial_frame = bone._replace(position=(30, 0, 0))
bone_frames.append(initial_frame)
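# keyframes every 30 frames at the four points of a radius-30 circle in the XZ plane; the sine/(1-cos) interpolation curves approximate circular motion between them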
# frame 30: X:sine2, Z:sine1, (0, 0, 30)
bone_frames.append(
bone._replace(
frame=30, position=(0, 0, 30), interpolation=interpolation1))
# frame 60: X:sine1, Z:sine2, (-30, 0, 0)
bone_frames.append(
bone._replace(
frame=60, position=(-30, 0, 0), interpolation=interpolation2))
# frame 90 X:sine2, Z:sine1, (0, 0, -30)
bone_frames.append(
bone._replace(
frame=90, position=(0, 0, -30), interpolation=interpolation1))
# frame 120 X:sine1, Z:sine2, (30, 0, 0)
bone_frames.append(
bone._replace(
frame=120, position=(30, 0, 0), interpolation=interpolation2))
vmdout = vmdutil.Vmdio()
vmdout.header = vmdout.header._replace(
model_name='circle_sample'.encode(vmddef.ENCODING))
vmdout.set_frames('bones', bone_frames)
vmdout.store('circle.vmd')
| apache-2.0 | -1,859,937,820,300,628,200 | 32.019608 | 72 | 0.687648 | false | 2.51719 | false | false | false |
kivhift/pu | src/pu/msp430/titxt.py | 1 | 3622 | #
# Copyright (c) 2013 Joshua Hughes <[email protected]>
#
import array
import cStringIO
import re
from pu.utils import is_an_integer
class Section(object):
def __init__(self, addr, buffer):
self.start_addr = addr
self.buffer = buffer
self.end_addr = addr + len(buffer) - 1
def __add__(self, other):
if self.end_addr + 1 != other.start_addr:
raise ValueError('Sections are not adjacent!')
return self.__class__(self.start_addr, self.buffer + other.buffer)
def __cmp__(self, other):
ssa = self.start_addr
osa = other.start_addr
if ssa < osa:
return -1
elif ssa > osa:
return 1
else:
return 0
def __len__(self):
return len(self.buffer)
def __str__(self):
ret = cStringIO.StringIO()
ret.write('@{:04x}\n'.format(self.start_addr))
i = 0
for b in self.buffer:
ret.write('{}{:02x}'.format(' ' if i else '', b))
i += 1
if 16 == i:
i = 0
ret.write('\n')
if i:
ret.write('\n')
return ret.getvalue()
class FirmwareImage(object):
def __init__(self, infile = None):
self.section = []
if infile: self.parse(infile)
def __str__(self):
ret = cStringIO.StringIO()
for sec in self.section:
ret.write(str(sec))
ret.write('q\n')
return ret.getvalue()
def __getitem__(self, key):
if is_an_integer(key):
for sec in self.section:
if key >= sec.start_addr and key <= sec.end_addr:
key -= sec.start_addr
return sec.buffer[key]
else:
start = key.start
if start is None:
raise IndexError('Must give start index.')
stop = key.stop
for sec in self.section:
if start >= sec.start_addr and start <= sec.end_addr:
start -= sec.start_addr
if stop is not None:
stop -= sec.start_addr
return sec.buffer[slice(start, stop, key.step)]
raise IndexError('Given index is invalid.')
def merge_sections(self):
self.section.sort()
sec = self.section
i = 0
while i < (len(sec) - 1):
if sec[i].end_addr + 1 == sec[i + 1].start_addr:
sec[i] += sec.pop(i + 1)
continue
i += 1
def parse(self, infile):
quit_re = re.compile('^[qQ]$')
addr_re = re.compile('^@[0-9a-fA-F]{4}$')
bytes_re = re.compile('^[0-9a-fA-F]{2}(\s+[0-9a-fA-F]{2}){15}$')
section = []
addr = None
buf = None
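# _add_sec flushes the section collected so far (if any); it runs when a new @address line or the final 'q' is seen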
def _add_sec():
if buf is not None:
section.append(Section(addr, buf))
with open(infile, 'rb') as inf:
for i, line in enumerate(inf):
ln = line.strip()
if quit_re.match(ln):
_add_sec()
break
elif addr_re.match(ln):
_add_sec()
addr = int(ln[1:], 16)
buf = array.array('B')
elif bytes_re.match(ln):
buf.extend([int(x, 16) for x in ln.split()])
else:
raise ValueError('Invalid line @ %d: %r' % (i, line))
if not quit_re.match(ln):
raise ValueError('Ran out of file without finding "q".')
self.section = section
| mit | 7,021,850,990,924,910,000 | 28.933884 | 74 | 0.46963 | false | 3.882101 | false | false | false |
TurkuNLP/Finnish-dep-parser | morpho-sd2ud/210-remove-feats.py | 1 | 3243 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import codecs
from morphoconllu import read_conllu
from tools import warn
usage = '%s IN OUT' % os.path.basename(__file__)
VERB_TAGS = set(['VERB', 'AUX'])
def remove_Adv_CASE(word):
# Remove case feature from adverbs. Omorfi is only expected to
# assign the CASE feature value Dis (distributive) to adverbs, and
# only inconsistently. Distributive is not recognized as a Finnish
# case by ISK (http://scripta.kotus.fi/visk/sisallys.php?p=81).
# Decided to remove this altogether, resulting in a consistent
# treatment where no adjective has case.
# https://github.com/TurkuNLP/UniversalFinnish/issues/17
if word.cpostag != 'ADV':
return
fmap = word.feat_map()
if 'CASE' not in fmap:
return
value = fmap['CASE']
if value == 'Dis':
word.remove_feat('CASE', 'Dis')
else:
warn('unexpected CASE value for ADV: ' + value)
def remove_Inf1_CASE_Lat(word):
# Remove case feature with value Lat (lative) from infinitive
# verbs. Omorfi follows a dated analysis where the base form of
# the A-infinitive (Infinitive 1) is termed lative. Lative is not
# recognized by ISK (http://scripta.kotus.fi/visk/sisallys.php?p=81,
# see also http://scripta.kotus.fi/visk/sisallys.php?p=120 Huom 1).
# Decided to remove this case. Note that no information is removed,
# as the Lat value for case fully coincides with Inf1 and no other
# case in Omorfi.
# https://github.com/TurkuNLP/UniversalFinnish/issues/44
fmap = word.feat_map()
if 'CASE' not in fmap:
return
value = fmap['CASE']
if value != 'Lat':
return
if word.cpostag not in VERB_TAGS:
warn('unexpected CPOSTAG with CASE=Lat: ' + word.cpostag)
word.remove_feat('CASE', 'Lat')
def remove_Inf5(word):
# Remove Inf5 feature from verbs. Omorfi generates Inf5 *very*
# rarely (once in TDT) and inconsistently, and the "maisillaan"
# form termed as the "5th infinitive" is not considered as such by
# ISK (http://scripta.kotus.fi/visk/sisallys.php?p=120).
fmap = word.feat_map()
if 'INF' not in fmap:
return
value = fmap['INF']
if value != 'Inf5':
return
if word.cpostag not in VERB_TAGS:
warn('unexpected CPOSTAG with INF=Inf5: ' + word.cpostag)
word.remove_feat('INF', 'Inf5')
remove_funcs = [
remove_Adv_CASE,
remove_Inf1_CASE_Lat,
remove_Inf5,
]
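# each function above mutates the word in place; remove_feats applies all of them to every word in a sentence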
def remove_feats(sentence):
for w in sentence.words():
for remove_func in remove_funcs:
remove_func(w)
def process(inf, outf):
for s in read_conllu(inf):
if not isinstance(s, basestring): # skip comments and sentence breaks
remove_feats(s)
print >> outf, unicode(s)
def main(argv=None):
if argv is None:
argv = sys.argv
if len(argv) != 3:
print >> sys.stderr, 'Usage:', usage
return 1
infn, outfn = argv[1], argv[2]
with codecs.open(infn, encoding='utf-8') as inf:
with codecs.open(outfn, 'w', encoding='utf-8') as outf:
process(inf, outf)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| gpl-2.0 | 3,964,848,423,123,441,000 | 27.447368 | 77 | 0.635831 | false | 3.163902 | false | false | false |
llarsson/image-renamer | image-renamer.py | 1 | 5349 | #!/usr/bin/env python
import tempfile
import datetime
import logging
import os
import ftputil # external dependency, must be installed via pip or similar
# CONFIGURATION START
# Set this to the hostname of the FTP server
FTP_ADDRESS = "some.host.name.here.com"
FTP_USERNAME = "anonymous" # change these to a user that can upload files!
FTP_PASSWORD = "anonymous@"
# List the folders we are supposed to work with. Remember to make them
# absolute, i.e., have them start with a slash.
FOLDERS = [
"/an/absolute/path",
"/some/other/absolute path",
"/note/that/spaces are not escaped",
"/at least not for windows hosts",
]
# The label we want to give all files. Will be used for naming, e.g., if set
# to "recent", most recent file will be called "most-recent".
TARGET_LABEL = "recent"
# What file types are we working with?
FILE_TYPE = ".jpg"
# Define interesting times of day here. The most recent file for each period
# will be found and uploaded to the server, with a name constructed as:
# TARGET_LABEL-PERIOD(name).FILE_TYPE
# e.g., "recent-morning.jpg".
# Make the list empty if there are no interesting times of day that should
# be dealt with particularly.
# Periods can overlap. This is intentional: if you want to find the most
# recent file overall, make a period that covers the entire day like in
# the example below, and call it "overall". A file can then match both
# a "morning" period and the "overall" period, for example.
PERIODS = [
dict(name="morning", start="04:00", end="09:59"),
dict(name="midday", start="10:00", end="14:59"),
dict(name="overall", start="00:00", end="23:59"),
dict(name="evening", start="15:00", end="22:00")
]
# CONFIGURATION END
class FileInfo(object):
def __init__(self, mtime=0.0, path=None, name=None):
self.mtime = mtime
self.path = path
self.name = name
def download_file(remote_abspath, local_abspath):
"Download the remote file to the local path, both absolute"
with ftputil.FTPHost(FTP_ADDRESS, FTP_USERNAME, FTP_PASSWORD) as ftp:
ftp.download(remote_abspath, local_abspath)
def upload_file(local_abspath, remote_abspath):
"Upload the local file to the remote path, both absolute"
with ftputil.FTPHost(FTP_ADDRESS, FTP_USERNAME, FTP_PASSWORD) as ftp:
ftp.upload(local_abspath, remote_abspath)
def within_period(modification_time, period):
"Checks if the given modification time is within the given period"
start_hour, start_minute = period["start"].split(":")
end_hour, end_minute = period["end"].split(":")
# TODO Can we always assume UTC works here?
mtime = datetime.datetime.utcfromtimestamp(modification_time).time()
start = datetime.time(hour=int(start_hour), minute=int(start_minute))
end = datetime.time(hour=int(end_hour), minute=int(end_minute))
result = start <= mtime and mtime <= end
logging.debug("%s within interval %s -- %s? %s",
str(mtime), period["start"], period["end"],
str(result))
return result
def construct_file_name(period):
"Construct file name for a given period."
return TARGET_LABEL + "-" + period["name"] + FILE_TYPE
def find_newest_files(folder):
"""Return absolute paths of newest files on server.
This function will descend into subdirectories of the folder.
:param folder: The folder on the FTP server where we shall find the
newest file. We will descend into subdirectories of this folder.
:type folder: str
:returns: A list of FileInfo objects, one per period that matched at
least one file, each holding the newest file's path and mtime.
"""
newest_in_period = {period["name"]: FileInfo(name=construct_file_name(period))
for period in PERIODS}
file_names_to_avoid = [construct_file_name(period) for period in PERIODS]
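# skip the files this script itself uploads, so they are never picked as the "newest" source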
with ftputil.FTPHost(FTP_ADDRESS, FTP_USERNAME, FTP_PASSWORD) as ftp:
for dirpath, dirnames, files in ftp.walk(folder):
for f in [fname for fname in files
if fname.endswith(FILE_TYPE)
and fname not in file_names_to_avoid]:
fullpath_filename = dirpath + "/" + f
statinfo = ftp.stat(fullpath_filename)
mtime = statinfo.st_mtime
logging.debug("%s modified at %f",
fullpath_filename,
mtime)
for period in PERIODS:
if within_period(mtime, period):
nip = newest_in_period[period["name"]]
if mtime > nip.mtime:
nip.path = fullpath_filename
nip.mtime = mtime
newest_files = [fi for fi in newest_in_period.itervalues() if fi.path]
return newest_files
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
for folder in FOLDERS:
temporary_directory = tempfile.mkdtemp()
for fi in find_newest_files(folder):
local_abspath = os.path.join(temporary_directory, fi.name)
logging.info("File under %s (%s) saved temporarily as %s",
folder, fi.path, local_abspath)
download_file(fi.path, local_abspath)
upload_file(local_abspath, folder + "/" + fi.name)
| apache-2.0 | -8,128,057,505,430,299,000 | 33.509677 | 82 | 0.640494 | false | 3.87328 | false | false | false |
googlefonts/cu2qu | tools/ufo_benchmark.py | 2 | 1191 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
import os
import random
from benchmark import run_benchmark
MAX_ERR_EM = 0.002
DATADIR = os.path.join(
os.path.dirname(__file__), os.path.pardir, 'tests', 'data')
def setup_fonts_to_quadratic_defcon():
from defcon import Font
return [[Font(os.path.join(DATADIR, 'RobotoSubset-Regular.ufo'))],
MAX_ERR_EM]
def main():
run_benchmark(
'ufo_benchmark', 'cu2qu.ufo', 'fonts_to_quadratic',
setup_suffix='defcon', repeat=10)
if __name__ == '__main__':
random.seed(1)
main()
| apache-2.0 | -6,132,140,593,097,859,000 | 27.357143 | 74 | 0.701931 | false | 3.544643 | false | false | false |
mYstar/PEA4JSP | multistartnsga2.py | 1 | 4432 | """ Implementation of a multistart NSGA-II Genetic Algorithm using JSPEval
to evaluate the individuals. Each MPI rank runs an independent NSGA-II and the
final populations are gathered on rank 0.
"""
import random
import time
import numpy as np
from mpi4py import MPI
from deap import creator, base, tools, algorithms
from deap110 import emo
from JSPEval.jspsolution import JspSolution
from JSPEval.jspmodel import JspModel
from JSPEval.jspeval import JspEvaluator
import params
import operators
import output
# --- Setup ---
# MPI environment
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
print(size)
# read parameters
term_m, term_v, pop_size, f_out, f_model, _, _,\
mut_prob, mut_eta, xover_prob, xover_eta = params.get()
# start multiple runs
start = time.time()
# -- setup algorithm --
# init evaluator
model = JspModel(f_model)
evaluator = JspEvaluator(model)
# init GA
fitness_size = evaluator.metrics_count()
weights = tuple([-1 for _ in range(fitness_size)])
creator.create("FitnessMin", base.Fitness, weights=weights)
creator.create("Individual", JspSolution, fitness=creator.FitnessMin)
toolbox = base.Toolbox()
toolbox.register("values",
tools.initRepeat,
list,
random.random,
model.solution_length())
toolbox.register("individual", # alias
operators.init_individual, # generator function
creator.Individual, # individual class
model, # model to use
toolbox.values) # value generator
toolbox.register("population",
tools.initRepeat,
list,
toolbox.individual)
toolbox.register("mate", operators.crossover, eta=xover_eta)
toolbox.register("mutate", operators.mutation, indpb=mut_prob, eta=mut_eta)
toolbox.register("select", tools.selNSGA2)
# init first population
population = toolbox.population(n=pop_size)
fits = map(lambda x: operators.calc_fitness(x, evaluator), population)
for fit, i_pop in zip(fits, population):
i_pop.fitness.values = fit
# --- main GA loop ---
gen = 0
terminate = False
term_reqs = []
for node in range(size):
term_reqs.append(comm.irecv(source=node, tag=0))
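# one non-blocking receive per rank: a message on tag 0 means some rank reached its termination criterion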
while not terminate:
gen += 1
# -- execute genetic operators --
# selection
emo.assignCrowdingDist(population)
offspring = tools.selTournamentDCD(population, len(population))
# crossover and mutation
offspring = algorithms.varAnd(
offspring,
toolbox,
cxpb=xover_prob,
        mutpb=1.0)  # mutate every offspring; the per-gene rate (mut_prob) is applied inside operators.mutation via indpb
# fitness calculation
fits = map(
lambda x: operators.calc_fitness(x, evaluator),
offspring)
# -- select next population --
# assign fitness
for fit, i_off in zip(fits, offspring):
i_off.fitness.values = fit
# selection
offspring.extend(population)
population = toolbox.select(
offspring,
len(population))
terminate = operators.termination(term_m, term_v, gen, population)
# send a termination signal to all others
# needed for makespan termination
if terminate:
print('rank: {} termination, sending signal'.format(rank))
for node in range(size):
comm.isend(True, node, tag=0)
# test for termination of others
_, node_term, _ = MPI.Request.testany(term_reqs)
if node_term:
print('rank: {}, termination signal received'.format(rank))
terminate = terminate | node_term
# --- process results ---
# collect results
sol_values = np.empty([pop_size, model.solution_length()])
fit_values = np.empty([pop_size, fitness_size])
for i, ind in zip(range(pop_size), population):
sol_values[i] = ind.get_values()
fit_values[i] = ind.fitness.values
sol_all = None
fit_all = None
if rank == 0:
sol_all = np.empty([pop_size * size, model.solution_length()])
fit_all = np.empty([pop_size * size, fitness_size])
comm.Gather(sol_values, sol_all, root=0)
comm.Gather(fit_values, fit_all, root=0)
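# Gather stacks the pop_size rows contributed by every rank into the
# (pop_size * size) buffers that were allocated on rank 0 only.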
if rank == 0:
all_pop = toolbox.population(n=pop_size * size)
for i, ind in zip(range(pop_size * size), all_pop):
ind.set_values(sol_all[i])
ind.fitness.values = fit_all[i]
duration = time.time() - start
output.write_pareto_front(all_pop, f_out)
with open('{}.time'.format(f_out), 'a') as myfile:
myfile.write('{}\n'.format(duration))
| apache-2.0 | -4,041,595,893,003,669,000 | 28.157895 | 78 | 0.661327 | false | 3.446345 | false | false | false |
duncanwp/python_for_climate_scientists | course_content/optimisation_example/subset_by_region/subset_aerosolCCI_by_region_slowest.py | 2 | 2028 | import cis
import numpy as np
files = ["../../resources/WorkshopData2016/AerosolCCI/20080411002335-ESACCI-L2P_AEROSOL-AER_PRODUCTS-AATSR-ENVISAT-ORAC_31962-fv03.04.nc",
"../../resources/WorkshopData2016/AerosolCCI/20080411020411-ESACCI-L2P_AEROSOL-AER_PRODUCTS-AATSR-ENVISAT-ORAC_31963-fv03.04.nc",
"../../resources/WorkshopData2016/AerosolCCI/20080411034447-ESACCI-L2P_AEROSOL-AER_PRODUCTS-AATSR-ENVISAT-ORAC_31964-fv03.04.nc",
"../../resources/WorkshopData2016/AerosolCCI/20080411052523-ESACCI-L2P_AEROSOL-AER_PRODUCTS-AATSR-ENVISAT-ORAC_31965-fv03.04.nc",
"../../resources/WorkshopData2016/AerosolCCI/20080411070559-ESACCI-L2P_AEROSOL-AER_PRODUCTS-AATSR-ENVISAT-ORAC_31966-fv03.04.nc"]
def subset_africa(ungridded_data):
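    """Return only the points of ``ungridded_data`` that fall inside two rough
    lat/lon boxes covering northern and southern Africa.

    This point-by-point Python loop is the slow reference implementation (as
    the file name suggests); vectorised variants are expected elsewhere in the
    optimisation example.
    """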
northern_africa_lat_bounds = -20, 50
northern_africa_lon_bounds = 0, 40
southern_africa_lat_bounds = -40, 0
southern_africa_lon_bounds = 10, 50
africa_points = np.zeros(ungridded_data.shape, dtype=np.bool)
for i, d in enumerate(ungridded_data.data):
if ((northern_africa_lat_bounds[0] < ungridded_data.lat.points[i] < northern_africa_lat_bounds[1]) and
(northern_africa_lon_bounds[0] < ungridded_data.lon.points[i] < northern_africa_lon_bounds[1])) or \
((southern_africa_lat_bounds[0] < ungridded_data.lat.points[i] < southern_africa_lat_bounds[1]) and
(southern_africa_lon_bounds[0] < ungridded_data.lon.points[i] < southern_africa_lon_bounds[1])):
africa_points[i] = True
return ungridded_data[africa_points]
def subset_aerosol_cci_over_africa():
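    """Read AOD550 from each Aerosol CCI orbit file, subset it to the African
    region and stack the per-file results into a single data object."""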
from subset_by_region.utils import stack_data_list
subsetted_data = []
for f in files:
d = cis.read_data(f, "AOD550")
subsetted_data.append(subset_africa(d))
subset = stack_data_list(subsetted_data)
return subset
if __name__ == '__main__':
import matplotlib.pyplot as plt
subset = subset_aerosol_cci_over_africa()
subset.plot(xaxis='longitude', yaxis='latitude')
plt.show()
| gpl-3.0 | -8,998,306,818,519,919,000 | 49.7 | 138 | 0.697239 | false | 2.63035 | false | false | false |
cokelaer/msdas | src/msdas/multimidas.py | 1 | 2875 | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 3 16:40:58 2014
@author: cokelaer
"""
from .midas import XMIDAS
class MultiMIDAS(object):
"""Data structure to store multiple instances of MIDAS files
    You can read a MIDAS file that contains several cell lines
    and access the individual MIDAS instances using their cell line name:
.. doctest::
>>> mm = MultiMIDAS(cnodata("EGFR-ErbB_PCB2009.csv"))
>>> mm.cellLines
['HepG2', 'PriHu']
>>> mm["HepG2"].namesCues
['TGFa', 'MEK12', 'p38', 'PI3K', 'mTORrap', 'GSK3', 'JNK']
where the list of cell line names is available in the :attr:`cellLines`
attribute.
    Or you can start from an empty list and add instances later on using the
    :meth:`addMIDAS` method.
"""
def __init__(self, filename=None):
""".. rubric:: constructor
:param str filename: a valid MIDAS file (optional)
"""
self._midasList = []
self._names = []
if filename:
self.readMIDAS(filename)
def addMIDAS(self, midas):
"""Add an existing MIDAS instance to the list of MIDAS instances
.. doctest::
>>> from cellnopt.core import *
>>> m = MIDASReader(cnodata("MD-ToyPB.csv"))
>>> mm = MultiMIDAS()
>>> mm.addMIDAS(m)
"""
if midas.celltypeName not in self._names:
self._midasList.append(midas)
self._names.append(midas.celltypeName)
else:
raise ValueError("midsa with same celltype already in the list")
def readMIDAS(self, filename):
"""read MIDAS file and extract individual cellType/cellLine
This function reads the MIDAS and identifies the cellLines. Then, it
creates a MIDAS instance for each cellLines and add the MIDAS instance to the
:attr:`_midasList`. The MIDAS file can then be retrieved using their
cellLine name, which list is stored in :attr:`cellLines`.
:param str filename: a valid MIDAS file containing any number of cellLines.
"""
raise NotImplementedError
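        # NOTE: the two lines below are currently unreachable; splitting a
        # multi-cell-line file into per-cell-line instances is not implemented.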
m = XMIDAS(filename)
self.addMIDAS(m)
def _get_cellLines(self):
names = [x.celltypeName for x in self._midasList]
return names
cellLines = property(_get_cellLines,
doc="return names of all cell lines, which are the MIDAS instance identifier ")
def __getitem__(self, name):
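        """Return the MIDAS instance whose cell line name matches ``name``."""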
index = self.cellLines.index(name)
return self._midasList[index]
def plot(self):
"""Call plot() method for each MIDAS instances in different figures
More sophisticated plots to easily compare cellLines could be
implemented.
"""
for i,m in enumerate(self._midasList):
from pylab import figure, clf
figure(i+1)
clf()
m.plot()
| gpl-3.0 | 2,209,642,933,023,126,000 | 28.336735 | 88 | 0.604174 | false | 3.787879 | false | false | false |
MDAnalysis/mdanalysis | testsuite/MDAnalysisTests/coordinates/test_mmtf.py | 1 | 2899 | # -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
import pytest
import numpy as np
from numpy.testing import (
assert_almost_equal,
)
from MDAnalysisTests.datafiles import MMTF, MMTF_gz, MMTF_skinny2
from MDAnalysis.coordinates.MMTF import MMTFReader
class TestMMTFReader(object):
@pytest.fixture(scope='class')
def r(self):
return MMTFReader(MMTF)
def test_read_frame_size(self, r):
assert r.ts.n_atoms == 512
def test_read_positions(self, r):
assert_almost_equal(r.ts.positions[0],
np.array([-0.798, 12.632, 23.231]),
decimal=4)
assert_almost_equal(r.ts.positions[-1],
np.array([10.677, 15.517, 11.1]),
decimal=4)
def test_velocities(self, r):
assert not r.ts.has_velocities
def test_forces(self, r):
assert not r.ts.has_forces
def test_len(self, r):
# should be single frame
assert len(r) == 1
class TestMMTFReaderGZ(object):
@pytest.fixture(scope='class')
def r(self):
return MMTFReader(MMTF_gz)
def test_read_frame_size(self, r):
assert r.ts.n_atoms == 1140
def test_read_positions(self, r):
assert_almost_equal(r.ts.positions[0],
np.array([38.428, 16.440, 28.841]),
decimal=4)
assert_almost_equal(r.ts.positions[-1],
np.array([36.684, 27.024, 20.468]),
decimal=4)
def test_velocities(self, r):
assert not r.ts.has_velocities
def test_forces(self, r):
assert not r.ts.has_forces
def test_len(self, r):
# should be single frame
assert len(r) == 1
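# MMTF_skinny2 is presumably a minimal test file without unit cell information,
# so the reader should report dimensions as None rather than a zero-filled box.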
def test_dimensionless():
r = MMTFReader(MMTF_skinny2)
assert r.ts.dimensions is None
| gpl-2.0 | 533,040,876,382,829,630 | 31.211111 | 79 | 0.619524 | false | 3.117204 | true | false | false |