| repo_name (string, len 5-100) | path (string, len 4-254) | copies (string, len 1-5) | size (string, len 4-7) | content (string, len 681-1M) | license (string, 15 classes) | hash (int64, -9,223,351,895,964,839,000 to 9,223,298,349B) | line_mean (float64, 3.5-100) | line_max (int64, 15-1k) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class) | ratio (float64, 1.5-8.15) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
betoesquivel/CIE | app/models.py | 1 | 1574 | #!flask/bin/python
from app import db
from werkzeug.security import generate_password_hash, check_password_hash
COMISSIONER_ROLE = 0
MARKETING_ROLE = 1
ADMIN_ROLE = 2
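# These role constants are the values stored in Staff.role.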

class Staff(db.Model):
    __tablename__ = 'staff'
    id = db.Column(db.Integer, primary_key=True)
    studentNumber = db.Column(db.Integer)
    name = db.Column(db.String(64, convert_unicode=True))
    email = db.Column(db.String(64, convert_unicode=True), unique=True)
    # Werkzeug password hashes exceed 54 characters; 128 leaves headroom.
    pwdhash = db.Column(db.String(128, convert_unicode=True))
    role = db.Column(db.Integer)
    council = db.relationship('Council', uselist=False, backref='staff')

    def __init__(self, name, studentNumber, email, password, role):
        self.name = name
        self.studentNumber = studentNumber
        self.email = email.lower()
        self.set_password(password)
        self.role = role

    def set_password(self, password):
        self.pwdhash = generate_password_hash(password)

    def check_password(self, password):
        return check_password_hash(self.pwdhash, password)

    def __repr__(self):
        return '<Staff %r %r>' % (self.role, self.name)

class Council(db.Model):
    __tablename__ = 'council'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64, convert_unicode=True), unique=True)
    description = db.Column(db.String(1000, convert_unicode=True))
    website = db.Column(db.String(64, convert_unicode=True), unique=True)
    comissionerId = db.Column(db.Integer, db.ForeignKey('staff.id'))

    def __repr__(self):
        return '<Council %r>' % (self.name)
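
# Usage sketch (assumes an initialised Flask-SQLAlchemy `db` session; names and
# values below are hypothetical):
#   staff = Staff('Ada Lovelace', 12345, '[email protected]', 's3cret', ADMIN_ROLE)
#   db.session.add(staff)
#   db.session.commit()
#   staff.check_password('s3cret')  # -> True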
| mit | 419,842,146,019,052,300 | 33.977778 | 75 | 0.662643 | false | 3.356077 | false | false | false |
mybluevan/gospel-preaching | gospel_preaching/simple_orders/admin.py | 1 | 1882 | from .models import Product, Order, OrderItem
from django.contrib import admin
from datetime import date
from .calc_fields import CalcAdmin

class OrderItemInline(admin.TabularInline):
    model = OrderItem
    extra = 10

class ProductAdmin(admin.ModelAdmin):
    prepopulated_fields = {"slug": ("title",)}
    search_fields = ['title', 'description']
    list_display = ('title', 'price', 'qoh')

class OrderAdmin(CalcAdmin):
    calc_defs = {'shipping_cost': ('ship_cost', '$%#.2f'), 'total': ('total', '$%#.2f')}
    fields = ('user', 'ship_date', 'ship_name', 'ship_addr', 'ship_city', 'ship_state', 'ship_zip', 'phone', 'email', 'instructions', 'shipped', 'paid', 'payment_method')
    calc_fields = fields + ('shipping_cost', 'total')
    date_hierarchy = 'date'
    list_display = ('date', 'ship_name', 'phone', 'email', 'shipped', 'paid', 'total')
    list_filter = ('shipped', 'paid')
    actions = ['mark_shipped', 'mark_paid']
    inlines = [OrderItemInline]
    save_on_top = True

    def mark_shipped(self, request, queryset):
        rows_updated = queryset.update(shipped=True, ship_date=date.today())
        if rows_updated == 1:
            message_bit = "1 order was"
        else:
            message_bit = "%s orders were" % rows_updated
        self.message_user(request, "%s successfully marked as shipped." % message_bit)
    mark_shipped.short_description = "Mark selected orders as shipped"

    def mark_paid(self, request, queryset):
        rows_updated = queryset.update(paid=True)
        if rows_updated == 1:
            message_bit = "1 order was"
        else:
            message_bit = "%s orders were" % rows_updated
        self.message_user(request, "%s successfully marked as paid." % message_bit)
    mark_paid.short_description = "Mark selected orders as paid"
admin.site.register(Product, ProductAdmin)
admin.site.register(Order, OrderAdmin)
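
# Note: both bulk actions use queryset.update(), which bypasses Order.save()
# and post_save signals; acceptable here since only flag fields change.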
| gpl-3.0 | -6,975,373,972,628,733,000 | 42.767442 | 170 | 0.643464 | false | 3.591603 | false | false | false |
google-research/google-research | tunas/schema_io.py | 1 | 8358 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Utilities for schema serialization and deserialization.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
from typing import Any, ByteString, Callable, Dict, Iterable, Optional, Sequence, Text, Tuple, Type, TypeVar, Union
import six
from six.moves import map
from six.moves import zip
from tunas import schema
# Primitive types (like integers or strings) that are supported in both Python
# and JSON.
_PRIMITIVE_TYPES = (int, float) + six.string_types
# We don't have a good way to identify namedtuples to the Python type system,
# except that they're subclasses of tuple.
_NamedTupleTypeVar = TypeVar('_NamedTupleTypeVar', bound=Tuple[Any, Ellipsis])
# Registration interface for namedtuple serialization and deserialization.
# typing.Type[] is not currently supported inside type annotation comments,
# so we use the annotation Any instead.
_NAMEDTUPLE_NAME_TO_CLASS = {} # type: Dict[Text, Any]
_NAMEDTUPLE_CLASS_TO_NAME = {} # type: Dict[Any, Text]
_NAMEDTUPLE_CLASS_TO_DEFAULTS = {} # type: Dict[Any, Dict[Text, Any]]


def register_namedtuple(
    name,
    deprecated_names=None,
    defaults=None,
):
  """Register a namedtuple class for serialization/deserialization.

  Namedtuples that are registered can be serialized and deserialized using
  the utilities in this file.

  Example usage:
    @schema_io.register_namedtuple('package.C')
    class C(collections.namedtuple('C', ['field1'])):
      pass

    # Later in the code
    serialized = schema_io.serialize(C('foo'))  # returns a serialized string
    restored = schema_io.deserialize(serialized)  # returns a namedtuple

  Args:
    name: String, globally unique identifier for the registered class.
    deprecated_names: Optional list of Strings containing deprecated names for
        the registered class.
    defaults: Optional dict of default argument values, keyed by field name.
        This makes it possible to add new fields to a namedtuple while
        preserving backwards-compatibility for old objects which are loaded
        from disk.

  Returns:
    A class decorator.
  """
  def decorator(cls):
    """Register a new class instance."""
    if name in _NAMEDTUPLE_NAME_TO_CLASS:
      raise ValueError('Duplicate name in registry: {:s}'.format(name))
    if cls in _NAMEDTUPLE_CLASS_TO_NAME:
      raise ValueError('Duplicate class in registry: {:s}'.format(name))
    if not issubclass(cls, tuple) or not hasattr(cls, '_fields'):
      raise ValueError(
          'Cannot register class {}.{} because it is not a namedtuple'
          .format(cls.__module__, cls.__name__))

    _NAMEDTUPLE_NAME_TO_CLASS[name] = cls
    _NAMEDTUPLE_CLASS_TO_NAME[cls] = name

    if deprecated_names:
      for deprecated_name in deprecated_names:
        if deprecated_name in _NAMEDTUPLE_NAME_TO_CLASS:
          raise ValueError(
              'Duplicate name registered: {:s}'.format(deprecated_name))
        _NAMEDTUPLE_NAME_TO_CLASS[deprecated_name] = cls

    if defaults:
      for field in sorted(defaults.keys()):
        if field not in cls._fields:
          raise ValueError(
              'Field {} appears in defaults but not in class {}.{}'
              .format(field, cls.__module__, cls.__name__))
      _NAMEDTUPLE_CLASS_TO_DEFAULTS[cls] = dict(defaults)

    return cls
  return decorator


def namedtuple_class_to_name(cls):
  if cls not in _NAMEDTUPLE_CLASS_TO_NAME:
    raise KeyError(
        'Namedtuple class {}.{} is not registered. Did you forget to use a '
        '@schema_io.register_namedtuple() decorator?'
        .format(cls.__module__, cls.__name__))
  return _NAMEDTUPLE_CLASS_TO_NAME[cls]


def namedtuple_name_to_class(name):
  if name not in _NAMEDTUPLE_NAME_TO_CLASS:
    raise KeyError(
        'Namedtuple name {} is not registered. Did you forget to use a '
        '@schema_io.register_namedtuple() decorator?'
        .format(repr(name)))
  return _NAMEDTUPLE_NAME_TO_CLASS[name]


def _to_json(structure):
  """Convert a nested datastructure to pure JSON."""
  if structure is None or isinstance(structure, _PRIMITIVE_TYPES):
    return structure
  elif isinstance(structure, schema.OneOf):
    result = ['oneof']
    result.append(['choices', _to_json(structure.choices)])
    result.append(['tag', _to_json(structure.tag)])
    return result
  elif isinstance(structure, list):
    result = ['list']
    result.extend(map(_to_json, structure))
    return result
  elif isinstance(structure, tuple) and hasattr(structure, '_fields'):
    result = ['namedtuple:' + namedtuple_class_to_name(structure.__class__)]
    result.extend(zip(structure._fields, map(_to_json, structure)))
    return result
  elif isinstance(structure, tuple):
    result = ['tuple']
    result.extend(map(_to_json, structure))
    return result
  elif isinstance(structure, dict):
    result = ['dict']
    for k in sorted(structure):
      result.append((_to_json(k), _to_json(structure[k])))
    return result
  else:
    raise ValueError('Unrecognized type: {}'.format(type(structure)))


def _namedtuple_from_json(cls, kv_pairs):
  """Convert a JSON data structure to a namedtuple."""
  # Start with a list of default keyword arguments.
  if cls in _NAMEDTUPLE_CLASS_TO_DEFAULTS:
    kwargs = dict(_NAMEDTUPLE_CLASS_TO_DEFAULTS[cls])
  else:
    kwargs = dict()

  # Add all the user-provided key-value pairs.
  for key, value in kv_pairs:
    if key not in cls._fields:
      raise ValueError(
          'Invalid field: {} for class: {}, permitted values: {}'
          .format(key, cls, cls._fields))
    kwargs[key] = value

  # Make sure we've provided all the arguments we need.
  for field in cls._fields:
    if field not in kwargs:
      raise ValueError(
          'Missing field: {} for class: {}'.format(field, cls))

  # Now wrap the key-value pairs in a namedtuple.
  return cls(**kwargs)


def _from_json(structure):
  """Convert a pure JSON data structure to one with namedtuples and OneOfs."""
  if structure is None or isinstance(structure, _PRIMITIVE_TYPES):
    return structure
  elif isinstance(structure, list):
    assert structure
    typename = structure[0]
    structure = structure[1:]
    if typename == 'dict':
      return {_from_json(k): _from_json(v) for (k, v) in structure}
    elif typename.startswith('namedtuple:'):
      cls = namedtuple_name_to_class(typename[len('namedtuple:'):])
      kv_pairs = [(_from_json(k), _from_json(v)) for (k, v) in structure]
      return _namedtuple_from_json(cls, kv_pairs)
    elif typename == 'oneof':
      keys = tuple(_from_json(k) for (k, v) in structure)
      assert keys == ('choices', 'tag'), keys
      return schema.OneOf(*(_from_json(v) for (k, v) in structure))
    elif typename == 'list':
      return list(map(_from_json, structure))
    elif typename == 'tuple':
      return tuple(map(_from_json, structure))
    else:
      raise ValueError('Unsupported __type: {}'.format(typename))
  else:
    raise ValueError('Unrecognized JSON type: {}'.format(type(structure)))


def serialize(structure):
  """Serialize a nested data structure to a string.

  Args:
    structure: A recursive data structure, possibly consisting of integers,
        strings, tuples, dictionaries, and namedtuples. Namedtuples must be
        registered with the @register_namedtuple decorator above.

  Returns:
    A json-serialized string.
  """
  return json.dumps(_to_json(structure), sort_keys=True, indent=2)


def deserialize(serialized):
  """Convert a serialized string to a nested data structure.

  Args:
    serialized: A json-serialized string returned by serialize().

  Returns:
    A (possibly nested) data structure.
  """
  return _from_json(json.loads(serialized))
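

# Round-trip sketch (hypothetical namedtuple `C`, registered as in the
# register_namedtuple() docstring):
#   blob = serialize({'outer': C('foo')})
#   restored = deserialize(blob)  # -> {'outer': C(field1='foo')}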
| apache-2.0 | 1,837,907,914,271,440,400 | 33.53719 | 115 | 0.68581 | false | 3.922102 | false | false | false |
ragupta-git/ImcSdk | imcsdk/imcfilter.py | 1 | 7963 | # Copyright 2015 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import pyparsing as pp
from . import imcgenutils
from . import imccoreutils
from .imcfiltertype import OrFilter, AndFilter, NotFilter
from .imcbasetype import FilterFilter
types = {"eq": "EqFilter",
         "ne": "NeFilter",
         "ge": "GeFilter",
         "gt": "GtFilter",
         "le": "LeFilter",
         "lt": "LtFilter",
         "re": "WcardFilter"
         }
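# The keys above are the type keywords accepted by the filter grammar; the
# values are the ImcSdk filter class names loaded by create_basic_filter().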


class ParseFilter(object):
    """
    Supporting class to parse filter expressions.
    """

    def __init__(self, class_id, is_meta_classid):
        self.class_id = class_id
        self.is_meta_classid = is_meta_classid

    def parse_filter_obj(self, toks):
        """
        Parses a single (property, value[, type][, flag]) term into a filter
        object.
        """
        # print toks[0] #logger
        prop_ = toks[0]["prop"]
        value_ = toks[0]["value"]

        type_ = "re"
        if "type_exp" in toks[0]:
            type_ = toks[0]["type_exp"]["types"]

        flag_ = "C"
        if "flag_exp" in toks[0]:
            flag_ = toks[0]["flag_exp"]["flags"]
        # print prop_, value_, type_, flag_ #logger

        if flag_ == "I":
            # Case-insensitive match: rewrite each letter as a [Xx] class.
            value_ = re.sub(
                r"[a-zA-Z]",
                lambda x: "[" +
                          x.group().upper() +
                          x.group().lower() +
                          "]",
                value_)

        if self.is_meta_classid:
            class_obj = imccoreutils.load_class(self.class_id)
            prop_ = imccoreutils.get_prop_meta(class_obj, prop_)
            prop_ = prop_.xml_attribute

        sub_filter = create_basic_filter(types[type_],
                                         class_=imcgenutils.word_l(
                                             self.class_id),
                                         property=prop_,
                                         value=value_)
        return sub_filter
    @staticmethod
    def and_operator(toks):
        """
        method to support logical 'and' operator expression
        """
        # print str, loc, toks
        # print toks[0][0::2]
        and_filter = AndFilter()
        for op_filter in toks[0][0::2]:
            and_filter.child_add(op_filter)
        return and_filter

    @staticmethod
    def or_operator(toks):
        """
        method to support logical 'or' operator expression
        """
        # print str, loc, toks
        # print toks[0][0::2]
        or_filter = OrFilter()
        for op_filter in toks[0][0::2]:
            or_filter.child_add(op_filter)
        return or_filter

    @staticmethod
    def not_operator(toks):
        """
        method to support logical 'not' operator expression
        """
        not_filter = NotFilter()
        for op_filter in toks[0][1:]:
            not_filter.child_add(op_filter)
        return not_filter
    def parse_filter_str(self, filter_str):
        """
        method to parse a filter string
        """
        prop = pp.WordStart(pp.alphas) + pp.Word(pp.alphanums +
                                                 "_").setResultsName("prop")
        value = (pp.QuotedString("'") | pp.QuotedString('"') | pp.Word(
            pp.printables, excludeChars=",")).setResultsName("value")
        types_ = pp.oneOf("re eq ne gt ge lt le").setResultsName("types")
        flags = pp.oneOf("C I").setResultsName("flags")
        comma = pp.Literal(',')
        quote = (pp.Literal("'") | pp.Literal('"')).setResultsName("quote")

        type_exp = pp.Group(pp.Literal("type") + pp.Literal(
            "=") + quote + types_ + quote).setResultsName("type_exp")
        flag_exp = pp.Group(pp.Literal("flag") + pp.Literal(
            "=") + quote + flags + quote).setResultsName("flag_exp")

        semi_expression = pp.Forward()
        semi_expression << pp.Group(pp.Literal("(") +
                                    prop + comma + value +
                                    pp.Optional(comma + type_exp) +
                                    pp.Optional(comma + flag_exp) +
                                    pp.Literal(")")
                                    ).setParseAction(
            self.parse_filter_obj).setResultsName("semi_expression")

        expr = pp.Forward()
        expr << pp.operatorPrecedence(semi_expression, [
            ("not", 1, pp.opAssoc.RIGHT, self.not_operator),
            ("and", 2, pp.opAssoc.LEFT, self.and_operator),
            ("or", 2, pp.opAssoc.LEFT, self.or_operator)
        ])
        result = expr.parseString(filter_str)
        return result


def generate_infilter(class_id, filter_str, is_meta_class_id):
    """
    Creates a FilterFilter object.

    Args:
        class_id (str): class_id
        filter_str (str): filter expression
        is_meta_class_id (bool)

    Returns:
        FilterFilter object

    Example:
        generate_infilter("LsServer",
                          '(usr_lbl, "mysp", type="eq", flag="I")',
                          True)
    """
    parse_filter = ParseFilter(class_id=class_id,
                               is_meta_classid=is_meta_class_id)
    result = parse_filter.parse_filter_str(filter_str)

    in_filter = FilterFilter()
    in_filter.child_add(result[0])
    return in_filter


def handle_filter_max_component_limit(handle, l_filter):
    """
    Checks the filter count; if it exceeds max_components (the number of
    filters), the given filter objects are distributed among smaller groups
    and then bound together again in complex filters (like 'and', 'or') so
    that the count of filters at any one level is reduced.
    """
    from .imccore import AbstractFilter
    from .imcfiltertype import AndFilter, OrFilter

    max_components = 10
    if l_filter is None or l_filter.child_count() <= max_components:
        return l_filter

    if not isinstance(l_filter, AndFilter) and not isinstance(l_filter,
                                                              OrFilter):
        return l_filter

    if isinstance(l_filter, AndFilter):
        parent_filter = AndFilter()
        child_filter = AndFilter()
        parent_filter.child_add(child_filter)
        for childf in l_filter.child:
            if isinstance(childf, AbstractFilter):
                if child_filter.child_count() == max_components:
                    child_filter = AndFilter()
                    parent_filter.child_add(child_filter)
                child_filter.child_add(childf)
        result_filter = parent_filter
    else:
        parent_filter = OrFilter()
        child_filter = OrFilter()
        parent_filter.child_add(child_filter)
        for childf in l_filter.child:
            if isinstance(childf, AbstractFilter):
                if child_filter.child_count() == max_components:
                    child_filter = OrFilter()
                    parent_filter.child_add(child_filter)
                child_filter.child_add(childf)
        result_filter = parent_filter

    return handle_filter_max_component_limit(handle, result_filter)


def create_basic_filter(filter_name, **kwargs):
    """
    Loads and instantiates the requested filter class.
    """
    from . import imcmeta

    fq_module_name = imcmeta.OTHER_TYPE_CLASS_ID[filter_name]
    module_import = __import__(fq_module_name, globals(), locals(),
                               [filter_name], level=1)
    filter_obj = getattr(module_import, filter_name)()
    filter_obj.create(**kwargs)
    return filter_obj
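

# Usage sketch (hypothetical class and label; grammar as parsed above):
#   in_filter = generate_infilter("LsServer",
#                                 '(usr_lbl, "mysp", type="eq", flag="I")',
#                                 True)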
| apache-2.0 | 4,576,562,915,621,964,000 | 31.635246 | 76 | 0.550044 | false | 4.04624 | false | false | false |
hopped/wikipedia-edit-wars | wikiparser.py | 1 | 2271 | import xml.sax.handler
__author__ = 'Dennis Hoppe ([email protected])'
class WikiArticle(object):
    def __init__(self):
        self.title = ""
        self.id = 0
        # Instance-level list; a shared class attribute would accumulate
        # revisions across all articles.
        self.revisions = []

class WikiRevision(object):
    def __init__(self):
        self.timestamp = ""
        self.username = ""
        self.userid = 0
        self.revid = 0
        self.comment = ""
        self.text = ""

class WikiParser(xml.sax.handler.ContentHandler):
    def __init__(self):
        self.wikiArticle = WikiArticle()
        self.wikiRevision = WikiRevision()
        self.buffer = ""
        self.inPage = 0
        self.inTitle = 0
        self.inRevision = 0
        self.inText = 0
        self.inId = 0
        self.inUsername = 0
        self.inContributor = 0
        self.inTimestamp = 0
        self.inComment = 0

    def startElement(self, name, attributes):
        self.buffer = ""
        if name == "page":
            self.inPage = 1
        elif name == "title":
            self.inTitle = 1
        elif name == "revision":
            self.inRevision = 1
            self.wikiRevision = WikiRevision()
        elif name == "username":
            self.inUsername = 1
        elif name == "contributor":
            self.inContributor = 1
        elif name == "text":
            self.inText = 1
        elif name == "id":
            self.inId = 1
        elif name == "timestamp":
            self.inTimestamp = 1
        elif name == "comment":
            self.inComment = 1

    def characters(self, data):
        self.buffer += data

    def endElement(self, name):
        if name == "page":
            self.inPage = 0
        elif name == "title":
            self.inTitle = 0
            self.wikiArticle.title = self.buffer
        elif name == "revision":
            self.inRevision = 0
            self.wikiArticle.revisions.append(self.wikiRevision)
        elif name == "username":
            self.inUsername = 0
            self.wikiRevision.username = self.buffer
        elif name == "contributor":
            self.inContributor = 0
        elif name == "id":
            self.inId = 0
            # An <id> inside a revision belongs to the contributor or the
            # revision itself; otherwise it is the article's id.
            if self.inRevision:
                if self.inContributor:
                    self.wikiRevision.userid = self.buffer
                else:
                    self.wikiRevision.revid = self.buffer
            else:
                self.wikiArticle.id = self.buffer
                print self.buffer
        elif name == "text":
            self.inText = 0
            self.wikiRevision.text = self.buffer
        elif name == "timestamp":
            self.inTimestamp = 0
            self.wikiRevision.timestamp = self.buffer
        elif name == "comment":
            self.inComment = 0
            self.wikiRevision.comment = self.buffer
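
# Usage sketch (hypothetical dump file path):
#   parser = xml.sax.make_parser()
#   parser.setContentHandler(WikiParser())
#   parser.parse("article-history.xml")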
| mit | -2,681,335,567,877,862,400 | 23.419355 | 58 | 0.601057 | false | 3.616242 | false | false | false |
mirusresearch/staticdhcpd | libpydhcpserver/libpydhcpserver/dhcp.py | 1 | 34945 | # -*- encoding: utf-8 -*-
"""
libpydhcpserver.dhcp
====================
Handles send/receive and internal routing for DHCP packets.
Legal
-----
This file is part of libpydhcpserver.
libpydhcpserver is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
(C) Neil Tallim, 2014 <[email protected]>
(C) Matthew Boedicker, 2011 <[email protected]>
(C) Mathieu Ignacio, 2008 <[email protected]>
"""
import collections
import platform
import select
import socket
import threading
from dhcp_types.ipv4 import IPv4
from dhcp_types.mac import MAC
from dhcp_types.packet import (DHCPPacket, FLAGBIT_BROADCAST)
from dhcp_types.constants import (
    FIELD_CIADDR, FIELD_YIADDR, FIELD_SIADDR, FIELD_GIADDR,
)
#IP constants
_IP_GLOB = IPv4('0.0.0.0') #: The internal "everything" address.
_IP_BROADCAST = IPv4('255.255.255.255') #: The broadcast address.
IP_UNSPECIFIED_FILTER = (_IP_GLOB, _IP_BROADCAST, None) #: A tuple of addresses that reflect non-unicast targets.
_ETH_P_SNAP = 0x0005
"""
Internal-only Ethernet-frame-grabbing for Linux.
Nothing should be addressable to the special response socket, but better to avoid wasting memory.
"""
Address = collections.namedtuple("Address", ('ip', 'port'))
"""
An inet layer-3 address.
.. py:attribute:: ip
An :class:`IPv4 <dhcp_types.ipv4.IPv4>` address
.. py:attribute:: port
A numeric port value.
"""

class DHCPServer(object):
    """
    Handles internal packet-path-routing logic.
    """
    _server_address = None #: The IP associated with this server.
    _network_link = None #: The I/O-handler; you don't want to touch this.

    def __init__(self, server_address, server_port, client_port, pxe_port=None, response_interface=None, response_interface_qtags=None):
        """
        Sets up the DHCP network infrastructure.

        :param server_address: The IP address on which to run the DHCP service.
        :type server_address: :class:`IPv4 <dhcp_types.ipv4.IPv4>`
        :param int server_port: The port on which DHCP servers and relays listen in this network.
        :param int client_port: The port on which DHCP clients listen in this network.
        :param int pxe_port: The port on which DHCP servers listen for PXE traffic in this
            network; ``None`` to disable.
        :param str response_interface: The interface on which to provide raw packet support,
            like ``"eth0"``, or ``None`` if not requested.
        :param sequence response_interface_qtags: Any qtags to insert into raw packets, in
            order of appearance. Definitions take the following form:
            (pcp:`0-7`, dei:``bool``, vid:`1-4094`)
        :except Exception: A problem occurred during setup.
        """
        self._server_address = server_address
        self._network_link = _NetworkLink(str(server_address), server_port, client_port, pxe_port, response_interface, response_interface_qtags=response_interface_qtags)

    def _getNextDHCPPacket(self, timeout=60, packet_buffer=2048):
        """
        Blocks for up to ``timeout`` seconds while waiting for a packet to
        arrive; if one does, a thread is spawned to process it.

        Have a thread blocking on this at all times; restart it immediately after it returns.

        :param int timeout: The number of seconds to wait before returning.
        :param int packet_buffer: The size of the buffer to use for receiving packets.
        :return tuple(2): (DHCP-packet-received:``bool``,
                          :class:`Address <dhcp.Address>` or ``None`` on
                          timeout)
        """
        (source_address, data, pxe) = self._network_link.getData(timeout=timeout, packet_buffer=packet_buffer)
        if data:
            try:
                packet = DHCPPacket(data=data)
            except ValueError:
                pass
            else:
                if packet.isDHCPRequestPacket():
                    threading.Thread(target=self._handleDHCPRequest, args=(packet, source_address, pxe)).start()
                elif packet.isDHCPDiscoverPacket():
                    threading.Thread(target=self._handleDHCPDiscover, args=(packet, source_address, pxe)).start()
                elif packet.isDHCPInformPacket():
                    threading.Thread(target=self._handleDHCPInform, args=(packet, source_address, pxe)).start()
                elif packet.isDHCPReleasePacket():
                    threading.Thread(target=self._handleDHCPRelease, args=(packet, source_address, pxe)).start()
                elif packet.isDHCPDeclinePacket():
                    threading.Thread(target=self._handleDHCPDecline, args=(packet, source_address, pxe)).start()
                elif packet.isDHCPLeaseQueryPacket():
                    threading.Thread(target=self._handleDHCPLeaseQuery, args=(packet, source_address, pxe)).start()
                return (True, source_address)
        return (False, source_address)

    def _handleDHCPDecline(self, packet, source_address, pxe):
        """
        Processes a DECLINE packet.

        Override this with your own logic to handle DECLINEs.

        :param packet: The packet to be processed.
        :type packet: :class:`DHCPPacket <dhcp_types.packet.DHCPPacket>`
        :param source_address: The address from which the request was received.
        :type source_address: :class:`Address <dhcp.Address>`
        :param bool pxe: ``True`` if the packet was received on the PXE port.
        """

    def _handleDHCPDiscover(self, packet, source_address, pxe):
        """
        Processes a DISCOVER packet.

        Override this with your own logic to handle DISCOVERs.

        :param packet: The packet to be processed.
        :type packet: :class:`DHCPPacket <dhcp_types.packet.DHCPPacket>`
        :param source_address: The address from which the request was received.
        :type source_address: :class:`Address <dhcp.Address>`
        :param bool pxe: ``True`` if the packet was received on the PXE port.
        """

    def _handleDHCPInform(self, packet, source_address, pxe):
        """
        Processes an INFORM packet.

        Override this with your own logic to handle INFORMs.

        :param packet: The packet to be processed.
        :type packet: :class:`DHCPPacket <dhcp_types.packet.DHCPPacket>`
        :param source_address: The address from which the request was received.
        :type source_address: :class:`Address <dhcp.Address>`
        :param bool pxe: ``True`` if the packet was received on the PXE port.
        """

    def _handleDHCPLeaseQuery(self, packet, source_address, pxe):
        """
        Processes a LEASEQUERY packet.

        Override this with your own logic to handle LEASEQUERYs.

        :param packet: The packet to be processed.
        :type packet: :class:`DHCPPacket <dhcp_types.packet.DHCPPacket>`
        :param source_address: The address from which the request was received.
        :type source_address: :class:`Address <dhcp.Address>`
        :param bool pxe: ``True`` if the packet was received on the PXE port.
        """

    def _handleDHCPRelease(self, packet, source_address, pxe):
        """
        Processes a RELEASE packet.

        Override this with your own logic to handle RELEASEs.

        :param packet: The packet to be processed.
        :type packet: :class:`DHCPPacket <dhcp_types.packet.DHCPPacket>`
        :param source_address: The address from which the request was received.
        :type source_address: :class:`Address <dhcp.Address>`
        :param bool pxe: ``True`` if the packet was received on the PXE port.
        """

    def _handleDHCPRequest(self, packet, source_address, pxe):
        """
        Processes a REQUEST packet.

        Override this with your own logic to handle REQUESTs.

        :param packet: The packet to be processed.
        :type packet: :class:`DHCPPacket <dhcp_types.packet.DHCPPacket>`
        :param source_address: The address from which the request was received.
        :type source_address: :class:`Address <dhcp.Address>`
        :param bool pxe: ``True`` if the packet was received on the PXE port.
        """

    def _sendDHCPPacket(self, packet, source_address, pxe):
        """
        Encodes and sends a DHCP packet to its destination.

        **Important**: during this process, the packet may be modified, but
        will be restored to its initial state by the time this method returns.
        If any thread-safety is required, it must be handled in calling logic.

        :param packet: The packet to be processed.
        :type packet: :class:`DHCPPacket <dhcp_types.packet.DHCPPacket>`
        :param source_address: The address from which the request was received.
        :type source_address: :class:`Address <dhcp.Address>`
        :param bool pxe: ``True`` if the packet was received on the PXE port.
        :return int: The number of bytes transmitted.
        :except Exception: A problem occurred during serialisation or
            transmission.
        """
        return self._network_link.sendData(packet, source_address, pxe)

class _NetworkLink(object):
    """
    Handles network I/O.
    """
    _client_port = None #: The port on which clients expect to receive DHCP traffic.
    _server_port = None #: The port on which servers expect to receive DHCP traffic.
    _pxe_port = None #: The port on which PXE clients expect to receive traffic.
    _pxe_socket = None #: The internal socket to use for PXE traffic.
    _responder_dhcp = None #: The internal socket to use for responding to DHCP requests.
    _responder_pxe = None #: The internal socket to use for responding to PXE requests.
    _responder_broadcast = None #: The internal socket to use for responding to broadcast requests.
    _listening_sockets = None #: All sockets on which to listen for activity.
    _unicast_discover_supported = False #: Whether unicast responses to DISCOVERs are supported.

    def __init__(self, server_address, server_port, client_port, pxe_port, response_interface=None, response_interface_qtags=None):
        """
        Sets up the DHCP network infrastructure.

        :param str server_address: The IP address on which to run the DHCP service.
        :param int server_port: The port on which DHCP servers and relays listen in this network.
        :param int client_port: The port on which DHCP clients listen in this network.
        :param int|None pxe_port: The port on which DHCP servers listen for PXE traffic in this
            network.
        :param str|None response_interface: The interface on which to provide raw packet support,
            like 'eth0', or None if not requested.
        :param sequence|None response_interface_qtags: Any qtags to insert into raw packets, in
            order of appearance. Definitions take the following form:
            (pcp:`0-7`, dei:``bool``, vid:`1-4094`)
        :except Exception: A problem occurred during setup.
        """
        self._client_port = client_port
        self._server_port = server_port
        self._pxe_port = pxe_port

        #Create and bind unicast sockets
        (dhcp_socket, pxe_socket) = self._setupListeningSockets(server_port, pxe_port)
        if pxe_socket:
            self._listening_sockets = (dhcp_socket, pxe_socket)
            self._pxe_socket = pxe_socket
        else:
            self._listening_sockets = (dhcp_socket,)

        #Wrap the sockets with appropriate logic and set options
        self._responder_dhcp = _L3Responder(socketobj=dhcp_socket)
        self._responder_pxe = _L3Responder(socketobj=pxe_socket)
        #Either create a raw-response socket or a generic broadcast-response socket
        if response_interface:
            try:
                self._responder_broadcast = _L2Responder_AF_PACKET(server_address, response_interface, qtags=response_interface_qtags)
            except Exception:
                try:
                    self._responder_broadcast = _L2Responder_pcap(server_address, response_interface, qtags=response_interface_qtags)
                except Exception, e:
                    import errno
                    raise EnvironmentError(errno.ELIBACC, "Raw response-socket requested on %(interface)s, but neither AF_PACKET/PF_PACKET nor libpcap are available, or the interface does not exist" % {'interface': response_interface,})
            self._unicast_discover_supported = True
        else:
            self._responder_broadcast = _L3Responder(server_address=server_address)

    def _setupListeningSockets(self, server_port, pxe_port):
        """
        Creates and binds the listening sockets.

        :param int server_port: The port on which to listen for DHCP traffic.
        :param int pxe_port: The port on which to listen for PXE traffic.
        :return tuple(2): The DHCP and PXE sockets, the latter of which may be ``None`` if not
            requested.
        :except socket.error: Sockets could not be created or bound.
        """
        dhcp_socket = pxe_socket = None
        try:
            dhcp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            if pxe_port:
                pxe_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        except socket.error, msg:
            raise Exception('Unable to create socket: %(err)s' % {'err': str(msg),})

        try:
            dhcp_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            if pxe_socket:
                pxe_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        except socket.error, msg:
            import warnings
            warnings.warn('Unable to set SO_REUSEADDR; multiple DHCP servers cannot be run in parallel: %(err)s' % {'err': str(msg),})

        if platform.system() != 'Linux':
            try:
                dhcp_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
                if pxe_port:
                    pxe_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
            except socket.error, msg:
                import warnings
                warnings.warn('Unable to set SO_REUSEPORT; multiple DHCP servers cannot be run in parallel: %(err)s' % {'err': str(msg),})

        try:
            dhcp_socket.bind(('', server_port))
            if pxe_port:
                pxe_socket.bind(('', pxe_port))
        except socket.error, e:
            raise Exception('Unable to bind sockets: %(error)s' % {
                'error': str(e),
            })

        return (dhcp_socket, pxe_socket)

    def getData(self, timeout, packet_buffer):
        """
        Runs `select()` over all relevant sockets, providing data if available.

        :param int timeout: The number of seconds to wait before returning.
        :param int packet_buffer: The size of the buffer to use for receiving packets.
        :return tuple(3):
            0. :class:`Address <dhcp.Address>` or ``None``: None if the timeout was reached.
            1. The received data as a ``str`` or ``None`` if the timeout was reached.
            2. A ``bool`` indicating whether the data was received via PXE.
        :except select.error: The `select()` operation did not complete gracefully.
        """
        pxe = False
        active_sockets = select.select(self._listening_sockets, [], [], timeout)[0]
        if active_sockets:
            active_socket = active_sockets[0]
            pxe = active_socket == self._pxe_socket
            (data, source_address) = active_socket.recvfrom(packet_buffer)
            if data:
                return (Address(IPv4(source_address[0]), source_address[1]), data, pxe)
        return (None, None, False)

    def sendData(self, packet, address, pxe):
        """
        Writes the packet to the appropriate socket, addressed to the appropriate recipient.

        :param packet: The packet to be written.
        :type packet: :class:`DHCPPacket <dhcp_types.packet.DHCPPacket>`
        :param address: The address from which the original packet was received.
        :type address: :class:`Address <dhcp.Address>`
        :param bool pxe: Whether the request was received via PXE.
        :return tuple(2):
            0. The number of bytes written to the network.
            1. The :class:`Address <dhcp.Address>` ultimately used.
        :except Exception: A problem occurred during serialisation or transmission.
        """
        ip = None
        relayed = False
        port = self._client_port
        source_port = self._server_port
        responder = self._responder_dhcp
        if address.ip in IP_UNSPECIFIED_FILTER: #Broadcast source; this is never valid for PXE
            if (not self._unicast_discover_supported #All responses have to be via broadcast
                or packet.getFlag(FLAGBIT_BROADCAST)): #Broadcast bit set; respond in kind
                ip = _IP_BROADCAST
            else: #The client wants unicast and this host can handle it
                ip = packet.extractIPOrNone(FIELD_YIADDR)
            responder = self._responder_broadcast
        else: #Unicast source
            ip = address.ip
            relayed = bool(packet.extractIPOrNone(FIELD_GIADDR))
            if relayed: #Relayed request.
                port = self._server_port
            else: #Request directly from client, routed or otherwise.
                if pxe:
                    ip = packet.extractIPOrNone(FIELD_CIADDR) or ip
                    port = address.port or self._pxe_port #BSD doesn't seem to preserve port information
                    source_port = self._pxe_port
                    responder = self._responder_pxe

        return responder.send(packet, ip, port, relayed, source_port=source_port)

class _Responder(object):
    """
    A generic responder-template, which defines common logic.
    """

    def send(self, packet, ip, port, relayed, **kwargs):
        """
        Performs final sanity-checking and address manipulation, then submits the packet for
        transmission.

        :param packet: The packet to be written.
        :type packet: :class:`DHCPPacket <dhcp_types.packet.DHCPPacket>`
        :param ip: The address to which the packet should be sent.
        :type ip: :class:`IPv4 <dhcp_types.IPv4>`
        :param int port: The port to which the packet should be sent.
        :param bool relayed: ``True`` if the packet came from a relay.
        :param \*\*kwargs: Any technology-specific arguments.
        :return tuple(2):
            0. The number of bytes written to the network.
            1. The :class:`Address <dhcp.Address>` ultimately used.
        :except Exception: An error occurred during serialisation or transmission.
        """
        if relayed:
            broadcast_source = packet.extractIPOrNone(FIELD_CIADDR) in IP_UNSPECIFIED_FILTER
        else:
            broadcast_source = ip in IP_UNSPECIFIED_FILTER
        (broadcast_changed, original_was_broadcast) = packet.setFlag(FLAGBIT_BROADCAST, broadcast_source)

        #Perform any necessary packet-specific address-changes
        if not original_was_broadcast: #Unicast behaviour permitted; use the packet's IP override, if set
            ip = packet.response_ip or ip
        port = packet.response_port or port
        if packet.response_source_port is not None:
            kwargs['source_port'] = packet.response_source_port

        bytes_sent = self._send(packet, str(ip), port, **kwargs)
        if broadcast_changed: #Restore the broadcast bit, in case the packet needs to be used for something else
            packet.setFlag(FLAGBIT_BROADCAST, original_was_broadcast)
        return (bytes_sent, Address(IPv4(ip), port))

    def _send(self, packet, ip, port, **kwargs):
        """
        Handles technology-specific transmission; must be implemented by subclasses.

        :param packet: The packet to be written.
        :type packet: :class:`DHCPPacket <dhcp_types.packet.DHCPPacket>`
        :param ip: The address to which the packet should be sent.
        :type ip: :class:`IPv4 <dhcp_types.IPv4>`
        :param int port: The port to which the packet should be sent.
        :param \*\*kwargs: Any technology-specific arguments.
        :return int: The number of bytes written to the network.
        :except Exception: An error occurred during serialisation or transmission.
        """
        raise NotImplementedError("_send() must be implemented in subclasses")

class _L3Responder(_Responder):
    """
    Defines rules and logic needed to respond at layer 3.
    """
    _socket = None #: The socket used for responses.

    def __init__(self, socketobj=None, server_address=None):
        """
        Wraps an existing socket or creates an arbitrarily bound new socket with broadcast
        capabilities.

        :param socket.socket|None socketobj: The socket to be bound; if ``None``, a new one is
            created.
        :param str|None server_address: The address to which a new socket should be bound.
        :except Exception: Unable to bind a new socket.
        """
        if socketobj:
            self._socket = socketobj
        else:
            self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            try:
                self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
            except socket.error, e:
                raise Exception('Unable to set SO_BROADCAST: %(err)s' % {'err': e,})

            try:
                self._socket.bind((server_address or '', 0))
            except socket.error, e:
                raise Exception('Unable to bind socket: %(error)s' % {'error': e,})

    def _send(self, packet, ip, port, **kwargs):
        """
        Serialises and sends the packet.

        :param packet: The packet to be written.
        :type packet: :class:`DHCPPacket <dhcp_types.packet.DHCPPacket>`
        :param str ip: The address to which the packet should be sent.
        :param int port: The port to which the packet should be sent.
        :param \*\*kwargs: Any technology-specific arguments.
        :return int: The number of bytes written to the network.
        :except Exception: An error occurred during serialisation or transmission.
        """
        return self._socket.sendto(packet.encodePacket(), (ip, port))

class _L2Responder(_Responder):
    """
    Defines rules and logic needed to respond at layer 2.
    """
    _ethernet_id = None #: The source MAC and Ethernet payload-type (and qtags, if applicable).
    _server_address = None #: The server's IP.

    #Locally cached module functions
    _array_ = None #: `array.array`
    _pack_ = None #: `struct.pack`

    def __init__(self, server_address, mac, qtags=None):
        """
        Constructs the Ethernet header for all L2 communication.

        :param str server_address: The server's IP as a dotted quad.
        :param str mac: The MAC of the responding interface, in network-byte order.
        :param sequence qtags: Any qtags to insert into raw packets, in order of appearance.
            Definitions take the following form: (pcp:`0-7`, dei:``bool``, vid:`1-4094`)
        """
        import struct
        self._pack_ = struct.pack
        import array
        self._array_ = array.array

        self._server_address = socket.inet_aton(str(server_address))

        ethernet_id = [mac,] #Source MAC
        if qtags:
            for (pcp, dei, vid) in qtags:
                ethernet_id.append("\x81\x00") #qtag payload-type
                qtag_value = pcp << 13 #Priority-code-point (0-7)
                qtag_value += int(dei) << 12 #Drop-eligible-indicator
                qtag_value += vid #vlan-identifier
                ethernet_id.append(self._pack_('!H', qtag_value))
        ethernet_id.append("\x08\x00") #IP payload-type
        self._ethernet_id = ''.join(ethernet_id)

    def _checksum(self, data):
        """
        Computes the RFC768 checksum of ``data``.

        :param sequence data: The data to be checksummed.
        :return int: The data's checksum.
        """
        if sum(len(i) for i in data) & 1: #Odd
            checksum = sum(self._array_('H', ''.join(data)[:-1]))
            checksum += ord(data[-1][-1]) #Add the final byte
        else: #Even
            checksum = sum(self._array_('H', ''.join(data)))

        checksum = (checksum >> 16) + (checksum & 0xffff)
        checksum += (checksum >> 16)
        return ~checksum & 0xffff

    def _ipChecksum(self, ip_prefix, ip_destination):
        """
        Computes the checksum of the IPv4 header.

        :param str ip_prefix: The portion of the IPv4 header preceding the `checksum` field.
        :param str ip_destination: The destination address, in network-byte order.
        :return int: The IPv4 checksum.
        """
        return self._checksum([
            ip_prefix,
            '\0\0', #Empty checksum field
            self._server_address,
            ip_destination,
        ])

    def _udpChecksum(self, ip_destination, udp_addressing, udp_length, packet):
        """
        Computes the checksum of the UDP header and payload.

        :param str ip_destination: The destination address, in network-byte order.
        :param str udp_addressing: The UDP header's port section.
        :param str udp_length: The length of the UDP payload plus header.
        :param str packet: The serialised packet.
        :return int: The UDP checksum.
        """
        return self._checksum([
            self._server_address,
            ip_destination,
            '\0\x11', #UDP spec padding and protocol
            udp_length,
            udp_addressing,
            udp_length,
            '\0\0', #Dummy UDP checksum
            packet,
        ])

    def _assemblePacket(self, packet, mac, ip, port, source_port):
        """
        Assembles the Ethernet, IPv4, and UDP headers, serialises the packet, and provides a
        complete Ethernet frame for injection into the network.

        :param packet: The packet to be written.
        :type packet: :class:`DHCPPacket <dhcp_types.packet.DHCPPacket>`
        :param mac: The MAC to which the packet is addressed.
        :type mac: :class:`MAC <dhcp_types.mac.MAC>`
        :param str ip: The IPv4 to which the packet is addressed, as a dotted quad.
        :param int port: The port to which the packet is addressed.
        :param int source_port: The port from which the packet is addressed.
        :return str: The complete binary packet.
        """
        binary = []

        #<> Ethernet header
        if _IP_BROADCAST == ip:
            binary.append('\xff\xff\xff\xff\xff\xff') #Broadcast MAC
        else:
            binary.append(''.join(chr(i) for i in mac)) #Destination MAC
        binary.append(self._ethernet_id) #Source MAC and Ethernet payload-type

        #<> Prepare packet data for transmission and checksumming
        binary_packet = packet.encodePacket()
        packet_len = len(binary_packet)

        #<> IP header
        binary.append(self._pack_("!BBHHHBB",
            69, #IPv4 + length=5
            0, #DSCP/ECN aren't relevant
            28 + packet_len, #The UDP and packet lengths in bytes
            0, #ID, which is always 0 because we're the origin
            packet_len <= 560 and 0b0100000000000000 or 0, #Flags and fragmentation
            128, #Make the default TTL sane, but not maximum
            0x11, #Protocol=UDP
        ))
        ip_destination = socket.inet_aton(ip)
        binary.extend((
            self._pack_("<H", self._ipChecksum(binary[-1], ip_destination)),
            self._server_address,
            ip_destination
        ))

        #<> UDP header
        binary.append(self._pack_("!HH", source_port, port))
        binary.append(self._pack_("!H", packet_len + 8)) #8 for the header itself
        binary.append(self._pack_("<H", self._udpChecksum(ip_destination, binary[-2], binary[-1], binary_packet)))

        #<> Payload
        binary.append(binary_packet)

        return ''.join(binary)

    def _send(self, packet, ip, port, source_port=0, **kwargs):
        """
        Serialises and sends the packet.

        :param packet: The packet to be written.
        :type packet: :class:`DHCPPacket <dhcp_types.packet.DHCPPacket>`
        :param str ip: The address to which the packet should be sent.
        :param int port: The port to which the packet should be sent.
        :param int source_port: The UDP port from which to claim the packet originated.
        :param \*\*kwargs: Any technology-specific arguments.
        :return int: The number of bytes written to the network.
        :except Exception: An error occurred during serialisation or transmission.
        """
        mac = (packet.response_mac and MAC(packet.response_mac)) or packet.getHardwareAddress()
        binary_packet = self._assemblePacket(packet, mac, ip, port, source_port)
        return self._send_(binary_packet)

class _L2Responder_AF_PACKET(_L2Responder):
    """
    A Linux-specific layer 2 responder that uses AF_PACKET/PF_PACKET.
    """
    _socket = None #: The socket used for responses.

    def __init__(self, server_address, response_interface, qtags=None):
        """
        Creates and configures a raw socket on an interface.

        :param str server_address: The server's IP as a dotted quad.
        :param str response_interface: The interface on which to provide raw packet support, like
            ``"eth0"``.
        :param sequence qtags: Any qtags to insert into raw packets, in order of appearance.
            Definitions take the following form: (pcp:`0-7`, dei:``bool``, vid:`1-4094`)
        :except socket.error: The socket could not be configured.
        """
        socket_type = ((hasattr(socket, 'AF_PACKET') and socket.AF_PACKET) or (hasattr(socket, 'PF_PACKET') and socket.PF_PACKET))
        if not socket_type:
            raise Exception("Neither AF_PACKET nor PF_PACKET found")
        self._socket = socket.socket(socket_type, socket.SOCK_RAW, socket.htons(_ETH_P_SNAP))
        self._socket.bind((response_interface, _ETH_P_SNAP))
        self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 2 ** 12)
        self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 2 ** 12)

        mac = self._socket.getsockname()[4]
        _L2Responder.__init__(self, server_address, mac, qtags=qtags)

    def _send_(self, packet):
        """
        Sends the packet.

        :param str packet: The packet to be written.
        :return int: The number of bytes written to the network.
        :except Exception: An error occurred during transmission.
        """
        return self._socket.send(packet)

class _L2Responder_pcap(_L2Responder):
    """
    A more general Unix-oriented layer 2 responder that uses libpcap.
    """
    _fd = None #: The file-descriptor of the socket used for responses.
    _inject = None #: The "send" function to invoke from libpcap.

    #Locally cached module functions
    _c_int_ = None #: `ctypes.c_int`

    def __init__(self, server_address, response_interface, qtags=None):
        """
        Creates and configures a raw socket on an interface.

        :param str server_address: The server's IP as a dotted quad.
        :param str response_interface: The interface on which to provide raw packet support, like
            ``"eth0"``.
        :param sequence qtags: Any qtags to insert into raw packets, in order of appearance.
            Definitions take the following form: (pcp:`0-7`, dei:``bool``, vid:`1-4094`)
        :except Exception: Interfacing with libpcap failed.
        """
        import ctypes
        self._c_int_ = ctypes.c_int
        import ctypes.util

        pcap = ctypes.util.find_library('pcap')
        if not pcap:
            raise Exception("libpcap not found")
        pcap = ctypes.cdll.LoadLibrary(pcap)

        errbuf = ctypes.create_string_buffer(256)
        self._fd = pcap.pcap_open_live(response_interface, ctypes.c_int(0), ctypes.c_int(0), ctypes.c_int(0), errbuf)
        if not self._fd:
            import errno
            raise IOError(errno.EACCES, errbuf.value)
        elif errbuf.value:
            import warnings
            warnings.warn(errbuf.value)

        try:
            mac = self._getMAC(response_interface)
        except Exception:
            pcap.pcap_close(self._fd)
            raise
        else:
            _L2Responder.__init__(self, server_address, mac, qtags=qtags)
            self._inject = pcap.pcap_inject

    def _getMAC(self, response_interface):
        """
        Mostly portable means of getting the MAC address for the interface.

        :param str response_interface: The interface on which to provide raw packet support, like
            ``"eth0"``.
        :return str: The MAC address, in network-byte order.
        :except Exception: The MAC could not be retrieved.
        """
        import subprocess
        import re

        if platform.system() == 'Linux':
            command = ('/sbin/ip', 'link', 'show', response_interface)
        else:
            command = ('/sbin/ifconfig', response_interface)
        ifconfig_output = subprocess.check_output(command)

        m = re.search(r'\b(?P<mac>(?:[0-9A-Fa-f]{2}:){5}(?:[0-9A-Fa-f]{2}))\b', ifconfig_output)
        if not m:
            raise Exception("Unable to determine MAC of %(interface)s" % {
                'interface': response_interface,
            })
        return ''.join(chr(i) for i in MAC(m.group('mac')))

    def _send_(self, packet):
        """
        Sends the packet.

        :param str packet: The packet to be written.
        :return int: The number of bytes written to the network.
        :except Exception: An error occurred during transmission.
        """
        return self._inject(self._fd, packet, self._c_int_(len(packet)))
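

# Subclass sketch (hypothetical class name; handler signatures and the
# constructor are as defined above):
#   class EchoingServer(DHCPServer):
#       def _handleDHCPDiscover(self, packet, source_address, pxe):
#           pass  # build an OFFER and submit it via self._sendDHCPPacket(...)
#
#   server = EchoingServer(IPv4('192.168.1.1'), 67, 68)
#   while True:
#       server._getNextDHCPPacket()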
| gpl-3.0 | -4,686,634,937,923,367,000 | 44.443433 | 236 | 0.61614 | false | 4.156655 | false | false | false |
erigones/esdc-ce | api/mon/base/tasks.py | 1 | 5346 | from celery.utils.log import get_task_logger
from api.mon import get_monitoring, del_monitoring
from api.mon.exceptions import RemoteObjectDoesNotExist, RemoteObjectAlreadyExists
from api.mon.vm.tasks import mon_vm_sync
from api.mon.node.tasks import mon_node_sync
# noinspection PyProtectedMember
from api.mon.alerting.tasks import mon_all_groups_sync
from api.task.utils import mgmt_lock, mgmt_task
from que.exceptions import MgmtTaskException
from que.erigonesd import cq
from que.internal import InternalTask
from que.mgmt import MgmtTask
from vms.models import Dc, Node
__all__ = (
'mon_sync_all',
'mon_template_list',
'mon_hostgroup_list',
'mon_hostgroup_get',
'mon_hostgroup_create',
'mon_hostgroup_delete',
)
logger = get_task_logger(__name__)
def mon_clear_zabbix_cache(dc, full=True):
"""
Clear Zabbix instance from global zabbix cache used by get_monitoring() if full==True.
Reset internal zabbix instance cache if full==False and the zabbix instance exists in global zabbix cache.
Should be reviewed with every new backend implemented.
"""
if full:
if del_monitoring(dc):
logger.info('Zabbix instance for DC "%s" was successfully removed from global cache', dc)
else:
logger.info('Zabbix instance for DC "%s" was not found in global cache', dc)
else:
zx = get_monitoring(dc)
zx.reset_cache()
logger.info('Cleared cache for zabbix instance %s in DC "%s"', zx, dc)
# noinspection PyUnusedLocal
@cq.task(name='api.mon.base.tasks.mon_sync_all', base=InternalTask)
@mgmt_lock(key_args=(1,), wait_for_release=True)
def mon_sync_all(task_id, dc_id, clear_cache=True, sync_groups=True, sync_nodes=True, sync_vms=True, **kwargs):
"""
Clear Zabbix cache and sync everything in Zabbix.
Related to a specific DC.
Triggered by dc_settings_changed signal.
"""
dc = Dc.objects.get_by_id(int(dc_id))
if clear_cache:
logger.info('Clearing zabbix cache in DC %s', dc)
mon_clear_zabbix_cache(dc)
get_monitoring(dc) # Cache new Zabbix instance for tasks below
if sync_groups:
logger.info('Running monitoring group synchronization for all user groups in DC %s', dc)
mon_all_groups_sync.call(task_id, dc_name=dc.name)
if sync_nodes:
logger.info('Running monitoring host synchronization for all compute nodes')
for node in Node.all():
mon_node_sync.call(task_id, node_uuid=node.uuid)
if sync_vms:
logger.info('Running monitoring host synchronization for all VMs in DC %s', dc)
for vm_uuid in dc.vm_set.values_list('uuid', flat=True):
mon_vm_sync.call(task_id, vm_uuid=vm_uuid)
# noinspection PyUnusedLocal
@cq.task(name='api.mon.base.tasks.mon_template_list', base=MgmtTask)
@mgmt_task(log_exception=False)
def mon_template_list(task_id, dc_id, full=False, extended=False, **kwargs):
"""
Return list of templates available in Zabbix.
"""
dc = Dc.objects.get_by_id(int(dc_id))
return get_monitoring(dc).template_list(full=full, extended=extended)
# noinspection PyUnusedLocal
@cq.task(name='api.mon.base.tasks.mon_hostgroup_list', base=MgmtTask)
@mgmt_task(log_exception=False)
def mon_hostgroup_list(task_id, dc_id, dc_bound=True, full=False, extended=False, **kwargs):
"""
Return list of hostgroups available in Zabbix.
"""
dc = Dc.objects.get_by_id(int(dc_id))
return get_monitoring(dc).hostgroup_list(dc_bound=dc_bound, full=full, extended=extended)
# noinspection PyUnusedLocal
@cq.task(name='api.mon.base.tasks.mon_hostgroup_get', base=MgmtTask)
@mgmt_task(log_exception=False)
def mon_hostgroup_get(task_id, dc_id, hostgroup_name, dc_bound=True, **kwargs):
dc = Dc.objects.get_by_id(int(dc_id))
mon = get_monitoring(dc)
try:
return mon.hostgroup_detail(hostgroup_name, dc_bound=dc_bound)
except RemoteObjectDoesNotExist as exc:
raise MgmtTaskException(exc.detail)
# noinspection PyUnusedLocal
@cq.task(name='api.mon.base.tasks.mon_hostgroup_create', base=MgmtTask)
@mgmt_task(log_exception=True)
def mon_hostgroup_create(task_id, dc_id, hostgroup_name, dc_bound=True, **kwargs):
dc = Dc.objects.get_by_id(int(dc_id))
mon = get_monitoring(dc)
try:
result = mon.hostgroup_create(hostgroup_name, dc_bound=dc_bound)
except RemoteObjectAlreadyExists as exc:
raise MgmtTaskException(exc.detail)
detail = 'Monitoring hostgroup "%s" was successfully created' % hostgroup_name
mon.task_log_success(task_id, obj=mon.server_class(dc), detail=detail, **kwargs['meta'])
return result
# noinspection PyUnusedLocal
@cq.task(name='api.mon.base.tasks.mon_hostgroup_delete', base=MgmtTask)
@mgmt_task(log_exception=True)
def mon_hostgroup_delete(task_id, dc_id, hostgroup_name, dc_bound=True, **kwargs):
dc = Dc.objects.get_by_id(int(dc_id))
mon = get_monitoring(dc)
try:
result = mon.hostgroup_delete(hostgroup_name, dc_bound=dc_bound) # Fail loudly if doesnt exist
except RemoteObjectDoesNotExist as exc:
raise MgmtTaskException(exc.detail)
detail = 'Monitoring hostgroup "%s" was successfully deleted' % hostgroup_name
mon.task_log_success(task_id, obj=mon.server_class(dc), detail=detail, **kwargs['meta'])
return result
| apache-2.0 | -342,738,251,245,723,970 | 35.616438 | 111 | 0.701459 | false | 3.297964 | false | false | false |
matthew-brett/pymc | pymc/examples/model_1_missing.py | 1 | 1501 | """
A model for the disasters data with a changepoint, with missing data
changepoint ~ U(0,110)
early_mean ~ Exp(1.)
late_mean ~ Exp(1.)
disasters[t] ~ Po(early_mean if t <= switchpoint, late_mean otherwise)
"""
__all__ = ['swichpoint','early_mean','late_mean','disasters']
from pymc import DiscreteUniform, Exponential, deterministic, Poisson, Uniform, Lambda, MCMC, observed, poisson_like
from pymc.distributions import Impute
import numpy as np
# Missing values indicated by None placeholders
disasters_array = np.array([ 4, 5, 4, 0, 1, 4, 3, 4, 0, 6, 3, 3, 4, 0, 2, 6,
3, 3, 5, 4, 5, 3, 1, 4, 4, 1, 5, 5, 3, 4, 2, 5,
2, 2, 3, 4, 2, 1, 3, None, 2, 1, 1, 1, 1, 3, 0, 0,
1, 0, 1, 1, 0, 0, 3, 1, 0, 3, 2, 2, 0, 1, 1, 1,
0, 1, 0, 1, 0, 0, 0, 2, 1, 0, 0, 0, 1, 1, 0, 2,
3, 3, 1, None, 2, 1, 1, 1, 1, 2, 4, 2, 0, 0, 1, 4,
0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1])
# Switchpoint
s = DiscreteUniform('s', lower=0, upper=110)
# Early mean
e = Exponential('e', beta=1)
# Late mean
l = Exponential('l', beta=1)
@deterministic(plot=False)
def r(s=s, e=e, l=l):
"""Allocate appropriate mean to time series"""
out = np.empty(len(disasters_array))
# Early mean prior to switchpoint
out[:s] = e
# Late mean following switchpoint
out[s:] = l
return out
# Where the mask is true, the value is taken as missing.
D = Impute('D', Poisson, disasters_array, mu=r)
| mit | -4,282,114,124,814,546,000 | 32.355556 | 116 | 0.56429 | false | 2.633333 | false | false | false |
cdeboever3/cdpybio | cdpybio/ldsc.py | 1 | 2980 | import datetime as dt
import pandas as pd
def parse_ldsc_rg_log(fn):
conv_month = {'': 0, 'Apr': 4, 'Aug': 8, 'Dec': 12, 'Feb': 2,
'Jan': 1, 'Jul': 7, 'Jun': 6, 'Mar': 3,
'May': 5, 'Nov': 11, 'Oct': 10, 'Sep': 9}
with open(fn) as f:
fcontents = f.read()
lines = fcontents.split(69 * '*' + '\n')[-1].strip().split('\n')
month, day, time, year = [x.split() for x in lines if x[0:10] == 'Beginning '][0][4:]
hour, minute, second = time.split(':')
begin = dt.datetime(int(year), int(conv_month[month]), int(day), int(hour), int(minute), int(second))
month, day, time, year = [x.split() for x in lines if x[0:17] == 'Analysis finished'][0][4:]
hour, minute, second = time.split(':')
end = dt.datetime(int(year), int(conv_month[month]), int(day), int(hour), int(minute), int(second))
num_snps = int([x for x in lines if 'valid' in x][0].split()[0])
# Pheno 1
lines = fcontents.split(69 * '*' + '\n')[-1].split(29 * '-' + '\n')[0].strip().split('\n')
p1_h2, p1_h2_se = [x for x in lines if x[0:5] == 'Total'][0].split()[-2:]
p1_h2 = float(p1_h2)
p1_h2_se = float(p1_h2_se[1:-1])
p1_lambda_gc = float([x for x in lines if x[0:6] == 'Lambda'][0].strip().split()[-1])
p1_mean_chi2 = float([x for x in lines if x[0:4] == 'Mean'][0].strip().split()[-1])
p1_intercept, p1_intercept_se = [x for x in lines if x[0:9] == 'Intercept'][0].strip().split()[-2:]
p1_intercept = float(p1_intercept)
p1_intercept_se = float(p1_intercept_se[1:-1])
# Pheno 2
lines = fcontents.split(69 * '*' + '\n')[-1].split(29 * '-' + '\n')[0].strip().split('\n')
p2_h2, p2_h2_se = [x for x in lines if x[0:5] == 'Total'][0].split()[-2:]
p2_h2 = float(p2_h2)
p2_h2_se = float(p2_h2_se[1:-1])
p2_lambda_gc = float([x for x in lines if x[0:6] == 'Lambda'][0].strip().split()[-1])
p2_mean_chi2 = float([x for x in lines if x[0:4] == 'Mean'][0].strip().split()[-1])
p2_intercept, p2_intercept_se = [x for x in lines if x[0:9] == 'Intercept'][0].strip().split()[-2:]
p2_intercept = float(p2_intercept)
p2_intercept_se = float(p2_intercept_se[1:-1])
vals = [begin, end, num_snps]
ind = ['start_time', 'end_time', 'num_snps']
vals += [p1_h2, p1_h2_se, p1_lambda_gc, p1_mean_chi2, p1_intercept,
p1_intercept_se]
ind += ['h2_p1', 'h2_se_p1', 'lambda_gc_p1', 'mean_chi2_p1', 'intercept_p1',
'intercept_se_p1']
vals += [p2_h2, p2_h2_se, p2_lambda_gc, p2_mean_chi2, p2_intercept,
p2_intercept_se]
ind += ['h2_p2', 'h2_se_p2', 'lambda_gc_p2', 'mean_chi2_p2', 'intercept_p2',
'intercept_se_p2']
lines = fcontents.split(69 * '*' + '\n')[-1].strip().split('\n')
vals += lines[-4].split()[0:2]
ind += lines[-5].split()[0:2]
vals += [float(x) for x in lines[-4].split()[2:]]
ind += lines[-5].split()[2:]
out = pd.Series(vals, index=ind)
return(out)
| mit | -9,163,779,512,349,474,000 | 47.852459 | 105 | 0.538926 | false | 2.542662 | false | false | false |
demharters/git_scripts | super_all.py | 1 | 2172 | #! /usr/bin/env python
# original Written by Jules Jacobsen ([email protected]). Feel free to do whatever you like with this code.
# extensively modified by Robert L. Campbell ([email protected])
from pymol import cmd
def super_all(target=None,mobile_selection='name ca',target_selection='name ca',cutoff=2, cycles=5,cgo_object=0):
"""
Superimposes all models in a list to one target using the "super" algorithm
usage:
super_all [target][target_selection=name ca][mobile_selection=name ca][cutoff=2][cycles=5][cgo_object=0]
where target specifies is the model id you want to superimpose all others against,
and selection, cutoff and cycles are options passed to the super command.
By default the selection is all C-alpha atoms and the cutoff is 2 and the
number of cycles is 5.
Setting cgo_object to 1, will cause the generation of an superposition object for
each object. They will be named like <object>_on_<target>, where <object> and
<target> will be replaced by the real object and target names.
Example:
super_all target=name1, mobile_selection=c. b & n. n+ca+c+o,target_selection=c.a & n. n+ca+c+o
"""
cutoff = int(cutoff)
cycles = int(cycles)
cgo_object = int(cgo_object)
object_list = cmd.get_names()
object_list.remove(target)
rmsd = {}
rmsd_list = []
objectname = 'super_on_%s' % target
for i in range(len(object_list)):
if cgo_object:
# objectname = 'super_%s_on_%s' % (object_list[i],target)
rms = cmd.super('%s & %s'%(object_list[i],mobile_selection),'%s & %s'%(target,target_selection),cutoff=cutoff,cycles=cycles,object=objectname)
else:
rms = cmd.super('%s & %s'%(object_list[i],mobile_selection),'%s & %s'%(target,target_selection),cutoff=cutoff,cycles=cycles)
rmsd[object_list[i]] = rms[0]
rmsd_list.append((rms[0],object_list[i]))
rmsd_list.sort()
# loop over dictionary and print out matrix of final rms values
print "Superimposing against:",target
for object_name in object_list:
print "%s: %6.3f" % (object_name,rmsd[object_name])
for r in rmsd_list:
print "%6.3f %s" % r
cmd.extend('super_all',super_all)
| apache-2.0 | -2,541,234,397,714,040,300 | 39.222222 | 148 | 0.685543 | false | 3.198822 | false | false | false |
zengchunyun/s12 | day5/Day5/CreditCard/modules/creditcard.py | 1 | 9215 | #!/usr/bin/env python
import os
from datetime import datetime, date, timedelta
from conf import settings, errorcode
from modules import common
from dbhelper import dbapi
class CreditCard(object):
__database = "{0}.db".format(os.path.join(settings.DATABASE['dbpath'], settings.DATABASE["tables"]["creditcard"]))
def __init__(self, cardno):
# 信用卡卡号
self.cardno = cardno
# 信用卡密码
self.password = ""
# 卡所有者
self.owner = ""
# 信用卡额度
self.credit_total = settings.CREDIT_TOTAL
# 信用卡透支余额
self.credit_balance = settings.CREDIT_TOTAL
# 信用卡日息
self.dayrate = settings.EXPIRE_DAY_RATE
# 提现手续费率
self.feerate = settings.FETCH_MONEY_RATE
# 所有信用卡数据
self.credit_card = {}
# 信用卡是否存在标识
self.card_is_exists = True
# 信用卡状态(是否冻结)
self.frozenstatus = 0
# 获取卡的信息
self._load_card_info()
def _load_card_info(self):
"""
根据用户输入的卡号获取信用卡信息,如果卡号不存在就返回False
:return: 信用卡对象
"""
exists_flag = False
self.credit_card = dbapi.load_data_from_db(self.__database)
for key, items in self.credit_card.items():
if key == self.cardno:
self.password = self.credit_card[self.cardno]['password']
self.credit_total = self.credit_card[self.cardno]['credit_total']
self.credit_balance = self.credit_card[self.cardno]['credit_balance']
self.owner = self.credit_card[self.cardno]['owner']
self.frozenstatus = self.credit_card[self.cardno]['frozenstatus']
exists_flag = True
break
self.card_is_exists = exists_flag
"""
@property
def card_is_exists(self):
if self.cardno in list(self.credit_card.keys()):
return True
else:
return False
"""
def card_pay(self, cost, paytype, sereialno):
"""
信用卡支付,从信用卡可透支余额中扣费
:param sereialno: 流水号
:param cost: 消费金额 float类型
:param paytype: 消费类型 int类型 ( 1:消费、2:转账、3:提现、4:手续费 ) 对于2,3类型的支付要扣手续费,单记录一条流水单
:return:
"""
if paytype == 1:
payfor = "消费"
elif paytype == 2:
payfor = "转账"
elif paytype == 3:
payfor = "提现"
elif paytype == 4:
payfor = "手续费"
else:
payfor = "未知"
# 支付扣款
self.credit_balance -= cost
# 记录消费流水对账单,将发生了费用还没有还款的账单信息写入文件 report_bill 中
_tmp_bill_record = dict(cardno="{0}".format(self.cardno),
starttime=datetime.strftime(datetime.now(), "%Y-%m-%d %H:%M"),
payfor=payfor,
cost=cost,
serialno=sereialno)
dbapi.append_db_json(_tmp_bill_record, os.path.join(settings.REPORT_PATH, "report_bill"))
# 更新信用卡可透支余额信息到数据库 creditcard.db
self.credit_card[self.cardno]["credit_balance"] = self.credit_balance
dbapi.write_db_json(self.credit_card, self.__database)
def create_card(self):
"""
新发行一张行用卡
:return:
"""
password = common.encrypt(self.password)
self.credit_card[self.cardno] = dict(password=password,
credit_total=self.credit_total,
credit_balance=self.credit_balance,
owner=self.owner,
frozenstatus=self.frozenstatus)
# 保存到数据库
dbapi.write_db_json(self.credit_card, self.__database)
def update_card(self):
password = common.encrypt(self.password)
self.credit_card[self.cardno]["password"] = password
self.credit_card[self.cardno]["owner"] = self.owner
self.credit_card[self.cardno]["credit_total"] = self.credit_total
self.credit_card[self.cardno]["credit_balance"] = self.credit_balance
self.credit_card[self.cardno]["frozenstatus"] = self.frozenstatus
# 写入数据库
dbapi.write_db_json(self.credit_card, self.__database)
def _pay_check(self, cost, password):
"""
转账、提现时验证操作,判断卡的余额与支付密码是否正确。并返回错误类型码
:param cost: 转账、提现金额(包含手续费)
:param password: 支付密码
:return: 错误码
"""
totalfee = cost
# 提现金额及手续费和大于余额,
if totalfee > self.credit_balance:
return errorcode.BALANCE_NOT_ENOUGHT
elif common.encrypt(password) != self.password:
return errorcode.CARD_PASS_ERROR
else:
return errorcode.NO_ERROR
def fetch_money(self, count, passwd):
"""
提现
:param count: 提现金额
:param passwd:信用卡提现密码
:return: 返回错误类型码
"""
totalfee = count + count * self.feerate
check_result = self._pay_check(totalfee, passwd)
if check_result == errorcode.NO_ERROR:
# 扣取提现金额并写入数据库,生成账单
self.card_pay(count, 3, common.create_serialno())
# 扣取手续费并写入数据库, 生成账单
self.card_pay(count * self.feerate, 4, common.create_serialno())
return errorcode.NO_ERROR
else:
return check_result
def translate_money(self, trans_count, passwd, trans_cardobj):
"""
信用卡转账模块
:param trans_count: 要转账的金额
:param passwd: 信用卡密码
:param trans_cardobj: 对方卡号对应的卡对象
:return: 转账结果
"""
totalfee = trans_count + trans_count * self.feerate
check_result = self._pay_check(totalfee, passwd)
if check_result == errorcode.NO_ERROR:
# 先扣款,生成消费流水账单
self.card_pay(trans_count, 2, common.create_serialno())
# 扣手续费, 生成消费流水账单
self.card_pay(trans_count * self.feerate, 4, common.create_serialno())
# 给对方卡充值,并写入数据库文件
trans_cardobj.credit_balance += totalfee
trans_cardobj.update_card()
return errorcode.NO_ERROR
else:
return check_result
def load_statement_list(self):
"""
获取要还款的对账单列表数据,仅包含对账单号、还款日、应还款额、已还款额
:return: 对账单列表
"""
# 获取要显示的信息
list_info = dbapi.load_statement_list(self.cardno)
return list_info
def recreate_statement(self):
"""
根据今天的日期将当前卡的对账单重新生成,主要对过了还款日的账单重新生成利息信息
:return:
"""
# 获取当前日期
today = datetime.strptime(date.today().strftime("%Y-%m-%d"), "%Y-%m-%d")
# 获取所有卡的对账单信息
card_statement = dbapi.load_statement_list(self.cardno)
tmp_list = list()
# 如果有记录
if len(card_statement) > 0:
for record in card_statement:
for k, v in record.items():
# 如果已经还款了,将对账单放入临时列表中
if v["isfinished"] == 1:
tmp_list.append(record)
else:
# 还未还款? 获取还款日期
pay_day = datetime.strptime(v["pdate"], "%Y-%m-%d")
# 如果还款日大于当前日期,无利息
day_delta = (today - pay_day).days
if day_delta > 0:
# 过了还款日了,计算利息 = 总费用 * 日息 * 超过天数
interest = v["total"] * settings.EXPIRE_DAY_RATE * day_delta
# 更新利息信息记录
record[k]["interest"] = interest
# 将更新过的记录写入临时列表
tmp_list.append(record)
else:
# 没有过还款日直接写入临时列表
tmp_list.append(record)
# 都处理完了,将更新过的列表写入文件,替换原有信息
dbapi.write_statement_list(self.cardno, tmp_list)
else:
# 此卡没有对账单记录
pass
| gpl-2.0 | -345,904,432,875,530,240 | 33.827434 | 118 | 0.526363 | false | 2.88104 | false | false | false |
parente/clique | Interface.py | 1 | 4948 | '''
Defines Clique interfaces.
@author: Peter Parente <[email protected]>
@copyright: Copyright (c) 2008 Peter Parente
@license: BSD License
All rights reserved. This program and the accompanying materials are made
available under the terms of The BSD License which accompanies this
distribution, and is available at
U{http://www.opensource.org/licenses/bsd-license.php}
'''
from protocols import Interface
class IOption(Interface):
'''
Allows an object to be listed by a L{Chooser} instance.
'''
def GetObject(): pass
def GetName(): pass
class IContext(Interface):
'''Allows access to child objects at a given path.'''
def GetObjectAt(path): pass
class IInteractive(Interface):
'''
Allows activation and deactivation of an object. Defines methods for getting
an object's name and determining if an object has changed.
'''
def Activate(): pass
def Deactivate(): pass
def GetName(override, default): pass
def HasChanged(): pass
class ISeekable(Interface):
'''
Allows seeking to an item in this object given a single character.
'''
BACKWARD, FORWARD = 0, 1
def SeekToItem(pred, direction=FORWARD): pass
class ISearchable(Interface):
'''
Allows searching to an item in this object given a string. Supports
navigation to the next and previous matching item.
'''
def SearchStart(): pass
def SearchForNextMatch(text, current): pass
def SearchForPrevMatch(text, current): pass
def SearchReset(): pass
class ISortable(Interface):
'''
Allows sorting of items based on one or more criteria.
'''
def GetSortName(): pass
def SortNext(): pass
def SortPrev(): pass
class ISelectable(Interface):
'''
Allows the selection of one or all items managed by an object.
'''
def Reselect(): pass
def SelectAllItems(): pass
def UnselectItems(): pass
class IDeletable(Interface):
'''
Allows the deletion of one item managed by an object.
'''
def Delete(): pass
class IDetailable(Interface):
'''
Allows access to additional information about the currently selected item
managed by an object.
'''
def GetFields(): pass
def GetInheritedFields(): pass
class IStrideable(Interface):
'''
Allows variable levels of navigation through items.
'''
def NextLevel(): pass
def PrevLevel(): pass
def GetLevel(): pass
class IInfiniteCollection(Interface):
'''
Allows navigation through items via previous and next commands. Allows access
to the currently selected item and its name.
'''
def GetSelectedName(default=''): pass
def NextItem(): pass
def PrevItem(): pass
class IFiniteCollection(IInfiniteCollection):
'''
Allows navigation to the first item in a bounded collection. Provides methods
for getting the total number of items and the currently selected item's index.
'''
def GetItemCount(): pass
def GetIndex(): pass
def FirstItem(): pass
class IList(IFiniteCollection):
'''
Allows navigation to the last item in a bounded collection.
'''
def LastItem(): pass
class ITree(IFiniteCollection):
'''
Allows access to information about items managed at higher and lower levels.
'''
def GetParentName(default=''): pass
def GetChildCount(): pass
def HasChildren(): pass
def HasParent(): pass
class ILabel(Interface):
'''
Allows read-only access to an entire body of text that can only be retrieved
as one large string.
'''
def __str__(self): pass
def GetAllText(): pass
class IText(Interface):
'''
Allows read-only access to properties of a body of text and navigation by
character, word, and chunk.
'''
BOTH, FROM_START, TO_END = 0, 1, 2
CURR, PREV, NEXT = 0, 2, 4
def GetAllText(): pass
def GetWordCount(all=True): pass
def GetChunkText(which): pass
def GetWordText(which): pass
def GetCharText(which): pass
def NextChunk(skip=False): pass
def PrevChunk(): pass
def NextWord(): pass
def PrevWord(): pass
def PrevChar(): pass
def NextChar(): pass
def IsLastChunk(): pass
def IsFirstChunk(): pass
def MoveXChars(diff): pass
def MoveStart(self): pass
def MoveEnd(self): pass
def MoveStartChunk(self): pass
def MoveEndChunk(self): pass
class IHypertext(IDetailable,IText):
'''
Allows read-only access to extended properties and actions of rich text.
'''
def IsLink(): pass
def FollowLink(): pass
def GetTitle(): pass
class IEditableText(IText):
'''
Allows write access to a body of text with methods to replace all text, insert
a character, delete a character, and insert a new chunk.
'''
def SetText(): pass
def DeleteNext(): pass
def DeletePrev(): pass
def InsertChar(char): pass
def InsertText(text): pass
def InsertChunk(): pass
class ISound(Interface):
'''
Provides a mapping from an object state, action, warn, and identity to a sound
representing it.
'''
def State(name): pass
def Action(name): pass
def Warn(name): pass
def Identity(name=''): pass
| bsd-3-clause | 1,599,633,449,436,640,000 | 25.459893 | 80 | 0.708165 | false | 3.936356 | false | false | false |
JesseScott/PolyglotVancouver-Analysis | util.py | 1 | 1542 | #!/usr/bin/python
# Util file to import in all of the notebooks to allow for easy code re-use
# Calculate Percent of Attendees that did not speak
def percent_silent(df):
total = len(df)
silent = 0
for row in df.iteritems():
if row[1] == 0:
silent = silent + 1
percent = {}
percent['TOTAL'] = total
percent['SILENT'] = silent
percent['VERBOSE'] = total - silent
return percent
# Calculate Percent of Attendees that left
def percent_left(df):
total = len(df)
left = 0
for row in df.iteritems():
if row[1] == 0:
left = left + 1
percent = {}
percent['TOTAL'] = total
percent['LEFT'] = left
percent['STAYED'] = total - left
return percent
# Calculate Percent of Attendees along gender
def percent_gender(df):
total = len(df)
female = 0
for row in df.iteritems():
if row[1] == 1:
female = female + 1
percent = {}
percent['TOTAL'] = total
percent['FEMALE'] = female
percent['MALE'] = total - female
return percent
# Calculate Percent of Talking points by
def percent_talking_gender(df):
total = 0
male = 0
female = 0
for talks, gender in df.itertuples(index=False):
if talks > 0:
total = total + 1
if gender == 0:
male = male + 1
elif gender == 1:
female = female + 1
percent = {}
percent['TOTAL'] = total
percent['FEMALE'] = female
percent['MALE'] = male
return percent
| gpl-3.0 | -1,612,622,575,943,453,700 | 22.723077 | 75 | 0.56939 | false | 3.628235 | false | false | false |
originaltebas/chmembers | app/familias/forms.py | 1 | 2788 | # app/familias/forms.py
# coding: utf-8
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, HiddenField, SelectField
from wtforms.validators import InputRequired, Length
class FamiliasForm(FlaskForm):
"""
Formulario para familias
"""
id = HiddenField("id")
id_direccion = HiddenField("idDir")
# Modelo Familia
apellidos_familia = StringField(u'Apellido de la Familia',
validators=[InputRequired(),
Length(min=1, max=60)])
descripcion_familia = StringField(u'Descripción de la Familia',
validators=[InputRequired(),
Length(min=0, max=200)])
telefono_familia = StringField(u'Teléfono de la Familia',
validators=[InputRequired(),
Length(min=0, max=15)])
TipoFamilia = SelectField(u'Tipo de Familia', coerce=int)
submit = SubmitField(u'Aceptar')
class DireccionModalForm(FlaskForm):
# Modelo Direccion
tipo_via = StringField(u'Tipo de vía',
validators=[InputRequired(),
Length(min=1, max=20)])
nombre_via = StringField(u'Nombre de la vía',
validators=[InputRequired(),
Length(min=1, max=100)])
nro_via = StringField(u'Nro',
validators=[InputRequired(),
Length(min=1, max=10)])
portalescalotros_via = StringField(u'Portal/Esc/Otro')
piso_nroletra_via = StringField(u'Nro/Letra del Piso')
cp_via = StringField(u'CP',
validators=[InputRequired(),
Length(min=1, max=10)])
ciudad_via = StringField(u'Ciudad',
validators=[InputRequired(),
Length(min=1, max=50)])
provincia_via = StringField(u'Provincia',
validators=[InputRequired(),
Length(min=1, max=50)])
pais_via = StringField(u'País',
validators=[InputRequired(),
Length(min=1, max=50)])
submit = SubmitField(u'Crear Dirección')
class AsignacionMiembrosForm(FlaskForm):
"""
Formulario para la asignacion de personas a las
ggcc. Las personas tienen que ser miembros creados
"""
ids_in = HiddenField('Ids IN')
ids_out = HiddenField('Ids OUT')
submit = SubmitField(u'Aceptar')
| mit | -5,743,181,648,455,708,000 | 35.093333 | 74 | 0.502876 | false | 4.091176 | false | false | false |
jingsam/tianditu | CheckRoadName.py | 1 | 2300 | # -*- coding: utf-8 -*-
__author__ = '[email protected]'
import os
import arcpy
from parallel import check_parallel
def check_road_name_task(args, cpus, pid):
in_fc = args[0]
fields = args[1]
error_id = "ERR06"
layer = os.path.basename(in_fc)
content = "NAME填写位置正确性检查"
description = "图层【{0}】的ID为【{1}】的要素,【{2}】填写不正确。"
warning = "不忽略"
desc = arcpy.Describe(in_fc)
errors = []
_fields = ["OID@", "SHAPE@XY"] + fields
cursor = arcpy.da.SearchCursor(in_fc, _fields, spatial_reference=desc.spatialReference.GCS)
for row in cursor:
if row[0] % cpus != pid:
continue
all_names = [row[i] for i in xrange(2, len(row))]
names = [name for name in all_names if name]
if len(names) == 0:
continue
if len(set(names)) < len(names):
errors.append('{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}\n'
.format(row[0], error_id, layer, content, description.format(layer, row[0], ';'.join(fields)), row[1][0], row[1][1], warning))
continue
for name in names:
if all_names.index(name) >= len(names):
errors.append('{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}\n'
.format(row[0], error_id, layer, content, description.format(layer, row[0], ';'.join(fields)), row[1][0], row[1][1], warning))
break
del cursor
return ''.join(errors)
def check_road_name(in_fc, fields, out_chk):
if not arcpy.Exists(in_fc):
arcpy.AddIDMessage("ERROR", 110, in_fc)
raise SystemExit()
ext = os.path.splitext(out_chk)[1]
if ext != '.csv':
out_chk += '.csv'
f = open(out_chk, 'w')
f.write('OID, ErrorID, Layer, InspectionContent, Description, X, Y, Warning\n')
# result = check_road_name_task((in_fc, fields), 1, 0)
result = check_parallel(check_road_name_task, (in_fc, fields))
f.write(result)
f.close()
if __name__ == "__main__":
in_fc = arcpy.GetParameterAsText(0)
fields = arcpy.GetParameterAsText(1)
out_chk = arcpy.GetParameterAsText(2)
check_road_name(in_fc, fields.split(';'), out_chk) | mit | -4,269,319,113,325,853,000 | 30.434783 | 152 | 0.54025 | false | 2.997319 | false | false | false |
willowd878/nca47 | nca47/agent/dns_driver/fake_driver.py | 1 | 5969 | from oslo_config import cfg
from oslo_log import log as logging
from nca47.common.i18n import _
from nca47.common.i18n import _LI
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
DNS_DRIVER = None
ZONES_AGENT_OPTS = [
cfg.StrOpt('host_ip',
default='0.0.0.0',
help=_('The IP address on which nca47-zdns_driver listens.')),
cfg.PortOpt('port',
default=20120,
help=_('The TCP port on which nca47-zdns_driver listens.')),
cfg.StrOpt('view_id',
default='telecom',
help=_('The TCP view_id on which nca47-zdns_driver listens.')),
cfg.StrOpt('auth_name',
default='admin',
help=_('The TCP auth_name on which nca47-zdns_driver'
'listens.')),
cfg.StrOpt('auth_pw',
default='zdns',
help=_('The TCP auth_pw on which nca47-zdns_driver listens.')),
]
CONF = cfg.CONF
opt_group = cfg.OptGroup(name='zdns',
title='Options for the nca47-zdns_driver service')
CONF.register_group(opt_group)
CONF.register_opts(ZONES_AGENT_OPTS, opt_group)
class fake_dns_driver():
def __init__(self):
self.host = 'https://fake_ip'
self.port = CONF.zdns.port
self.view_id = CONF.zdns.view_id
self.auth_name = CONF.zdns.auth_name
self.auth_pw = CONF.zdns.auth_pw
@classmethod
def get_instance(cls):
global DNS_DRIVER
if not DNS_DRIVER:
DNS_DRIVER = cls()
return DNS_DRIVER
def create_zone(self, context, zone):
url = (self.host + ":" + str(self.port) +
'/views/' + self.view_id + '/zones')
LOG.info(_LI("create zones:"+url))
return {" fake create zone": "success"}
def update_zone_owners(self, context, zone, zone_id):
url = (self.host + ":" + str(self.port) + '/views/' +
self.view_id + '/zones/' + zone_id + '/owners')
LOG.info(_LI("update_zone_owners:"+url))
return {"fake update zone owners zone": "success"}
def update_zone(self, context, zone, zone_id):
url = (self.host + ":" + str(self.port) +
'/views/' + self.view_id + '/zones/' + zone_id)
LOG.info(_LI("update zones :"+url))
return {"fake update_zone zone": "success"}
def delete_zone(self, context, zone, zone_id):
url = (self.host + ":" + str(self.port) + '/views/' + self.view_id +
'/zones/' + zone_id)
LOG.info(_LI("delete zones :" + url))
return {"fake delete_zone zone": "success"}
def create_rrs(self, context, rrs, zone_id):
url = (str(self.host) + ":" + str(self.port) + '/views/' +
self.view_id + '/zones/' + str(zone_id) + '/rrs')
LOG.info(_LI("create rrs:" + url))
res = {
"fake comment": "", "name": "www.baidu.", "type": "A",
"ttl": 1200, "state": "",
"href": "/views/default/zones/www.baidu/rrs/"
"www.baidu.$1200$A$MTk4LjIwMi4zOC40OA==",
"klass": "IN", "rdata": "198.202.38.48",
"reverse_name": "baidu.www",
"id": "www.baidu.$1200$A$MTk4LjIwMi4zOC40OA==",
"is_shared": ""
}
return res
def update_rrs(self, context, rrs, zone_id, rrs_id):
url = (self.host + ":" + str(self.port) + '/views/' + self.view_id +
'/zones/' + zone_id + '/rrs/' + rrs_id)
LOG.info(_LI("update rrs:" + url))
return {"fake id": "update_rrs"}
def delete_rrs(self, context, rrs, zone_id, rrs_id):
url = (self.host + ":" + str(self.port) + '/views/' + self.view_id +
'/zones/' + zone_id + '/rrs/' + rrs_id)
LOG.info(_LI("delete rrs :" + url))
return {"fake delete_rss": "success"}
def del_cache(self, context, cache_dic):
url = (self.host + ":" + str(self.port) + '/cache/clean')
LOG.info(_LI("delete cache :" + url))
return {"fake clean cache": "success"}
def get_zone_one(self, context, zone_id):
url = (self.host + ":" + str(self.port) +
'/views/' + self.view_id + '/zones/' + zone_id)
LOG.info(_LI("view one zone :" + url))
return {"fake get_zone_one": "success"}
def get_zones(self, context):
url = (self.host + ":" + str(self.port) +
'/views/' + self.view_id + '/zones')
LOG.info(_LI("view all zone :" + url))
return {"fake get_zones": "success"}
def get_rrs(self, context, zone_id):
url = (self.host + ":" + str(self.port) + '/views/' + self.view_id +
'/zones/' + zone_id + '/rrs')
LOG.info(_LI("get_rrs :" + url))
res = {
"total_size": 2, "page_num": 1,
"resources":
[
{
"comment": "", "name": "www.baidu.",
"type": "NS", "ttl": 3600, "state": "",
"href": "/views/default/zones/www.baidu/rrs/"
"www.baidu.$3600$NS$bnMud3d3LmJhaWR1Lg==",
"klass": "IN", "rdata": "ns.www.baidu.",
"reverse_name": "baidu.www",
"id": "www.baidu.$3600$NS$bnMud3d3LmJhaWR1Lg==",
"is_shared": ""
},
{
"comment": "", "name": "ns.www.baidu.",
"type": "A", "ttl": 3600, "state": "",
"href": "/views/default/zones/www.baidu/rrs/"
"ns.www.baidu.$3600$A$MTI3LjAuMC4x",
"klass": "IN", "rdata": "127.0.0.1",
"reverse_name": "baidu.www.ns",
"id": "ns.www.baidu.$3600$A$MTI3LjAuMC4x",
"is_shared": ""
}
],
"page_size": 2
}
return res
| apache-2.0 | -7,830,155,788,497,134,000 | 38.269737 | 78 | 0.482493 | false | 3.306925 | false | false | false |
wheeler-microfluidics/open-drop | open_drop/proxy.py | 1 | 2754 | try:
from .node import Proxy as _Proxy, I2cProxy as _I2cProxy
class ProxyMixin(object):
'''
Mixin class to add convenience wrappers around methods of the generated
`node.Proxy` class.
For example, expose config and state getters/setters as attributes.
'''
@property
def config(self):
from .config import Config
return Config.FromString(self.serialize_config().tostring())
@config.setter
def config(self, value):
return self.update_config(value)
@property
def state(self):
from .config import State
return State.FromString(self.serialize_state().tostring())
@state.setter
def state(self, value):
return self.update_state(value)
def update_config(self, **kwargs):
from .config import Config
config = Config(**kwargs)
return super(ProxyMixin, self).update_config(config)
def update_state(self, **kwargs):
from .config import State
state = State(**kwargs)
return super(ProxyMixin, self).update_state(state)
def _state_of_channels(self):
return super(ProxyMixin, self).state_of_channels()
@property
def state_of_channels(self):
'''
Retrieve the state bytes from the device and unpacks them into an
array with one entry per channel. Return unpacked array.
Notes
-----
State of each channel is binary, 0 or 1. On device, states are
stored in bytes, where each byte corresponds to the state of eight
channels.
'''
import numpy as np
return np.unpackbits(super(ProxyMixin, self).state_of_channels())
@state_of_channels.setter
def state_of_channels(self, states):
self.set_state_of_channels(states)
def set_state_of_channels(self, states):
'''
Pack array containing one entry per channel to bytes (8 channels
per byte). Set state of channels on device using state bytes.
See also: `state_of_channels` (get)
'''
import numpy as np
ok = (super(ProxyMixin, self)
.set_state_of_channels(np.packbits(states)))
if not ok:
raise ValueError('Error setting state of channels. Check '
'number of states matches channel count.')
class Proxy(ProxyMixin, _Proxy):
pass
class I2cProxy(ProxyMixin, _I2cProxy):
pass
except (ImportError, TypeError):
Proxy = None
I2cProxy = None
| gpl-3.0 | 4,884,305,799,123,094,000 | 29.263736 | 79 | 0.571895 | false | 4.659898 | true | false | false |
sillywilly42/simian | src/tests/simian/mac/common/util_medium_test.py | 1 | 3592 | #!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""util module tests."""
import mox
import stubout
from google.apputils import app
from google.apputils import basetest
from simian.mac.common import util
class UtilModuleTest(mox.MoxTestBase):
def setUp(self):
mox.MoxTestBase.setUp(self)
self.stubs = stubout.StubOutForTesting()
def tearDown(self):
self.mox.UnsetStubs()
self.stubs.UnsetAll()
def testSerializeNone(self):
"""Test Serialize()."""
self.assertEqual('null', util.Serialize(None))
def testSerializeUnicode(self):
"""Test Serialize()."""
ustr = u'Hello there\u2014'
ustr_js = '"Hello there\\u2014"'
# javascript uses the same notation as python to represent unicode
# characters.
self.assertEqual(ustr_js, util.Serialize(ustr))
def testDeserializeUnicode(self):
"""Test Deserialize()."""
ustr = u'Hello there\u2014'
ustr_js = '"Hello there\\u2014"'
self.assertEqual(ustr, util.Deserialize(ustr_js))
def _DumpStr(self, s):
"""Return any binary string entirely as escaped characters."""
o = []
for i in xrange(len(s)):
o.append('\\x%02x' % ord(s[i]))
return ''.join(o)
def testSerializeControlChars(self):
"""Test Serialize()."""
input = []
output = []
for x in xrange(0, 31):
input.append(chr(x))
if x == 8:
output.append('\\b')
elif x == 9:
output.append('\\t')
elif x == 10:
output.append('\\n')
elif x == 12:
output.append('\\f')
elif x == 13:
output.append('\\r')
else:
output.append('\\u%04x' % x)
input_str = ''.join(input)
output_str = '"%s"' % ''.join(output)
serialized = util.Serialize(input_str)
self.assertEqual(
output_str,
serialized,
'%s != %s' % (self._DumpStr(output_str), self._DumpStr(serialized)))
def testSerialize8bitChars(self):
"""Test Serialize()."""
input = []
output = []
for x in xrange(128, 256, 1):
input.append(chr(x))
input_str = ''.join(input)
# the json module does not support encoding arbitrary 8 bit bytes.
# the bytes wil get snagged up in a unicode utf-8 decode step.
self.assertRaises(UnicodeDecodeError, util.Serialize, input_str)
def testSerializeFloat(self):
"""Test Serialize()."""
# expected behavior: we can only guarentee this level of precision
# in the unit test because of rounding errors.
#
# GAE's float is capable of 10 digits of precision, and a stock
# python2.6 reports 15 digits from sys.float_info.
input = {'foo': 103.2261}
output = '{"foo": 103.2261}'
self.assertEqual(
output,
util.Serialize(input))
def testDeserializeFloat(self):
"""Test Deserialize()."""
input = '{"foo": 103.2261}'
output = {'foo': 103.2261}
self.assertEqual(
output,
util.Deserialize(input))
def main(unused_argv):
basetest.main()
if __name__ == '__main__':
app.run()
| apache-2.0 | 7,988,811,219,823,179,000 | 25.028986 | 76 | 0.636136 | false | 3.706914 | true | false | false |
f3at/feat | src/feat/agents/base/sender.py | 1 | 8431 | # F3AT - Flumotion Asynchronous Autonomous Agent Toolkit
# Copyright (C) 2010,2011 Flumotion Services, S.A.
# All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# See "LICENSE.GPL" in the source distribution for more information.
# Headers in this file shall remain intact.
from feat.agents.base import task, replay, requester
from feat.common import defer, fiber, formatable
from feat.agents.application import feat
from feat.agents.monitor.interface import IClerk, DEFAULT_NOTIFICATION_PERIOD
from feat.agents.monitor.interface import PatientState
from feat.interface.protocols import ProtocolFailed
from feat.interface.recipient import IRecipient
from feat.database.interface import NotFoundError
class AgentMixin(object):
def initiate(self):
desc = self.get_descriptor()
if not hasattr(desc, 'pending_notifications'):
raise ValueError("Agent using this mixin, should have "
"'pending_notification' dictionary field"
"in his descriptor")
@replay.mutable
def startup(self, state):
config = state.medium.get_configuration()
period = config.notification_period
clerk = getattr(state, 'clerk', None)
proto = state.medium.initiate_protocol(NotificationSender,
clerk=clerk,
period=period)
state.notification_sender = proto
@replay.immutable
def has_empty_outbox(self, state):
return state.notification_sender.has_empty_outbox()
@feat.register_restorator
class PendingNotification(formatable.Formatable):
type_name = 'notification'
formatable.field('type', None)
formatable.field('origin', None)
formatable.field('payload', None)
formatable.field('recipient', None)
class NotificationSender(task.StealthPeriodicTask):
protocol_id = 'notification-sender'
@replay.entry_point
def initiate(self, state, clerk=None, period=None):
state.clerk = clerk and IClerk(clerk)
period = period or DEFAULT_NOTIFICATION_PERIOD
# IRecipient -> list of PendingNotifications
return task.StealthPeriodicTask.initiate(self, period)
@replay.immutable
def run(self, state):
defers = list()
for agent_id, notifications in self._iter_outbox():
if not notifications:
continue
if state.clerk and state.clerk.has_patient(agent_id):
status = state.clerk.get_patient(agent_id)
if status.state == PatientState.alive:
defers.append(self.flush_notifications(agent_id))
else:
defers.append(self.flush_notifications(agent_id))
return defer.DeferredList(defers)
@replay.mutable
def flush_notifications(self, state, agent_id):
return self._flush_next(agent_id)
@replay.immutable
def has_empty_outbox(self, state):
desc = state.agent.get_descriptor()
if desc.pending_notifications:
self.debug('Pending notifications keys are: %r',
desc.pending_notifications.keys())
return False
return True
### flushing notifications ###
@replay.mutable
def _flush_next(self, state, agent_id):
notification = self._get_first_pending(agent_id)
if notification:
recp = notification.recipient
f = requester.notify_partner(
state.agent, recp, notification.type,
notification.origin, notification.payload)
f.add_callbacks(fiber.drop_param, self._sending_failed,
cbargs=(self._sending_cb, recp, notification, ),
ebargs=(recp, ))
return f
@replay.mutable
def _sending_cb(self, state, recp, notification):
f = self._remove_notification(recp, notification)
f.add_both(fiber.drop_param, self._flush_next, str(recp.key))
return f
@replay.mutable
def _sending_failed(self, state, fail, recp):
fail.trap(ProtocolFailed)
# check that the document still exists, if not it means that this
# agent got buried
f = state.agent.get_document(recp.key)
f.add_callbacks(self._check_recipient, self._handle_not_found,
ebargs=(recp, ), cbargs=(recp, ))
return f
@replay.journaled
def _handle_not_found(self, state, fail, recp):
fail.trap(NotFoundError)
return self._forget_recipient(recp)
@replay.journaled
def _check_recipient(self, state, desc, recp):
self.log("Descriptor is still there, waiting patiently for the agent.")
new_recp = IRecipient(desc)
if recp != new_recp and new_recp.route is not None:
return self._update_recipient(recp, new_recp)
### methods for handling the list of notifications ###
@replay.journaled
def notify(self, state, notifications):
'''
Call this to schedule sending partner notification.
'''
def do_append(desc, notifications):
for notification in notifications:
if not isinstance(notification, PendingNotification):
raise ValueError("Expected notify() params to be a list "
"of PendingNotification instance, got %r."
% notification)
key = str(notification.recipient.key)
if key not in desc.pending_notifications:
desc.pending_notifications[key] = list()
desc.pending_notifications[key].append(notification)
return state.agent.update_descriptor(do_append, notifications)
@replay.immutable
def _iter_outbox(self, state):
desc = state.agent.get_descriptor()
return desc.pending_notifications.iteritems()
@replay.immutable
def _get_first_pending(self, state, agent_id):
desc = state.agent.get_descriptor()
pending = desc.pending_notifications.get(agent_id, list())
if pending:
return pending[0]
@replay.journaled
def _remove_notification(self, state, recp, notification):
def do_remove(desc, recp, notification):
try:
desc.pending_notifications[recp.key].remove(notification)
if not desc.pending_notifications[recp.key]:
del(desc.pending_notifications[recp.key])
except (ValueError, KeyError, ):
self.warning("Tried to remove notification %r for "
"agent_id %r from %r, but not found",
notification, recp.key,
desc.pending_notifications)
return state.agent.update_descriptor(do_remove, recp, notification)
@replay.journaled
def _forget_recipient(self, state, recp):
def do_remove(desc, recp):
desc.pending_notifications.pop(str(recp.key))
return state.agent.update_descriptor(do_remove, recp)
@replay.journaled
def _update_recipient(self, state, old, new):
old = IRecipient(old)
new = IRecipient(new)
if old.key != new.key:
raise AttributeError("Tried to subsituted recipient %r with %r, "
"the key should be the same!" % (old, new))
def do_update(desc, recp):
if not desc.pending_notifications.get(recp.key, None):
return
for notification in desc.pending_notifications[recp.key]:
notification.recipient = recp
return state.agent.update_descriptor(do_update, new)
| gpl-2.0 | 1,688,419,642,821,006,000 | 36.638393 | 79 | 0.629225 | false | 4.277524 | false | false | false |
mastacheata/tvheadend-xz.bundle | Contents/Code/__init__.py | 1 | 2752 | import htsp
TITLE = 'XZ'
PREFIX = '/video/xz'
ART = 'art-default.jpg'
ICON = 'tvheadend.png'
ICON_LIVE = 'televisions.png'
ICON_REC = 'rec.png'
tvh = None
def Start():
ObjectContainer.art = R(ART)
HTTP.CacheTime = 1
Log.Debug('XZ start')
global tvh
tvh = TVheadend()
ValidatePrefs()
@route(PREFIX + '/validate')
def ValidatePrefs():
if not Prefs['tvheadend-url']:
Log.Error('Please specify a URL to TVheadend in the settings')
return False
if not Prefs['tvheadend-http-port']:
Log.Error('Please specify the TVheadend HTTP port in the settings')
return False
if not Prefs['tvheadend-login']:
Log.Warning('Please specify your TVheadend username in the settings')
login = ''
# return False
else:
login = Prefs['tvheadend-login']
if not Prefs['tvheadend-password']:
Log.Warning('Please specify your TVheadend password in the settings')
password = ''
# return False
else:
password = Prefs['tvheadend-password']
global tvh
tvh.connect(Prefs['tvheadend-url'], int(Prefs['tvheadend-http-port'])+1)
return tvh.login(login, password)
@handler(PREFIX, TITLE, ICON, ART)
def main_menu():
main = ObjectContainer()
main.title1 = 'XZ'
main.no_cache = True
main.header = None
main.message = None
main.add(DirectoryObject(
key=Callback(epg_menu),
title='EPG / Live TV',
thumb=R(ICON_LIVE),
))
main.add(DirectoryObject(
key=Callback(dvr_menu),
title='Rec Timers',
thumb=R(ICON_REC),
))
return main
@route(PREFIX + '/live')
def epg_menu():
global tvh
tvh.get_channel_list()
epg = ObjectContainer(
)
return epg
@route(PREFIX + '/rec')
def dvr_menu():
dvr = ObjectContainer(
)
return dvr
class TVheadend:
tvh = None
channels = {}
channelNumbers = []
def __init__(self):
pass
def connect(self, host, port):
address = (host, port)
self.tvh = htsp.HTSPClient(address, 'TVheadend Plex Client')
def login(self, login, password):
self.tvh.hello()
response = self.tvh.authenticate(login, password)
if 'noaccess' in response:
Log.Error('Authentication with TVheadend server failed')
return False
else:
return True
def get_channel_list(self):
self.tvh.send('enableAsyncMetadata')
while True:
msg = self.tvh.recv()
if 'error' in msg:
Log.Error(msg['error'])
raise Exception(msg['Error'])
elif 'method' in msg:
Log.Info(msg)
return msg['method']
| lgpl-3.0 | -8,167,687,725,349,745,000 | 21.743802 | 77 | 0.587573 | false | 3.501272 | false | false | false |
praekeltfoundation/ndoh-hub | registrations/tasks.py | 1 | 60994 | import json
import random
import re
import uuid
from datetime import datetime, timedelta
from functools import partial
import phonenumbers
import requests
from celery import chain
from celery.exceptions import SoftTimeLimitExceeded
from celery.task import Task
from celery.utils.log import get_task_logger
from demands import HTTPServiceError
from django.conf import settings
from django.contrib.auth.models import User
from django.utils import timezone, translation
from requests.exceptions import ConnectionError, HTTPError, RequestException
from seed_services_client.identity_store import IdentityStoreApiClient
from seed_services_client.service_rating import ServiceRatingApiClient
from temba_client.exceptions import TembaHttpError
from wabclient.exceptions import AddressException
from ndoh_hub import utils
from ndoh_hub.celery import app
from ndoh_hub.utils import rapidpro, redis
from .models import ClinicCode, JembiSubmission, Registration, Source, WhatsAppContact
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
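# Shared service clients, configured from Django settings.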
is_client = IdentityStoreApiClient(
api_url=settings.IDENTITY_STORE_URL, auth_token=settings.IDENTITY_STORE_TOKEN
)
sr_client = ServiceRatingApiClient(
api_url=settings.SERVICE_RATING_URL, auth_token=settings.SERVICE_RATING_TOKEN
)
def get_risk_status(reg_type, mom_dob, edd):
""" Determine the risk level of the mother """
# high risk if postbirth registration
if "postbirth" in reg_type:
return "high"
# high risk if age < 18
age = utils.get_mom_age(utils.get_today(), mom_dob)
if age < 18:
return "high"
# high risk if registering after 20 weeks pregnant
weeks = utils.get_pregnancy_week(utils.get_today(), edd)
if weeks >= 20:
return "high"
# otherwise normal risk
return "normal"
class HTTPRetryMixin(object):
"""
    A mixin for exponential delay retries on retriable HTTP errors.
"""
max_retries = 10
delay_factor = 1
jitter_percentage = 0.25
def on_failure(self, exc, task_id, args, kwargs, einfo):
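        # Exponential backoff with jitter: the delay doubles on each retry,
        # scaled by delay_factor, plus up to jitter_percentage random jitter
        # to avoid synchronized retries. Only 5xx responses and connection
        # errors are retried, up to max_retries attempts.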
delay = (2 ** self.request.retries) * self.delay_factor
delay *= 1 + (random.random() * self.jitter_percentage)
if (
isinstance(exc, HTTPError)
and self.request.retries < self.max_retries
and 500 <= exc.response.status_code < 600
):
raise self.retry(countdown=delay, exc=exc)
if isinstance(exc, ConnectionError) and self.request.retries < self.max_retries:
raise self.retry(countdown=delay, exc=exc)
class ValidateSubscribe(Task):
""" Task to validate a registration model entry's registration
data.
"""
name = "ndoh_hub.registrations.tasks.validate_subscribe"
log = get_task_logger(__name__)
# Validation checks
def check_lang(self, data_fields, registration):
if "language" not in data_fields:
return ["Language is missing from data"]
elif not utils.is_valid_lang(registration.data["language"]):
return ["Language not a valid option"]
else:
return []
def check_mom_dob(self, data_fields, registration):
if "mom_dob" not in data_fields:
return ["Mother DOB missing"]
elif not utils.is_valid_date(registration.data["mom_dob"]):
return ["Mother DOB invalid"]
else:
return []
def check_edd(self, data_fields, registration):
if "edd" not in data_fields:
return ["Estimated Due Date missing"]
elif not utils.is_valid_edd(registration.data["edd"]):
return ["Estimated Due Date invalid"]
else:
return []
def check_baby_dob(self, data_fields, registration):
if "baby_dob" not in data_fields:
return ["Baby Date of Birth missing"]
elif not utils.is_valid_date(registration.data["baby_dob"]):
return ["Baby Date of Birth invalid"]
else:
return []
def check_operator_id(self, data_fields, registration):
if "operator_id" not in data_fields:
return ["Operator ID missing"]
elif not utils.is_valid_uuid(registration.data["operator_id"]):
return ["Operator ID invalid"]
else:
return []
def check_msisdn_registrant(self, data_fields, registration):
if "msisdn_registrant" not in data_fields:
return ["MSISDN of Registrant missing"]
elif not utils.is_valid_msisdn(registration.data["msisdn_registrant"]):
return ["MSISDN of Registrant invalid"]
else:
return []
def check_msisdn_device(self, data_fields, registration):
if "msisdn_device" not in data_fields:
return ["MSISDN of device missing"]
elif not utils.is_valid_msisdn(registration.data["msisdn_device"]):
return ["MSISDN of device invalid"]
else:
return []
def check_faccode(self, data_fields, registration):
if "faccode" not in data_fields:
return ["Facility (clinic) code missing"]
elif not utils.is_valid_faccode(registration.data["faccode"]):
return ["Facility code invalid"]
else:
return []
def check_consent(self, data_fields, registration):
if "consent" not in data_fields:
return ["Consent is missing"]
elif registration.data["consent"] is not True:
return ["Cannot continue without consent"]
else:
return []
def check_sa_id_no(self, data_fields, registration):
if "sa_id_no" not in data_fields:
return ["SA ID number missing"]
elif not utils.is_valid_sa_id_no(registration.data["sa_id_no"]):
return ["SA ID number invalid"]
else:
return []
def check_passport_no(self, data_fields, registration):
if "passport_no" not in data_fields:
return ["Passport number missing"]
elif not utils.is_valid_passport_no(registration.data["passport_no"]):
return ["Passport number invalid"]
else:
return []
def check_passport_origin(self, data_fields, registration):
if "passport_origin" not in data_fields:
return ["Passport origin missing"]
elif not utils.is_valid_passport_origin(registration.data["passport_origin"]):
return ["Passport origin invalid"]
else:
return []
def check_id(self, data_fields, registration):
if "id_type" not in data_fields:
return ["ID type missing"]
elif not utils.is_valid_id_type(registration.data["id_type"]):
return ["ID type should be one of {}".format(utils.ID_TYPES)]
else:
id_errors = []
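            # Each ID type requires its own set of supporting fields to be
            # present and valid.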
if registration.data["id_type"] == "sa_id":
id_errors += self.check_sa_id_no(data_fields, registration)
id_errors += self.check_mom_dob(data_fields, registration)
elif registration.data["id_type"] == "passport":
id_errors += self.check_passport_no(data_fields, registration)
id_errors += self.check_passport_origin(data_fields, registration)
elif registration.data["id_type"] == "none":
id_errors += self.check_mom_dob(data_fields, registration)
return id_errors
# Validate
def validate(self, registration):
""" Validates that all the required info is provided for a
registration.
"""
self.log.info("Starting registration validation")
validation_errors = []
# Check if registrant_id is a valid UUID
if not utils.is_valid_uuid(registration.registrant_id):
validation_errors += ["Invalid UUID registrant_id"]
# Check that required fields are provided and valid
data_fields = registration.data.keys()
if "pmtct_prebirth" in registration.reg_type:
validation_errors += self.check_lang(data_fields, registration)
validation_errors += self.check_mom_dob(data_fields, registration)
validation_errors += self.check_edd(data_fields, registration)
validation_errors += self.check_operator_id(data_fields, registration)
elif "pmtct_postbirth" in registration.reg_type:
validation_errors += self.check_lang(data_fields, registration)
validation_errors += self.check_mom_dob(data_fields, registration)
validation_errors += self.check_baby_dob(data_fields, registration)
validation_errors += self.check_operator_id(data_fields, registration)
elif "nurseconnect" in registration.reg_type:
validation_errors += self.check_faccode(data_fields, registration)
validation_errors += self.check_operator_id(data_fields, registration)
validation_errors += self.check_msisdn_registrant(data_fields, registration)
validation_errors += self.check_msisdn_device(data_fields, registration)
validation_errors += self.check_lang(data_fields, registration)
elif registration.reg_type in ["momconnect_prebirth", "whatsapp_prebirth"]:
# Checks that apply to clinic, chw, public
validation_errors += self.check_operator_id(data_fields, registration)
validation_errors += self.check_msisdn_registrant(data_fields, registration)
validation_errors += self.check_msisdn_device(data_fields, registration)
validation_errors += self.check_lang(data_fields, registration)
validation_errors += self.check_consent(data_fields, registration)
# Checks that apply to clinic, chw
if registration.source.authority in ["hw_full", "hw_partial"]:
validation_errors += self.check_id(data_fields, registration)
# Checks that apply to clinic only
if registration.source.authority == "hw_full":
validation_errors += self.check_edd(data_fields, registration)
validation_errors += self.check_faccode(data_fields, registration)
elif registration.reg_type in ("momconnect_postbirth", "whatsapp_postbirth"):
if registration.source.authority == "hw_full":
validation_errors += self.check_operator_id(data_fields, registration)
validation_errors += self.check_msisdn_registrant(
data_fields, registration
)
validation_errors += self.check_msisdn_device(data_fields, registration)
validation_errors += self.check_lang(data_fields, registration)
validation_errors += self.check_consent(data_fields, registration)
validation_errors += self.check_id(data_fields, registration)
validation_errors += self.check_baby_dob(data_fields, registration)
validation_errors += self.check_faccode(data_fields, registration)
else:
validation_errors += [
"Momconnect postbirth not yet supported for public or CHW"
]
elif registration.reg_type == "loss_general":
validation_errors.append("Loss general not yet supported")
# Evaluate if there were any problems, save and return
if len(validation_errors) == 0:
self.log.info(
"Registration validated successfully - updating " "registration object"
)
registration.validated = True
registration.save()
self.log.info("Registration object updated.")
return True
else:
self.log.info(
"Registration validation failed - updating " "registration object"
)
registration.data["invalid_fields"] = validation_errors
registration.save()
self.log.info("Registration object updated.")
return False
def create_popi_subscriptionrequest(self, registration):
"""
Creates a new subscription request for the POPI message set. This
message set tells the user how to access the POPI required services.
This should only be sent for Clinic or CHW registrations.
"""
if registration.reg_type not in (
"momconnect_prebirth",
"momconnect_postbirth",
"whatsapp_prebirth",
"whatsapp_postbirth",
) or registration.source.authority not in ["hw_partial", "hw_full"]:
return "POPI Subscription request not created"
self.log.info("Fetching messageset")
r = ""
msgset_id, msgset_schedule, next_sequence_number = r
self.log.info("Creating subscription request")
from .models import SubscriptionRequest
SubscriptionRequest.objects.create(
identity=registration.registrant_id,
messageset=msgset_id,
next_sequence_number=next_sequence_number,
lang=registration.data["language"],
schedule=msgset_schedule,
)
self.log.info("POPI Subscription request created")
return "POPI Subscription Request created"
def create_service_info_subscriptionrequest(self, registration):
"""
Creates a new subscription request for the service info message set.
This should only be created for momconnect whatsapp registrations.
"""
if registration.reg_type not in (
"whatsapp_prebirth",
"whatsapp_postbirth",
) or registration.source.authority in ["hw_partial", "patient"]:
return
self.log.info("Fetching messageset")
if registration.reg_type == "whatsapp_prebirth":
weeks = utils.get_pregnancy_week(
utils.get_today(), registration.data["edd"]
)
else:
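            # Offset the postbirth age by 40 weeks (assumed length of a
            # full-term pregnancy) so the service-info schedule continues
            # from the prebirth week numbering.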
weeks = (
utils.get_baby_age(utils.get_today(), registration.data["baby_dob"])
+ 40
)
msgset_short_name = utils.get_messageset_short_name(
"whatsapp_service_info", registration.source.authority, weeks
)
r = utils.get_messageset_schedule_sequence(msgset_short_name, weeks)
msgset_id, msgset_schedule, next_sequence_number = r
self.log.info("Creating subscription request")
from .models import SubscriptionRequest
SubscriptionRequest.objects.create(
identity=registration.registrant_id,
messageset=msgset_id,
next_sequence_number=next_sequence_number,
lang=registration.data["language"],
schedule=msgset_schedule,
)
self.log.info("Service Info Subscription request created")
# Create SubscriptionRequest
def create_subscriptionrequests(self, registration):
""" Create SubscriptionRequest(s) based on the
validated registration.
"""
self.log.info("Starting subscriptionrequest creation")
self.log.info("Calculating weeks")
weeks = 1 # default week number
# . calculate weeks along
if registration.reg_type in (
"momconnect_prebirth",
"whatsapp_prebirth",
) and registration.source.authority not in ["hw_partial", "patient"]:
weeks = utils.get_pregnancy_week(
utils.get_today(), registration.data["edd"]
)
elif "pmtct_prebirth" in registration.reg_type:
weeks = utils.get_pregnancy_week(
utils.get_today(), registration.data["edd"]
)
elif "pmtct_postbirth" in registration.reg_type:
weeks = utils.get_baby_age(utils.get_today(), registration.data["baby_dob"])
elif (
registration.reg_type in ("momconnect_postbirth", "whatsapp_postbirth")
and registration.source.authority == "hw_full"
):
weeks = utils.get_baby_age(utils.get_today(), registration.data["baby_dob"])
# . determine messageset shortname
self.log.info("Determining messageset shortname")
short_name = utils.get_messageset_short_name(
registration.reg_type, registration.source.authority, weeks
)
# . determine sbm details
self.log.info("Determining SBM details")
r = utils.get_messageset_schedule_sequence(short_name, weeks)
msgset_id, msgset_schedule, next_sequence_number = r
subscription = {
"identity": registration.registrant_id,
"messageset": msgset_id,
"next_sequence_number": next_sequence_number,
"lang": registration.data["language"],
"schedule": msgset_schedule,
}
self.log.info("Creating SubscriptionRequest object")
from .models import SubscriptionRequest
SubscriptionRequest.objects.create(**subscription)
self.log.info("SubscriptionRequest created")
return "SubscriptionRequest created"
# Create ServiceRating Invite
def create_servicerating_invite(self, registration):
""" Create a new servicerating invite
"""
invite_data = {
"identity": registration.registrant_id
# could provide "invite" to override servicerating defaults
}
self.log.info("Creating ServiceRating invite")
response = sr_client.create_invite(invite_data)
self.log.info("Created ServiceRating invite")
return response
# Set risk status
def set_risk_status(self, registration):
""" Determine the risk status of the mother and save it to her identity
"""
self.log.info("Calculating risk level")
risk = get_risk_status(
registration.reg_type,
registration.data["mom_dob"],
registration.data["edd"],
)
self.log.info("Reading the identity")
identity = is_client.get_identity(registration.registrant_id)
details = identity["details"]
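        # Merge the risk status into any existing pmtct details rather than
        # overwriting them.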
if "pmtct" in details:
details["pmtct"]["risk_status"] = risk
else:
details["pmtct"] = {"risk_status": risk}
self.log.info("Saving risk level to the identity")
is_client.update_identity(registration.registrant_id, {"details": details})
self.log.info("Identity updated with risk level")
return risk
def opt_in_identity(self, registration):
"""
Opts in the identity if they've previously opted out
"""
try:
msisdn = registration.data["msisdn_registrant"]
except KeyError:
return
opt_in_identity.delay(
registration.registrant_id, msisdn, registration.source_id
)
def send_welcome_message(self, registration):
"""
If this is a prebirth momconnect registration, send the welcome message
"""
if registration.reg_type not in ("momconnect_prebirth", "whatsapp_prebirth"):
return
if registration.source.authority != "hw_full":
# Only clinic registrations should get this message
return
try:
msisdn = registration.data["msisdn_registrant"]
language = registration.data["language"]
except KeyError:
return
send_welcome_message.delay(
language=language,
channel="WHATSAPP" if "whatsapp" in registration.reg_type else "JUNE_TEXT",
msisdn=msisdn,
identity_id=registration.registrant_id,
)
# Run
def run(self, registration_id, **kwargs):
""" Sets the registration's validated field to True if
validation is successful.
"""
self.log = self.get_logger(**kwargs)
self.log.info("Looking up the registration")
from .models import Registration
registration = Registration.objects.get(id=registration_id)
if registration.reg_type == "jembi_momconnect":
            # We do this validation in its own task
return
reg_validates = self.validate(registration)
if reg_validates:
self.create_subscriptionrequests(registration)
self.create_popi_subscriptionrequest(registration)
self.create_service_info_subscriptionrequest(registration)
self.opt_in_identity(registration)
self.send_welcome_message(registration)
# NOTE: disable service rating for now
# if registration.reg_type == "momconnect_prebirth" and\
# registration.source.authority == "hw_full":
# self.create_servicerating_invite(registration)
if "pmtct" in registration.reg_type:
self.set_risk_status(registration)
self.log.info("Scheduling registration push to Jembi")
jembi_task = BasePushRegistrationToJembi.get_jembi_task_for_registration(
registration
)
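            # Push the registration to Jembi first, then strip the personally
            # identifiable fields once it has been reported.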
task = chain(
jembi_task.si(str(registration.pk)),
remove_personally_identifiable_fields.si(str(registration.pk)),
)
task.delay()
self.log.info("Task executed successfully")
return True
else:
self.log.info("Task terminated due to validation issues")
return False
validate_subscribe = ValidateSubscribe()
@app.task()
def remove_personally_identifiable_fields(registration_id):
"""
Saves the personally identifiable fields to the identity, and then
removes them from the registration object.
"""
registration = Registration.objects.get(id=registration_id)
fields = set(
(
"id_type",
"mom_dob",
"passport_no",
"passport_origin",
"sa_id_no",
"language",
"consent",
"mom_given_name",
"mom_family_name",
"mom_email",
)
).intersection(registration.data.keys())
if fields:
identity = is_client.get_identity(registration.registrant_id)
for field in fields:
# Language is stored as 'lang_code' in the Identity Store
if field == "language":
identity["details"]["lang_code"] = registration.data.pop(field)
continue
identity["details"][field] = registration.data.pop(field)
is_client.update_identity(identity["id"], {"details": identity["details"]})
msisdn_fields = set(("msisdn_device", "msisdn_registrant")).intersection(
registration.data.keys()
)
for field in msisdn_fields:
msisdn = registration.data.pop(field)
identities = is_client.get_identity_by_address("msisdn", msisdn)
try:
field_identity = next(identities["results"])
except StopIteration:
field_identity = is_client.create_identity(
{"details": {"addresses": {"msisdn": {msisdn: {}}}}}
)
field = field.replace("msisdn", "uuid")
registration.data[field] = field_identity["id"]
registration.save()
def add_personally_identifiable_fields(registration):
"""
Sometimes we might want to rerun the validation and subscription, and for
that we want to put back any fields that we placed on the identity when
anonymising the registration.
    This function only adds those fields to the in-memory 'registration'
    object; it does not save them to the database.
"""
identity = is_client.get_identity(registration.registrant_id)
if not identity:
return registration
fields = (
set(
(
"id_type",
"mom_dob",
"passport_no",
"passport_origin",
"sa_id_no",
"lang_code",
"consent",
"mom_given_name",
"mom_family_name",
"mom_email",
)
)
.intersection(identity["details"].keys())
.difference(registration.data.keys())
)
for field in fields:
if field == "lang_code":
registration.data["language"] = identity["details"][field]
continue
registration.data[field] = identity["details"][field]
uuid_fields = set(("uuid_device", "uuid_registrant")).intersection(
registration.data.keys()
)
for field in uuid_fields:
msisdn = utils.get_identity_msisdn(registration.data[field])
if msisdn:
field = field.replace("uuid", "msisdn")
registration.data[field] = msisdn
return registration
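# Editor's note: hedged example of re-hydrating an anonymised registration
# (illustrative only; `reg` is an assumed Registration instance):
#
#     reg = add_personally_identifiable_fields(reg)
#     reg.data.get("mom_dob")  # restored from the identity, if it was stored
#
# The restored fields live only on the in-memory object and are not saved.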
class ValidateSubscribeJembiAppRegistration(HTTPRetryMixin, ValidateSubscribe):
"""
Validates and creates subscriptions for registrations coming from the
Jembi application.
"""
def is_primary_address(self, addr_type, address, identity):
"""
Returns whether `address` is the primary address for `identity`
Arguments:
addr_type {string} -- The type of address to check for
address {string} -- The address to check for
identity {dict} -- The identity that has addresses to check
Returns:
A bool which is `True` when the address is the identity's primary
address.
"""
return all(
map(
lambda addr: address == addr[0] or not addr[1].get("default"),
identity.get("details", {})
.get("addresses", {})
.get(addr_type, {})
.items(),
)
)
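    # Editor's note: worked example with assumed data. For the identity
    # below, the default msisdn is "+27820001001", so
    # is_primary_address("msisdn", "+27820001001", identity) is True, while
    # "+27820001002" yields False because the default entry fails the
    # `not addr[1].get("default")` branch:
    #
    #     identity = {"details": {"addresses": {"msisdn": {
    #         "+27820001001": {"default": True},
    #         "+27820001002": {},
    #     }}}}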
def get_or_update_identity_by_address(self, address):
"""
Gets the first identity with the given primary address, or if no
identity exists, creates an identity with the given address
Arguments:
address {string} -- The MSISDN to search for
Returns:
A dict representing the identity for `address`
"""
identities = filter(
partial(self.is_primary_address, "msisdn", address),
is_client.get_identity_by_address("msisdn", address)["results"],
)
try:
return next(identities)
except StopIteration:
identity = {
"details": {
"default_addr_type": "msisdn",
"addresses": {"msisdn": {address: {"default": True}}},
}
}
return is_client.create_identity(identity)
def is_opted_out(self, identity, address):
"""
Returns whether or not an address on an identity is opted out
"""
addr_details = identity["details"]["addresses"]["msisdn"][address]
return "optedout" in addr_details and addr_details["optedout"] is True
def opt_in(self, identity, address, source):
"""
Opts in a previously opted out identity
"""
optin = {
"identity": identity["id"],
"address_type": "msisdn",
"address": address,
"request_source": source.name,
"requestor_source_id": source.id,
}
return is_client.create_optin(optin)
def fail_validation(self, registration, reason):
"""
Validation for the registration has failed
"""
registration.data["invalid_fields"] = reason
registration.save()
return self.send_webhook(registration)
def fail_error(self, registration, reason):
"""
Uncaught error that caused the registration to fail
"""
registration.data["error_data"] = reason
registration.save()
return self.send_webhook(registration)
def registration_success(self, registration):
"""
Registration has been successfully processed
"""
return self.send_webhook(registration)
def send_webhook(self, registration):
"""
Sends a webhook if one is specified for the given registration
Also sends the status over websocket
"""
url = registration.data.get("callback_url", None)
token = registration.data.get("callback_auth_token", None)
headers = {}
if token is not None:
headers["Authorization"] = "Bearer {}".format(token)
if url is not None:
http_request_with_retries.delay(
method="POST", url=url, headers=headers, payload=registration.status
)
def is_registered_on_whatsapp(self, address):
"""
Returns whether or not the number is recognised on wassup
"""
r = requests.post(
urljoin(settings.ENGAGE_URL, "v1/contacts"),
json={"blocking": "wait", "contacts": [address]},
headers={"Authorization": "Bearer {}".format(settings.ENGAGE_TOKEN)},
)
r.raise_for_status()
data = r.json()
existing = filter(lambda d: d.get("status", False) == "valid", data["contacts"])
return any(existing)
def create_pmtct_registration(self, registration, operator):
if "whatsapp" in registration.reg_type:
reg_type = "whatsapp_pmtct_prebirth"
else:
reg_type = "pmtct_prebirth"
data = {
"language": registration.data["language"],
"mom_dob": registration.data["mom_dob"],
"edd": registration.data["edd"],
"operator_id": operator["id"],
}
Registration.objects.create(
reg_type=reg_type,
registrant_id=registration.registrant_id,
source=registration.source,
created_by=registration.created_by,
data=data,
)
def is_identity_subscribed(self, identity, regex):
"""
Checks to see if the identity is subscribed to the specified
messageset. Check is done on the short name of the messageset matching
the given regular expression
"""
active_subs = utils.sbm_client.get_subscriptions(
{"identity": identity["id"], "active": True}
)["results"]
messagesets = utils.sbm_client.get_messagesets()["results"]
messagesets = {ms["id"]: ms["short_name"] for ms in messagesets}
for sub in active_subs:
short_name = messagesets[sub["messageset"]]
if re.search(regex, short_name):
return True
return False
def is_valid_clinic_code(self, code):
"""
Checks to see if the specified clinic code is recognised or not
"""
return ClinicCode.objects.filter(code=code).exists()
def run(self, registration_id, **kwargs):
registration = Registration.objects.get(id=registration_id)
msisdn_registrant = registration.data["msisdn_registrant"]
registrant = self.get_or_update_identity_by_address(msisdn_registrant)
device = self.get_or_update_identity_by_address(
registration.data["msisdn_device"]
)
registration.registrant_id = registrant["id"]
# Check for existing subscriptions
if self.is_identity_subscribed(registrant, r"prebirth\.hw_full"):
self.fail_validation(
registration,
{
"mom_msisdn": "Number is already subscribed to MomConnect "
"messaging"
},
)
return
# Check for previously opted out
if self.is_opted_out(registrant, msisdn_registrant):
if registration.data["mom_opt_in"]:
self.opt_in(registrant, msisdn_registrant, registration.source)
else:
self.fail_validation(
registration,
{
"mom_opt_in": "Mother has previously opted out and has "
"not chosen to opt back in again"
},
)
return
# Determine WhatsApp vs SMS registration
registration.data["registered_on_whatsapp"] = self.is_registered_on_whatsapp(
msisdn_registrant
)
if (
registration.data["mom_whatsapp"]
and registration.data["registered_on_whatsapp"]
):
registration.reg_type = "whatsapp_prebirth"
else:
registration.reg_type = "momconnect_prebirth"
# Check clinic code
if not self.is_valid_clinic_code(registration.data["faccode"]):
self.fail_validation(
registration, {"clinic_code": "Not a recognised clinic code"}
)
return
registration.validated = True
registration.save()
# Create subscriptions
self.create_subscriptionrequests(registration)
self.create_popi_subscriptionrequest(registration)
self.create_service_info_subscriptionrequest(registration)
# Send welcome message
send_welcome_message(
language=registration.data["language"],
channel="WHATSAPP" if "whatsapp" in registration.reg_type else "JUNE_TEXT",
msisdn=msisdn_registrant,
identity_id=registration.registrant_id,
)
# Push to Jembi and remove personally identifiable information
jembi_task = BasePushRegistrationToJembi.get_jembi_task_for_registration(
registration
)
task = chain(
jembi_task.si(str(registration.pk)),
remove_personally_identifiable_fields.si(str(registration.pk)),
)
task.delay()
# Create PMTCT registration if required
if registration.data["mom_pmtct"]:
self.create_pmtct_registration(registration, device)
# Send success webhook
self.registration_success(registration)
def on_failure(self, exc, task_id, args, kwargs, einfo):
super(ValidateSubscribeJembiAppRegistration, self).on_failure(
exc, task_id, args, kwargs, einfo
)
# Send failure webhook
registration_id = kwargs.get("registration_id", None) or args[0]
registration = Registration.objects.get(id=registration_id)
self.fail_error(
registration,
{
"type": einfo.type.__name__,
"message": str(exc),
"traceback": einfo.traceback,
},
)
validate_subscribe_jembi_app_registration = ValidateSubscribeJembiAppRegistration()
@app.task(
autoretry_for=(RequestException, SoftTimeLimitExceeded, TembaHttpError),
retry_backoff=True,
max_retries=15,
acks_late=True,
soft_time_limit=10,
time_limit=15,
)
def submit_jembi_registration_to_rapidpro(data):
rapidpro.create_flow_start(
settings.RAPIDPRO_JEMBI_REGISTRATION_FLOW,
urns=[f"whatsapp:{data['msisdn_registrant'].strip('+')}"],
extra=data,
)
class BasePushRegistrationToJembi(object):
"""
Base class that contains helper functions for pushing registration data
to Jembi.
"""
name = "ndoh_hub.registrations.tasks.base_push_registration_to_jembi"
log = get_task_logger(__name__)
def get_patient_id(
self, id_type, id_no=None, passport_origin=None, mom_msisdn=None
):
if id_type == "sa_id":
return id_no + "^^^ZAF^NI"
elif id_type == "passport":
return id_no + "^^^" + passport_origin.upper() + "^PPN"
elif mom_msisdn:
return mom_msisdn.replace("+", "") + "^^^ZAF^TEL"
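    # Editor's note: illustrative outputs (values are made up):
    #   get_patient_id("sa_id", id_no="8001015009087")
    #       -> "8001015009087^^^ZAF^NI"
    #   get_patient_id("passport", id_no="A1234567", passport_origin="zw")
    #       -> "A1234567^^^ZW^PPN"
    #   get_patient_id("none", mom_msisdn="+27820001001")
    #       -> "27820001001^^^ZAF^TEL"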
def get_dob(self, mom_dob):
if mom_dob is not None:
return mom_dob.strftime("%Y%m%d")
else:
return None
def get_today(self):
return datetime.today()
def get_timestamp(self, registration):
return registration.created_at.strftime("%Y%m%d%H%M%S")
@staticmethod
def get_jembi_task_for_registration(registration):
"""
NOTE: this is a convenience method for getting the relevant
Jembi task to fire for a registration.
"""
if "nurseconnect" in registration.reg_type:
return push_nurse_registration_to_jembi
if "pmtct" in registration.reg_type:
return push_pmtct_registration_to_jembi
return push_registration_to_jembi
@staticmethod
def get_authority_from_source(source):
"""
NOTE: this is a convenience method to map the new "source"
back to ndoh-control's "authority" fields to maintain
backwards compatibility with existing APIs
"""
source_name = source.name.upper()
if source_name.startswith("EXTERNAL CHW"):
# catch all external chw sources
return "chw"
elif source_name.startswith("EXTERNAL CLINIC"):
# catch all external clinic sources
return "clinic"
else:
return {
"PUBLIC USSD APP": "personal",
"OPTOUT USSD APP": "optout",
"CLINIC USSD APP": "clinic",
"CHW USSD APP": "chw",
"NURSE USSD APP": "nurse",
"PMTCT USSD APP": "pmtct",
"PUBLIC WHATSAPP APP": "personal",
"CLINIC WHATSAPP APP": "clinic",
}.get(source_name)
def run(self, registration_id, **kwargs):
from .models import Registration
registration = Registration.objects.get(pk=registration_id)
authority = self.get_authority_from_source(registration.source)
if authority is None:
self.log.error(
"Unable to establish authority for source %s. Skipping."
% (registration.source)
)
return
json_doc = self.build_jembi_json(registration)
request_to_jembi_api.delay(self.URL, json_doc)
class PushRegistrationToJembi(BasePushRegistrationToJembi, Task):
""" Task to push registration data to Jembi
"""
name = "ndoh_hub.registrations.tasks.push_registration_to_jembi"
log = get_task_logger(__name__)
URL = "subscription"
def get_subscription_type(self, authority):
authority_map = {
"personal": 1,
"chw": 2,
"clinic": 3,
"optout": 4,
# NOTE: these are other valid values recognised by Jembi but
# currently not used by us.
# 'babyloss': 5,
# 'servicerating': 6,
# 'helpdesk': 7,
"pmtct": 9,
}
return authority_map[authority]
def get_software_type(self, registration):
""" Get the software type (swt) code Jembi expects """
if registration.data.get("swt", None):
return registration.data.get("swt")
if "whatsapp" in registration.reg_type:
registration.data["swt"] = 7 # USSD4WHATSAPP
registration.save()
return 7
return 1 # Default 1
def transform_language_code(self, lang):
return {
"zul_ZA": "zu",
"xho_ZA": "xh",
"afr_ZA": "af",
"eng_ZA": "en",
"nso_ZA": "nso",
"tsn_ZA": "tn",
"sot_ZA": "st",
"tso_ZA": "ts",
"ssw_ZA": "ss",
"ven_ZA": "ve",
"nbl_ZA": "nr",
}[lang]
def build_jembi_json(self, registration):
""" Compile json to be sent to Jembi. """
self.log.info("Compiling Jembi Json data for PushRegistrationToJembi")
authority = self.get_authority_from_source(registration.source)
id_msisdn = None
if not registration.data.get("msisdn_registrant"):
id_msisdn = utils.get_identity_msisdn(registration.registrant_id)
json_template = {
"mha": registration.data.get("mha", 1),
"swt": self.get_software_type(registration),
"dmsisdn": registration.data.get("msisdn_device"),
"cmsisdn": registration.data.get("msisdn_registrant", id_msisdn),
"id": self.get_patient_id(
registration.data.get("id_type"),
(
registration.data.get("sa_id_no")
if registration.data.get("id_type") == "sa_id"
else registration.data.get("passport_no")
),
# passport_origin may be None if sa_id is used
registration.data.get("passport_origin"),
registration.data.get("msisdn_registrant", id_msisdn),
),
"type": self.get_subscription_type(authority),
"lang": self.transform_language_code(registration.data["language"]),
"encdate": registration.data.get(
"encdate", self.get_timestamp(registration)
),
"faccode": registration.data.get("faccode"),
"dob": (
self.get_dob(
datetime.strptime(registration.data["mom_dob"], "%Y-%m-%d")
)
if registration.data.get("mom_dob")
else None
),
"sid": str(registration.registrant_id),
"eid": str(registration.id),
}
# Self registrations on all lines should use cmsisdn as dmsisdn too
if registration.data.get("msisdn_device") is None:
json_template["dmsisdn"] = registration.data.get(
"msisdn_registrant", id_msisdn
)
if authority == "clinic":
json_template["edd"] = datetime.strptime(
registration.data["edd"], "%Y-%m-%d"
).strftime("%Y%m%d")
return json_template
push_registration_to_jembi = PushRegistrationToJembi()
class PushPmtctRegistrationToJembi(PushRegistrationToJembi, Task):
""" Task to push PMTCT registration data to Jembi
"""
name = "ndoh_hub.registrations.tasks.push_pmtct_registration_to_jembi"
URL = "pmtctSubscription"
def build_jembi_json(self, registration):
json_template = super(PushPmtctRegistrationToJembi, self).build_jembi_json(
registration
)
json_template["risk_status"] = get_risk_status(
registration.reg_type,
registration.data["mom_dob"],
registration.data["edd"],
)
if not json_template.get("faccode"):
related_reg = (
Registration.objects.filter(
validated=True,
registrant_id=registration.registrant_id,
data__has_key="faccode",
)
.exclude(
reg_type__in=(
"whatsapp_pmtct_prebirth",
"pmtct_prebirth",
"whatsapp_pmtct_postbirth",
"pmtct_postbirth",
)
)
.order_by("-created_at")
.first()
)
if related_reg:
json_template["faccode"] = related_reg.data["faccode"]
return json_template
push_pmtct_registration_to_jembi = PushPmtctRegistrationToJembi()
class PushNurseRegistrationToJembi(BasePushRegistrationToJembi, Task):
name = "ndoh_hub.registrations.tasks.push_nurse_registration_to_jembi"
log = get_task_logger(__name__)
URL = "nc/subscription"
def get_persal(self, identity):
details = identity["details"]
return details.get("nurseconnect", {}).get("persal_no")
def get_sanc(self, identity):
details = identity["details"]
return details.get("nurseconnect", {}).get("sanc_reg_no")
def get_software_type(self, registration):
""" Get the software type (swt) code Jembi expects """
if registration.data.get("swt", None):
return registration.data.get("swt")
if "whatsapp" in registration.reg_type:
registration.data["swt"] = 7 # USSD4WHATSAPP
registration.save(update_fields=("data",))
return 7
return 3 # Default 3
def build_jembi_json(self, registration):
"""
Compiles and returns a dictionary representing the JSON that should
be sent to Jembi for the given registration.
"""
self.log.info("Compiling Jembi Json data for PushNurseRegistrationToJembi")
identity = is_client.get_identity(registration.registrant_id)
json_template = {
"mha": 1,
"swt": self.get_software_type(registration),
"type": 7,
"dmsisdn": registration.data["msisdn_device"],
"cmsisdn": registration.data["msisdn_registrant"],
# NOTE: this likely needs to be updated to reflect a change
# in msisdn as `rmsisdn` stands for replacement msisdn
"rmsisdn": None,
"faccode": registration.data["faccode"],
"id": self.get_patient_id(
registration.data.get("id_type"),
(
registration.data.get("sa_id_no")
if registration.data.get("id_type") == "sa_id"
else registration.data.get("passport_no")
),
# passport_origin may be None if sa_id is used
registration.data.get("passport_origin"),
registration.data["msisdn_registrant"],
),
"dob": (
self.get_dob(
datetime.strptime(registration.data["mom_dob"], "%Y-%m-%d")
)
if registration.data.get("mom_db")
else None
),
"persal": self.get_persal(identity),
"sanc": self.get_sanc(identity),
"encdate": self.get_timestamp(registration),
"sid": str(registration.registrant_id),
"eid": str(registration.id),
}
return json_template
push_nurse_registration_to_jembi = PushNurseRegistrationToJembi()
class DeliverHook(Task):
def run(self, target, payload, instance_id=None, hook_id=None, **kwargs):
"""
target: the url to receive the payload.
payload: a python primitive data structure
instance_id: a possibly None "trigger" instance ID
hook_id: the ID of defining Hook object
"""
requests.post(
url=target,
data=json.dumps(payload),
headers={
"Content-Type": "application/json",
"Authorization": "Token %s" % settings.HOOK_AUTH_TOKEN,
},
)
def deliver_hook_wrapper(target, payload, instance, hook):
if instance is not None:
if isinstance(instance.id, uuid.UUID):
instance_id = str(instance.id)
else:
instance_id = instance.id
else:
instance_id = None
kwargs = dict(
target=target, payload=payload, instance_id=instance_id, hook_id=hook.id
)
DeliverHook.apply_async(kwargs=kwargs)
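# Editor's note (assumption): with django-rest-hooks this wrapper is wired
# up through settings so payload delivery happens via the Celery task, e.g.:
#
#     HOOK_DELIVERER = "ndoh_hub.registrations.tasks.deliver_hook_wrapper"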
class HTTPRequestWithRetries(HTTPRetryMixin, Task):
def run(self, method, url, headers, payload):
r = requests.request(method, url, headers=headers, json=payload)
r.raise_for_status()
return r.text
http_request_with_retries = HTTPRequestWithRetries()
@app.task(
autoretry_for=(RequestException, SoftTimeLimitExceeded, HTTPError),
retry_backoff=True,
max_retries=15,
acks_late=True,
soft_time_limit=10,
time_limit=15,
)
def get_whatsapp_contact(msisdn):
"""
Fetches the whatsapp contact ID from the API, and stores it in the database.
Args:
msisdn (str): The MSISDN to perform the lookup for.
"""
if redis.get(f"wacontact:{msisdn}"):
return
with redis.lock(f"wacontact:{msisdn}", timeout=15):
# Try to get existing
try:
contact = (
WhatsAppContact.objects.filter(
created__gt=timezone.now() - timedelta(days=7)
)
.filter(msisdn=msisdn)
.latest("created")
)
return contact.api_format
except WhatsAppContact.DoesNotExist:
pass
# If no existing, fetch status from API and create
try:
whatsapp_id = utils.wab_client.get_address(msisdn)
except AddressException:
whatsapp_id = ""
contact = WhatsAppContact.objects.create(msisdn=msisdn, whatsapp_id=whatsapp_id)
return contact.api_format
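# Editor's note: hedged usage sketch (msisdn is illustrative). A fresh
# (< 7 days old) WhatsAppContact row is returned from the database;
# otherwise the API is consulted and the result cached:
#
#     get_whatsapp_contact.delay("+27820001001")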
@app.task(
autoretry_for=(RequestException, HTTPServiceError, SoftTimeLimitExceeded),
retry_backoff=True,
retry_jitter=True,
max_retries=15,
acks_late=True,
soft_time_limit=10,
time_limit=15,
)
def get_or_create_identity_from_msisdn(context, field):
"""
    Fetches the identity from the identity store using the MSISDN in the
    context field `field`, and adds it to the context as `{field}_identity`.
    Creates the identity if it doesn't exist.
Args:
context (dict): The context to find the msisdn and add the ID in
field (str): The field in the context that contains the MSISDN
"""
msisdn = phonenumbers.parse(context[field], "ZA")
msisdn = phonenumbers.format_number(msisdn, phonenumbers.PhoneNumberFormat.E164)
try:
identity = next(
utils.is_client.get_identity_by_address("msisdn", msisdn)["results"]
)
except StopIteration:
identity = utils.is_client.create_identity(
{
"details": {
"default_addr_type": "msisdn",
"addresses": {"msisdn": {msisdn: {"default": True}}},
}
}
)
context["{}_identity".format(field)] = identity
return context
@app.task(
autoretry_for=(RequestException, HTTPServiceError, SoftTimeLimitExceeded),
retry_backoff=True,
retry_jitter=True,
max_retries=15,
acks_late=True,
soft_time_limit=10,
time_limit=15,
)
def update_identity_from_rapidpro_clinic_registration(context):
"""
Updates the identity's details from the registration details
"""
identity = context["mom_msisdn_identity"]
identity["details"]["lang_code"] = context["mom_lang"]
identity["details"]["consent"] = True
identity["details"]["last_mc_reg_on"] = "clinic"
if context["mom_id_type"] == "sa_id":
identity["details"]["sa_id_no"] = context["mom_sa_id_no"]
identity["details"]["mom_dob"] = datetime.strptime(
context["mom_sa_id_no"][:6], "%y%m%d"
).strftime("%Y-%m-%d")
elif context["mom_id_type"] == "passport":
identity["details"]["passport_no"] = context["mom_passport_no"]
identity["details"]["passport_origin"] = context["mom_passport_origin"]
else: # mom_id_type == none
identity["details"]["mom_dob"] = context["mom_dob"]
if context["registration_type"] == "prebirth":
identity["details"]["last_edd"] = context["mom_edd"]
else: # registration_type == postbirth
identity["details"]["last_baby_dob"] = context["baby_dob"]
context["mom_msisdn_identity"] = utils.is_client.update_identity(
identity["id"], {"details": identity["details"]}
)
return context
@app.task(
autoretry_for=(SoftTimeLimitExceeded,),
retry_backoff=True,
retry_jitter=True,
max_retries=15,
acks_late=True,
soft_time_limit=10,
time_limit=15,
)
def _create_rapidpro_clinic_registration(context):
"""
Creates the registration from the registration details
"""
user = User.objects.get(id=context["user_id"])
source = Source.objects.get(user=user)
reg_type = {
("prebirth", "WhatsApp"): "whatsapp_prebirth",
("prebirth", "SMS"): "momconnect_prebirth",
("postbirth", "WhatsApp"): "whatsapp_postbirth",
("postbirth", "SMS"): "momconnect_postbirth",
}.get((context["registration_type"], context["channel"]))
data = {
"operator_id": context["device_msisdn_identity"]["id"],
"msisdn_registrant": context["mom_msisdn"],
"msisdn_device": context["device_msisdn"],
"id_type": context["mom_id_type"],
"language": context["mom_lang"],
"faccode": context["clinic_code"],
"consent": True,
"mha": 6,
}
if data["id_type"] == "sa_id":
data["sa_id_no"] = context["mom_sa_id_no"]
data["mom_dob"] = datetime.strptime(
context["mom_sa_id_no"][:6], "%y%m%d"
).strftime("%Y-%m-%d")
elif data["id_type"] == "passport":
data["passport_no"] = context["mom_passport_no"]
data["passport_origin"] = context["mom_passport_origin"]
else: # id_type = None
data["mom_dob"] = context["mom_dob"]
if context["registration_type"] == "prebirth":
data["edd"] = context["mom_edd"]
else: # registration_type = postbirth
data["baby_dob"] = context["baby_dob"]
Registration.objects.create(
reg_type=reg_type,
registrant_id=context["mom_msisdn_identity"]["id"],
source=source,
created_by=user,
updated_by=user,
data=data,
)
create_rapidpro_clinic_registration = (
get_or_create_identity_from_msisdn.s("mom_msisdn")
| update_identity_from_rapidpro_clinic_registration.s()
| get_or_create_identity_from_msisdn.s("device_msisdn")
| _create_rapidpro_clinic_registration.s()
)
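# Editor's note: hedged sketch of starting the chain. The context keys are
# taken from the tasks above; the values are purely illustrative:
#
#     create_rapidpro_clinic_registration.delay({
#         "user_id": 1,
#         "mom_msisdn": "+27820001001",
#         "device_msisdn": "+27820001002",
#         "mom_id_type": "none",
#         "mom_dob": "1990-01-01",
#         "mom_lang": "eng_ZA",
#         "clinic_code": "123456",
#         "registration_type": "prebirth",
#         "mom_edd": "2019-12-01",
#         "channel": "WhatsApp",
#     })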
@app.task(
autoretry_for=(RequestException, HTTPServiceError, SoftTimeLimitExceeded),
retry_backoff=True,
retry_jitter=True,
max_retries=15,
acks_late=True,
soft_time_limit=10,
time_limit=15,
)
def update_identity_from_rapidpro_public_registration(context):
"""
Updates the identity's details from the registration details
"""
identity = context["mom_msisdn_identity"]
identity["details"]["lang_code"] = context["mom_lang"]
identity["details"]["consent"] = True
identity["details"]["last_mc_reg_on"] = "public"
context["mom_msisdn_identity"] = utils.is_client.update_identity(
identity["id"], {"details": identity["details"]}
)
return context
@app.task(
autoretry_for=(SoftTimeLimitExceeded,),
retry_backoff=True,
retry_jitter=True,
max_retries=15,
acks_late=True,
soft_time_limit=10,
time_limit=15,
)
def _create_rapidpro_public_registration(context):
user = User.objects.get(id=context["user_id"])
source = Source.objects.get(user=user)
data = {
"operator_id": context["mom_msisdn_identity"]["id"],
"msisdn_registrant": context["mom_msisdn"],
"msisdn_device": context["mom_msisdn"],
"language": context["mom_lang"],
"consent": True,
"registered_on_whatsapp": True,
"mha": 6,
}
Registration.objects.create(
reg_type="whatsapp_prebirth",
registrant_id=context["mom_msisdn_identity"]["id"],
source=source,
created_by=user,
updated_by=user,
data=data,
)
create_rapidpro_public_registration = (
get_or_create_identity_from_msisdn.s("mom_msisdn")
| update_identity_from_rapidpro_public_registration.s()
| _create_rapidpro_public_registration.s()
)
@app.task
def store_jembi_request(url, json_doc):
sub = JembiSubmission.objects.create(path=url, request_data=json_doc)
return sub.id, url, json_doc
@app.task(
autoretry_for=(RequestException, SoftTimeLimitExceeded),
retry_backoff=True,
max_retries=15,
acks_late=True,
soft_time_limit=10,
time_limit=15,
)
def push_to_jembi_api(args):
if not settings.ENABLE_JEMBI_EVENTS:
return
db_id, url, json_doc = args
r = requests.post(
url=urljoin(settings.JEMBI_BASE_URL, url),
headers={"Content-Type": "application/json"},
data=json.dumps(json_doc),
auth=(settings.JEMBI_USERNAME, settings.JEMBI_PASSWORD),
verify=False,
)
r.raise_for_status()
JembiSubmission.objects.filter(pk=db_id).update(
submitted=True,
response_status_code=r.status_code,
response_headers=dict(r.headers),
response_body=r.text,
)
if settings.ENABLE_JEMBI_EVENTS:
request_to_jembi_api = store_jembi_request.s() | push_to_jembi_api.s()
else:
request_to_jembi_api = store_jembi_request.s()
@app.task
def delete_jembi_pii(msisdn):
JembiSubmission.objects.filter(request_data__cmsisdn=msisdn).delete()
@app.task(
autoretry_for=(RequestException, SoftTimeLimitExceeded),
retry_backoff=True,
max_retries=15,
acks_late=True,
soft_time_limit=10,
time_limit=15,
)
def opt_in_identity(identity_id, address, source_id):
"""
Opts in an identity if previously opted out
"""
identity = is_client.get_identity(identity_id)
address_details = (
identity.get("details", {})
.get("addresses", {})
.get("msisdn", {})
.get(address, {})
)
if not address_details.get("optedout"):
return
source = Source.objects.get(id=source_id)
optin = {
"identity": identity_id,
"address_type": "msisdn",
"address": address,
"request_source": source.name,
"requestor_source_id": source.id,
}
return is_client.create_optin(optin)
@app.task(
autoretry_for=(RequestException, SoftTimeLimitExceeded),
retry_backoff=True,
max_retries=15,
acks_late=True,
soft_time_limit=10,
time_limit=15,
)
def send_welcome_message(language, channel, msisdn, identity_id):
"""
Sends the welcome message to the user in the user's language using the
message sender
"""
# Transform to django language code
language = language.lower().replace("_", "-")
with translation.override(language):
translation_context = {
"popi_ussd": settings.POPI_USSD_CODE,
"optout_ussd": settings.OPTOUT_USSD_CODE,
}
if channel == "WHATSAPP":
text = (
translation.ugettext(
"Welcome! MomConnect will send helpful WhatsApp msgs. To stop "
"dial %(optout_ussd)s (Free). To get msgs via SMS instead, "
'reply "SMS" (std rates apply).'
)
% translation_context
)
else:
text = (
translation.ugettext(
"Congratulations on your pregnancy! MomConnect will send you "
"helpful SMS msgs. To stop dial %(optout_ussd)s, for more dial "
"%(popi_ussd)s (Free)."
)
% translation_context
)
utils.ms_client.create_outbound(
{
"to_addr": msisdn,
"to_identity": identity_id,
"content": text,
"channel": "JUNE_TEXT",
"metadata": {},
}
)
@app.task(
autoretry_for=(RequestException, SoftTimeLimitExceeded, TembaHttpError),
retry_backoff=True,
max_retries=15,
acks_late=True,
soft_time_limit=10,
time_limit=15,
)
def submit_third_party_registration_to_rapidpro(username, data):
registration = {
"registered_by": data["hcw_msisdn"],
"language": data["mom_lang"],
"timestamp": data["encdate"],
"source": username,
}
if data.get("mha"):
registration["mha"] = data["mha"]
if data.get("swt"):
registration["swt"] = data["swt"]
if data["authority"] in ("chw", "clinic"):
id_type = registration["id_type"] = data["mom_id_type"]
if id_type == "sa_id":
registration["sa_id_number"] = data["mom_id_no"]
registration["dob"] = data["mom_dob"]
elif id_type == "passport":
registration["passport_origin"] = data["mom_passport_origin"]
registration["passport_number"] = data["mom_id_no"]
elif id_type == "none":
registration["dob"] = data["mom_dob"]
if data["authority"] == "patient":
rapidpro.create_flow_start(
settings.RAPIDPRO_PUBLIC_REGISTRATION_FLOW,
urns=[f"whatsapp:{data['mom_msisdn'].strip('+')}"],
extra=registration,
)
elif data["authority"] == "chw":
rapidpro.create_flow_start(
settings.RAPIDPRO_CHW_REGISTRATION_FLOW,
urns=[f"whatsapp:{data['mom_msisdn'].strip('+')}"],
extra=registration,
)
elif data["authority"] == "clinic":
registration["edd"] = data["mom_edd"]
registration["clinic_code"] = data["clinic_code"]
rapidpro.create_flow_start(
settings.RAPIDPRO_CLINIC_REGISTRATION_FLOW,
urns=[f"whatsapp:{data['mom_msisdn'].strip('+')}"],
extra=registration,
)
| bsd-3-clause | 4,513,856,714,026,675,000 | 34.297454 | 88 | 0.592665 | false | 3.977956 | false | false | false |
indexofire/gork | src/gork/application/article/mixins.py | 1 | 7231 | # -*- coding: utf-8 -*-
from django.http import Http404
from django.template import Template
from django.utils.datastructures import SortedDict
from django.views import generic
from django.views.generic.base import TemplateResponseMixin
from feincms import settings
class ContentModelMixin(object):
"""
    Mixin for ``feincms.models.Base`` subclasses which need some degree of
    additional control over the request-response cycle.
"""
#: Collection of request processors
request_processors = None
#: Collection of response processors
response_processors = None
def setup_request(self, request):
import warnings
warnings.warn(
'%s.setup_request does nothing anymore, and will be removed in'
' FeinCMS v1.8',
DeprecationWarning, stacklevel=2)
@classmethod
def register_request_processor(cls, fn, key=None):
"""
Registers the passed callable as request processor. A request processor
always receives two arguments, the current object and the request.
"""
if cls.request_processors is None:
cls.request_processors = SortedDict()
cls.request_processors[fn if key is None else key] = fn
@classmethod
def register_response_processor(cls, fn, key=None):
"""
Registers the passed callable as response processor. A response
processor always receives three arguments, the current object, the
request and the response.
"""
if cls.response_processors is None:
cls.response_processors = SortedDict()
cls.response_processors[fn if key is None else key] = fn
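# Editor's note: illustrative registration of a response processor (assumed
# usage; `MyPage` stands for any Base subclass using ContentModelMixin):
#
#     def set_no_cache(page, request, response):
#         response["Cache-Control"] = "no-cache"
#
#     MyPage.register_response_processor(set_no_cache)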
class ContentObjectMixin(TemplateResponseMixin):
"""
Mixin for Django's class based views which knows how to handle
``ContentModelMixin`` detail pages.
This is a mixture of Django's ``SingleObjectMixin`` and
``TemplateResponseMixin`` conceptually to support FeinCMS'
``ApplicationContent`` inheritance. It does not inherit
``SingleObjectMixin`` however, because that would set a
precedence for the way how detail objects are determined
(and would f.e. make the page and blog module implementation
harder).
"""
context_object_name = None
def handler(self, request, *args, **kwargs):
if not hasattr(self.request, '_feincms_extra_context'):
self.request._feincms_extra_context = {}
r = self.run_request_processors()
if r:
return r
r = self.process_content_types()
if r:
return r
response = self.render_to_response(self.get_context_data())
r = self.finalize_content_types(response)
if r:
return r
r = self.run_response_processors(response)
if r:
return r
return response
def get_template_names(self):
# According to the documentation this method is supposed to return
# a list. However, we can also return a Template instance...
if isinstance(self.template_name, (Template, list, tuple)):
return self.template_name
if self.template_name:
return [self.template_name]
self.object._needs_templates()
if self.object.template.path:
return [self.object.template.path]
# Hopefully someone else has a usable get_template_names()
# implementation...
return super(ContentObjectMixin, self).get_template_names()
def get_context_data(self, **kwargs):
context = self.request._feincms_extra_context
context[self.context_object_name or 'feincms_object'] = self.object
context.update(kwargs)
return super(ContentObjectMixin, self).get_context_data(**context)
@property
def __name__(self):
"""
Dummy property to make this handler behave like a normal function.
This property is used by django-debug-toolbar
"""
return self.__class__.__name__
def run_request_processors(self):
"""
Before rendering an object, run all registered request processors. A
request processor may peruse and modify the page or the request. It can
also return a ``HttpResponse`` for shortcutting the rendering and
returning that response immediately to the client.
"""
if self.object.request_processors is None:
return
for fn in reversed(self.object.request_processors.values()):
r = fn(self.object, self.request)
if r:
return r
def run_response_processors(self, response):
"""
After rendering an object to a response, the registered response
processors are called to modify the response, eg. for setting cache or
expiration headers, keeping statistics, etc.
"""
if self.object.response_processors is None:
return
for fn in self.object.response_processors.values():
r = fn(self.object, self.request, response)
if r:
return r
def process_content_types(self):
"""
Run the ``process`` method of all content types sporting one
"""
# store eventual Http404 exceptions for re-raising,
# if no content type wants to handle the current self.request
http404 = None
# did any content type successfully end processing?
successful = False
for content in self.object.content.all_of_type(tuple(
self.object._feincms_content_types_with_process)):
try:
r = content.process(self.request, view=self)
if r in (True, False):
successful = r
elif r:
return r
except Http404, e:
http404 = e
if not successful:
if http404:
# re-raise stored Http404 exception
raise http404
extra_context = self.request._feincms_extra_context
if (not settings.FEINCMS_ALLOW_EXTRA_PATH
and extra_context.get('extra_path', ['/'])[-1] != '/'):
raise Http404('Not found (extra_path %r on %r)' % (
extra_context.get('extra_path'),
self.object,
))
def finalize_content_types(self, response):
"""
Runs finalize() on content types having such a method, adds headers and
returns the final response.
"""
for content in self.object.content.all_of_type(tuple(
self.object._feincms_content_types_with_finalize)):
r = content.finalize(self.request, response)
if r:
return r
class ContentView(ContentObjectMixin, generic.DetailView):
def dispatch(self, request, *args, **kwargs):
if request.method.lower() not in self.http_method_names:
return self.http_method_not_allowed(request, *args, **kwargs)
self.request = request
self.args = args
self.kwargs = kwargs
self.object = self.get_object()
return self.handler(request, *args, **kwargs)
| mit | -7,984,136,424,656,516,000 | 33.598086 | 79 | 0.617342 | false | 4.588198 | false | false | false |
SouthForkResearch/CHaMP_Metrics | lib/topoproject.py | 1 | 7281 | from os import path
from xml.etree import ElementTree as ET
from exception import DataException, MissingException
from loghelper import Logger
from lib.util import getAbsInsensitivePath
# TODO: This shares a lot in common with riverscapes.py. Let's look at refactoring
class TopoProject():
# Dictionary with layer { layernname : layerxpath }
LAYERS = {
"DEM": "./Realizations/Topography/TIN[@active='true']/DEM/Path",
"DetrendedDEM": "./Realizations/Topography/TIN[@active='true']/Detrended/Path",
"WaterDepth": "./Realizations/Topography/TIN[@active='true']/WaterDepth/Path",
"ErrorSurface": "./Realizations/Topography/TIN[@active='true']/AssocSurfaces/ErrSurface/Path",
"WaterSurfaceDEM": "./Realizations/Topography/TIN[@active='true']/WaterSurfaceDEM/Path",
"AssocPointQuality": "./Realizations/Topography/TIN[@active='true']/AssocSurfaces/PointQuality3D/Path",
"AssocSlope": "./Realizations/Topography/TIN[@active='true']/AssocSurfaces/Slope/Path",
"AssocRough": "./Realizations/Topography/TIN[@active='true']/AssocSurfaces/Roughness/Path",
"AssocPointDensity": "./Realizations/Topography/TIN[@active='true']/AssocSurfaces/PointDensity/Path",
"AssocInterpolationError": "./Realizations/Topography/TIN[@active='true']/AssocSurfaces/InterpolationError/Path",
"Topo_Points": "./Realizations/SurveyData[@projected='true']/Vector[@id='topo_points']/Path",
"StreamFeatures": "./Realizations/SurveyData[@projected='true']/Vector[@id='stream_features']/Path",
"EdgeofWater_Points": "./Realizations/SurveyData[@projected='true']/Vector[@id='eow_points']/Path",
"Control_Points": "./Realizations/SurveyData[@projected='true']/Vector[@id='control_points']/Path",
"Error_Points": "./Realizations/SurveyData[@projected='true']/Vector[@id='error_points']/Path",
"Breaklines": "./Realizations/SurveyData[@projected='true']/Vector[@id='breaklines']/Path",
"WaterExtent": "./Realizations/Topography/TIN[@active='true']/Stages/Vector[@stage='wetted'][@type='extent']/Path",
"BankfullExtent": "./Realizations/Topography/TIN[@active='true']/Stages/Vector[@stage='bankfull'][@type='extent']/Path",
"WettedIslands": "./Realizations/Topography/TIN[@active='true']/Stages/Vector[@stage='wetted'][@type='islands']/Path",
"BankfullIslands": "./Realizations/Topography/TIN[@active='true']/Stages/Vector[@stage='bankfull'][@type='islands']/Path",
"ChannelUnits": "./Realizations/Topography/TIN[@active='true']/ChannelUnits/Path",
"Thalweg": "./Realizations/Topography/TIN[@active='true']/Thalweg/Path",
"WettedCenterline": "./Realizations/Topography/TIN[@active='true']/Stages/Vector[@stage='wetted'][@type='centerline']/Path",
"BankfullCenterline": "./Realizations/Topography/TIN[@active='true']/Stages/Vector[@stage='bankfull'][@type='centerline']/Path",
"WettedCrossSections": "./Realizations/Topography/TIN[@active='true']/Stages/Vector[@stage='wetted'][@type='crosssections']/Path",
"BankfullCrossSections": "./Realizations/Topography/TIN[@active='true']/Stages/Vector[@stage='bankfull'][@type='crosssections']/Path",
"SurveyExtent": "./Realizations/SurveyData/SurveyExtents/Vector[@active='true']/Path", #MR?
"ControlPoints": "./Realizations/SurveyData/Vector[@id='control_points']/Path",
"TopoTin": "./Realizations/Topography/TIN[@active='true']/Path",
"Survey_Extent": "./Realizations/SurveyData[@projected='true']/SurveyExtents/Vector[@id='survey_extent']/Path"} #KMW
def __init__(self, sProjPath):
"""
:param sProjPath: Either the folder containing the project.rs.xml or the filepath of the actual project.rs.xml
"""
log = Logger('TopoProject')
try:
if path.isfile(sProjPath):
self.projpath = path.dirname(sProjPath)
self.projpathxml = sProjPath
elif path.isdir(sProjPath):
self.projpath = sProjPath
self.projpathxml = path.join(sProjPath, "project.rs.xml")
else:
raise MissingException("No project file or directory with the name could be found: {}".format(sProjPath))
except Exception, e:
raise MissingException("No project file or directory with the name could be found: {}".format(sProjPath))
self.isrsproject = False
if path.isfile(self.projpathxml):
log.info("Attempting to load project file: {}".format(self.projpathxml))
self.isrsproject = True
try:
self.domtree = ET.parse(self.projpathxml)
except ET.ParseError, e:
raise DataException("project.rs.xml exists but could not be parsed.")
self.domroot = self.domtree.getroot()
log.info("XML Project file loaded")
def getdir(self, layername):
return path.dirname(self.getpath(layername))
def getpath(self, layername):
"""
        Turn the stored relative path for ``layername`` into an absolute one.
        :param layername: key into TopoProject.LAYERS
        :return: absolute filesystem path to the layer
"""
if layername not in TopoProject.LAYERS:
raise DataException("'{}' is not a valid layer name".format(layername))
try:
node = self.domroot.find(TopoProject.LAYERS[layername]).text.replace("\\", path.sep).replace("/", path.sep)
except Exception, e:
raise DataException("Error retrieving layer '{}' from project file.".format(layername))
if node is not None:
finalpath = path.join(self.projpath, node)
if not path.isfile(finalpath) and not path.isdir(finalpath):
# One last, desparate call to see if there's a case error. This is expensive and should not be run
# as default
finalpath = getAbsInsensitivePath(finalpath, ignoreAbsent=True)
return finalpath
else:
raise DataException("Could not find layer '{}' with xpath '{}'".format(layername, TopoProject.LAYERS[layername]))
def getMeta(self, metaname):
"""
Retrieve Meta tags from the project.rs.xml file
:param metaname:
:return:
"""
try:
return self.domroot.find('./MetaData/Meta[@name="{}"]'.format(metaname)).text
except Exception, e:
raise DataException("Error retrieving metadata with name '{}' from project file.".format(metaname, self.projpathxml))
def get_guid(self, layername):
"""
Get the guid from a given layer
:param layername:
:return:
"""
if layername not in TopoProject.LAYERS:
raise DataException("'{}' is not a valid layer name".format(layername))
        # str.rstrip() strips a character set, not a suffix, so trim the
        # trailing "/Path" segment explicitly.
        xpath = TopoProject.LAYERS[layername]
        if xpath.endswith("/Path"):
            xpath = xpath[:-len("/Path")]
        node = self.domroot.find(xpath)
if node is not None:
return node.get("guid")
else:
raise DataException("Could not find layer '{}' with xpath '{}'".format(layername, TopoProject.LAYERS[layername]))
def layer_exists(self, layername):
node = self.domroot.find(TopoProject.LAYERS[layername])
return True if node is not None else False
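# Editor's note: minimal usage sketch (paths and metadata key are assumed):
#
#     tp = TopoProject("/path/to/visit/Topo")  # folder with project.rs.xml
#     if tp.isrsproject and tp.layer_exists("DEM"):
#         dem = tp.getpath("DEM")
#         site = tp.getMeta("Site")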
| gpl-3.0 | -8,287,736,121,320,830,000 | 52.536765 | 142 | 0.648675 | false | 3.935676 | false | false | false |
siliconchris1973/picbrick | stuff/picView_standalone.py | 1 | 9662 | #!/usr/bin/env python
import os
import pygame
from pygame.locals import *
#define some colors
#color R G B
white = (255, 255, 255)
red = (255, 0, 0)
green = ( 0, 255, 0)
blue = ( 0, 0, 255)
black = ( 0, 0, 0)
cyan = ( 0, 255, 255)
btnCycle_col = white
btnPrev_col = white
btnNext_col = white
btnF1_col = cyan
btnF2_col = blue
btnF3_col = red
btnF4_col = green
btnF5_col = cyan
# directory structure
core_data = 'data'
image_dir = 'images'
video_dir = 'videos'
initial_image = 'HAL900_320x240.png'
"""
Screen layout:
|------------- 320 -------------|
C y c l e
+-------------------------------+ ---
20 |### ####################### ###| 20 |
|### ###| |
|### ###| |
P |### ###| N |
R |### ###| E 240
E |### ###| X |
V |### ###| T |
|### ###| |
|### ###| |
|### ### ### ### ### ### ### ###| |
+-------------------------------+ ---
40 F1 F2 F3 F4 F5 F6 40
"""
#screen size
width = 320
height = 240
size = (width, height)
# button definitions
# pressed button 21, 219
number_of_x_buttons = 5
number_of_y_buttons = 2
btn_width = 40
btn_height = 40
safetyMargin = 2
# evenly distribute function buttons
btnDistance_x = ((width - 2 * btn_width) - (number_of_x_buttons * btn_width)) / (number_of_x_buttons + 1)
btnDistance_y = ((height - btn_height) - (number_of_y_buttons * btn_height)) / (number_of_y_buttons + 1)
# these are the two big area to "scroll" left and right
btnPrev_x = 0
btnPrev_y = safetyMargin
btnPrev_width = btn_width
btnPrev_height = height - safetyMargin
btnNext_x = width - btn_width
btnNext_y = safetyMargin
btnNext_width = btn_width
btnNext_height = height - safetyMargin
btnCycle_x = 0 + (btn_width + safetyMargin)
btnCycle_y = 0
btnCycle_width = width - (2 * btn_width + 2 * safetyMargin)
btnCycle_height = btn_height
btnF1_x = 0 + (btn_width + safetyMargin)
btnF1_y = height - btn_height
btnF1_width = btn_width
btnF1_height = btn_height
btnF2_x = btnF1_x + btnDistance_x
btnF2_y = height - btn_height
btnF2_width = btn_width
btnF2_height = btn_height / 2
btnF3_x = btnF2_x + btnDistance_x
btnF3_y = height - btn_height
btnF3_width = btn_width
btnF3_height = btn_height / 2
btnF4_x = btnF3_x + btnDistance_x
btnF4_y = height - btn_height
btnF4_width = btn_width
btnF4_height = btn_height / 2
btnF5_x = btnF4_x + btnDistance_x
btnF5_y = height - btn_height
btnF5_width = btn_width
btnF5_height = btn_height / 2
# initialize pyGame and the screen
pygame.init()
screen = pygame.display.set_mode(size)
screen.fill((black))
touch_buttons = {
'btnPrev.png':(btnPrev_x, btnPrev_y, btnPrev_width, btnPrev_height) # Previous image button
,'btnNext.png':(btnNext_x,btnNext_y,btnNext_width, btnNext_height) # Next image button
,'btnCycle.png':(btnCycle_x,btnCycle_y,btnCycle_width, btnCycle_height) # Cycle screen button
,'btnF1.png':(btnF1_x,btnF1_y,btnF1_width, btnF1_height) # function 1 button
    ,'btnF2.png':(btnF2_x,btnF2_y,btnF2_width, btnF2_height) # function 2 button
    ,'btnF3.png':(btnF3_x,btnF3_y,btnF3_width, btnF3_height) # function 3 button
    ,'btnF4.png':(btnF4_x,btnF4_y,btnF4_width, btnF4_height) # function 4 button
,'btnF5.png':(btnF5_x,btnF5_y,btnF5_width, btnF5_height) # function 5 button
}
# functions
def prev_picture():
print 'prev picture called'
def next_picture():
print 'next picture called'
def cycle_function():
print 'cycle function called'
def display_image(directory, filename):
try:
# load from subfolder 'data'
img = pygame.image.load(os.path.join(directory,filename))
    except pygame.error:
        raise UserWarning, "Unable to load image '%s' from folder '%s' :-(" % (filename, directory)
screen.blit(img,(0,0))
# This function takes the name of an image to load.
# It also optionally takes an argument it can use to set a colorkey for the image.
# A colorkey is used in graphics to represent a color of the image that is transparent.
# we also use this this function to initialize filenav.py -- see modules
def load_image(name, colorkey=None):
fullname = os.path.join('data', name)
try:
image = pygame.image.load(fullname)
except pygame.error, message:
print 'Cannot load image:', name
raise SystemExit, message
image = image.convert()
if colorkey is not None:
if colorkey is -1:
colorkey = image.get_at((0,0))
image.set_colorkey(colorkey, RLEACCEL)
return image, image.get_rect()
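# Editor's note: example call (asset name is assumed). Passing -1 samples
# the top-left pixel as the transparent colour:
#
#     ball_img, ball_rect = load_image('ball.png', colorkey=-1)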
def show_controls():
# Draw a rectangle outline
pygame.draw.rect(screen, btnPrev_col, [btnPrev_x, btnPrev_y, btnPrev_width, btnPrev_height], 2)
#pygame.blit(source, dest, area=None, special_flags = 0) -> Rect
pygame.draw.rect(screen, btnNext_col, [btnNext_x, btnNext_y, btnNext_width, btnNext_height], 2)
pygame.draw.rect(screen, btnCycle_col, [btnCycle_x, btnCycle_y, btnCycle_width, btnCycle_height], 2)
#pygame.draw.rect(screen, btnF1_col, [btnF1_x, btnF1_y, btnF1_width, btnF1_height], 2)
#pygame.draw.rect(screen, btnF2_col, [btnF2_x, btnF2_y, btnF2_width, btnF2_height], 2)
#pygame.draw.rect(screen, btnF3_col, [btnF3_x, btnF3_y, btnF3_width, btnF3_height], 2)
#pygame.draw.rect(screen, btnF4_col, [btnF4_x, btnF4_y, btnF4_width, btnF4_height], 2)
#pygame.draw.rect(screen, btnF5_col, [btnF5_x, btnF5_y, btnF5_width, btnF5_height], 2)
"""
for i,v in touch_buttons.items():
btn_image = pygame.image.load(os.path.join('data', i))
# X Y W H
rect = btn_image.set_rect(v[0], v[1], v[2], v[3])
screen.blit(btn_image, rect)
"""
# Go ahead and update the screen with what we've drawn.
# This MUST happen after all the other drawing commands.
pygame.display.flip()
def hide_controls():
display_image(core_data, current_image)
# Go ahead and update the screen with what we've drawn.
# This MUST happen after all the other drawing commands.
pygame.display.flip()
def get_display():
disp_no = os.getenv('DISPLAY')
if disp_no:
print "I'm running under X display = {0}".format(disp_no)
pygame.mouse.set_visible(True)
else:
drivers = ['directfb', 'fbcon', 'svgalib']
found = False
for driver in drivers:
if not os.getenv('SDL_VIDEODRIVER'):
os.putenv('SDL_VIDEODRIVER', driver)
try:
pygame.display.init()
except pygame.error:
print 'Driver: {0} failed.'.format(driver)
continue
found = True
print "I'm running on the framebuffer using driver " + str(driver)
pygame.mouse.set_visible(False)
break
if not found:
raise Exception('No suitable video driver found!')
os.environ["SDL_FBDEV"] = "/dev/fb1"
os.environ["SDL_MOUSEDEV"] = "/dev/input/touchscreen"
os.environ["SDL_MOUSEDRV"] = "TSLIB"
def run(done, toggle_controls):
display_image(core_data, current_image)
show_controls()
while not done:
# This limits the while loop to a max of 10 times per second.
# Leave this out and we will use all CPU we can.
clock.tick(10)
# Scan touchscreen events
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
            if(event.type == MOUSEBUTTONDOWN):
# get list of images in picture folder
pos = pygame.mouse.get_pos()
# Find which quarter of the screen we're in
x,y = pos
print 'pos is ' + str(pos)
# check which button was pressed
if btnPrev_x <= x <= btnPrev_x + btnPrev_width and btnPrev_y <= y <= btnPrev_y + btnPrev_height:
prev_picture()
elif btnNext_x <= x <= btnNext_x + btnNext_width and btnNext_y <= y <= btnNext_y + btnNext_height:
next_picture()
elif btnCycle_x <= x <= btnCycle_x + btnCycle_width and btnCycle_y <= y <= btnCycle_y + btnCycle_height:
cycle_function()
else:
print 'event outside of control buttons'
if (toggle_controls == True):
toggle_controls = False
print 'showing controls'
show_controls()
else:
toggle_controls = True
print 'hiding controls'
# Go ahead and update the screen with what we've drawn.
# This MUST happen after all the other drawing commands.
pygame.display.flip()
            elif(event.type == MOUSEBUTTONUP):
pos = pygame.mouse.get_pos()
if __name__ == "__main__":
done = False
toggle_controls = True
get_display()
#define font
font = pygame.font.Font(None, 20)
font_big = pygame.font.Font(None, 50)
pygame.display.update()
current_image = initial_image
clock = pygame.time.Clock()
#background = pygame.Surface(screen.get_size())
#background = background.convert()
#background.fill((black))
run(done, toggle_controls)
# Be IDLE friendly
pygame.quit() | apache-2.0 | 2,926,098,843,309,572,600 | 31.10299 | 120 | 0.575968 | false | 3.345568 | false | false | false |
lellolandi/Tagger | python/mp3_m4a.py | 1 | 1680 | from os.path import splitext
from mutagen.mp4 import MP4
from mutagen.id3 import ID3
class TagObject:
def __init__(self,filename):
self.path = filename
self.track = self.total = self.year = 0
self.coverpath = self.artist = self.album = self.title = self.cover = ""
ext = splitext(filename)[1]
if ext == ".mp3":
tagfile = ID3(filename)
for key in tagfile:
if key == "TIT2": self.title = tagfile[key].text[0]
elif key == "TALB": self.album = tagfile[key].text[0]
elif key == "TPE1": self.artist = tagfile[key].text[0]
elif key == "TDRC":
try: self.year = int(str(tagfile[key].text[0]))
except ValueError: self.year = int(str(tagfile[key].text[0]).split("-")[0])
except OverflowError: pass
elif key == "TRCK":
val = tagfile[key].text[0].split("/")
try: self.track = int(val[0])
except (ValueError,OverflowError): pass
try: self.total = int(val[1])
except (ValueError,OverflowError,IndexError): pass
elif "APIC" in key: self.cover = tagfile[key].data
else:
tagfile = MP4(filename)
for key in tagfile:
if key == "\xa9nam": self.title = tagfile[key][0]
elif key == "\xa9alb": self.album = tagfile[key][0]
elif key == "\xa9ART": self.artist = tagfile[key][0]
elif key == "\xa9day":
try: self.year = int(tagfile[key][0])
except ValueError: self.year = int(tagfile[key][0].split("-")[0])
except OverflowError: pass
elif key == "covr": self.cover = tagfile[key][0]
elif key == "trkn":
try: self.track = tagfile[key][0][0]
except (OverflowError,IndexError): pass
try: self.total = tagfile[key][0][1]
except (OverflowError,IndexError): pass
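# Editor's note: minimal usage sketch (path is illustrative):
#
#     tag = TagObject("/music/album/01 - song.m4a")
#     # tag.artist, tag.title, tag.track, tag.total, tag.year are now filled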
| gpl-3.0 | 7,655,653,335,960,738,000 | 36.333333 | 80 | 0.629762 | false | 2.916667 | false | false | false |
landscapeio/prospector | tests/profiles/test_profile.py | 1 | 3944 | import os
from unittest import TestCase
from prospector2.profiles.profile import ProspectorProfile
class ProfileTestBase(TestCase):
def setUp(self):
self._profile_path = [
os.path.join(os.path.dirname(__file__), 'profiles'),
os.path.join(os.path.dirname(__file__), '../../prospector2/profiles/profiles')
]
def _file_content(self, name):
        # self._profile_path is a list of search directories; use the first
        path = os.path.join(self._profile_path[0], name)
with open(path) as f:
return f.read()
class TestProfileParsing(ProfileTestBase):
def test_empty_disable_list(self):
"""
This test verifies that a profile can still be loaded if it contains
an empty 'pylint.disable' list
"""
profile = ProspectorProfile.load('empty_disable_list', self._profile_path, allow_shorthand=False)
self.assertEqual([], profile.pylint['disable'])
def test_empty_profile(self):
"""
Verifies that a completely empty profile can still be parsed and have
default values
"""
profile = ProspectorProfile.load('empty_profile', self._profile_path, allow_shorthand=False)
self.assertEqual([], profile.pylint['disable'])
def test_ignores(self):
profile = ProspectorProfile.load('ignores', self._profile_path)
        # list.sort() returns None, so the original assertion compared None to None
        self.assertEqual(sorted(['^tests/', '/migrations/']), sorted(profile.ignore_patterns))
def test_disable_tool(self):
profile = ProspectorProfile.load('pylint_disabled', self._profile_path)
self.assertFalse(profile.is_tool_enabled('pylint'))
self.assertTrue(profile.is_tool_enabled('pep8') is None)
class TestProfileInheritance(ProfileTestBase):
def _example_path(self, testname):
return os.path.join(os.path.dirname(__file__), 'profiles', 'inheritance', testname)
def _load(self, testname):
profile_path = self._profile_path + [self._example_path(testname)]
return ProspectorProfile.load('start', profile_path)
def test_simple_inheritance(self):
profile = ProspectorProfile.load('inherittest3', self._profile_path, allow_shorthand=False)
disable = profile.pylint['disable']
disable.sort()
self.assertEqual(['I0002', 'I0003', 'raw-checker-failed'], disable)
def test_disable_tool_inheritance(self):
profile = ProspectorProfile.load('pep8_and_pylint_disabled', self._profile_path)
self.assertFalse(profile.is_tool_enabled('pylint'))
self.assertFalse(profile.is_tool_enabled('pep8'))
def test_precedence(self):
profile = self._load('precedence')
self.assertTrue(profile.is_tool_enabled('pylint'))
self.assertTrue('expression-not-assigned' in profile.get_disabled_messages('pylint'))
def test_strictness_equivalence(self):
profile = self._load('strictness_equivalence')
medium_strictness = ProspectorProfile.load('strictness_medium', self._profile_path)
self.assertListEqual(sorted(profile.pylint['disable']), sorted(medium_strictness.pylint['disable']))
def test_shorthand_inheritance(self):
profile = self._load('shorthand_inheritance')
high_strictness = ProspectorProfile.load('strictness_high', self._profile_path,
# don't implicitly add things
allow_shorthand=False,
# but do include the profiles that the start.yaml will
forced_inherits=['doc_warnings', 'no_member_warnings']
)
self.assertDictEqual(profile.pylint, high_strictness.pylint)
self.assertDictEqual(profile.pep8, high_strictness.pep8)
self.assertDictEqual(profile.pyflakes, high_strictness.pyflakes)
def test_pep8_inheritance(self):
profile = self._load('pep8')
self.assertTrue(profile.is_tool_enabled('pep8'))
| gpl-2.0 | -3,299,009,453,564,928,500 | 41.869565 | 108 | 0.642495 | false | 4.155954 | true | false | false |
ecrespo/django_kanban-agile | kanban/django_kanban_agile/django_kanban_agile/settings.py | 1 | 2913 | """
Django settings for django_kanban_agile project.
Generated by 'django-admin startproject' using Django 1.8.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import os.path
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'm_#*ocyst)mcc8z*84%j2e2o2+9qo17isuf6$f-p^nf*+kdvt-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'material',
'material.admin',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'tastypie',
'apps.kanban',
'apps.backlog',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'django_kanban_agile.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(os.path.dirname(__file__),'templates'),],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'django_kanban_agile.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'es-ve'
TIME_ZONE = 'America/Caracas'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
STATIC_URL = '/static/'
| mit | 1,358,321,658,298,448,100 | 24.552632 | 71 | 0.687951 | false | 3.391153 | false | false | false |
opena11y/fae2 | fae2/fae-util/save_markup_information.py | 1 | 2728 | """
Copyright 2014-2016 University of Illinois
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
file: fae-util/save_markup_information.py
Author: Jon Gunderson
"""
from __future__ import absolute_import
import sys
import os
import string
import glob
import optparse
import subprocess
import shlex
import time
import getopt
import shutil
import json
import csv
import urllib
# sys.path.append(os.path.abspath('..'))
from django.utils.encoding import iri_to_uri
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'fae20.settings')
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.models import User
from django.db import connection, transaction
from ruleCategories.models import RuleCategory
from rules.models import Rule
DEBUG=False
INFO=True
ERROR=True
class PageMarkupInformation:
    def __init__(self, mi, log):
        self.markup_information = mi
        self.log = log

    def debug(self, s):
        if DEBUG and self.log:
            self.log.write("[SAVE MARKUP][DEBUG]: " + str(s) + "\n")

    def info(self, s):
        if INFO and self.log:
            self.log.write("[SAVE MARKUP][INFO]: " + str(s) + "\n")

    def error(self, s):
        if ERROR and self.log:
            self.log.write("[SAVE MARKUP][ERROR]: " + str(s) + "\n")
def saveMarkupGroup(self, page_result, group, cursor):
insert_str = "INSERT INTO \"markupInfo_mipage" + str(group) + "\" ( "
insert_str += "page_result_id"
value_str = ") VALUES ( "
value_str += str(page_result.id)
for item in self.markup_information[group]:
insert_str += ", " + str(item)
value_str += ", " + str(self.markup_information[group][item])
insert_str = insert_str + value_str + ")"
# debug("[PageMarkupInformation][saveMarkupGroup] " + insert_str)
try:
# Data insertion operation - commit required
cursor.execute(insert_str, [])
except:
self.error("[PageMarkupInformation][saveMarkupGroup] SQL insert error ")
def saveToDjango(self, page_result):
try:
cursor = connection.cursor()
for group in self.markup_information:
self.saveMarkupGroup(page_result, group, cursor)
except:
self.error("[PageMarkupInformation][saveToDango] SQL insert error ")
| apache-2.0 | -170,159,004,764,348,670 | 24.027523 | 78 | 0.690249 | false | 3.731874 | false | false | false |
chunlaw/GeoNews | models/applespider.py | 1 | 5145 | from html.parser import HTMLParser
from urllib.request import urlopen
from urllib import parse
from bs4 import BeautifulSoup
# We are going to create a class called LinkParser that inherits some
# methods from HTMLParser which is why it is passed into the definition
class LinkParser(HTMLParser):
# This is a function that HTMLParser normally has
# but we are adding some functionality to it
def handle_starttag(self, tag, attrs):
# We are looking for the begining of a link. Links normally look
# like <a href="www.someurl.com"></a>
if tag == 'a':
for (key, value) in attrs:
if key == 'href':
# We are grabbing the new URL. We are also adding the
# base URL to it. For example:
# www.netinstructions.com is the base and
# somepage.html is the new URL (a relative URL)
#
# We combine a relative URL with the base URL to create
# an absolute URL like:
# www.netinstructions.com/somepage.html
newUrl = parse.urljoin(self.baseUrl, value)
                    # And add it to our collection of links:
if self.rules is not None and self.rules.get('link_prefix') is not None:
found = False
for rule in self.rules.get('link_prefix'):
found = found or newUrl.startswith( parse.urljoin(self.baseUrl, rule ) )
                        if not found:
                            # no allowed prefix matched; skip this link
                            break
self.links = self.links + [newUrl]
# This is a new function that we are creating to get links
# that our spider() function will call
def getLinks(self, url, rules=None):
self.links = []
self.rules = rules
# Remember the base URL which will be important when creating
# absolute URLs
self.baseUrl = url
# Use the urlopen function from the standard Python 3 library
response = urlopen(url)
# Make sure that we are looking at HTML and not other things that
# are floating around on the internet (such as
# JavaScript files, CSS, or .PDFs for example)
if response.getheader('Content-Type')=='text/html':
htmlBytes = response.read()
# Note that feed() handles Strings well, but not bytes
# (A change from Python 2.x to Python 3.x)
htmlString = htmlBytes.decode("utf-8")
self.feed(htmlString)
return htmlString, self.links
else:
return "",[]
class AppleSpider:
def __init__(self, baseUrl=None, rules=None, callback=None):
self.baseUrl = baseUrl or [('http://hkm.appledaily.com/list.php?category_guid=10829391&category=instant', 0)]
self.rules = rules or {'link_prefix': ['http://hkm.appledaily.com/detail.php']}
self.callback = callback
def setCallback(self,callback):
self.callback = callback
def extractContent(self, html, url):
soup = BeautifulSoup(html, 'html.parser')
content = ''
lastUpdateTime = None
title = ''
if soup.select('.lastupdate'):
lastUpdateTime = soup.select('.lastupdate')[0].text
if soup.select('#content-article h1'):
title = soup.select('#content-article h1')[0].text
paragraphs = soup.select('#content-article p')
for paragraph in paragraphs:
if paragraph.get('class') is None or ( paragraph.get('class') not in [ ['video-caption'], ['next'] ] ):
if not paragraph.text.startswith('【'):
content += paragraph.text
if self.callback is not None and lastUpdateTime is not None:
self.callback(title, content, url, lastUpdateTime)
# And finally here is our spider. It takes in an URL, a word to find,
# and the number of pages to search through before giving up
def crawl(self, maxLevel=1):
pagesToVisit = self.baseUrl
levelVisited = 0
# The main loop. Create a LinkParser and get all the links on the page.
# Also search the page for the word or string
# In our getLinks function we return the web page
# (this is useful for searching for the word)
# and we return a set of links from that web page
# (this is useful for where to go next)
while pagesToVisit != []:
# Start from the beginning of our collection of pages to visit:
url, levelVisited = pagesToVisit[0]
if levelVisited > maxLevel:
break
pagesToVisit = pagesToVisit[1:]
print(levelVisited, "Visiting:", url)
parser = LinkParser()
data, links = parser.getLinks(url, self.rules)
self.extractContent(data,url)
# Add the pages that we visited to the end of our collection
# of pages to visit:
links = [(link, levelVisited+1) for link in links ]
pagesToVisit = pagesToVisit + links
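# Minimal usage sketch (assumed; the callback signature is the one
# extractContent invokes: title, content, url, last-update string):
#
#   def on_article(title, content, url, last_update):
#       print(last_update, title, url)
#
#   AppleSpider(callback=on_article).crawl(maxLevel=1)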
| mit | -8,124,131,442,782,550,000 | 45.333333 | 117 | 0.588761 | false | 4.441278 | false | false | false |
RedFantom/ttkwidgets | examples/example_timeline.py | 1 | 1282 | # -*- coding: utf-8 -*-
# Copyright (c) RedFantom 2017
# For license see LICENSE
import tkinter as tk
from ttkwidgets import TimeLine
window = tk.Tk()
timeline = TimeLine(
window,
categories={str(key): {"text": "Category {}".format(key)} for key in range(0, 5)},
height=100, extend=True
)
menu = tk.Menu(window, tearoff=False)
menu.add_command(label="Some Action", command=lambda: print("Command Executed"))
timeline.tag_configure("1", right_callback=lambda *args: print(args), menu=menu, foreground="green",
active_background="yellow", hover_border=2, move_callback=lambda *args: print(args))
timeline.create_marker("1", 1.0, 2.0, background="white", text="Change Color", tags=("1",), iid="1")
timeline.create_marker("2", 2.0, 3.0, background="green", text="Change Category", foreground="white", iid="2",
change_category=True)
timeline.create_marker("3", 1.0, 2.0, text="Show Menu", tags=("1",))
timeline.create_marker("4", 4.0, 5.0, text="Do nothing", move=False)
timeline.draw_timeline()
timeline.grid()
window.after(2500, lambda: timeline.configure(marker_background="cyan"))
window.after(5000, lambda: timeline.update_marker("1", background="red"))
window.after(5000, lambda: print(timeline.time))
window.mainloop()
| gpl-3.0 | -8,101,244,742,804,365,000 | 43.206897 | 110 | 0.684087 | false | 3.304124 | false | false | false |
nathan-osman/StackIRC | setup.py | 1 | 1469 | #!/usr/bin/env python
'''
Copyright (c) 2012 Nathan Osman
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
from distutils.core import setup
setup(name='stackirc',
version='0.1',
description='A bot for posting Stack Exchange questions to IRC channels.',
author='Nathan Osman',
author_email='[email protected]',
url='https://github.com/nathan-osman/StackIRC',
license='MIT',
packages=['stackirc',],
scripts=['scripts/stackirc',])
| mit | 5,386,130,225,626,811,000 | 43.515152 | 80 | 0.761743 | false | 4.307918 | false | false | false |
danac/xpensemate | doc/intro/debt_graph.py | 1 | 1722 | import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
import pylab
G1 = nx.DiGraph()
G2 = nx.DiGraph()
for G in [G1, G2]:
G.add_node('A', pos=(0.55,0.5))
G.add_node('B', pos=(0.95,0.6))
G.add_node('C', pos=(0,0.7))
G.add_node('D', pos=(0.9,1.2))
G.add_node('E', pos=(0.35,1.1))
G1.add_edges_from([('A', 'B')], weight=1)
G1.add_edges_from([('A', 'C')], weight=2)
G1.add_edges_from([('D', 'B')], weight=1.5)
G1.add_edges_from([('D', 'C')], weight=5)
G1.add_edges_from([('A', 'D')], weight=1)
G1.add_edges_from([('C', 'B')], weight=1.5)
G1.add_edges_from([('E', 'C')], weight=1)
G2.add_edges_from([('A', 'B')], weight=4)
G2.add_edges_from([('E', 'C')], weight=1)
G2.add_edges_from([('D', 'C')], weight=5.5)
names = ["full", "simple"]
i=0
for G in [G1, G2]:
f=plt.figure()
edge_labels=dict([((u,v,),d['weight'])
for u,v,d in G.edges(data=True)])
#red_edges = [('C','D'),('D','A')]
edge_colors = ['black' for edge in G.edges()] #['black' if not edge in red_edges else 'red' for edge in G.edges()]
pos=nx.spring_layout(G)
pos=nx.get_node_attributes(G,'pos')
# Draw nodes
nx.draw_networkx_nodes(G,pos,node_size=700, node_color='orange')
nx.draw_networkx_edge_labels(G,pos,edge_labels=edge_labels, font_size=16)
nx.draw_networkx_edges(G,pos,edgelist=G.edges(data=True), edge_color='k')
nx.draw_networkx_labels(G,pos,font_size=16,font_family='sans-serif')
#nx.draw(G,pos, node_color = 'orange', node_size=1500,edge_color=edge_colors,edge_cmap=plt.cm.Reds)
plt.axis('off')
plt.savefig("../static/debt_graph_"+names[i]+".png", format='png', transparent=True)
i+=1
pylab.show()
| agpl-3.0 | -3,601,441,419,496,271,000 | 33.44 | 118 | 0.585366 | false | 2.499274 | false | false | false |
krzysztofszymanski/appengine-bugs | issue_handler.py | 1 | 2028 | from google.appengine.api import users
from lib import BaseRequest
from service import *
from models import Issue
from google.appengine.ext.webapp import template
template.register_template_library('tags.br')
class IssueHandler(BaseRequest):
def get(self, project_slug, issue_slug):
if self.request.path[-1] != "/":
self.redirect("%s/" % self.request.path, True)
return
user = users.get_current_user()
        if not user:
            self.redirect('/')
            return
try:
issue = Issue.all().filter('internal_url =', "/%s/%s/" % (
project_slug, issue_slug)).fetch(1)[0]
issues = Issue.all().filter('project =', issue.project).filter(
'fixed =', False).fetch(10)
except IndexError:
self.render_404()
return
on_list = False
try:
if user.email() in issue.project.other_users:
on_list = True
except:
pass
if issue.project.user == user or users.is_current_user_admin() or on_list:
owner = True
else:
owner = False
context = {
'issue': issue,
'issues': issues,
'owner': owner,
}
output = self.render("issue.html", context)
self.response.out.write(output)
def post(self, project_slug, issue_slug):
# if we don't have a user then throw
# an unauthorised error
user = users.get_current_user()
if not user:
self.render_403()
return
issue = Issue.all().filter('internal_url =', "/%s/%s/" % (
project_slug, issue_slug)).fetch(1)[0]
Service.update_issue_with_request_values(issue, self.request)
issue.put()
service = Service()
if issue.fixed:
service.send_fixed_email(issue)
else:
service.send_issue_updated_email(issue)
self.redirect("/projects{}".format(issue.internal_url))
| mit | 1,482,959,902,427,885,000 | 28.391304 | 82 | 0.54783 | false | 4.121951 | false | false | false |
EdinburghGenomics/clarity_scripts | prodscripts/AssignWorkflow.py | 1 | 2969 | #!/usr/bin/env python
import getopt
import sys
from genologics.entities import Process
from genologics.lims import Lims
HOSTNAME = ""
VERSION = ""
BASE_URI = ""
api = None
args = None
def get_workflow_stage(lims, workflow_name, stage_name=None):
workflows = [w for w in lims.get_workflows() if w.name == workflow_name]
if len(workflows) != 1:
return
if not stage_name:
return workflows[0].stages[0]
stages = [s for s in workflows[0].stages if s.name == stage_name]
if len(stages) != 1:
return
return stages[0]
def get_parent_process_id(art):
return art.parent_process.id
def assignWorkflow():
LIMSID = args["limsid"]
usernameargs = args["username"]
passwordargs = args["password"]
stepURI = args["stepURI"]
apiLocation = stepURI.find('/api')
BASE_URI = stepURI[0:apiLocation]
l = Lims(baseuri=BASE_URI, username=usernameargs, password=passwordargs)
p = Process(l, id=LIMSID)
artifacts = p.all_inputs()
for art in artifacts:
sample = art.samples[0]
submitted_art = sample.artifact
if art.samples[0].udf.get("Proceed To SeqLab") and not art.samples[0].udf.get("2D Barcode"): #checks to see if sample is in plate or fluidX tube
stage = get_workflow_stage(l, "PreSeqLab EG 6.0", "Sequencing Plate Preparation EG 2.0")
l.route_artifacts([submitted_art], stage_uri=stage.uri)
    elif art.samples[0].udf.get("Proceed To SeqLab") and art.samples[0].udf.get("2D Barcode"): #if it is a fluidX tube, we need to find the derived artifact created by the FluidX Transfer step
fluidX_artifacts = l.get_artifacts(process_type="FluidX Transfer From Rack Into Plate EG 1.0 ST", sample_name=art.samples[0].name, type='Analyte')
        if len(fluidX_artifacts) > 1: #it's possible that the FluidX Transfer has occurred more than once, so we must find the most recent occurrence of that step
fluidX_artifacts.sort(key=get_parent_process_id, reverse=True) #sorts the artifacts returned to place the most recent artifact at position 0 in list
fluidX_artifact=fluidX_artifacts[0]
else:
fluidX_artifact=fluidX_artifacts[0]
stage = get_workflow_stage(l, "PreSeqLab EG 6.0", "Sequencing Plate Preparation EG 2.0")
l.route_artifacts([fluidX_artifact], stage_uri=stage.uri)
def main():
global api
global args
args = {}
opts, extraparams = getopt.getopt(sys.argv[1:], "l:s:u:p:")
for o, p in opts:
if o == '-l':
args["limsid"] = p
elif o == '-s':
args["stepURI"] = p
elif o == '-u':
args["username"] = p
elif o == '-p':
args["password"] = p
## at this point, we have the parameters the EPP plugin passed, and we have network plumbing
## so let's get this show on the road!
assignWorkflow()
if __name__ == "__main__":
main()
| mit | -7,272,142,704,908,751,000 | 33.126437 | 194 | 0.633547 | false | 3.448316 | false | false | false |
Dzess/ALFIRT | alfirt.runner/src/generator/scene/tests/SceneInjecterX3DTests.py | 1 | 3811 | '''
Created on Aug 20, 2011
@author: Piotr
'''
import unittest
from generator.scene.SceneInjecterX3D import SceneInjecterX3D
from generator.data.SceneDescription import SceneDescription
from generator.data.ObjectPose import ObjectPose
from lxml import etree
from lxml import objectify
class TagWriterX3DTests(unittest.TestCase):
def setUp(self):
self.injecter = SceneInjecterX3D()
# Setting up the X3D string with ALFIRT namespace tags
self.x3dString = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE X3D PUBLIC "ISO//Web3D//DTD X3D 3.2//EN" "http://www.web3d.org/specifications/x3d-3.2.dtd">
<X3D profile="Interchange" version="3.2"
xmlns:xsd="http://www.w3.org/2001/XMLSchema-instance"
xmlns:alfirt="ALFIRT"
xsd:noNamespaceSchemaLocation=" http://www.web3d.org/specifications/x3d-3.2.xsd ">
<Scene>
<Viewpoint description='Rear View' orientation='0 1 0 3.14159' position='0 0 -10'/>
<Shape alfirt:anchor_translate="0 1 2" alfirt:anchor_rotate="0.4 0.2 0.3">
<IndexedFaceSet coordIndex="0 1 2">
<Coordinate point="0 0 0 1 0 0 0.5 1 0"/>
</IndexedFaceSet>
</Shape>
</Scene>
</X3D>
"""
camera = ObjectPose([0, 0, 0], [0, 0, 0])
anchor = ObjectPose([1, 2, 3], [4, 5, 6])
self.scene = SceneDescription(camera, anchor)
self.expected_x3dString = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE X3D PUBLIC "ISO//Web3D//DTD X3D 3.2//EN" "http://www.web3d.org/specifications/x3d-3.2.dtd">
<X3D profile="Interchange" version="3.2"
xmlns:xsd="http://www.w3.org/2001/XMLSchema-instance"
xmlns:alfirt="ALFIRT"
xsd:noNamespaceSchemaLocation=" http://www.web3d.org/specifications/x3d-3.2.xsd ">
<Scene>
<Viewpoint description='Rear View' orientation='-0.9999999403953552 0.0 0.0 1.5707963705062866' position='0.0 0.0 0.0'/>
<Shape alfirt:anchor_translate="0 1 2" alfirt:anchor_rotate="0.4 0.2 0.3">
<IndexedFaceSet coordIndex="0 1 2">
<Coordinate point="0 0 0 1 0 0 0.5 1 0"/>
</IndexedFaceSet>
</Shape>
</Scene>
</X3D>
"""
def test_writing_proper_values(self):
result = self.injecter.injectScene(data=self.x3dString, scene=self.scene)
print(result)
# get the whitespace trimmed
expected_tree = objectify.fromstring(self.expected_x3dString.encode(encoding='ascii', errors='ignore'))
result_tree = objectify.fromstring(result.encode(encoding='utf_8', errors='strict'))
expected_string = etree.tostring(expected_tree)
result_string = etree.tostring(result_tree)
print(expected_string)
print(result_string)
self.assertEqual(result_string, expected_string, "The values were not injected")
def test_writing_nones_values(self):
with self.assertRaises(TypeError):
self.injecter.injectScene(None, None)
def test_writing_wrong_values(self):
with self.assertRaises(TypeError):
self.injecter.injectScene(3, "scene")
if __name__ == "__main__":
unittest.main()
| mit | 8,723,458,854,731,689,000 | 41.820225 | 151 | 0.535817 | false | 3.822467 | true | false | false |
fedora-infra/fedmsg_meta_fedora_infrastructure | fedmsg_meta_fedora_infrastructure/fasshim.py | 1 | 9107 | import collections
import logging
import os
import socket
import string
import threading
from hashlib import sha256, md5
_fas_cache = {}
_fas_cache_lock = threading.Lock()
log = logging.getLogger("moksha.hub")
try:
from six.moves.urllib import parse
except ImportError:
# Really really old 'six' doesn't have this move.. so we fall back to
# python-2 only usage. If we're on an old 'six', then we can assume that
# we must also be on an old Python.
import urllib as parse
def _ordered_query_params(params):
# if OrderedDict is available, preserver order of params
# to make this easily testable on PY3
if hasattr(collections, 'OrderedDict'):
retval = collections.OrderedDict(params)
else:
retval = dict(params)
return retval
# https://github.com/fedora-infra/fedmsg_meta_fedora_infrastructure/issues/320
hardcoded_avatars = {
'bodhi': 'https://apps.fedoraproject.org/img/icons/bodhi-{size}.png',
'koschei': 'https://apps.fedoraproject.org/img/icons/koschei-{size}.png',
# Taskotron may have a new logo at some point. Check this out:
# https://mashaleonova.wordpress.com/2015/08/18/a-logo-for-taskotron/
# Ask tflink before actually putting this in place though. we need
# a nice small square version. It'll look great!
# In the meantime, we can use this temporary logo.
'taskotron': (
'https://apps.fedoraproject.org/img/icons/taskotron-{size}.png'
)
}
def avatar_url(username, size=64, default='retro'):
if username in hardcoded_avatars:
return hardcoded_avatars[username].format(size=size)
openid = "http://%s.id.fedoraproject.org/" % username
return avatar_url_from_openid(openid, size, default)
def avatar_url_from_openid(openid, size=64, default='retro', dns=False):
"""
Our own implementation since fas doesn't support this nicely yet.
"""
if dns:
# This makes an extra DNS SRV query, which can slow down our webapps.
# It is necessary for libravatar federation, though.
import libravatar
return libravatar.libravatar_url(
openid=openid,
size=size,
default=default,
)
else:
params = _ordered_query_params([('s', size), ('d', default)])
query = parse.urlencode(params)
hash = sha256(openid.encode('utf-8')).hexdigest()
return "https://seccdn.libravatar.org/avatar/%s?%s" % (hash, query)
def avatar_url_from_email(email, size=64, default='retro', dns=False):
"""
Our own implementation since fas doesn't support this nicely yet.
"""
if dns:
# This makes an extra DNS SRV query, which can slow down our webapps.
# It is necessary for libravatar federation, though.
import libravatar
return libravatar.libravatar_url(
email=email,
size=size,
default=default,
)
else:
params = _ordered_query_params([('s', size), ('d', default)])
query = parse.urlencode(params)
hash = md5(email.encode('utf-8')).hexdigest()
return "https://seccdn.libravatar.org/avatar/%s?%s" % (hash, query)
def make_fasjson_cache(**config):
global _fas_cache
if _fas_cache:
return _fas_cache
log.warn("No previous fasjson cache found. Looking to rebuild.")
creds = config['fasjson_credentials']
krb5_principal = creds.get("krb5_principal")
krb5_client_ktname = creds.get("krb5_client_ktname")
gss_use_proxy = creds.get("gss_use_proxy")
if krb5_client_ktname:
os.environ["KRB5_CLIENT_KTNAME"] = krb5_client_ktname
if gss_use_proxy:
os.environ["GSS_USE_PROXY"] = "yes"
# the base URL shouldn't contain the API version, the fasjson client takes
# care of it
default_url = 'https://fasjson.fedoraproject.org/'
base_url = creds.get('base_url', default_url)
try:
import fasjson_client
except ImportError:
fasjson_client = None
log.warn(
"No fasjson-client installed. Falling back to querying directly."
)
if fasjson_client:
try:
client = fasjson_client.Client(
url=base_url, principal=krb5_principal
)
except fasjson_client.errors.ClientSetupError as e:
log.error(
"Error while setting up fasjson client: %s" % e
)
return {}
APIError = fasjson_client.errors.APIError
else:
import requests
import requests.exceptions
from requests.compat import urlencode, urljoin
from requests_gssapi import HTTPSPNEGOAuth
# shim inside a shim
class Client(object):
def __init__(self, url, principal=None):
self.url = url.rstrip("/") + "/v1/"
self.principal = principal
gssapi_auth = HTTPSPNEGOAuth(
opportunistic_auth=True, mutual_authentication="OPTIONAL"
)
self.session = requests.Session()
self.session.auth = gssapi_auth
def list_all_entities(self, ent_name):
if not ent_name.endswith("/"):
# avoid redirection round trip
ent_name += "/"
endpoint = urljoin(self.url, ent_name)
# yay, pagination
next_page_url = endpoint + "?" + urlencode({"page_number": 1})
while next_page_url:
                    # requests gives back a Response object; decode its JSON body
                    data = self.session.get(next_page_url).json()
                    for item in data["result"]:
                        yield item
                    next_page_url = data.get("page", {}).get("next_page")
client = Client(url=base_url, principal=krb5_principal)
APIError = requests.exceptions.RequestException
try:
_add_to_cache(list(client.list_all_entities("users")))
except APIError as e:
log.error("Something went wrong building cache with error: %s" % e)
return {}
return _fas_cache
def _add_to_cache(users):
global _fas_cache
for user in users:
nicks = user.get('ircnicks', [])
for nick in nicks:
_fas_cache[nick] = user['username']
emails = user.get('emails', [])
for email in emails:
_fas_cache[email] = user['username']
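# The resulting cache maps both IRC nicks and e-mail addresses to FAS
# usernames, e.g. (values assumed): {'bob': 'bob_fas', '[email protected]': 'bob_fas'}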
def make_fas_cache(**config):
global _fas_cache
if _fas_cache:
return _fas_cache
log.warn("No previous fas cache found. Looking to rebuild.")
try:
import fedora.client
import fedora.client.fas2
except ImportError:
log.warn("No python-fedora installed. Not caching fas.")
return {}
if 'fas_credentials' not in config:
log.warn("No fas_credentials found. Not caching fas.")
return {}
creds = config['fas_credentials']
default_url = 'https://admin.fedoraproject.org/accounts/'
fasclient = fedora.client.fas2.AccountSystem(
base_url=creds.get('base_url', default_url),
username=creds['username'],
password=creds['password'],
)
timeout = socket.getdefaulttimeout()
for key in string.ascii_lowercase:
socket.setdefaulttimeout(600)
try:
log.info("Downloading FAS cache for %s*" % key)
response = fasclient.send_request(
'/user/list',
req_params={'search': '%s*' % key},
auth=True)
except fedora.client.ServerError as e:
log.warning("Failed to download fas cache for %s %r" % (key, e))
continue
finally:
socket.setdefaulttimeout(timeout)
log.info("Caching necessary user data for %s*" % key)
for user in response['people']:
nick = user['ircnick']
if nick:
_fas_cache[nick] = user['username']
email = user['email']
if email:
_fas_cache[email] = user['username']
del response
del fasclient
del fedora.client.fas2
return _fas_cache
def nick2fas(nickname, **config):
log.debug("Acquiring _fas_cache_lock for nicknames.")
with _fas_cache_lock:
log.debug("Got _fas_cache_lock for nicknames.")
fasjson = config.get('fasjson')
if fasjson:
fas_cache = make_fasjson_cache(**config)
else:
fas_cache = make_fas_cache(**config)
result = fas_cache.get(nickname, nickname)
log.debug("Released _fas_cache_lock for nicknames.")
return result
def email2fas(email, **config):
if email.endswith('@fedoraproject.org'):
return email.rsplit('@', 1)[0]
log.debug("Acquiring _fas_cache_lock for emails.")
with _fas_cache_lock:
log.debug("Got _fas_cache_lock for emails.")
fasjson = config.get('fasjson')
if fasjson:
fas_cache = make_fasjson_cache(**config)
else:
fas_cache = make_fas_cache(**config)
result = fas_cache.get(email, email)
log.debug("Released _fas_cache_lock for emails.")
return result
| lgpl-2.1 | 3,938,818,083,284,466,000 | 30.842657 | 78 | 0.599868 | false | 3.706553 | true | false | false |
doismellburning/tox | setup.py | 1 | 1993 | import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
class Tox(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = ["-v", "-epy"]
self.test_suite = True
def run_tests(self):
#import here, cause outside the eggs aren't loaded
import tox
tox.cmdline(self.test_args)
def main():
version = sys.version_info[:2]
install_requires = ['virtualenv>=1.11.2', 'py>=1.4.17', ]
if version < (2, 7):
install_requires += ['argparse']
setup(
name='tox',
description='virtualenv-based automation of test activities',
long_description=open("README.rst").read(),
url='http://tox.testrun.org/',
version='1.8.2.dev1',
license='http://opensource.org/licenses/MIT',
platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'],
author='holger krekel',
author_email='[email protected]',
packages=['tox', 'tox.vendor'],
entry_points={'console_scripts': 'tox=tox:cmdline\ntox-quickstart=tox._quickstart:main'},
# we use a public tox version to test, see tox.ini's testenv
# "deps" definition for the required dependencies
tests_require=['tox'],
cmdclass={"test": Tox},
install_requires=install_requires,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS :: MacOS X',
'Topic :: Software Development :: Testing',
'Topic :: Software Development :: Libraries',
'Topic :: Utilities',
'Programming Language :: Python',
'Programming Language :: Python :: 3'],
)
if __name__ == '__main__':
main()
| mit | 5,426,994,447,869,910,000 | 34.589286 | 97 | 0.584546 | false | 4.075665 | true | false | false |
jjaviergalvez/CarND-Term3-Quizzes | search/first-search-program.py | 1 | 2602 | # ----------
# User Instructions:
#
# Define a function, search() that returns a list
# in the form of [optimal path length, row, col]. For
# the grid shown below, your function should output
# [11, 4, 5].
#
# If there is no valid path from the start point
# to the goal, your function should return the string
# 'fail'
# ----------
# Grid format:
# 0 = Navigable space
# 1 = Occupied space
import sys
grid = [[0, 0, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 1, 1, 1, 0],
[0, 0, 0, 0, 1, 0]]
init = [0, 0]
goal = [len(grid)-1, len(grid[0])-1]
cost = 1
delta = [[-1, 0], # go up
[ 0,-1], # go left
[ 1, 0], # go down
[ 0, 1]] # go right
delta_name = ['^', '<', 'v', '>']
def search(grid,init,goal,cost):
# ----------------------------------------
# insert code here
# ----------------------------------------
def smallest(open_list):
        # This function receives an open list and returns the index of the
        # element with the lowest g-value
lowest_value = 100000
i_lowest_value = -1
i = 0
for element in open_list:
if element[0] < lowest_value:
lowest_value = element[0]
i_lowest_value = i
i += 1
if i_lowest_value != -1:
return i_lowest_value
else:
print("fail")
sys.exit(0)
def expand(pos):
        # This function receives an element pos and returns the neighbors that
        # are not yet used. Each element used is set to -1 on the grid.
n_row = len(grid)
n_col = len(grid[0])
x = pos[1]
y = pos[2]
g_val = pos[0] + 1
expand_list = []
for i in delta:
x_ = i[0] + x
y_ = i[1] + y
#if are between the bounds of the map
if (x_>=0 and x_<n_row and y_>=0 and y_<n_col):
value = grid[x_][y_]
if (value != 1 and value != -1):
expand_list.append([g_val, x_, y_])
grid[x_][y_] = -1 #mark as value already taken
return expand_list
# intitialization
grid[init[0]][init[1]] = -1
open_list = init[:]
open_list.insert(0,0)
open_list = [open_list]
#print("initial open list:")
#print(open_list)
list_item = [0,0,0]
while (list_item[1:]!= goal):
#print("----")
#print("take list item")
index = smallest(open_list)
list_item = open_list.pop(index)
#print(list_item)
#print("new open list")
open_list += expand(list_item)
#print(open_list)
return list_item
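# For the grid above, search() returns [11, 4, 5]: an optimal path of
# length 11 that ends at row 4, column 5 (the goal cell).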
print(search(grid, init, goal, cost)) | mit | -6,260,567,240,804,987,000 | 23.327103 | 76 | 0.506533 | false | 3.008092 | false | false | false |
robjwells/adventofcode-solutions | 2015/python/2015-19.py | 1 | 5504 | #!/usr/bin/env python3
"""Advent of Code 2015, Day 19: Medicine for Rudolph"""
import pathlib
import re
input_file = pathlib.Path('../input/2015-19.txt')
def parse_input(text: str) -> (list, str):
"""Return a list of replacement pairs and the molecule string"""
replacement_block, molecule = text.rstrip().split('\n\n')
replacement_pairs = [tuple(line.split(' => '))
for line in replacement_block.splitlines()]
return replacement_pairs, molecule
def generate_replacements(molecule: str, replacements: list) -> set:
"""Return set of permutations for the given molecule
replacements should be a list of (str, str) tuples, with
the first item being the string to be replaced and
the second the replacement string.
"""
generated = set()
# This is quadratic!
for find_str, replace_str in replacements:
for match in re.finditer(find_str, molecule):
substring_start, substring_end = match.span()
new_molecule = (molecule[:substring_start] +
replace_str +
molecule[substring_end:])
generated.add(new_molecule)
return generated
def reverse_reps(replacements):
"""Map from replacement to source and reverse each string also
The string reverse is needed because the steps_to_molecule
reverses the molecule string itself.
"""
return {b[::-1]: a[::-1] for a, b in replacements}
def steps_to_molecule(molecule: str, replacements: list):
"""Return the minimum number of replacements needed to make molecule
This is based off askalski’s solution on Reddit:
https://www.reddit.com/r/adventofcode/comments/
3xflz8/day_19_solutions/cy4etju
This solution processes the molecule in reverse, matches the (reversed)
replacement elements with their source element and retraces the steps
back to the original element (which is e).
The reversal is necessary to avoid backtracking to match sequences
that end in Ar.
"""
reps = reverse_reps(replacements)
# Reverse the molecule so we can consume *Ar sequences
# without the regex engine backtracking
molecule = molecule[::-1]
count = 0
# e is the original molecule we're trying to reach
while molecule != 'e':
# Replace one molecule at a time, using the reps dictionary
# to find the replacement string
molecule = re.sub(
'|'.join(reps.keys()),
lambda m: reps[m.group()],
molecule,
count=1
)
count += 1
return count
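# Note (added reasoning, not in the original): this greedy one-at-a-time
# reduction finds the minimum only because of the puzzle grammar's shape;
# outside the Rn/Y/Ar "brackets" every reduction order yields the same
# step count, so this is not a general-purpose CFG reducer.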
def test_replacements():
test_molecule = 'HOH'
test_replacements = [
('H', 'HO'),
('H', 'OH'),
('O', 'HH'),
]
result = generate_replacements(molecule=test_molecule,
replacements=test_replacements)
expected = {'HOOH', 'HOHO', 'OHOH', 'HHHH'}
assert result == expected
def count(molecule, replacements):
"""This uses a modified version of askalski’s formula to count the steps
Note that in the following expression we don’t have an exact copy
of askalski’s formula, which is:
t - p - 2 * c - 1
This is because in the above function we’re left over with a
single token (which doesn’t get reduced by the pattern) matching,
which correlates with having 'e' left over if you do the step
by step reduction.
Having that left over, it doesn’t get added to our totals and
so we don’t have to subtract 1 from the rest of the calculation
for the total number of steps.
(At least, I’m pretty sure that’s how this works :)
I’ve adapted this solution from one in F# by Yan Cui:
http://theburningmonk.com/2015/12/advent-of-code-f-day-19/
"""
# Create a set of all the 'source' elements, with the strings reversed
reps = {a[::-1] for a, b in replacements}
def loop(molecule, tokens=0, parens=0, commas=0):
# Minimum length of the molecule list is 1.
if len(molecule) == 1:
return (tokens, parens, commas)
first, second, *rest = molecule
if first in ('(', ')'):
return loop(molecule[1:], tokens + 1, parens + 1, commas)
elif first == ',':
return loop(molecule[1:], tokens + 1, parens, commas + 1)
elif first in reps:
return loop(molecule[1:], tokens + 1, parens, commas)
elif first + second in reps:
return loop(rest, tokens + 1, parens, commas)
# This looks so gross in Python
molecule = molecule.replace(
'Rn', '(').replace(
'Ar', ')').replace(
'Y', ',')
molecule = molecule[::-1]
tokens, parens, commas = loop(molecule)
return tokens - parens - 2 * commas
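# Worked toy numbers (assumed for illustration): a reversed molecule that
# tokenises into 10 elements with one Rn/Ar pair (2 parens) and one Y
# (1 comma) needs 10 - 2 - 2*1 = 6 reduction steps under this formula.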
def main():
replacement_pairs, molecule = parse_input(input_file.read_text())
generated_molecules = generate_replacements(
molecule=molecule,
replacements=replacement_pairs)
num_generated = len(generated_molecules)
print(f'Part one, number of molecules generated: {num_generated}')
min_steps_to_molecule = steps_to_molecule(molecule, replacement_pairs)
print(f'Part two, minimum steps to molecule: {min_steps_to_molecule}'
' (iter)')
min_steps_by_count = count(molecule, replacement_pairs)
print(f'Part two, minimum steps to molecule: {min_steps_by_count}'
' (count)')
if __name__ == '__main__':
main()
| mit | 1,421,638,115,674,974,700 | 32.024096 | 76 | 0.628968 | false | 4.02201 | false | false | false |
afolmert/mentor | src/views.py | 1 | 17557 | #!/usr/bin/env python
# -*- coding: iso-8859-2 -*-
#
# Copyright (C) 2007 Adam Folmert <[email protected]>
#
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
#
#
#
"""This is the module for views used in Mentor GUI"""
import release
__author__ = '%s <%s>' % \
( release.authors['afolmert'][0], release.authors['afolmert'][1])
__license__ = release.license
__version__ = release.version
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from utils_qt import tr
from utils import log
# main gui parts
# card widget is a sort of delegate and it should behave as one
# it currently has card model assigned
# or is it like more like listwidget
# helper widget classes
# TODO move this to views module
class MyTextEdit(QTextEdit):
"""Overriden to emit focusLost signals."""
# TODO maybe better is to subclass the event procedure?
def __init__(self, parent=None):
QTextEdit.__init__(self, parent)
def keyPressEvent(self, event):
if event.key() == Qt.Key_Tab:
self.emit(SIGNAL('tabPressed()'))
event.accept()
else:
QTextEdit.keyPressEvent(self, event)
def focusOutEvent(self, event):
QTextEdit.focusOutEvent(self, event)
self.emit(SIGNAL('focusLost()'))
class AbstractCardView(QAbstractItemView):
"""Base abstract class for card widgets."""
# current index is stored in selection model
# it is updated by connecting changing of current index in other views with
# currentChanged slots in here
def __init__(self, parent=None):
QAbstractItemView.__init__(self, parent)
self._dirty = False
# self.setSelectionModel(QAbstractItemView.SingleSelection)
# these control what it looks for
def currentChanged(self, current, previous):
# TODO how to check if two indexes are equal/inequal?
if current != self.getCurrentIndex():
# save pending changes
self.saveChanges()
self.setCurrentIndex(current)
self._updateView(self.model(), current)
def setModel(self, model):
QAbstractItemView.setModel(self, model)
self.connect(model, SIGNAL('modelAboutToBeReset()'), self.saveChanges)
def dataChanged(self, index):
# TODO do this only if index is the one as currently used
# TODO how to check whether this is the model
if index == self.getCurrentIndex():
self._updateView(self.model(), index)
def dirty(self):
return self._dirty
def setDirty(self, dirty):
self._dirty = dirty
def saveChanges(self):
if self.dirty():
self._updateModel(self.model(), self.getCurrentIndex())
self.setDirty(False)
def reset(self):
# what in here ?
# the changes will not be saved
# external app must call save
self._updateView(self.model(), self.getCurrentIndex())
def _updateModel(self, model, index):
# to be overridden
pass
def _updateView(self, model, index):
# to be overridden
pass
def getCurrentIndex(self):
"""Returns currently selected item"""
selection = self.selectionModel()
# get current selection
selectedIndex = selection.selectedIndexes()
if len(selectedIndex) > 0:
return selectedIndex[0]
else:
return None
def setCurrentIndex(self, index):
"""Returns currenly selected item from the model"""
selection = self.selectionModel()
selection.select(index, QItemSelectionModel.Select | QItemSelectionModel.Current)
# must override pure virtual functions
# perhaps I should abandon the idea of having this as abstractitemview?
def verticalOffset(self):
return 1
def horizontalOffset(self):
return 1
def visualRegionForSelection(self, selection):
return QRegion(0, 0, 1, 1)
    def visualRect(self, index):
        return QRect(0, 0, 1, 1)
class CardMainView(AbstractCardView):
"""Widget for displaying current card.
May be later subclassed to display all kinds of cards:
RTF , graphical, simple etc.
"""
def __init__(self, parent=None):
AbstractCardView.__init__(self, parent)
self._updatingView = False
self._updatingModel = False
self.lblQuestion = QLabel("&Question:")
self.txtQuestion = MyTextEdit()
self.txtQuestion.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.txtQuestion.setFont(QFont("Courier New", 13, QFont.Bold))
self.txtQuestion.setText("")
self.txtQuestion.setMinimumHeight(100)
self.lblQuestion.setBuddy(self.txtQuestion)
self.splitter = QSplitter(Qt.Vertical)
self.splitter.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.lblAnswer = QLabel("&Answer:")
self.txtAnswer = MyTextEdit()
self.txtAnswer.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.txtAnswer.setFont(QFont("Courier New", 13, QFont.Bold))
self.txtAnswer.setText("")
self.txtAnswer.setMinimumHeight(100)
self.lblAnswer.setBuddy(self.txtAnswer)
self.connect(self.txtAnswer, SIGNAL('tabPressed()'), self.on_txtAnswer_tabPressed)
self.connect(self.txtQuestion, SIGNAL('tabPressed()'), self.on_txtQuestion_tabPressed)
self.connect(self.txtAnswer, SIGNAL('textChanged()'), self.txtAnswer_textChanged)
self.connect(self.txtQuestion, SIGNAL('textChanged()'), self.txtQuestion_textChanged)
self.connect(self.txtAnswer, SIGNAL('focusLost()'), self.saveChanges)
self.connect(self.txtQuestion, SIGNAL('focusLost()'), self.saveChanges)
self.splitter.addWidget(self.txtQuestion)
self.splitter.addWidget(self.txtAnswer)
self.splitter.setSizes([200, 100])
# FIXME how to block splitter from hiding one window completely ??
layout = QHBoxLayout()
layout.setMargin(2)
layout.setSpacing(2)
layout.addWidget(self.splitter)
self.setLayout(layout)
def _updateModel(self, model, index):
self._updatingModel = True
if index:
model.updateCard(index, \
self.txtQuestion.toPlainText(), self.txtAnswer.toPlainText())
self._updatingModel = False
def _updateView(self, model, index):
self._updatingView = True
try:
assert index and index.isValid(), "Invalid card model"
card = model.data(index, Qt.UserRole)
self.txtQuestion.setText(card.question)
self.txtAnswer.setText(card.answer)
self.txtQuestion.setEnabled(True)
self.txtAnswer.setEnabled(True)
# TODO narrow it to No data found exception !
except:
self.txtQuestion.setText("")
self.txtQuestion.setEnabled(False)
self.txtAnswer.setText("")
self.txtAnswer.setEnabled(False)
self._updatingView = False
def on_txtAnswer_tabPressed(self):
self.txtQuestion.setFocus(Qt.TabFocusReason)
def on_txtQuestion_tabPressed(self):
self.txtAnswer.setFocus(Qt.TabFocusReason)
def txtAnswer_focusLost(self):
if self._dirty:
self._updateModel(self.model(), self.getCurrentIndex())
def txtQuestion_focusLost(self):
if self._dirty:
self._updateModel(self.model(), self.getCurrentIndex())
def txtAnswer_textChanged(self):
if not self._updatingView:
self.setDirty(True)
def txtQuestion_textChanged(self):
if not self._updatingView:
self.setDirty(True)
# FIXME
# these functions are not really connected with the model/view thing
# the question is : should this be connected with a model and be descended
# from QAbstractItemView or just be a standalone control for displaying
# cards?
def displayCard(self, card, readonly=True, showAnswer=True):
self.txtQuestion.setEnabled(not readonly)
self.txtAnswer.setEnabled(not readonly)
self.txtQuestion.setText(card.question)
if showAnswer:
self.txtAnswer.setText(card.answer)
else:
self.txtAnswer.setText("")
def switchAnswer(self):
self.txtAnswer.setVisible(not self.txtAnswer.isVisible())
class CardDetailView(AbstractCardView):
"""Widget for displaying card details (score, hints, review dates etc.)"""
def __init__(self, parent=None):
AbstractCardView.__init__(self, parent)
self._updatingView = False
self.setFont(QFont("vt100", 8))
self.lblId = QLabel("Id:")
self.edId = QLabel("edId")
self.lblScore = QLabel("Score:")
self.edScore = QLabel("edScore")
self.lblDesc = QLabel("Description:")
self.edDesc = QLabel("edDescription")
self.lblRepetitions = QLabel("Repetitions:")
self.edRepetitions = QLabel("edRepetitions")
self.lblInterval = QLabel("Interval:")
self.edInterval = QLabel("edInterval")
self.lblLastRepetition = QLabel("Last repetition:")
self.edLastRepetition = QLabel("edLast repetition")
self.lblNextRepetition = QLabel("Next repetition:")
self.edNextRepetition = QLabel("edNext repetition")
self.lblAFactor = QLabel("A-Factor:")
self.edAFactor = QLabel("edA-Factor")
self.lblUFactor = QLabel("U-Factor:")
self.edUFactor = QLabel("edU-Factor")
self.lblForgettingIndex = QLabel("Forgetting index:")
self.edForgettingIndex = QLabel("edForgetting index")
self.lblFutureRep = QLabel("Future reptition:")
self.edFutureRep = QLabel("edFuture reptition")
self.lblOrdinal = QLabel("Ordinal:")
self.edOrdinal = QLabel("edOrdinal")
self.lblDifficulty = QLabel("Difficulty:")
self.edDifficulty = QLabel("edDifficulty")
self.lblFirstGrade = QLabel("First grade:")
self.edFirstGrade = QLabel("edFirst grade")
self.lblType = QLabel("Type:")
self.edType = QLabel("edType")
layout = QGridLayout()
layout.addWidget(self.lblId , 0, 0)
layout.addWidget(self.edId , 0, 1)
layout.addWidget(self.lblScore , 1, 0)
layout.addWidget(self.edScore , 1, 1)
layout.addWidget(self.lblDesc , 2, 0)
layout.addWidget(self.edDesc , 2, 1)
layout.addWidget(self.lblRepetitions , 3, 0)
layout.addWidget(self.edRepetitions , 3, 1)
layout.addWidget(self.lblInterval , 4, 0)
layout.addWidget(self.edInterval , 4, 1)
layout.addWidget(self.lblLastRepetition, 5, 0)
layout.addWidget(self.edLastRepetition , 5, 1)
layout.addWidget(self.lblNextRepetition, 6, 0)
layout.addWidget(self.edNextRepetition , 6, 1)
layout.addWidget(self.lblAFactor , 7, 0)
layout.addWidget(self.edAFactor , 7, 1)
layout.addWidget(self.lblUFactor , 8, 0)
layout.addWidget(self.edUFactor , 8, 1)
layout.addWidget(self.lblForgettingIndex , 9, 0)
layout.addWidget(self.edForgettingIndex , 9, 1)
layout.addWidget(self.lblFutureRep , 10, 0)
layout.addWidget(self.edFutureRep , 10, 1)
layout.addWidget(self.lblOrdinal , 11, 0)
layout.addWidget(self.edOrdinal , 11, 1)
layout.addWidget(self.lblDifficulty , 12, 0)
layout.addWidget(self.edDifficulty , 12, 1)
layout.addWidget(self.lblFirstGrade , 13, 0)
layout.addWidget(self.edFirstGrade , 13, 1)
layout.addWidget(self.lblType , 14, 0)
layout.addWidget(self.edType , 14, 1)
layout.setMargin(1)
layout.setSpacing(1)
self.setLayout(layout)
def _updateView(self, model, index):
# display information from the current cardModel and cardModelIndex
self._updatingView = True
try:
assert index and index.isValid(), "Invalid cardModel index!"
self.edId.setText(model.data(index).toString()[:10])
self.edScore.setText(model.data(index).toString()[:10])
self.edDesc.setText(model.data(index).toString()[:10])
except:
self.edId.setText("")
self.edScore.setText("")
self.edDesc.setText("")
self._updatingView = False
class CardSourceView(AbstractCardView):
"""Widget for displaying XML source for card"""
def __init__(self, parent=None):
AbstractCardView.__init__(self, parent)
self._updatingView = False
#self.lblSource = QLabel("&Source:")
self.txtSource = MyTextEdit()
self.setFont(QFont("vt100", 8))
#self.lblSource.setBuddy(self.txtSource)
layout = QVBoxLayout(self)
layout.setMargin(2)
layout.setSpacing(2)
#layout.addWidget(self.lblSource)
layout.addWidget(self.txtSource)
self.setLayout(layout)
def _updateView(self, model, index):
self._updatingView = True
try:
assert index and index.isValid(), "Invalid card model index!"
self.txtSource.setText(model.data(index).toString())
self.txtSource.setEnabled(True)
except:
self.txtSource.setText("")
self.txtSource.setEnabled(False)
self._updatingView = False
class CardGridView(QTableView):
def __init__(self, parent=None):
QTableView.__init__(self, parent)
self.setSortingEnabled(False)
self.setShowGrid(False)
self.setSelectionMode(QAbstractItemView.NoSelection)
self.setSelectionBehavior(QAbstractItemView.SelectRows)
self.setAlternatingRowColors(True)
self.setFont(QFont("vt100", 8))
class CardSidesView(QListWidget):
"""This is view for card sides """
def __init__(self, parent=None):
QListWidget.__init__(self, parent)
self.addItem('Side 1')
self.addItem('Side 2')
self.addItem('Side 3')
self.setMaximumWidth(50)
self.setMaximumHeight(50)
class CardContentView(QWidget):
def __init__(self, parent=None):
QWidget.__init__(self, parent)
# cardView
self.cardView = QWidget(self)
self.cardMainView2 = CardMainView(self)
self.cardMainView2.setEnabled(False)
self.cardDetailView = CardDetailView(self)
self.cardSidesView = CardSidesView(self)
topLayout = QHBoxLayout()
topLayout.addWidget(self.cardSidesView)
topLayout.addWidget(QLabel(r"""This is a preview of the given side of the card.
Select specific side on the left in order to see it.
Click on the 'Show/hide' button to switch between answer and question view.""", self))
self.btnSwitch = QPushButton("Show/hide answer", self)
self.connect(self.btnSwitch, SIGNAL('clicked()'), self.cardMainView2.switchAnswer)
topLayout.addWidget(self.btnSwitch)
layout = QVBoxLayout()
layout.addLayout(topLayout)
layout.addWidget(self.cardMainView2)
layout.addWidget(self.cardDetailView)
self.cardView.setLayout(layout)
# cardEditView
self.cardEditView = QWidget(self)
self.cardMainView = CardMainView(self)
layout = QVBoxLayout()
layout.addWidget(self.cardMainView)
self.cardEditView.setLayout(layout)
# cardSourceView
self.cardSourceView = CardSourceView(self)
tab = QTabWidget(self)
tab.addTab(self.cardView, "Card")
tab.addTab(self.cardEditView, "Edit card")
tab.addTab(self.cardSourceView, "Edit card source")
tab.addTab(QLabel("Here will go template graphical editor (ala SuperMemo designing mode or color scheme editor)", self), "Edit template")
tab.addTab(QLabel("Here will go template source editor (XSL)", self), "Edit template source")
layout = QVBoxLayout()
layout.setMargin(0)
layout.setSpacing(0)
layout.addWidget(tab)
self.setLayout(layout)
def setModel(self, model):
self.cardMainView.setModel(model)
self.cardMainView2.setModel(model)
self.cardSourceView.setModel(model)
self.cardDetailView.setModel(model)
def currentChanged(self, current, previous):
self.cardMainView.currentChanged(current, previous)
self.cardMainView2.currentChanged(current, previous)
self.cardSourceView.currentChanged(current, previous)
self.cardDetailView.currentChanged(current, previous)
| gpl-2.0 | -3,008,446,038,544,591,000 | 34.612576 | 145 | 0.638264 | false | 3.965891 | false | false | false |
archesproject/arches | arches/app/models/migrations/5613_notification_type.py | 1 | 3420 | # Generated by Django 2.2.6 on 2019-12-03 14:15
from django.conf import settings
from django.db import migrations, models
import django.contrib.postgres.fields.jsonb
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("models", "5475_update_geom_mv"),
]
operations = [
migrations.CreateModel(
name="NotificationType",
fields=[
("typeid", models.UUIDField(default=uuid.uuid1, primary_key=True, serialize=False)),
("name", models.TextField(blank=True, null=True)),
("emailtemplate", models.TextField(blank=True, null=True)),
("emailnotify", models.BooleanField(default=False)),
("webnotify", models.BooleanField(default=False)),
],
options={"db_table": "notification_types", "managed": True},
),
migrations.CreateModel(
name="UserXNotificationType",
fields=[
("id", models.UUIDField(default=uuid.uuid1, primary_key=True, serialize=False)),
("emailnotify", models.BooleanField(default=False)),
("webnotify", models.BooleanField(default=False)),
("notiftype", models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="models.NotificationType")),
("user", models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={"db_table": "user_x_notification_types", "managed": True},
),
migrations.AddField(
model_name="notification",
name="notiftype",
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to="models.NotificationType"),
),
migrations.RenameField(model_name="userxtask", old_name="date_done", new_name="datedone"),
migrations.RenameField(model_name="userxtask", old_name="date_start", new_name="datestart"),
migrations.CreateModel(
name="UserXNotification",
fields=[
("id", models.UUIDField(default=uuid.uuid1, primary_key=True, serialize=False)),
("isread", models.BooleanField(default=False)),
("notif", models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="models.Notification")),
("recipient", models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={"db_table": "user_x_notifications", "managed": True},
),
migrations.AddField(
model_name="notification",
name="context",
field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, default=dict, null=True),
),
migrations.RunSQL(
"""
INSERT INTO notification_types (typeid, name, emailtemplate, emailnotify, webnotify)
VALUES (
'441e6ed4-188d-11ea-a35b-784f435179ea',
'Search Export Download Ready',
'email/download_ready_email_notification.htm',
true,
true
);
""",
"""
DELETE FROM notification_types
WHERE typeid in ('441e6ed4-188d-11ea-a35b-784f435179ea');
""",
),
]
| agpl-3.0 | 8,205,261,245,665,000,000 | 43.415584 | 124 | 0.58655 | false | 4.312736 | false | false | false |
zuck/scribee | renderers/html.py | 1 | 2199 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""This file is part of the Scribee project.
"""
__author__ = 'Emanuele Bertoldi <[email protected]>'
__copyright__ = 'Copyright (c) 2011 Emanuele Bertoldi'
__version__ = '0.0.1'
import os, shutil
from StringIO import StringIO
from mako.lookup import TemplateLookup
from mako.template import Template
from mako.runtime import Context
from entity import Entity
import settings
def render(entities, output_dir):
basepath = os.path.join(output_dir, "html")
if os.path.exists(basepath):
shutil.rmtree(basepath)
HTML_ENTITIES_TEMPLATE = getattr(settings, "HTML_ENTITIES_TEMPLATE", "templates/entities.html")
HTML_ENTITY_TEMPLATE = getattr(settings, "HTML_ENTITY_TEMPLATE", "templates/entity.html")
HTML_INDEX_TEMPLATE = getattr(settings, "HTML_INDEX_TEMPLATE", "templates/index.html")
HTML_STATIC_ROOT = getattr(settings, "HTML_STATIC_ROOT", "templates/static")
# 1) Copies static files (it also creates <basepath>).
shutil.copytree(HTML_STATIC_ROOT, basepath)
# 2) Renders entity list page.
render_template({'entities': entities}, os.path.join(basepath, 'entities' + ".html"), HTML_ENTITIES_TEMPLATE)
# 3) Renders single entity page.
for entity in entities:
if not entity.parent or entity.type == Entity.Types.Class or entity.parent.type not in (Entity.Types.Class, Entity.Types.Function):
render_template({'entity': entity}, os.path.join(basepath, entity.uid() + ".html"), HTML_ENTITY_TEMPLATE)
# 4) Renders the index page.
render_template({'entities': entities}, os.path.join(basepath, 'index' + ".html"), HTML_INDEX_TEMPLATE)
def render_template(context, filename, template):
fd = open(filename, "w")
output = render_to_string(context, template)
fd.write(output)
fd.close()
def render_to_string(context, template):
fd = open(template, "r")
source = fd.read()
fd.close()
output = StringIO()
lookup = TemplateLookup(directories=[template.rpartition("/")[0]])
ctx = Context(output, **context)
Template(source, lookup=lookup).render_context(ctx)
return output.getvalue()
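
# Example usage (illustrative only; Entity objects come from the wider
# Scribee project and `collect_entities` is a hypothetical helper):
#
#     entities = collect_entities()
#     render(entities, "build")   # writes build/html/... pages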
| mit | 3,229,216,363,738,476,500 | 36.913793 | 139 | 0.684402 | false | 3.593137 | false | false | false |
xfix/NextBoard | forum/models.py | 1 | 4985 | from django.db import models
import django.contrib.auth.models as auth
from django.utils.timezone import now
from django.utils.functional import cached_property
from django.utils.safestring import mark_safe
from markdown import markdown
class User(auth.User):
"""Model for representing users.
It has few fields that aren't in the standard authentication user
table, and are needed for the forum to work, like footers.
"""
display_name = models.CharField(max_length=30, null=True)
footer = models.TextField(null=True)
def __str__(self):
"""Show display name or user name."""
return self.display_name or self.username
class Category(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
"""Show category name."""
return self.name
class Forum(models.Model):
"""Model for representing forums."""
category = models.ForeignKey(Category)
title = models.CharField(max_length=100)
description = models.TextField()
def __str__(self):
"""Show forum title."""
return self.title
def postcount(self):
"""Show forum postcount."""
return Post.objects.filter(thread__forum=self).count()
@cached_property
def last_post(self):
"""Show last post in the forum."""
result = Revision.objects.raw('''
SELECT revision.id, post_id, author_id, date_created
FROM forum_post AS post
JOIN forum_revision AS revision
ON revision.id = (SELECT id
FROM forum_revision
WHERE post_id = post.id
ORDER BY date_created
LIMIT 1
)
JOIN forum_thread AS thread
ON thread.id = thread_id
WHERE forum_id = %s
ORDER BY date_created DESC
LIMIT 1
''', [self.id])
try:
return result[0]
except IndexError:
return None
class Meta:
order_with_respect_to = 'category'
class Thread(models.Model):
"""Model for representing threads."""
forum = models.ForeignKey(Forum)
title = models.CharField(max_length=100)
views = models.PositiveIntegerField(default=0)
sticky = models.BooleanField(default=False)
closed = models.BooleanField(default=False)
def __str__(self):
"""Show thread title."""
return self.title
class Meta:
ordering = ['-sticky']
@cached_property
def last_post(self):
"""Show last post in the thread."""
return Revision.objects.raw('''
SELECT revision.id, post_id, author_id, date_created
FROM forum_post AS post
JOIN forum_revision AS revision
ON revision.id = (SELECT id
FROM forum_revision
WHERE post_id = post.id
ORDER BY date_created
LIMIT 1
)
WHERE thread_id = %s
ORDER BY date_created DESC
LIMIT 1
''', [self.id])[0]
def author(self):
"""Show author of post."""
return self.post_set.first().author()
def replies(self):
"""Show number of replies in thread."""
return self.post_set.count() - 1
class Post(models.Model):
"""Model for representing posts.
    Actual post contents are stored in Revision; this model only stores
    the thread reference. The first revision records the post's author
    and creation date, while the last revision holds the current text.
"""
thread = models.ForeignKey(Thread)
def first_revision(self):
"""Get first revision.
The first revision is important for things like post author.
"""
return self.revision_set.first()
def last_revision(self):
"""Get last revision.
The last revision contains most current post contents.
"""
return self.revision_set.last()
def author(self):
"""Get author.
This usually shows along with the post.
"""
return self.first_revision().author
@cached_property
def html(self):
"""Get post contents in HTML format."""
return self.last_revision().html
class Revision(models.Model):
"""Model for representing post revisions.
    The first revision for a given post supplies the author and date
    shown to the user. The date of the last revision marks when the post
    was most recently edited.
"""
post = models.ForeignKey(Post)
author = models.ForeignKey(User)
date_created = models.DateTimeField(default=now)
text = models.TextField()
@cached_property
def html(self):
"""Return HTML version of post (in Markdown format)."""
return mark_safe(markdown(self.text))
class Meta:
ordering = ['date_created']
| mit | 8,401,562,665,391,931,000 | 29.03012 | 71 | 0.589769 | false | 4.628598 | false | false | false |
42cc/django-x-file-accel | x_file_accel_redirects/migrations/0001_initial.py | 1 | 2184 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'AccelRedirect'
db.create_table('xfar_accelredirect', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('description', self.gf('django.db.models.fields.CharField')(max_length=64)),
('prefix', self.gf('django.db.models.fields.CharField')(default='media', unique=True, max_length=64)),
('login_required', self.gf('django.db.models.fields.BooleanField')(default=True)),
('internal_path', self.gf('django.db.models.fields.CharField')(max_length=64)),
('serve_document_root', self.gf('django.db.models.fields.CharField')(default='', max_length=64, blank=True)),
('filename_solver', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=1)),
))
db.send_create_signal('x_file_accel_redirects', ['AccelRedirect'])
def backwards(self, orm):
# Deleting model 'AccelRedirect'
db.delete_table('xfar_accelredirect')
models = {
'x_file_accel_redirects.accelredirect': {
'Meta': {'object_name': 'AccelRedirect', 'db_table': "'xfar_accelredirect'"},
'description': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'filename_solver': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_path': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'prefix': ('django.db.models.fields.CharField', [], {'default': "'media'", 'unique': 'True', 'max_length': '64'}),
'serve_document_root': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'blank': 'True'})
}
}
complete_apps = ['x_file_accel_redirects'] | bsd-3-clause | 6,863,806,590,103,413,000 | 51.02381 | 132 | 0.613553 | false | 3.726962 | false | false | false |
JasonLC506/CollaborativeFiltering | kcoreDecomposition.py | 1 | 5916 | import pymongo
import cPickle
database = pymongo.MongoClient().dataSet
col = database.user_reaction_filtered
logfile = "log_kcore"
users_list_file = "data/user_dict_NYTWaPoWSJ"
items_list_file = "data/item_dict_NYTWaPoWSJ"
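# k-core decomposition of the bipartite user-item reaction graph: repeatedly
# drop users and items with degree < k until every remaining node has degree
# >= k. users_list / items_list map IDs to their current degree, and the
# *_sorted structures bucket IDs by degree so low-degree nodes are cheap to
# find.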
def slotsort(entry_dictionary):
    # Bucket entry IDs by degree value: {degree: {ID: True}}.
    slots = {}
    for ID in entry_dictionary.keys():
        value = entry_dictionary[ID]
        slots.setdefault(value, {})
        slots[value][ID] = True
    return slots
def list_del(K, entry_list, entry_list_sorted):
entry_list_del = []
for k in range(K):
if k in entry_list_sorted:
entry_list_del += entry_list_sorted[k].keys()
for entryID in entry_list_sorted[k].keys():
del entry_list[entryID]
del entry_list_sorted[k]
return entry_list_del
def transaction_del(K = None, users_list_del = None, items_list_del = None, items_list_sorted = None, items_list = None, users_list_sorted = None, users_list = None, col_edge = None):
    UDEL = users_list_del is not None
if UDEL:
item_update = {}
for userID in users_list_del:
edge_cursor = col_edge.find({"READERID": userID}, no_cursor_timeout = True)
for edge in edge_cursor:
kcore = edge["KCORE"]
if kcore != 0 and kcore <= K:
print "kcore", kcore
print "edge", edge
continue # already excluded by smaller k core
itemID = edge["POSTID"]
# print "item to be modified", itemID
item_update.setdefault(itemID,0)
item_update[itemID] += 1
# print item_update
edge["KCORE"] = K
col_edge.save(edge)
print "total item to be updated", len(item_update), "total reactions to del", sum(item_update.values())
listUpdate(items_list, items_list_sorted, item_update)
else:
user_update = {}
for itemID in items_list_del:
edge_cursor = col_edge.find({"$and":[{"POSTID": itemID},{"KCORE": 0}]}, no_cursor_timeout = True)
for edge in edge_cursor:
kcore = edge["KCORE"]
if kcore != 0 and kcore <= K:
print "kcore", kcore
print "edge", edge
continue # already excluded by smaller k core
userID = edge["READERID"]
user_update.setdefault(userID,0)
user_update[userID] += 1
edge["KCORE"] = K
col_edge.save(edge)
print "total user to be updated", len(user_update), "total reactions to del", sum(user_update.values())
listUpdate(users_list, users_list_sorted, user_update)
def listUpdate(entry_list, entry_list_sorted, entry_update):
for entryID in entry_update.keys():
old_value = entry_list[entryID]
new_value = old_value - entry_update[entryID]
entry_list[entryID] = new_value
del entry_list_sorted[old_value][entryID]
entry_list_sorted.setdefault(new_value, {})[entryID] = True
def kcoreSingle(K, users_list_sorted, items_list_sorted, users_list, items_list, col_edge):
while True:
users_list_del = list_del(K, users_list, users_list_sorted)
with open(logfile, "a") as logf:
logf.write("users to be deleted" + str(len(users_list_del)) + "\n")
Nreaction = sum(items_list.values())
print "Nreaction from items before", Nreaction
transaction_del(K = K, users_list_del = users_list_del, items_list_sorted = items_list_sorted, items_list = items_list, col_edge = col_edge)
items_list_del = list_del(K, items_list, items_list_sorted)
with open(logfile, "a") as logf:
logf.write("items to be deleted" + str(len(items_list_del)) + "\n")
Nreaction = sum(items_list.values())
print "Nreaction from items after", Nreaction
if len(items_list_del) < 1:
with open(logfile, "a") as logf:
logf.write("kcore decomposition done with K = %d\n" % K)
break
transaction_del(K = K, items_list_del = items_list_del, users_list_sorted = users_list_sorted, users_list = users_list, col_edge = col_edge)
return users_list, items_list, users_list_sorted, items_list_sorted
def kcore(K, users_list_file, items_list_file, col_edge, store_every_k = False):
with open(users_list_file, "r") as f:
users_list = cPickle.load(f)
with open(items_list_file, "r") as f:
items_list = cPickle.load(f)
users_list_sorted = slotsort(users_list)
items_list_sorted = slotsort(items_list)
for k in range(2,K+1):
Nreaction = sum(items_list.values())
print "Nreaction from items before kcoreSingle", Nreaction
kcoreSingle(k, users_list_sorted, items_list_sorted, users_list, items_list, col_edge)
Nreaction = sum(items_list.values())
print "Nreaction from items after kcoreSingle", Nreaction
if store_every_k or k == K:
with open(users_list_file[:25] + "_K" + str(k), "w") as f:
cPickle.dump(users_list, f)
with open(items_list_file[:25] + "_K" + str(k), "w") as f:
cPickle.dump(items_list, f)
def RESET(K_MAX, col_edge):
col_edge.update_many({"KCORE":{"$gt": K_MAX-1}}, {"$set": {"KCORE": 0}}, upsert = False)
print "reset done, no k larger or equal than K_MAX"
if __name__ == "__main__":
database = pymongo.MongoClient().dataSet
col_edge = database.user_reaction_filtered
# !!!!!! RESET !!!!!!!! #
### RESET(2, col_edge)
#########################
users_list_file = "data/user_dict_NYTWaPoWSJ_K10"
items_list_file = "data/item_dict_NYTWaPoWSJ_K10"
K = 50
kcore(K, users_list_file, items_list_file, col_edge, store_every_k = True)
| mit | -7,255,527,684,046,012,000 | 41.561151 | 183 | 0.586376 | false | 3.425594 | false | false | false |
Hitachi-Data-Systems/org-chart-builder | openpyxl/xml/tests/test_tags.py | 1 | 1093 | # Copyright (c) 2010-2014 openpyxl
from io import BytesIO
import pytest
from openpyxl.xml.functions import start_tag, end_tag, tag, XMLGenerator
@pytest.fixture
def doc():
return BytesIO()
@pytest.fixture
def root(doc):
return XMLGenerator(doc)
class TestSimpleTag:
def test_start_tag(self, doc, root):
start_tag(root, "start")
assert doc.getvalue() == b"<start>"
def test_end_tag(self, doc, root):
""""""
end_tag(root, "blah")
assert doc.getvalue() == b"</blah>"
class TestTagBody:
def test_start_tag(self, doc, root):
start_tag(root, "start", body="just words")
assert doc.getvalue() == b"<start>just words"
def test_end_tag(self, doc, root):
end_tag(root, "end")
assert doc.getvalue() == b"</end>"
def test_start_tag_attrs(doc, root):
start_tag(root, "start", {'width':"10"})
assert doc.getvalue() == b"""<start width="10">"""
def test_tag(doc, root):
tag(root, "start", {'height':"10"}, "words")
assert doc.getvalue() == b"""<start height="10">words</start>"""
| apache-2.0 | 7,729,934,673,050,552,000 | 22.255319 | 72 | 0.605672 | false | 3.252976 | true | false | false |
polyaxon/polyaxon | platform/polycommon/polycommon/options/option.py | 1 | 3366 | #!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
from typing import Optional, Tuple
from polyaxon.parser import parser
from polycommon.options.exceptions import OptionException
NAMESPACE_DB_OPTION_MARKER = ":"
NAMESPACE_DB_CONFIG_MARKER = "__"
NAMESPACE_SETTINGS_MARKER = "__"
NAMESPACE_ENV_MARKER = ":"
class OptionStores(Enum):
ENV = "env"
DB_OPTION = "db_option"
DB_CONFIG = "db_config"
SETTINGS = "settings"
class OptionScope(Enum):
GLOBAL = "global"
ORGANISATION = "organization"
TEAM = "team"
PROJECT = "project"
USER = "user"
class Option:
key = None
scope = OptionScope.GLOBAL
is_secret = False
is_optional = True
is_list = False
store = None
typing = None
default = None
options = None
description = None
cache_ttl = 0
@staticmethod
def get_default_value():
return None
@classmethod
def default_value(cls):
return cls.default if cls.default is not None else cls.get_default_value()
@classmethod
def is_global(cls):
return cls.scope == OptionScope.GLOBAL
@classmethod
def get_marker(cls) -> str:
if cls.store == OptionStores.DB_OPTION:
return NAMESPACE_DB_OPTION_MARKER
if cls.store == OptionStores.DB_CONFIG:
return NAMESPACE_DB_CONFIG_MARKER
if cls.store == OptionStores.SETTINGS:
return NAMESPACE_SETTINGS_MARKER
return NAMESPACE_ENV_MARKER
@classmethod
def parse_key(cls) -> Tuple[Optional[str], str]:
marker = cls.get_marker()
parts = cls.key.split(marker)
if len(parts) > 2:
raise OptionException(
"Option declared with multi-namespace key `{}`.".format(cls.key)
)
if len(parts) == 1:
return None, cls.key
return parts[0], parts[1]
@classmethod
def get_namespace(cls) -> Optional[str]:
return cls.parse_key()[0]
@classmethod
def get_key_subject(cls):
return cls.parse_key()[1]
@classmethod
def to_dict(cls, value=None):
return {
"key": cls.key,
"typing": cls.typing,
"is_list": cls.is_list,
"is_secret": cls.is_secret,
"value": value if value is not None else cls.default,
"description": cls.description,
}
@classmethod
def _extra_processing(cls, value):
return value
@classmethod
def parse(cls, value):
_value = parser.TYPE_MAPPING[cls.typing](
key=cls.key,
value=value,
is_list=cls.is_list,
is_optional=cls.is_optional,
default=cls.default,
options=cls.options,
)
return cls._extra_processing(_value)
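
# Minimal usage sketch (not part of the original module; the key and values
# below are hypothetical):
#
#     class AgentHeartbeat(Option):
#         key = "agent:heartbeat"
#         store = OptionStores.DB_OPTION
#         typing = "int"
#         default = 60
#
# AgentHeartbeat.parse_key() returns ("agent", "heartbeat"), because the
# DB_OPTION store uses ":" as its namespace marker.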
| apache-2.0 | 6,719,844,214,865,127,000 | 25.928 | 82 | 0.620915 | false | 3.891329 | false | false | false |
patdaburu/djio | docs/source/conf.py | 1 | 7097 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# djio documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 12 15:57:17 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import sphinx_rtd_theme
import os
import sys
# Determine the absolute path to the directory containing the python modules.
_pysrc = os.path.abspath(os.path.join(os.path.abspath(__file__), '..', '..', '..'))
# Insert it into the path.
sys.path.insert(0, _pysrc)
# Now we can import local modules.
import djio
# -- Document __init__ methods by default. --------------------------------
# This section was added to allow __init__() to be documented automatically.
# You can comment this section out to go back to the default behavior.
# See: http://stackoverflow.com/questions/5599254/how-to-use-sphinxs-autodoc-to-document-a-classs-init-self-method
def skip(app, what, name, obj, skip, options):
if name == "__init__":
return False
return skip
def setup(app):
app.connect("autodoc-skip-member", skip)
# http://docs.readthedocs.io/en/latest/faq.html
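# Mock out heavy compiled dependencies so autodoc can import djio on Read
# the Docs, where these packages are not installed.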
import sys
from unittest.mock import MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return MagicMock()
class BaseGeometry(object):
pass
MOCK_MODULES = [
'numpy',
'scipy',
'sklearn',
'matplotlib',
'matplotlib.pyplot',
'scipy.interpolate',
'scipy.special',
'math',
#'typing',
# #'__future__',
'toolboxutilities',
'CaseInsensitiveDict',
'geoalchemy2', 'geoalchemy2.types', 'geoalchemy2.shape',
'shapely', 'shapely.errors', 'shapely.geometry', 'shapely.geometry.base', 'shapely.wkb', 'shapely.wkt',
'measurement', 'measurement.measures', 'osgeo'
]
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'djio'
copyright = '2018, Pat Daburu'
author = 'Pat Daburu'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = djio.__version__
# The full version, including alpha/beta/rc tags.
release = djio.__release__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
'donate.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'djiodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'djio.tex', 'djio Documentation',
'Pat Daburu', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'djio', 'djio Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'djio', 'djio Documentation',
author, 'djio', 'One line description of project.',
'Miscellaneous'),
]
| mit | 4,778,953,984,657,526,000 | 28.570833 | 114 | 0.670565 | false | 3.785067 | true | false | false |
FarhansCode/file_repository | file_repository/models.py | 1 | 5494 | from __future__ import unicode_literals
from django.db import models
import os, re
# Single inode model for both files and directories
class Inode(models.Model):
# This will only be specified if Inode is a root
rootname = models.CharField(max_length=10, default='')
name = models.CharField(max_length=255)
# True = Directory, False = File
is_directory = models.BooleanField(default=False)
# Only makes sense if its a file
content = models.FileField(upload_to='file_repository/_files')
# Only makes senes if its a directory
inodes = models.ManyToManyField('Inode')
def __str__(self):
return self.name
def get_path(self):
path = ''
rootpath = self
while True:
if rootpath.inode_set.count() == 1:
rootpath = rootpath.inode_set.get()
                if rootpath.name != '/': # Not last element
                    path = rootpath.name + '/' + path
                elif rootpath.name == '/': # Last element
path = '/' + path
break
else: # Only for root elements
path = '/' + path
break
return path
def create_file(self, name, content):
try:
exists = self.inodes.get(name=name)
raise Inode.NameConflict(name)
except Inode.DoesNotExist:
pass
new_file = Inode(is_directory=False, rootname=self.rootname)
new_file.content = content
new_file.name = name
new_file.save()
self.inodes.add(new_file)
return new_file
def create_directory(self, name):
try:
exists = self.inodes.get(name=name)
raise Inode.NameConflict(name)
except Inode.DoesNotExist:
pass
new_directory = Inode(is_directory=True, rootname=self.rootname)
new_directory.name = name
new_directory.save()
self.inodes.add(new_directory)
return new_directory
def deletenode(self):
if self.is_directory == False:
os.remove(self.content.path)
self.delete()
else:
# Recursively go through all subdirectories
directories = self.inodes.filter(is_directory = True)
for dir_inode in directories:
dir_inode.deletenode()
# Now delete them all
directories.all().delete()
# And then wipe out the files
files = self.inodes.filter(is_directory = False)
for subfile in files:
subfile.deletenode()
self.delete()
class methods:
def createroot(rootname):
newroot = Inode(name='/', rootname=rootname, is_directory=True)
newroot.save()
return newroot
def getroot(rootname):
root = Inode.objects.get(name='/',
rootname=rootname,
is_directory=True)
return root
def getinode(filedir, rootname):
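            # Walk `filedir` one path component at a time, descending
            # from the root inode, and return the inode matching the
            # final component (raising Redirect302 when a directory is
            # requested without its trailing slash).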
try: # Get the root or die
rootdirectory = Inode.objects.get(rootname=rootname,
name='/',
is_directory=True)
except Inode.DoesNotExist:
raise Inode.Error500('rootname %s does not exist' % rootname)
if filedir == '' or filedir == '/':
return rootdirectory # Quit if its just the root
current_directory = rootdirectory
tempurl = filedir
while tempurl:
lastnode = re.match('^(\/)?([\w\.]+)?(\/?)$', tempurl)
if lastnode is not None:
try:
                        if lastnode.group(1) == '/' and \
                           lastnode.group(2) is None:
                            return current_directory
                        elif lastnode.group(2) is not None:
                            inode = current_directory.inodes.get(
                                name=lastnode.group(2))
                            if inode.is_directory and \
                               lastnode.group(3) != '/':
raise Inode.Redirect302(filedir+'/')
return inode
except Inode.DoesNotExist:
raise Inode.Error404
response = re.match('^([\w\-\.\ ]+)\/([\w\-\.\ \/]+)', tempurl)
if response == None: # Its the last node, kick it back up
continue
tree, tempurl = response.groups()
if tree: # This is a directory
current_directory = current_directory.inodes.get(name=tree,
is_directory=True)
continue
class Error404(Exception):
def __str__(self):
return repr("Inode does not exist")
class Error500(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return repr(self.msg)
class Redirect302(Exception):
def __init__(self, path):
self.newpath = path
class NameConflict(Exception):
def __init__(self, name):
self.name = name
def __str__(self):
return repr("Inode %s already exists" % self.name)
| bsd-2-clause | -8,650,357,869,796,421,000 | 35.872483 | 79 | 0.503094 | false | 4.65199 | false | false | false |
newmediamedicine/indivo_server_1_0 | codingsystems/data/loinc.py | 1 | 1891 | """
LOINC loading
Ben Adida
2010-08-25
"""
from django.utils import simplejson
from loadutils import create_codingsystem
import os.path
import csv
from codingsystems import models
def load(stream, codingsystem, delimiter='\t'):
"""
load data from a file input stream.
"""
csv_reader = csv.reader(stream, delimiter = delimiter)
FIELD_NAMES = ["loinc_num", "component", "property", "time_aspct", "system", "scale_typ", "method_typ", "relat_nms", "class", "source",
"dt_last_ch", "chng_type", "comments", "answerlist", "status", "map_to", "scope", "consumer_name", "ipcc_units", "reference",
"exact_cmp_sy", "molar_mass", "classtype", "formula", "species", "exmpl_answers", "acssym", "base_name", "final",
"naaccr_id", "code_table", "setroot", "panelelements", "survey_quest_text", "survey_quest_src", "unitsrequired", "submitted_units",
"relatednames2", "shortname", "order_obs", "cdisc_common_tests", "hl7_field_subfield_id", "external_copyright_notice", "example_units", "inpc_percentage",
"long_common_name", "hl7_v2_datatype", "hl7_v3_datatype", "curated_range_and_units", "document_section", "definition_description", "example_ucum_units"]
for row in csv_reader:
values = dict([(f, row[i]) for i, f in enumerate(FIELD_NAMES[:len(row)])])
models.CodedValue.objects.create(system = codingsystem,
code = values['loinc_num'],
physician_value = values['component'], consumer_value = values['consumer_name'])
def create_and_load_from(filepath):
if not os.path.isfile(filepath):
print "Can't load LOINC, the file does not exist at %s" % filepath
return
codingsystem = create_codingsystem('loinc', 'LOINC')
load(open(filepath, "r"), codingsystem)
| gpl-3.0 | -8,305,867,522,785,487,000 | 40.108696 | 173 | 0.620307 | false | 3.431942 | false | false | false |
udacity/course-front-end-frameworks | lesson3/quizExpressions/unit_tests.py | 1 | 2472 | import re
is_correct = False
brace_regex = "{{.*}}"
color_regex = "(?:brick.)?color"
size_regex = "(?:brick.)?size"
price_regex = "(?:brick.)?price"
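# `widget_inputs` and `grade_result` are globals injected by the quiz
# grading environment; they are not defined in this file.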
heading = widget_inputs["text1"]
brick_color = widget_inputs["text2"]
brick_size = widget_inputs["text3"]
brick_price = widget_inputs["text4"]
brick_description = widget_inputs["text5"]
comments = []
def commentizer(new):
if new not in comments:
comments.append(new)
if heading == '':
is_correct = True
else:
commentizer("Do you think the heading should change if you use a different brick? Why would a different brick make the heading change?")
#check the brick's color matches a RegEx
if re.search( color_regex, brick_color ):
if not re.search( brace_regex, brick_color ):
is_correct = False
commentizer("What you entered into the color field is correct, but it's still regular text. How do you create an expression in Angular?")
else:
is_correct = is_correct and True
else:
is_correct = False
commentizer("The color field is not correct.")
#check the brick's size matches a RegEx
if re.search( size_regex, brick_size ):
if not re.search( brace_regex, brick_size ):
is_correct = False
commentizer("What you entered into the size field is correct, but it's still regular text. How do you create an expression in Angular?")
else:
is_correct = is_correct and True
else:
is_correct = False
commentizer("The size field is not correct.")
#check the brick's price matches a RegEx
if re.search( price_regex, brick_price ):
if not re.search( brace_regex, brick_price ):
is_correct = False
commentizer("What you entered into the price field is correct, but it's still regular text. How do you create an expression in Angular?")
else:
is_correct = is_correct and True
else:
is_correct = False
commentizer("The price field is not correct.")
# if they're all unchecked
if not any([heading, brick_color, brick_size, brick_price, brick_description]):
is_correct = False
comments = []
comments.append('At least one of these should be converted into an expression.\n\nLook at the data in the template and ask yourself, "Will this change if I use a different brick?" If the answer is yes, then enter the expression into the appropriate field.')
if is_correct:
commentizer("Great job!")
grade_result["comment"] = "\n\n".join(comments)
grade_result["correct"] = is_correct
| mit | -2,914,863,373,141,105,000 | 34.314286 | 261 | 0.691343 | false | 3.551724 | false | false | false |
senarvi/senarvi-speech | filter-text/perplexity.py | 1 | 3615 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Routines for language model estimation and perplexity computation.
#
# Author: Seppo Enarvi
# http://users.marjaniemi.com/seppo/
import sys
import re
import tempfile
import subprocess
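# These helpers shell out to SRILM's `ngram-count` and `ngram` binaries
# (plus a separate `perplexity` tool for the subword case); all are assumed
# to be available on PATH.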
def read_word_segmentations(input_file):
wsegs = dict()
for line in input_file:
line = line.strip()
if line.startswith('#'):
continue
        line = re.sub(r'\d*', '', line)
parts = line.split(r'+')
if len(parts) < 2:
parts = line.split(' ')
parts = [re.sub(' ', '', x) for x in parts]
wrd = ''
for part in parts:
wrd += part
wsegs[wrd] = parts
return wsegs
def word_perplexity(train_text, devel_text, vocabulary=None):
lm_file = tempfile.NamedTemporaryFile(mode='w+', encoding="utf-8")
command = [ 'ngram-count',
'-order', '2',
'-wbdiscount1', '-wbdiscount2',
'-interpolate1', '-interpolate2',
'-text', train_text,
'-lm', lm_file.name ]
if vocabulary is not None:
command.extend(['-unk', '-vocab', vocabulary])
subprocess.check_call(command)
command = [ 'ngram',
'-order', '2',
'-lm', lm_file.name,
'-ppl', devel_text]
if vocabulary is not None:
command.extend(['-unk', '-vocab', vocabulary])
output = subprocess.check_output(command).decode('utf-8').splitlines()
    matches = re.search(r'(\d+) OOVs', output[0])
if matches:
num_oovs = int(matches.group(1))
else:
sys.stderr.write("Unable to parse OOVs from:\n")
sys.stderr.write(output[0])
sys.stderr.write("\n")
sys.exit(1)
    matches = re.search(r'ppl= ?(\d+(\.\d+)?)', output[1])
if matches:
perplexity = float(matches.group(1))
else:
sys.stderr.write("Unable to parse ppl from:\n")
sys.stderr.write(output[1])
sys.stderr.write("\n")
sys.exit(1)
return perplexity, num_oovs
# Segments text according to given word segmentation, to be used as subword
# language model training data.
def segment_text(input_file, output_file, wsegs):
for line in input_file:
line = line.strip()
words = line.split()
output_file.write("<s> <w> ")
for word in words:
subwords = wsegs[word]
for sw in subwords:
output_file.write(sw)
output_file.write(" ")
output_file.write("<w> ")
output_file.write("</s>\n")
def subword_perplexity(train_text, devel_text, wsegs, order=3):
if wsegs is None:
segmented_train_text = train_text
segmented_devel_text = devel_text
else:
segmented_train_text = tempfile.NamedTemporaryFile(mode='w+', encoding="utf-8")
segment_text(train_text, segmented_train_text, wsegs)
segmented_devel_text = tempfile.NamedTemporaryFile(mode='w+', encoding="utf-8")
segment_text(devel_text, segmented_devel_text, wsegs)
lm_file = tempfile.NamedTemporaryFile(mode='w+', encoding="utf-8")
command = [ 'ngram-count',
'-order', str(order),
'-wbdiscount1', '-wbdiscount2', '-wbdiscount3',
'-interpolate1', '-interpolate2', '-interpolate3',
'-text', segmented_train_text.name,
'-lm', lm_file.name ]
subprocess.check_call(command)
command = [ 'perplexity',
'-a', lm_file.name,
'-t', '2',
segmented_devel_text.name,
'-']
output = subprocess.check_output(command, stderr=subprocess.STDOUT).decode('utf-8')
    matches = re.search(r'^Dropped:\s*(\d+) UNKS', output, re.MULTILINE)
if matches:
num_oovs = int(matches.group(1))
else:
sys.stderr.write("Unable to parse UNKS from:\n")
sys.stderr.write(output)
sys.exit(1)
    matches = re.search(r'^Perplexity (\d+(\.\d+)?)', output, re.MULTILINE)
if matches:
perplexity = float(matches.group(1))
else:
sys.stderr.write("Unable to parse Perplexity from:\n")
sys.stderr.write(output)
sys.exit(1)
return perplexity, num_oovs
| apache-2.0 | -6,104,108,968,600,764,000 | 27.464567 | 84 | 0.665007 | false | 2.828638 | false | false | false |
stdgy/adventofcode | 2016/days/8/solution.py | 1 | 2860 | class Screen(object):
def count_lit_pixels(self):
count = 0
for row in self.monitor:
for val in row:
if val == '1':
count = count + 1
return count
def display(self):
for row in self.monitor:
print(''.join(row))
def draw(self, width, height):
for x in range(height):
for y in range(width):
self.monitor[x][y] = '1'
def shift_row_right(self, row, shift):
r = self.monitor[row]
self.monitor[row] = self._shift_list(r, shift)
def shift_col_down(self, col, shift):
# Copy column into a row to make it easier to deal with
new_column = self._copy_column(col)
# Shift values in copied column
new_column = self._shift_list(new_column, shift)
# Put the modified values back into their home column
for idx, val in enumerate(new_column):
self.monitor[idx][col] = val
def _shift_list(self, l, shift_amount):
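        # Rotate the list right by shift_amount, e.g. [a, b, c] rotated
        # by 1 becomes [c, a, b].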
new_list = []
list_length = len(l)
for idx in range(list_length):
val = l[(idx - shift_amount) % list_length]
new_list.append(val)
return new_list
def _copy_column(self, col):
column = []
for row in range(len(self.monitor)):
column.append(self.monitor[row][col])
return column
def __init__(self):
self.monitor = []
# Initialize monitor to all off
for row in range(6):
row = []
for col in range(50):
row.append('0')
self.monitor.append(row)
def parse_rect(line):
dimensions = line[4:]
dimensions = dimensions.split('x')
dimensions = map(int, dimensions)
return dimensions
def parse_rotate_row(line):
rotate_params = line[13:]
rotate_params = rotate_params.split('by')
rotate_params = map(str.strip, rotate_params)
rotate_params = map(int, rotate_params)
return rotate_params
def parse_rotate_col(line):
rotate_params = line[16:]
rotate_params = rotate_params.split('by')
rotate_params = map(str.strip, rotate_params)
rotate_params = map(int, rotate_params)
return rotate_params
if __name__ == '__main__':
s = Screen()
with open('input.txt') as file:
for line in file:
if 'rect' == line[:4]:
width, height = parse_rect(line)
s.draw(width, height)
elif 'rotate row' == line[:10]:
row, shift = parse_rotate_row(line)
s.shift_row_right(row, shift)
elif 'rotate column' == line[:13]:
col, shift = parse_rotate_col(line)
s.shift_col_down(col, shift)
s.display()
print('There are {} pixels lit on the display.'.format(s.count_lit_pixels()))
| mit | -7,901,886,625,612,999,000 | 29.425532 | 81 | 0.552098 | false | 3.808256 | false | false | false |
jweyrich/livemgr-webui | webui/livemgr/models/dummy.py | 1 | 1271 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Jardel Weyrich
#
# This file is part of livemgr-webui.
#
# livemgr-webui is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# livemgr-webui is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with livemgr-webui. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Jardel Weyrich <[email protected]>
from django.db import models
from django.utils.translation import ugettext_lazy as _
class Dashboard(models.Model):
class Meta:
app_label = 'livemgr'
db_table = ''
managed = False
verbose_name = _('dashboard')
permissions = (
("see_dashboard", "Can see dashboard"),
)
class Support(models.Model):
class Meta:
app_label = 'livemgr'
db_table = ''
managed = False
verbose_name = _('support')
permissions = (
("see_support", "Can see support"),
)
| gpl-3.0 | 47,185,329,086,548,020 | 27.886364 | 71 | 0.711251 | false | 3.380319 | false | false | false |
zhangpf/image-tget | tget/filemanager.py | 1 | 7274 | #
# -*- coding: utf-8 -*-
import os
import hashlib
import logging
from twisted.python import log
from twisted.internet import reactor, defer
from bitfield import BitField
from utils import sleep
class BTFileError (Exception) :
pass
class BTHashTestError (Exception):
pass
class BTFile:
def __init__(self, metainfo, saveDir):
self.metainfo = metainfo
self.path = os.path.join(saveDir, metainfo.path)
self.length = metainfo.file_length
self.piece_len = metainfo.piece_length
self.hash_array = metainfo.pieces_hash
self.pieces_size = metainfo.pieces_size
self.fd = None
logging.info("the saved file path is %s" % self.path)
if os.path.exists(self.path):
self.fd = open(self.path, 'rb+')
else:
dirname = os.path.dirname(self.path)
if not os.path.exists(dirname):
os.makedirs(dirname)
self.fd = open(self.path, 'wb')
#print self.abs_pos0, self.abs_pos1, self.piece_len, self.idx0_piece, self.idx1_piece
h, t = os.path.split(self.path)
if not os.path.exists(h):
os.makedirs(h)
def write(self, begin, data):
if begin < 0:
raise BTFileError("Invalid write begin position.")
elif len(data) + begin > self.length:
raise BTFileError("Invalid write end position.")
self.fd.seek(begin)
self.fd.write(data)
def read(self, begin, length):
if length < 0:
raise BTFileError("Invalid read length.")
elif begin < 0:
raise BTFileError("Invalid read begin position.")
elif begin + length > self.length:
raise BTFileError("Invalid read end position.")
self.fd.seek(begin)
data = self.fd.read(length)
return data
def close(self):
if self.fd :
self.fd.close()
self.fd = None
def get_bitfield(self):
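        # Hash-test every piece already on disk: bf_have marks verified
        # pieces, bf_need marks pieces that still need downloading.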
bf_need = BitField(self.pieces_size)
bf_have = BitField(self.pieces_size)
for i in xrange(self.pieces_size):
try :
data = self[i]
if data and self.do_hash_test(i, data):
bf_have[i] = 1
bf_need[i] = 0
else:
bf_have[i] = 0
bf_need[i] = 1
except BTFileError as error :
pass
print bf_have
print bf_need
return bf_have, bf_need
def do_hash_test(self, idx, data):
return hashlib.sha1(data).digest() == self.hash_array[idx]
def __getitem__(self, idx):
end = min((idx + 1) * self.piece_len, self.length)
return self.read(idx * self.piece_len, end - idx * self.piece_len)
def __setitem__(self, idx, data):
self.write(idx * self.piece_len, data)
class BTFileManager :
def __init__(self, btm):
self.btm = btm
self.config = btm.config
metainfo = self.config.metainfo
self.metainfo = metainfo
self.piece_length = metainfo.piece_length
self.pieces_size = metainfo.pieces_size
self.btfile = BTFile(metainfo, self.btm.app.save_dir)
self.bitfield_have, self.bitfield_need = self.btfile.get_bitfield()
self.buffer_max_size = 100 * 2**20 / self.piece_length
self.buffer = {}
self.buffer_record = []
self.buffer_dirty = {}
def start(self) :
self.status = 'started'
reactor.callLater(2, self.deamon_write)
reactor.callLater(2, self.deamon_read)
def stop(self) :
for idx, data in self.buffer_dirty.iteritems():
self.write(idx, data)
self.buffer_dirty.clear()
self.buffer.clear()
del self.buffer_record[:]
self.status = 'stopped'
@defer.inlineCallbacks
def deamon_write(self):
while self.status == 'started':
self.__thread_write()
yield sleep(2)
def __thread_write(self):
if not hasattr(self, '__thread_write_status') :
self.__thread_write_status = 'stopped'
if self.__thread_write_status == 'running' :
return
if not self.buffer_dirty :
return
bfd = self.buffer_dirty.copy()
def call_in_thread():
# Writing to disk
for idx in sorted(bfd.keys()) :
data = bfd[idx]
self.write(idx, data)
reactor.callFromThread(call_from_thread)
def call_from_thread():
self.__thread_write_status = 'stopped'
for idx, data in bfd.iteritems() :
if data is self.buffer_dirty[idx] :
del self.buffer_dirty[idx]
if self.__thread_write_status == 'stopped' :
self.__thread_write_status = 'running'
reactor.callInThread(call_in_thread)
@defer.inlineCallbacks
def deamon_read(self):
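        # Cache janitor: when the piece cache grows past buffer_max_size,
        # evict the least recently used pieces (buffer_record keeps access
        # order, oldest first).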
while self.status == 'started':
size = len(self.buffer)
if size > self.buffer_max_size :
remove_count = size - self.buffer_max_size
remove_count += self.buffer_max_size / 5
for idx in self.buffer_record[:remove_count] :
del self.buffer[idx]
del self.buffer_record[:remove_count]
yield sleep(10)
############################################################
def read_piece(self, index) :
if not (0 <= index < self.pieces_size) :
raise BTFileError('index is out of range')
if not self.bitfield_have[index] :
raise BTFileError('index is not downloaded')
if index in self.buffer :
data = self.buffer[index]
self.buffer_record.remove(index)
self.buffer_record.append(index)
return data
else:
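            # Cache miss: read this piece and prefetch the next three so
            # sequential reads avoid one disk access per piece.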
for idx in [index, index+1, index+2, index+3] :
if 0 <= idx < self.pieces_size and idx not in self.buffer :
data = self.read(idx)
assert data
self.buffer[idx] = data
self.buffer_record.append(idx)
data = self.read_piece(index)
return data
def write_piece(self, index, data) :
if not (0 <= index < self.pieces_size) :
raise BTFileError('index is out of range')
if not self.bitfield_need[index] :
raise BTFileError('index is not need')
if not self.btfile.do_hash_test(index, data):
raise BTHashTestError()
else:
self.bitfield_have[index] = 1
self.bitfield_need[index] = 0
if index in self.buffer :
self.buffer[index] = data
self.buffer_dirty[index] = data
if self.bitfield_have.allOne():
logging.info('almost done!')
return True
def read(self, index):
if index in self.buffer_dirty:
return self.buffer_dirty[index]
return self.btfile[index]
def write(self, index, data) :
self.btfile[index] = data
| apache-2.0 | -8,943,494,309,047,883,000 | 29.057851 | 93 | 0.537256 | false | 3.921294 | false | false | false |
odoo-l10n-ar/l10n_ar_wsafip_fe | addons/l10n_ar_wsafip_fe/wizard/validate_invoices.py | 1 | 2508 | # -*- coding: utf-8 -*-
from openerp.osv import fields, osv
import logging
_logger = logging.getLogger(__name__)
_schema = logging.getLogger(__name__ + '.schema')
class validate_invoices(osv.osv_memory):
_name = 'l10n_ar_wsafip_fe.validate_invoices'
_description = 'Generate CAE from validated invoices'
_columns = {
'journal_id': fields.many2one(
'account.journal', 'Journal', required=True),
'first_invoice_number': fields.integer(
'First invoice number', required=True),
'last_invoice_number': fields.integer(
'Last invoice number', required=True),
}
_defaults = {
'first_invoice_number': 1,
'last_invoice_number': 1,
}
def onchange_journal_id(self, cr, uid, ids, first_invoice_number,
journal_id):
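        # Propose the journal sequence's last used number (number_next - 1)
        # as the range end, clamping the chosen start number to it.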
journal_obj = self.pool.get('account.journal')
res = {}
if journal_id:
num_items = journal_obj.browse(cr, uid, journal_id
).sequence_id.number_next - 1
res['value'] = {
'first_invoice_number': min(first_invoice_number, num_items),
'last_invoice_number': num_items,
}
return res
def execute(self, cr, uid, ids, context=None):
context = context or {}
invoice_obj = self.pool.get('account.invoice')
for qi in self.browse(cr, uid, ids):
journal_id = qi.journal_id.id
number_format = "%s%%0%sd%s" % (
qi.journal_id.sequence_id.prefix,
qi.journal_id.sequence_id.padding,
qi.journal_id.sequence_id.suffix)
            # Get the list of invoices that need a CAE and are already validated.
inv_ids = invoice_obj.search(cr, uid,
[('journal_id', '=', journal_id),
('state', '!=', 'draft'),
('afip_cae', '=', False),
('number', '>=', number_format %
qi.first_invoice_number),
('number', '<=', number_format %
qi.last_invoice_number)],
order='date_invoice')
invoice_obj.action_retrieve_cae(cr, uid, inv_ids)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 4,137,015,824,672,131,600 | 37.569231 | 78 | 0.493418 | false | 4.19933 | false | false | false |
bcgov/gwells | app/backend/wells/migrations/0104_auto_20191121_2152.py | 1 | 21615 | # Generated by Django 2.2.7 on 2019-11-21 21:52
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wells', '0103_auto_20191016_2137'),
]
operations = [
migrations.AlterField(
model_name='activitysubmission',
name='aquifer_lithology',
field=models.ForeignKey(blank=True, db_column='aquifer_lithology_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.AquiferLithologyCode', verbose_name='Aquifer Lithology'),
),
migrations.AlterField(
model_name='activitysubmission',
name='boundary_effect',
field=models.ForeignKey(blank=True, db_column='boundary_effect_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.BoundaryEffectCode', verbose_name='Boundary Effect'),
),
migrations.AlterField(
model_name='activitysubmission',
name='filter_pack_material',
field=models.ForeignKey(blank=True, db_column='filter_pack_material_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.FilterPackMaterialCode', verbose_name='Filter Pack Material'),
),
migrations.AlterField(
model_name='activitysubmission',
name='filter_pack_material_size',
field=models.ForeignKey(blank=True, db_column='filter_pack_material_size_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.FilterPackMaterialSizeCode', verbose_name='Filter Pack Material Size'),
),
migrations.AlterField(
model_name='activitysubmission',
name='ground_elevation_method',
field=models.ForeignKey(blank=True, db_column='ground_elevation_method_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.GroundElevationMethodCode', verbose_name='Elevation Determined By'),
),
migrations.AlterField(
model_name='activitysubmission',
name='intended_water_use',
field=models.ForeignKey(blank=True, db_column='intended_water_use_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.IntendedWaterUseCode', verbose_name='Intended Water Use'),
),
migrations.AlterField(
model_name='activitysubmission',
name='land_district',
field=models.ForeignKey(blank=True, db_column='land_district_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.LandDistrictCode', verbose_name='Land District'),
),
migrations.AlterField(
model_name='activitysubmission',
name='liner_material',
field=models.ForeignKey(blank=True, db_column='liner_material_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.LinerMaterialCode', verbose_name='Liner Material'),
),
migrations.AlterField(
model_name='activitysubmission',
name='owner_province_state',
field=models.ForeignKey(blank=True, db_column='province_state_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='gwells.ProvinceStateCode', verbose_name='Province'),
),
migrations.AlterField(
model_name='activitysubmission',
name='screen_bottom',
field=models.ForeignKey(blank=True, db_column='screen_bottom_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='gwells.ScreenBottomCode', verbose_name='Bottom'),
),
migrations.AlterField(
model_name='activitysubmission',
name='screen_intake_method',
field=models.ForeignKey(blank=True, db_column='screen_intake_method_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='gwells.ScreenIntakeMethodCode', verbose_name='Intake'),
),
migrations.AlterField(
model_name='activitysubmission',
name='screen_material',
field=models.ForeignKey(blank=True, db_column='screen_material_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='gwells.ScreenMaterialCode', verbose_name='Material'),
),
migrations.AlterField(
model_name='activitysubmission',
name='screen_opening',
field=models.ForeignKey(blank=True, db_column='screen_opening_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='gwells.ScreenOpeningCode', verbose_name='Opening'),
),
migrations.AlterField(
model_name='activitysubmission',
name='screen_type',
field=models.ForeignKey(blank=True, db_column='screen_type_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='gwells.ScreenTypeCode', verbose_name='Type'),
),
migrations.AlterField(
model_name='activitysubmission',
name='surface_seal_material',
field=models.ForeignKey(blank=True, db_column='surface_seal_material_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.SurfaceSealMaterialCode', verbose_name='Surface Seal Material'),
),
migrations.AlterField(
model_name='activitysubmission',
name='surface_seal_method',
field=models.ForeignKey(blank=True, db_column='surface_seal_method_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.SurfaceSealMethodCode', verbose_name='Surface Seal Installation Method'),
),
migrations.AlterField(
model_name='activitysubmission',
name='well',
field=models.ForeignKey(blank=True, db_column='well_tag_number', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.Well'),
),
migrations.AlterField(
model_name='activitysubmission',
name='well_activity_type',
field=models.ForeignKey(db_column='well_activity_code', on_delete=django.db.models.deletion.PROTECT, to='submissions.WellActivityCode', verbose_name='Type of Work'),
),
migrations.AlterField(
model_name='activitysubmission',
name='well_class',
field=models.ForeignKey(blank=True, db_column='well_class_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.WellClassCode', verbose_name='Well Class'),
),
migrations.AlterField(
model_name='activitysubmission',
name='well_disinfected_status',
field=models.ForeignKey(blank=True, db_column='well_disinfected_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.WellDisinfectedCode', verbose_name='Well Disinfected Code'),
),
migrations.AlterField(
model_name='activitysubmission',
name='well_orientation_status',
field=models.ForeignKey(blank=True, db_column='well_orientation_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.WellOrientationCode', verbose_name='Well Orientation Code'),
),
migrations.AlterField(
model_name='activitysubmission',
name='well_publication_status',
field=models.ForeignKey(db_column='well_publication_status_code', default='Published', on_delete=django.db.models.deletion.PROTECT, to='wells.WellPublicationStatusCode', verbose_name='Well Publication Status'),
),
migrations.AlterField(
model_name='activitysubmission',
name='well_status',
field=models.ForeignKey(blank=True, db_column='well_status_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.WellStatusCode', verbose_name='Well Status'),
),
migrations.AlterField(
model_name='activitysubmission',
name='well_subclass',
field=models.ForeignKey(blank=True, db_column='well_subclass_guid', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.WellSubclassCode', verbose_name='Well Subclass'),
),
migrations.AlterField(
model_name='activitysubmission',
name='well_yield_unit',
field=models.ForeignKey(blank=True, db_column='well_yield_unit_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.WellYieldUnitCode'),
),
migrations.AlterField(
model_name='activitysubmission',
name='yield_estimation_method',
field=models.ForeignKey(blank=True, db_column='yield_estimation_method_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.YieldEstimationMethodCode', verbose_name='Estimation Method'),
),
migrations.AlterField(
model_name='casing',
name='casing_code',
field=models.ForeignKey(db_column='casing_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.CasingCode', verbose_name='Casing Type Code'),
),
migrations.AlterField(
model_name='casing',
name='casing_material',
field=models.ForeignKey(blank=True, db_column='casing_material_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.CasingMaterialCode', verbose_name='Casing Material Code'),
),
migrations.AlterField(
model_name='casing',
name='drive_shoe_status',
field=models.ForeignKey(blank=True, db_column='drive_shoe_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.DriveShoeCode', verbose_name='Drive Shoe Code'),
),
migrations.AlterField(
model_name='lithologydescription',
name='bedrock_material',
field=models.ForeignKey(blank=True, db_column='bedrock_material_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='gwells.BedrockMaterialCode', verbose_name='Bedrock Material'),
),
migrations.AlterField(
model_name='lithologydescription',
name='bedrock_material_descriptor',
field=models.ForeignKey(blank=True, db_column='bedrock_material_descriptor_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='gwells.BedrockMaterialDescriptorCode', verbose_name='Descriptor'),
),
migrations.AlterField(
model_name='lithologydescription',
name='lithology_colour',
field=models.ForeignKey(blank=True, db_column='lithology_colour_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='gwells.LithologyColourCode', verbose_name='Colour'),
),
migrations.AlterField(
model_name='lithologydescription',
name='lithology_description',
field=models.ForeignKey(blank=True, db_column='lithology_description_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='gwells.LithologyDescriptionCode', verbose_name='Description'),
),
migrations.AlterField(
model_name='lithologydescription',
name='lithology_hardness',
field=models.ForeignKey(blank=True, db_column='lithology_hardness_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='gwells.LithologyHardnessCode', verbose_name='Hardness'),
),
migrations.AlterField(
model_name='lithologydescription',
name='lithology_material',
field=models.ForeignKey(blank=True, db_column='lithology_material_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='gwells.LithologyMaterialCode', verbose_name='Material'),
),
migrations.AlterField(
model_name='lithologydescription',
name='lithology_moisture',
field=models.ForeignKey(blank=True, db_column='lithology_moisture_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='gwells.LithologyMoistureCode', verbose_name='Moisture'),
),
migrations.AlterField(
model_name='lithologydescription',
name='lithology_structure',
field=models.ForeignKey(blank=True, db_column='lithology_structure_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='gwells.LithologyStructureCode', verbose_name='Bedding'),
),
migrations.AlterField(
model_name='lithologydescription',
name='secondary_surficial_material',
field=models.ForeignKey(blank=True, db_column='secondary_surficial_material_code', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='secondary_surficial_material_set', to='gwells.SurficialMaterialCode', verbose_name='Secondary Surficial Material'),
),
migrations.AlterField(
model_name='lithologydescription',
name='surficial_material',
field=models.ForeignKey(blank=True, db_column='surficial_material_code', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='surficial_material_set', to='gwells.SurficialMaterialCode', verbose_name='Surficial Material'),
),
migrations.AlterField(
model_name='lithologydescription',
name='water_bearing_estimated_flow_units',
field=models.ForeignKey(blank=True, db_column='well_yield_unit_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.WellYieldUnitCode', verbose_name='Units'),
),
migrations.AlterField(
model_name='screen',
name='assembly_type',
field=models.ForeignKey(blank=True, db_column='screen_assembly_type_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='gwells.ScreenAssemblyTypeCode'),
),
migrations.AlterField(
model_name='well',
name='aquifer_lithology',
field=models.ForeignKey(blank=True, db_column='aquifer_lithology_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.AquiferLithologyCode', verbose_name='Aquifer Lithology'),
),
migrations.AlterField(
model_name='well',
name='boundary_effect',
field=models.ForeignKey(blank=True, db_column='boundary_effect_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.BoundaryEffectCode', verbose_name='Boundary Effect'),
),
migrations.AlterField(
model_name='well',
name='drilling_company',
field=models.ForeignKey(blank=True, db_column='drilling_company_guid', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.DrillingCompany', verbose_name='Drilling Company'),
),
migrations.AlterField(
model_name='well',
name='filter_pack_material',
field=models.ForeignKey(blank=True, db_column='filter_pack_material_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.FilterPackMaterialCode', verbose_name='Filter Pack Material'),
),
migrations.AlterField(
model_name='well',
name='filter_pack_material_size',
field=models.ForeignKey(blank=True, db_column='filter_pack_material_size_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.FilterPackMaterialSizeCode', verbose_name='Filter Pack Material Size'),
),
migrations.AlterField(
model_name='well',
name='ground_elevation_method',
field=models.ForeignKey(blank=True, db_column='ground_elevation_method_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.GroundElevationMethodCode', verbose_name='Elevation Determined By'),
),
migrations.AlterField(
model_name='well',
name='intended_water_use',
field=models.ForeignKey(blank=True, db_column='intended_water_use_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.IntendedWaterUseCode', verbose_name='Intended Water Use'),
),
migrations.AlterField(
model_name='well',
name='land_district',
field=models.ForeignKey(blank=True, db_column='land_district_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.LandDistrictCode', verbose_name='Land District'),
),
migrations.AlterField(
model_name='well',
name='liner_material',
field=models.ForeignKey(blank=True, db_column='liner_material_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.LinerMaterialCode', verbose_name='Liner Material'),
),
migrations.AlterField(
model_name='well',
name='owner_province_state',
field=models.ForeignKey(blank=True, db_column='province_state_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='gwells.ProvinceStateCode', verbose_name='Province'),
),
migrations.AlterField(
model_name='well',
name='screen_bottom',
field=models.ForeignKey(blank=True, db_column='screen_bottom_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='gwells.ScreenBottomCode', verbose_name='Bottom'),
),
migrations.AlterField(
model_name='well',
name='screen_intake_method',
field=models.ForeignKey(blank=True, db_column='screen_intake_method_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='gwells.ScreenIntakeMethodCode', verbose_name='Intake Method'),
),
migrations.AlterField(
model_name='well',
name='screen_material',
field=models.ForeignKey(blank=True, db_column='screen_material_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='gwells.ScreenMaterialCode', verbose_name='Material'),
),
migrations.AlterField(
model_name='well',
name='screen_opening',
field=models.ForeignKey(blank=True, db_column='screen_opening_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='gwells.ScreenOpeningCode', verbose_name='Opening'),
),
migrations.AlterField(
model_name='well',
name='screen_type',
field=models.ForeignKey(blank=True, db_column='screen_type_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='gwells.ScreenTypeCode', verbose_name='Type'),
),
migrations.AlterField(
model_name='well',
name='surface_seal_material',
field=models.ForeignKey(blank=True, db_column='surface_seal_material_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.SurfaceSealMaterialCode', verbose_name='Surface Seal Material'),
),
migrations.AlterField(
model_name='well',
name='surface_seal_method',
field=models.ForeignKey(blank=True, db_column='surface_seal_method_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.SurfaceSealMethodCode', verbose_name='Surface Seal Installation Method'),
),
migrations.AlterField(
model_name='well',
name='well_class',
field=models.ForeignKey(db_column='well_class_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.WellClassCode', verbose_name='Well Class'),
),
migrations.AlterField(
model_name='well',
name='well_disinfected_status',
field=models.ForeignKey(blank=True, db_column='well_disinfected_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.WellDisinfectedCode', verbose_name='Well Disinfected Code'),
),
migrations.AlterField(
model_name='well',
name='well_orientation_status',
field=models.ForeignKey(blank=True, db_column='well_orientation_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.WellOrientationCode', verbose_name='Well Orientation Code'),
),
migrations.AlterField(
model_name='well',
name='well_publication_status',
field=models.ForeignKey(db_column='well_publication_status_code', default='Published', on_delete=django.db.models.deletion.PROTECT, to='wells.WellPublicationStatusCode', verbose_name='Well Publication Status'),
),
migrations.AlterField(
model_name='well',
name='well_status',
field=models.ForeignKey(blank=True, db_column='well_status_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.WellStatusCode', verbose_name='Well Status'),
),
migrations.AlterField(
model_name='well',
name='well_subclass',
field=models.ForeignKey(blank=True, db_column='well_subclass_guid', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.WellSubclassCode', verbose_name='Well Subclass'),
),
migrations.AlterField(
model_name='well',
name='well_yield_unit',
field=models.ForeignKey(blank=True, db_column='well_yield_unit_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.WellYieldUnitCode'),
),
migrations.AlterField(
model_name='well',
name='yield_estimation_method',
field=models.ForeignKey(blank=True, db_column='yield_estimation_method_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.YieldEstimationMethodCode', verbose_name='Estimation Method'),
),
]
| apache-2.0 | 4,704,822,000,817,943,000 | 61.834302 | 280 | 0.661161 | false | 3.898106 | false | false | false |
zippynk/ripoffbot | tools/printDatabase.py | 1 | 2552 | #!/usr/bin/env python
# Tool for viewing ripoffbot databases.
# Created by Nathan Krantz-Fire (a.k.a zippynk).
# Ships with ripoffbot - http://github.com/zippynk/ripoffbot
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import pickle
from datetime import datetime
thisVersion = [0,4,0] # The version of ripoffbot, as a list of numbers (eg [0,1,0] means "v0.1.0"). A "d" at the end means that the current version is a development version and very well may break at some point.
# Begin dev edition code.
if "d" in thisVersion:
print "WARNING! This is a development version of ripoffbot. Proceeding may corrupt ripoffbot database files, crash, and/or have other consequences. Proceed at your own risk."
if not raw_input("Are you sure you want to proceed? (y/n) ").lower() in ["yes","y","true","continue","yea","yeah","yup","sure"]:
print "Aborting."
exit(0)
# End Dev Edition Code.
if os.path.isfile(os.path.expanduser("~") +'/.ripoffbot_database.p'):
dbLoad = pickle.load(open(os.path.expanduser("~") +'/.ripoffbot_database.p','rb'))
if dbLoad['version'] == [0,2,0]:
messages = dbLoad['messages']
elif dbLoad['version'] == [0,3,0]:
messages = dbLoad['messages']
elif dbLoad['version'] == [0,3,1]:
messages = dbLoad['messages']
elif dbLoad['version'] == [0,4,0]:
messages = dbLoad['messages']
else:
print "This database was created with an old or unknown version of ripoffbot. Please use the newest version (or correct fork) and try again. If this is not possible or does not work, move or delete the file '~/.ripoffbot_database.p' and re-run ripoffbot. A new database will be created automatically. You may also want to try running recoverDeveloperVersion.py to recover a script marked with a developer version tag."
exit(0)
else:
print "No database found."
exit(0)
def saveDb(): # not needed for current functionality, but keeping just in case
if USEDB == True:
pickle.dump({'messages':messages,'version':thisVersion}, open(os.path.expanduser("~") +'/.ripoffbot_database.p','wb'))
print "Created with ripoffbot version: " +str(dbLoad['version'])
for i in messages:
print "{0} -> {1} - {2} ({3}, {4}): {5}".format(i[0],i[1],str(i[5]),"Sent publically" if i[4] else "Sent privately","To be delivered privately" if i[3] else "To be delivered publically",i[2])
| mpl-2.0 | 4,806,929,526,086,902,000 | 51.081633 | 426 | 0.680251 | false | 3.271795 | false | false | false |
khangnguyen/LeapController | LeapController.py | 1 | 6250 | ################################################################################
# Author: Khang Nguyen #
# Written: September 2013 #
################################################################################
import Leap, sys
from Leap import CircleGesture, KeyTapGesture, ScreenTapGesture, SwipeGesture
from appscript import *
from osax import *
class ItunesListener(Leap.Listener):
def on_init(self, controller):
print "Initialized"
self.itunes = app('itunes')
self.osax = OSAX()
self.swipes = {}
def on_connect(self, controller):
print "Connected"
# Enable gestures
controller.enable_gesture(Leap.Gesture.TYPE_CIRCLE);
controller.enable_gesture(Leap.Gesture.TYPE_KEY_TAP);
controller.enable_gesture(Leap.Gesture.TYPE_SCREEN_TAP);
controller.enable_gesture(Leap.Gesture.TYPE_SWIPE);
def on_disconnect(self, controller):
# Note: not dispatched when running in a debugger.
print "Disconnected"
def on_exit(self, controller):
print "Exited"
def on_frame(self, controller):
# Get the most recent frame and report some basic information
frame = controller.frame()
if not frame.hands.is_empty:
# Gestures
for gesture in frame.gestures():
if gesture.type == Leap.Gesture.TYPE_CIRCLE:
circle = CircleGesture(gesture)
# Determine clock direction using the angle between the pointable and the circle normal
if circle.pointable.direction.angle_to(circle.normal) <= Leap.PI/4:
clockwiseness = "clockwise"
else:
clockwiseness = "counterclockwise"
# Calculate the angle swept since the last frame
swept_angle = 0
if circle.state != Leap.Gesture.STATE_START:
previous_update = CircleGesture(controller.frame(1).gesture(circle.id))
swept_angle = (circle.progress - previous_update.progress) * 2 * Leap.PI
print "Circle id: %d, %s, progress: %f, radius: %f, angle: %f degrees, %s" % (
gesture.id, self.state_string(gesture.state),
circle.progress, circle.radius, swept_angle * Leap.RAD_TO_DEG, clockwiseness)
volumeSettings = self.osax.get_volume_settings()
currentVolume = volumeSettings[k.output_volume]
# Max vlue volumeSettings returns is 100
# But max value set_volume takes is 7
currentVolume = currentVolume * 7.0 / 100
if clockwiseness == 'clockwise':
self.osax.set_volume(currentVolume + 0.1)
else:
self.osax.set_volume(currentVolume - 0.1)
if gesture.type == Leap.Gesture.TYPE_SWIPE:
swipe = SwipeGesture(gesture)
print "Swipe id: %d, state: %s, position: %s" % (
gesture.id, self.state_string(gesture.state), swipe.position)
if not self.swipes.get(gesture.id):
self.swipes[gesture.id] = {}
gestures = self.swipes.get(gesture.id)
if self.state_string(gesture.state) == "STATE_START":
gestures['STATE_START'] = gesture
if self.state_string(gesture.state) == "STATE_STOP":
gestures['STATE_STOP'] = gesture
if gestures.get('STATE_START') and gestures.get('STATE_STOP'):
startGesture = SwipeGesture(gestures['STATE_START'])
stopGesture = SwipeGesture(gestures['STATE_STOP'])
if startGesture.position[0] - stopGesture.position[0] > 70:
self.itunes.next_track()
elif startGesture.position[0] - stopGesture.position[0] < -70:
self.itunes.previous_track()
print "START x", startGesture.position[0]
print "STOP x", stopGesture.position[0]
if gesture.type == Leap.Gesture.TYPE_KEY_TAP:
keytap = KeyTapGesture(gesture)
print "Key Tap id: %d, %s, position: %s, direction: %s" % (
gesture.id, self.state_string(gesture.state),
keytap.position, keytap.direction )
if gesture.type == Leap.Gesture.TYPE_SCREEN_TAP:
screentap = ScreenTapGesture(gesture)
print "Screen Tap id: %d, %s, position: %s, direction: %s" % (
gesture.id, self.state_string(gesture.state),
screentap.position, screentap.direction )
playerState = self.itunes.player_state()
if playerState == k.playing:
self.itunes.pause()
if playerState == k.paused:
self.itunes.play()
def state_string(self, state):
if state == Leap.Gesture.STATE_START:
return "STATE_START"
if state == Leap.Gesture.STATE_UPDATE:
return "STATE_UPDATE"
if state == Leap.Gesture.STATE_STOP:
return "STATE_STOP"
if state == Leap.Gesture.STATE_INVALID:
return "STATE_INVALID"
def main():
# Create an itunes listener and controller
listener = ItunesListener()
controller = Leap.Controller()
# Have the sample listener receive events from the controller
controller.add_listener(listener)
# Keep this process running until Enter is pressed
print "Press Enter to quit..."
sys.stdin.readline()
# Remove the sample listener when done
controller.remove_listener(listener)
if __name__ == "__main__":
main()
| mit | -6,920,043,446,760,458,000 | 43.326241 | 107 | 0.51616 | false | 4.147313 | false | false | false |
danielhkl/matplotlib2tikz | test/test_image_plot.py | 1 | 1702 | # -*- coding: utf-8 -*-
#
import helpers
import pytest
# the picture 'lena.png' with origin='lower' is flipped upside-down.
# So it has to be upside-down in the pdf-file as well.
# test for monochrome picture
def plot1():
from matplotlib import rcParams
import matplotlib.pyplot as plt
from PIL import Image
import os
this_dir = os.path.dirname(os.path.realpath(__file__))
lena = Image.open(os.path.join(this_dir, 'lena.png'))
lena = lena.convert('L')
dpi = rcParams['figure.dpi']
figsize = lena.size[0]/dpi, lena.size[1]/dpi
fig = plt.figure(figsize=figsize)
ax = plt.axes([0, 0, 1, 1], frameon=False)
ax.set_axis_off()
plt.imshow(lena, cmap='viridis', origin='lower')
# Set the current color map to HSV.
plt.hsv()
plt.colorbar()
return fig
# test for rgb picture
def plot2():
from matplotlib import rcParams
import matplotlib.pyplot as plt
from PIL import Image
import os
this_dir = os.path.dirname(os.path.realpath(__file__))
lena = Image.open(os.path.join(this_dir, 'lena.png'))
dpi = rcParams['figure.dpi']
figsize = lena.size[0] / dpi, lena.size[1] / dpi
fig = plt.figure(figsize=figsize)
ax = plt.axes([0, 0, 1, 1], frameon=False)
ax.set_axis_off()
plt.imshow(lena, cmap='viridis', origin='lower')
# Set the current color map to HSV.
plt.hsv()
plt.colorbar()
return fig
@pytest.mark.parametrize(
'plot, reference_phash', [
(plot1, '455361ec211d72fb'),
(plot2, '7558d3b30f634b06'),
]
)
def test(plot, reference_phash):
phash = helpers.Phash(plot())
assert phash.phash == reference_phash, phash.get_details()
return
| mit | -2,738,905,011,422,670,000 | 26.451613 | 68 | 0.640423 | false | 3.077758 | false | false | false |
thurairaj92/pythonProjects | Treemap.py/node.py | 1 | 4358 | class Tree(object):
'''A tree that stores information of a directory, each node in the tree
contains the name and size of a file in the directory.'''
class Node(object):
'''A node in tree stores the size and name of a file.'''
def __init__(self, k):
'''(Node, tuple) -> Nonetype
Create a Node with key k, k is in the form of (filename, filesize).
and _range None, _range is the range that one file occupied
in the pygame screen.'''
self.key = k
self._range = None
def total(self):
'''Node -> int
Return the size of a file in the directory stored in Node.'''
return self.key[1]
def getting_range(self, x, y):
'''(Node, int, int) -> tuple
Getting the range that a file occupied in the pygame window,
using a helper function.'''
return _getting_range(self, x, y)
def __init__(self):
'''Tree -> NoneType
Create a Tree with root None, child as an empty list
and _range as None.'''
self.root = None
self.child = []
self._range = None
def __str__(self):
'''Tree -> str
Return the string representation of the root of a tree.'''
return self.root.key
def insert_directory(self, k):
'''(Tree, tuple) -> Nonetype
Insert a new directory at the end of Tree.
Tuple k is in the form of (directory, size)'''
if self.root:
new_tree = Tree()
new_tree.insert_directory(k)
self.child.append(new_tree)
else:
self.root = Tree.Node(k)
def insert_files(self, k):
'''(Tree, tuple) -> Nonetype
Insert a new file to a directory Tree.
Tuple k is in the form of (filename, size)'''
self.child.append(Tree.Node(k))
def search_tree(self, d):
'''(Tree, unicode) -> object
Search if the directory d is in the tree by a helper function.'''
return _search_tree(self, d)
def total(self):
'''Tree -> Nonetype
Return the total size of a directory Tree by a helper function.'''
return _total(self)
def getting_range(self, x, y):
'''(Tree, int, int) -> onject
Return the range of a Tree.'''
return _getting_range(self, x, y)
def _total(tree):
'''Tree -> tuple
Return the total size of a directory stored in Tree.
tuple is in the form of (x coordinate, y coordinate).'''
if tree.child:
_sum = tree.root.key[1]
for child in tree.child:
if type(child) == Tree:
_sum += child.total()
else:
_sum += child.total()
else:
return tree.root.key[1]
return _sum
def _getting_range(tree, x, y):
'''(Object, int, int) -> object
Return the file name and file size that (x, y) indicates in
pygame window.'''
if type(tree) == Tree:
if tree.child and tree._range:
if x in tree._range[0] and y in tree._range[1]:
for child in tree.child:
filename = _getting_range(child, x, y)
if filename:
return filename
return tree.root.key
else:
return None
elif tree._range and x in tree._range[0] and y in tree._range[1]:
return tree.root.key[0]
elif type(tree) == Tree.Node:
if tree._range and x in tree._range[0] and y in tree._range[1]:
return tree.key
return None
def _search_tree(tree, name):
'''(Tree, unicode) -> object
If name is in the tree, return the subtree start from where name is
located in the tree. Return True or False if name is leaf or not in
the tree.'''
if type(tree) == Tree:
if tree.root.key[0] == name:
return tree
else:
for child in tree.child:
contain_tree = _search_tree(child, name)
if type(contain_tree) == Tree:
return contain_tree
elif contain_tree == True:
return tree
return None
else:
if tree.key[0] == name:
return True
else:
return False
| mit | -6,524,453,870,722,641,000 | 27.860927 | 79 | 0.532354 | false | 4.150476 | false | false | false |
johnwyles/PySpeak | pyspeak/commandline.py | 1 | 3124 | """
Commandline execute function
Example (already included in 'pyspeak' executable in this project):
#!/usr/bin/evn python
from pyspeak.commandline import execute
execute()
"""
import argparse
import logging
from pyspeak import __version__
from listener import Listener
from speaker import Speaker
def execute():
"""
Execute method called from the commandline executeable
"""
# Parse command line arguments
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
subparsers = parser.add_subparsers(help='Subcommand for pyspeak to either listen for microphone input (speech-to-text) or text input (text-to-speech)')
# Setup Listener argument parser subcommand
parser_listener = subparsers.add_parser('listen', help='Listen for microphone input (speech-to-text)')
parser_listener.add_argument('-f', '--filename-prefix', default='pyspeak_file', help='Default prefix location and filename for the temporary file %(prog) uses to store data. This stores a .wav and .flac file of this prefix (e.g. "./pyspeak_file" => [./pyspeak_file.wav, ./pyspeak_file.flac])')
parser_listener.add_argument('-s', '--silence-time', type=int, default=2, help='Amount of silence time (in seconds) to listen for before dispatching voice data for recognition')
parser_listener.add_argument('-t', '--threshold', type=int, default=80, help='Threshold for detecting speech input; depending on your microphone settings you may need to experiment a bit with this value')
parser_listener.set_defaults(func=_listener)
# Setup Speaker argument parser subcommand
parser_speaker = subparsers.add_parser('speak', help='Listen for text input (text-to-speech')
parser_speaker.set_defaults(func=_speaker)
parser.add_argument('-l', '--loglevel', default='INFO', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'], help='Log level for console output')
parser.add_argument('-v', '--version', help='Get the version number and exit', action='store_true')
arguments = parser.parse_args()
# Print the version number and exit
if arguments.version:
print __name__ + ": " + __version__
exit(0)
# Setup logging
try:
number_level = getattr(logging, arguments.loglevel.upper(), None)
if not isinstance(number_level, int):
raise ValueError('Invalid log level: %s' % number_level)
except:
number_level
logging.basicConfig(format='[%(asctime)s] [%(name)s] [%(levelname)s]: %(message)s', level=number_level, nargs='?')
# Callback to argument parser subcommands
arguments.func(arguments)
def _listener(arguments):
"""
Listener subcommand callback
"""
logging.info('Starting Listener')
listener = Listener(silence_time=arguments.silence_time, threshold=arguments.threshold, filename_prefix=arguments.filename_prefix)
listener.get_utterance()
def _speaker(arguments):
"""
Speaker subcommand callback
"""
logging.info('Starting Speaker')
speaker = Speaker()
speaker.get_google_translation('en', 'Hello World!')
| gpl-3.0 | -7,192,280,384,282,254,000 | 39.571429 | 298 | 0.702945 | false | 4.015424 | false | false | false |
Captricity/captricity-api-quickstarts | hackathon/fppc_data_munging_example.py | 1 | 6683 | import csv
import re
import time
from captools.api import Client
from cStringIO import StringIO
from pprint import PrettyPrinter
from collections import Counter
CAP_API_TOKEN = 'YOUR CAPTRICITY API TOKEN'
pp = PrettyPrinter(indent=4)
def create_fancy_csv_from_job(job_id, name):
# Initialize Captricity Python Client (installation instructions in README
# at https://github.com/Captricity/captools)
start = time.time()
client = Client(api_token=CAP_API_TOKEN)
# Read all the Instance Sets associated with this job
isets = client.read_instance_sets(job_id)
# For each Instance Set, we will pull down all the Shreds and record the
# transcribed value and generate a link to the Shred image.
all_iset_data = []
fields = {}
fields['0'] = 'Form Name'
fields['0.5'] = 'Form Name Image Link'
for iset in isets:
shreds = client.read_instance_set_shreds(iset['id'])
iset_data = {}
iset_data['0'] = iset['name']
for shred in shreds:
if '0.5' not in iset_data:
iset_data['0.5'] = 'https://shreddr.captricity.com/api/v1/instance/%s/aligned-image' % shred['instance_id']
# Key on Field id because Field name can be duplicated
field_id = shred['field']['id']
iset_data[str(field_id)] = shred['best_estimate'].encode('utf-8') if shred['best_estimate'] else None
iset_data[str(field_id + 0.5)] = 'https://shreddr.captricity.com/api/v1/shred/%s/image' % shred['id']
# We'll order export by Field ID, links will be (field_id + 0.5) so they will be next to the Field in CSV
fields[str(field_id)] = shred['field']['name']
fields[str(field_id + 0.5)] = shred['field']['name'] + ' Image Link'
all_iset_data.append(iset_data)
if len(all_iset_data) % 5 == 0:
print 'Done with %s Instance Sets from Job %s in %s sec, %s to go' % (len(all_iset_data), job_id, time.time() - start, len(isets) - len(all_iset_data))
# Export all the data as CSV
data_out = [fields] + all_iset_data
header = sorted(fields.keys())
if job_id in [3968, 4606]:
# No depivot for cover page or addenda
buffer = open('%s.csv' % name, 'w')
else:
buffer = StringIO()
csv_writer = csv.DictWriter(buffer, header, restval=u'--no data--')
csv_writer.writerows(data_out)
if job_id in [3968, 4606]:
buffer.close()
else:
buffer.seek(0)
depivot_data(csv.reader(buffer), '%s.csv' % name)
def depivot_data(reader, outfile_name):
"""
This takes in a csv and 'depivots' the data. This is useful when a single
row of the data actually includes multiple rows. This depivots the data
using the heuristic that when something can be depivoted, the field names
(column headers) are the same for each depivoted row (so that we have a 'name'
column for each depivoted row in the raw row).
"""
headers = reader.next()
header_counts = Counter()
# First count all the headers, to find candidates for depivoting
for header in headers:
header_counts[header] += 1
# Seperate out the singletons from the candidates for depivoting
singleton_headers = [k for k,v in header_counts.items() if v == 1]
# Figure out the column indices of each depivoted row group
singleton_column_index = {} # The column indices of the singletons
repeated_column_sets = [] # The column indices of each row group
leftmost = None
for i, header in enumerate(headers):
# Seperately track the singleton column indices
if header in singleton_headers:
singleton_column_index[header] = i
else:
# First, we want to find the left most column.
# This will be used to determine when we need to
# add another row group
if not leftmost:
leftmost = header
if leftmost == header:
repeated_column_sets.append({})
# Figure out the most likely row group this header belongs to
for x in repeated_column_sets:
if header not in x:
x[header] = i
break
# At this point we know how many row groups exist, and which headers
# correspond to each row group. We will use this information to construct
# the depivoted csv
# First, construct the new headers. This consists of all the singletons,
# and all the headers in one of the repeated column sets, with the
# repeated column headers coming before the singletons. Note that
# we will sort each header based on the original ordering.
new_headers = []
# Add the "depivoted" row headers
if len(repeated_column_sets) > 0:
tmp = repeated_column_sets[0]
tmp_new_headers = tmp.keys()
tmp_new_headers.sort(key=lambda x: tmp[x])
for t in tmp_new_headers:
new_headers.append(t)
# Add the singletons
new_singleton_headers = singleton_column_index.keys()
new_singleton_headers.sort(key=lambda x: singleton_column_index[x])
for h in new_singleton_headers[1:]:
new_headers.append(h)
# Keep the first column the same, since that includes the name of the row
new_headers.insert(0, new_singleton_headers[0])
# Construct the depivoted row
depivoted_csv_out = csv.DictWriter(open(outfile_name, 'w'), new_headers)
depivoted_csv_out.writeheader()
for row in reader:
# For each row, we want to extract the depivoted rows (if there are any that
# need depivoting). We will simply repeat the singletons in each depivoted row.
if len(repeated_column_sets) == 0:
depivoted_row = {k: row[v] for k,v in singleton_column_index.items()}
depivoted_csv_out.writerow(depivoted_row)
else:
for column_set in repeated_column_sets:
depivoted_row = {k: row[v] for k,v in singleton_column_index.items()}
depivoted_row.update({k : row[v] for k,v in column_set.items()})
depivoted_csv_out.writerow(depivoted_row)
if __name__ == '__main__':
for job_id, name in [
(3968, 'Cover Page'),
(3975, 'A-1 Form 700--Investment Disclosures'),
(3977, 'A-2 Form 700--Business Entity Ownership'),
(3978, 'B Form 700--Real Property Listings'),
(4036, 'C Form 700--Income Reporting'),
(3980, 'D Form 700--Gift Disclosures'),
(3981, 'E Form 700--Travel Payments'),
(4607, 'FPPC Judges Addenda')
]:
create_fancy_csv_from_job(job_id, name)
| mit | 4,663,777,163,566,287,000 | 43.258278 | 163 | 0.625617 | false | 3.626153 | false | false | false |
nedbat/zellij | zellij/cmd.py | 1 | 6188 | """Command-line interface for Zellij."""
import math
import pprint
import click
from zellij.color import random_color, parse_color
from zellij.debug import debug_world, debug_click_options, should_debug
from zellij.design import get_design
from zellij.drawing import Drawing
from zellij.path import combine_paths, draw_paths, clip_paths, perturb_paths
from zellij.path_tiler import PathTiler
from zellij.strap import strapify
def size_type(s):
"""For specifying the size: either WxH, or W (square)"""
if 'x' in s:
width, height = s.split('x')
else:
width = height = s
return int(width.strip()), int(height.strip())
_common_options = {
'common':[
*debug_click_options,
],
'drawing': [
click.option('--output', help='File name to write to'),
click.option('--tiles', type=float, default=3, help='How many tiles to fit in the drawing'),
click.option('--size', type=size_type, default='800', help='Size of the output'),
click.option('--rotate', type=float, default=0, help='Angle to rotate the drawing'),
click.option('--background', type=parse_color, help='The color of the background'),
click.option('--format', help='The output format, png or svg'),
click.option('--perturb', type=float, default=0, help='A random amount to jostle points'),
click.argument('design'),
],
}
def common_options(category):
"""Provide a set of common options to a click command."""
def _wrapped(func):
# from: https://github.com/pallets/click/issues/108#issuecomment-194465429
for option in reversed(_common_options[category]):
func = option(func)
return func
return _wrapped
def start_drawing(opt, **drawing_args):
"""Make a Drawing based on the options passed."""
width, height = opt['size']
bg = opt['background']
def_bg = drawing_args.pop('bg', (1, 1, 1))
if bg is None:
bg = def_bg
name = opt['output']
def_name = drawing_args.pop('name', 'drawing')
format = opt['format']
dwg = Drawing(width, height, name=name or def_name, format=format, bg=bg, **drawing_args)
dwg.translate(width/2, height/2)
dwg.rotate(opt['rotate'])
dwg.translate(-width/2, -height/2)
return dwg
@click.group()
def clickmain():
"""Make Islamic-inspired geometric art."""
pass
@clickmain.command()
@common_options('common')
@common_options('drawing')
@click.option("--strap-width", type=float, default=6, help='Width of the straps, in tile-percent')
def straps(**opt):
"""Draw with over-under straps"""
dwg = start_drawing(opt, name="straps", bg=(.8, .8, .8))
tilew = int(dwg.width/opt['tiles'])
if opt['strap_width'] > 0:
strap_kwargs = dict(width=tilew * opt['strap_width'] / 100, random_factor=0)
else:
strap_kwargs = dict(width=tilew / 60, random_factor=4.9)
tiler = PathTiler(dwg)
design_class = get_design(opt['design'])
draw = design_class(tilew)
draw.draw(tiler)
paths_all = combine_paths(tiler.paths)
paths = clip_paths(paths_all, dwg.perimeter().bounds())
if opt['perturb']:
paths = perturb_paths(paths, opt['perturb'])
if should_debug('world'):
debug_world(dwg, paths_styles=[
(paths_all, dict(width=1, rgb=(.75, .75, .75))),
(paths, dict(width=1.5, rgb=(1, 0, 0))),
])
straps = strapify(paths, **strap_kwargs)
with dwg.style(rgb=(1, 1, 1)):
for strap in straps:
strap.sides[0].draw(dwg)
strap.sides[1].draw(dwg, append=True, reverse=True)
dwg.close_path()
dwg.fill()
with dwg.style(rgb=(0, 0, 0), width=2):
for strap in straps:
for side in strap.sides:
side.draw(dwg)
dwg.stroke()
dwg.finish()
@clickmain.command()
@common_options('common')
@common_options('drawing')
def candystripe(**opt):
"""Draw with crazy colors and a white stripe"""
dwg = start_drawing(opt, name="candy")
tilew = int(dwg.width/opt['tiles'])
tiler = PathTiler(dwg)
design_class = get_design(opt['design'])
draw = design_class(tilew)
draw.draw(tiler)
paths = combine_paths(tiler.paths)
LINE_WIDTH = tilew/4
dwg.multi_stroke(paths, [
#(LINE_WIDTH, (0, 0, 0)),
(LINE_WIDTH-2, random_color),
#(7, (0, 0, 0)),
(5, (1, 1, 1)),
])
dwg.finish()
@clickmain.command()
@common_options('common')
@common_options('drawing')
def diagram(**opt):
"""Draw the underlying structure of a design"""
width, height = opt['size']
tilew = int(width/opt['tiles'])
dwg = Drawing(width, height, name="diagram")
design_class = get_design(opt['design'])
draw = design_class(tilew)
# The full pattern.
tiler = PathTiler(dwg)
draw.draw(tiler)
with dwg.style(rgb=(.5, .5, .5)):
draw_paths(tiler.paths, dwg)
dwg.stroke()
# The symmetry.
tiler = PathTiler(dwg)
tiler.tile_p6m(draw.draw_tiler_unit, tilew)
with dwg.style(rgb=(1, .75, .75), width=1, dash=[5, 5]):
draw_paths(tiler.paths, dwg)
dwg.stroke()
def single_tiler():
"""Make a PathTiler right for drawing just one unit."""
tiler = PathTiler(dwg)
# TODO: make this work for other symmetries
tiler.pc.translate(2 * tilew * math.sqrt(3) / 2, tilew)
tiler.pc.reflect_xy(0, 0)
return tiler
# The tiler unit.
tiler = single_tiler()
draw.draw_tiler_unit(tiler.pc)
with dwg.style(rgb=(1, 0, 0), width=3):
draw_paths(tiler.paths, dwg)
dwg.stroke()
# The design.
tiler = single_tiler()
draw.draw_tile(tiler.pc)
with dwg.style(rgb=(0, 0, 0), width=6):
draw_paths(tiler.paths, dwg)
dwg.stroke()
dwg.finish()
@clickmain.command()
@common_options('common')
@common_options('drawing')
def show_opts(**opt):
"""Dump the provided options"""
pprint.pprint(opt)
def main():
"""The main Zellij entry point."""
try:
clickmain()
except:
#print("Whoops!")
raise
if __name__ == '__main__':
main()
| apache-2.0 | 2,686,110,141,197,884,400 | 27.385321 | 100 | 0.60488 | false | 3.191336 | false | false | false |
skywind3000/vim | lib/escope.py | 1 | 33279 | #! /usr/bin/env python2
# -*- coding: utf-8 -*-
#======================================================================
#
# escope.py -
#
# Created by skywind on 2016/11/02
# Last change: 2016/11/02 18:12:09
#
#======================================================================
import sys
import time
import os
import json
import hashlib
import datetime
if sys.version_info[0] >= 3:
raise "Must be using Python 2"
#----------------------------------------------------------------------
# execute and capture
#----------------------------------------------------------------------
def execute(args, shell = False, capture = False):
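	"""Execute a command line given as a string or an argument list.
	When capture is True the combined stdout/stderr text is returned,
	otherwise it is written to the console.  With shell enabled the
	command runs through the system shell (os.system style)."""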
import sys, os
parameters = []
if type(args) in (type(''), type(u'')):
import shlex
cmd = args
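		# shlex treats backslashes as escape characters, so shield them
		# with a placeholder around the split to keep windows paths intact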
if sys.platform[:3] == 'win':
ucs = False
if type(cmd) == type(u''):
cmd = cmd.encode('utf-8')
ucs = True
args = shlex.split(cmd.replace('\\', '\x00'))
args = [ n.replace('\x00', '\\') for n in args ]
if ucs:
args = [ n.decode('utf-8') for n in args ]
else:
args = shlex.split(cmd)
for n in args:
if sys.platform[:3] != 'win':
replace = { ' ':'\\ ', '\\':'\\\\', '\"':'\\\"', '\t':'\\t',
'\n':'\\n', '\r':'\\r' }
text = ''.join([ replace.get(ch, ch) for ch in n ])
parameters.append(text)
else:
if (' ' in n) or ('\t' in n) or ('"' in n):
parameters.append('"%s"'%(n.replace('"', ' ')))
else:
parameters.append(n)
cmd = ' '.join(parameters)
if sys.platform[:3] == 'win' and len(cmd) > 255:
shell = False
if shell and (not capture):
os.system(cmd)
return ''
elif (not shell) and (not capture):
import subprocess
if 'call' in subprocess.__dict__:
subprocess.call(args)
return ''
import subprocess
if 'Popen' in subprocess.__dict__:
if sys.platform[:3] != 'win' and shell:
p = None
stdin, stdouterr = os.popen4(cmd)
else:
p = subprocess.Popen(args, shell = shell,
stdin = subprocess.PIPE, stdout = subprocess.PIPE,
stderr = subprocess.STDOUT)
stdin, stdouterr = (p.stdin, p.stdout)
else:
p = None
stdin, stdouterr = os.popen4(cmd)
text = stdouterr.read()
stdin.close()
stdouterr.close()
if p: p.wait()
if not capture:
sys.stdout.write(text)
sys.stdout.flush()
return ''
return text
#----------------------------------------------------------------------
# redirect process output to reader(what, text)
#----------------------------------------------------------------------
def redirect(args, reader, combine = True):
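	"""Spawn a process and feed its output line by line to the callback
	reader(what, text), where what is 'stdout' or 'stderr'.  When combine
	is True stderr is merged into stdout.  Returns the exit code, or
	None when it cannot be obtained."""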
import subprocess
if 'Popen' in subprocess.__dict__:
p = subprocess.Popen(args, shell = False,
stdin = subprocess.PIPE, stdout = subprocess.PIPE,
stderr = combine and subprocess.STDOUT or subprocess.PIPE)
stdin, stdout, stderr = p.stdin, p.stdout, p.stderr
if combine: stderr = None
else:
p = None
		if combine == False:
			stdin, stdout, stderr = os.popen3(args)
		else:
			stdin, stdout = os.popen4(args)
			stderr = None
stdin.close()
while 1:
text = stdout.readline()
if text == '':
break
reader('stdout', text)
while stderr != None:
text = stderr.readline()
if text == '':
break
reader('stderr', text)
stdout.close()
if stderr: stderr.close()
retcode = None
if p:
retcode = p.wait()
return retcode
#----------------------------------------------------------------------
# configure
#----------------------------------------------------------------------
class configure (object):
def __init__ (self, ininame = None):
self.ininame = ininame
self.unix = (sys.platform[:3] != 'win') and 1 or 0
self.config = {}
self.rc = None
self._search_config()
self._search_cscope()
self._search_gtags()
self._search_pycscope()
self._search_rc()
rc = self.option('default', 'rc', None)
if rc and os.path.exists(rc):
self.rc = self.abspath(rc)
self.config['default']['rc'] = rc
self.has_cscope = (self.option('default', 'cscope') != None)
self.has_gtags = (self.option('default', 'gtags') != None)
self.has_pycscope = (self.option('default', 'pycscope') != None)
self.exename = {}
if self.has_cscope:
cscope = self.option('default', 'cscope')
if self.unix:
self.exename['cscope'] = os.path.join(cscope, 'cscope')
else:
self.exename['cscope'] = os.path.join(cscope, 'cscope.exe')
if self.has_gtags:
gtags = self.option('default', 'gtags')
if self.unix:
f = lambda n: os.path.join(gtags, n)
else:
g = lambda n: os.path.join(gtags, n + '.exe')
f = lambda n: os.path.abspath(g(n))
self.exename['gtags'] = f('gtags')
self.exename['global'] = f('global')
self.exename['gtags-cscope'] = f('gtags-cscope')
if self.has_pycscope:
pycscope = self.option('default', 'pycscope')
if self.unix:
pycscope = os.path.join(pycscope, 'pycscope')
else:
pycscope = os.path.join(pycscope, 'pycscope.exe')
self.exename['pycscope'] = pycscope
self.GetShortPathName = None
self.database = None
# search escope config
def _search_config (self):
self.config = {}
self.config['default'] = {}
if self.ininame and os.path.exists(self.ininame):
self._read_ini(self.ininame)
return 0
fullname = os.path.abspath(__file__)
testname = os.path.splitext(fullname)[0] + '.ini'
if os.path.exists(testname):
self._read_ini(testname)
self.ininame = testname
if self.unix:
self._read_ini('/etc/escope.ini')
self._read_ini('/usr/local/etc/escope.ini')
self._read_ini(os.path.expanduser('~/.config/escope.ini'))
return 0
def _read_ini (self, filename):
import ConfigParser
if not os.path.exists(filename):
return -1
fp = open(filename, 'r')
		cp = ConfigParser.ConfigParser()
		cp.readfp(fp)
for sect in cp.sections():
if not sect in self.config:
self.config[sect] = {}
for key, value in cp.items(sect):
self.config[sect.lower()][key.lower()] = value
fp.close()
return 0
# read option
def option (self, sect, item, default = None):
if not sect in self.config:
return default
return self.config[sect].get(item, default)
# search cscope
def _search_cscope (self):
def _test_cscope(path):
if not os.path.exists(path):
return False
if sys.platform[:3] != 'win':
if not os.path.exists(os.path.join(path, 'cscope')):
return False
else:
if not os.path.exists(os.path.join(path, 'cscope.exe')):
return False
return True
cscope = self.option('default', 'cscope')
if cscope:
if _test_cscope(cscope):
self.config['default']['cscope'] = os.path.abspath(cscope)
return 0
self.config['default']['cscope'] = None
cscope = os.path.abspath(os.path.dirname(__file__))
if _test_cscope(cscope):
self.config['default']['cscope'] = cscope
return 0
PATH = os.environ.get('PATH', '').split(self.unix and ':' or ';')
for path in PATH:
if _test_cscope(path):
self.config['default']['cscope'] = os.path.abspath(path)
return 0
return -1
# search gtags executables
def _search_gtags (self):
def _test_gtags(path):
if not os.path.exists(path):
return False
if sys.platform[:3] != 'win':
if not os.path.exists(os.path.join(path, 'gtags')):
return False
if not os.path.exists(os.path.join(path, 'global')):
return False
if not os.path.exists(os.path.join(path, 'gtags-cscope')):
return False
else:
if not os.path.exists(os.path.join(path, 'gtags.exe')):
return False
if not os.path.exists(os.path.join(path, 'global.exe')):
return False
if not os.path.exists(os.path.join(path, 'gtags-cscope.exe')):
return False
return True
gtags = self.option('default', 'gtags')
if gtags:
if _test_gtags(gtags):
self.config['default']['gtags'] = os.path.abspath(gtags)
return 0
self.config['default']['gtags'] = None
gtags = os.path.abspath(os.path.dirname(__file__))
if _test_gtags(gtags):
self.config['default']['gtags'] = gtags
return 0
PATH = os.environ.get('PATH', '').split(self.unix and ':' or ';')
for path in PATH:
if _test_gtags(path):
self.config['default']['gtags'] = os.path.abspath(path)
return 0
return -1
# search pycscope
def _search_pycscope (self):
def _test_pycscope(path):
if not os.path.exists(path):
return False
if sys.platform[:3] != 'win':
if not os.path.exists(os.path.join(path, 'pycscope')):
return False
else:
if not os.path.exists(os.path.join(path, 'pycscope.exe')):
return False
return True
pycscope = self.option('default', 'pycscope')
if pycscope:
if _test_pycscope(pycscope):
pycscope = os.path.abspath(pycscope)
self.config['default']['pycscope'] = pycscope
return 0
self.config['default']['pycscope'] = None
pycscope = os.path.abspath(os.path.dirname(__file__))
if _test_pycscope(pycscope):
self.config['default']['pycscope'] = pycscope
return 0
PATH = os.environ.get('PATH', '').split(self.unix and ':' or ';')
for path in PATH:
if _test_pycscope(path):
self.config['default']['pycscope'] = os.path.abspath(path)
return 0
return -1
# abspath
def abspath (self, path, resolve = False):
if path == None:
return None
if '~' in path:
path = os.path.expanduser(path)
path = os.path.abspath(path)
if not self.unix:
return path.lower().replace('\\', '/')
if resolve:
return os.path.abspath(os.path.realpath(path))
return path
# search gtags rc
def _search_rc (self):
rc = self.option('default', 'rc', None)
if rc != None:
rc = self.abspath(rc)
if os.path.exists(rc):
self.config['default']['rc'] = rc
return 0
self.config['default']['rc'] = None
rc = self.abspath('~/.globalrc')
if os.path.exists(rc):
self.config['default']['rc'] = rc
return 0
if self.unix:
rc = '/etc/gtags.conf'
if os.path.exists(rc):
self.config['default']['rc'] = rc
return 0
rc = '/usr/local/etc/gtags.conf'
if os.path.exists(rc):
self.config['default']['rc'] = rc
return 0
gtags = self.option('default', 'gtags')
if gtags == None:
return -1
rc = os.path.join(gtags, '../share/gtags/gtags.conf')
rc = self.abspath(rc)
if os.path.exists(rc):
self.config['default']['rc'] = rc
return -1
# short name in windows
def pathshort (self, path):
path = os.path.abspath(path)
if self.unix:
return path
if not self.GetShortPathName:
self.kernel32 = None
self.textdata = None
try:
import ctypes
self.kernel32 = ctypes.windll.LoadLibrary("kernel32.dll")
self.textdata = ctypes.create_string_buffer('\000' * 1024)
self.GetShortPathName = self.kernel32.GetShortPathNameA
args = [ ctypes.c_char_p, ctypes.c_char_p, ctypes.c_int ]
self.GetShortPathName.argtypes = args
self.GetShortPathName.restype = ctypes.c_uint32
except: pass
if not self.GetShortPathName:
return path
retval = self.GetShortPathName(path, self.textdata, 1024)
shortpath = self.textdata.value
if retval <= 0:
return ''
return shortpath
# recursion make directory
def mkdir (self, path):
path = os.path.abspath(path)
if os.path.exists(path):
return 0
name = ''
part = os.path.abspath(path).replace('\\', '/').split('/')
if self.unix:
name = '/'
if (not self.unix) and (path[1:2] == ':'):
part[0] += '/'
for n in part:
name = os.path.abspath(os.path.join(name, n))
if not os.path.exists(name):
os.mkdir(name)
return 0
# execute a gnu global executable
def execute (self, name, args, capture = False, printcmd = False):
if name in self.exename:
name = self.exename[name]
name = self.pathshort(name)
#printcmd = True
if printcmd:
print [name] + args
if not capture in (0, 1, True, False, None):
return redirect([name] + args, capture)
return execute([name] + args, False, capture)
# initialize environment
def init (self):
if self.rc and os.path.exists(self.rc):
os.environ['GTAGSCONF'] = os.path.abspath(self.rc)
os.environ['GTAGSFORCECPP'] = '1'
PATH = os.environ.get('PATH', '')
gtags = self.option('default', 'gtags')
if self.unix:
if gtags: PATH = gtags + ':' + PATH
else:
if gtags: PATH = gtags + ';' + PATH
os.environ['PATH'] = PATH
database = self.option('default', 'database', None)
if database:
database = self.abspath(database, True)
elif 'ESCOPE' in os.environ:
escope = os.environ['ESCOPE']
if not escope.lower() in (None, '', '/', '\\', 'c:/', 'c:\\'):
database = self.abspath(escope)
if database == None:
database = self.abspath('~/.local/var/escope', True)
if not os.path.exists(database):
self.mkdir(database)
if not os.path.exists(database):
raise Exception('Cannot create database folder: %s'%database)
self.database = database
return 0
# get project db path
def pathdb (self, root):
if (self.database == None) or (root == None):
return None
root = root.strip()
root = self.abspath(root)
hash = hashlib.md5(root).hexdigest().lower()
hash = hash[:16]
path = os.path.abspath(os.path.join(self.database, hash))
return (self.unix) and path or path.replace('\\', '/')
# load project desc
def load (self, root):
db = self.pathdb(root)
if db == None:
return None
cfg = os.path.join(db, 'config.json')
if not os.path.exists(cfg):
return None
fp = open(cfg, 'r')
content = fp.read()
fp.close()
try:
obj = json.loads(content)
except:
return None
if type(obj) != type({}):
return None
return obj
# save project desc
def save (self, root, obj):
db = self.pathdb(root)
if db == None or type(obj) != type({}):
return -1
cfg = os.path.join(db, 'config.json')
text = json.dumps(obj, indent = 4)
fp = open(cfg, 'w')
fp.write(text)
fp.close()
return 0
def timestamp (self):
return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def strptime (self, text):
return datetime.datetime.strptime(text, "%Y-%m-%d %H:%M:%S")
def get_size (self, path = '.'):
total_size = 0
for dirpath, dirnames, filenames in os.walk(path):
for f in filenames:
fp = os.path.join(dirpath, f)
total_size += os.path.getsize(fp)
return total_size
# list all projects in database
def list (self, garbage = None):
roots = []
if garbage == None:
garbage = []
if self.database == None:
return None
if not os.path.exists(self.database):
return None
for name in os.listdir(self.database):
name = name.strip()
if len(name) != 16:
garbage.append(name)
continue
path = os.path.join(self.database, name)
if not os.path.isdir(path):
garbage.append(name)
continue
desc = None
cfg = os.path.join(path, 'config.json')
if os.path.exists(cfg):
try:
fp = open(cfg, 'r')
text = fp.read()
fp.close()
desc = json.loads(text)
except:
desc = None
if type(desc) != type({}):
desc = None
root = (desc != None) and desc.get('root', '') or ''
if desc == None or root == '':
garbage.append(name)
continue
if desc.get('db', '') == '':
garbage.append(name)
continue
roots.append((name, root, desc))
return roots
# select and initialize a project
def select (self, root):
if root == None:
return None
root = root.strip()
root = self.abspath(root)
db = self.pathdb(root)
self.mkdir(db)
os.environ['GTAGSROOT'] = os.path.abspath(root)
os.environ['GTAGSDBPATH'] = os.path.abspath(db)
desc = self.load(root)
if desc:
if not 'root' in desc:
desc = None
elif not 'db' in desc:
desc = None
if desc == None:
desc = {}
desc['root'] = root
desc['db'] = db
desc['ctime'] = self.timestamp()
desc['mtime'] = self.timestamp()
desc['version'] = 0
desc['size'] = 0
self.save(root, desc)
return desc
# clear invalid files in the database path
def clear (self):
if self.database == None:
return -1
if not os.path.exists(self.database):
return -2
if self.database == '/':
return -3
		database = os.path.abspath(self.database)
		if len(database) == 3 and self.unix == 0:
			if database[1] == ':':
				return -4
garbage = []
self.list(garbage)
import shutil
for name in garbage:
path = os.path.join(self.database, name)
if not os.path.exists(path):
continue
if os.path.isdir(path):
shutil.rmtree(path, True)
else:
try: os.remove(path)
except: pass
return 0
#----------------------------------------------------------------------
# escope - gtags wrapper
#----------------------------------------------------------------------
class escope (object):
def __init__ (self, ininame = None):
self.config = configure(ininame)
self.desc = None
self.root = None
self.db = None
self.cscope_names = ['.c', '.h', '.cpp', '.cc', '.hpp', '.hh']
self.cscope_names += ['.go', '.java', '.js', '.m', '.mm']
self.ignores = ('CVS', '.git', '.svn', '.hg', '.bzr')
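	# initialize configuration: locate executables and the database folder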
def init (self):
if self.config.database != None:
return 0
self.config.init()
return 0
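	# select a project root, loading or creating its descriptor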
def select (self, root):
self.desc = None
self.root = None
desc = self.config.select(root)
if desc == None:
return -1
self.desc = desc
self.root = self.config.abspath(root)
self.db = self.desc['db']
return 0
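	# write an error message to stderr and terminate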
def abort (self, message, code = 1):
sys.stderr.write(message + '\n')
sys.stderr.flush()
		sys.exit(code)
return -1
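	# backend availability checks: abort when the executable is missing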
def check_cscope (self):
if not self.config.has_cscope:
self.abort('cscope executable cannot be found in $PATH')
return False
return True
def check_gtags (self):
if not self.config.has_gtags:
msg = 'GNU Global (gtags) executables cannot be found in $PATH'
self.abort(msg)
return False
return True
def check_pycscope (self):
if not self.config.has_pycscope:
self.abort('pycscope executable cannot be found in $PATH')
return False
return True
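	# walk the tree below path and collect files matching extnames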
def find_files (self, path, extnames = None):
result = []
if extnames:
if not self.config.unix:
extnames = [ n.lower() for n in extnames ]
extnames = tuple(extnames)
for root, dirs, files in os.walk(path):
for ignore in self.ignores:
if ignore in dirs:
dirs.remove(ignore)
for name in files:
if extnames:
ext = os.path.splitext(name)[-1]
if not self.config.unix:
ext = ext.lower()
if not ext in extnames:
continue
result.append(os.path.abspath(os.path.join(root, name)))
return result
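	# read file names (one per line) from a list file, or from stdin for '-'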
def find_list (self, path, filelist):
result = []
lines = []
if filelist == '-':
for line in sys.stdin:
lines.append(line.rstrip('\r\n\t '))
else:
for line in open(filelist):
lines.append(line.rstrip('\r\n\t '))
for line in lines:
if not line:
continue
line = os.path.join(path, line)
result.append(os.path.abspath(line))
return result
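	# generate the cscope database (cscope.out) for the selected project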
def cscope_generate (self, include = None, kernel = False, filelist = None, verbose = 0):
if not self.check_cscope():
return -1
if (self.desc == None) or (self.root == None):
self.abort('Project has not been selected')
return -2
if not filelist:
names = self.find_files(self.root, self.cscope_names)
else:
names = self.find_list(self.root, filelist)
		listname = os.path.join(self.db, 'cscope.txt')
if verbose:
for fn in names:
print fn
sys.stdout.flush()
fp = open(listname, 'w')
for line in names:
fp.write(line + '\n')
fp.close()
args = ['-b']
if kernel:
args += ['-k']
if include:
for inc in include:
args += ['-I', os.path.join(self.root, inc)]
if self.config.unix:
args += ['-q']
args += ['-i', 'cscope.txt']
savecwd = os.getcwd()
os.chdir(self.db)
self.config.execute('cscope', args)
os.chdir(savecwd)
self.desc['mtime'] = self.config.timestamp()
self.desc['version'] = self.desc['version'] + 1
self.config.save(self.desc['root'], self.desc)
return 0
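	# generate or incrementally update the GNU Global database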
def gtags_generate (self, label = None, update = False, filelist = None, verbose = False):
if not self.check_gtags():
return -1
if (self.desc == None) or (self.root == None):
self.abort('Project has not been selected')
return -2
args = ['--skip-unreadable']
if label:
args += ['--gtagslabel', label]
if verbose:
args += ['-v']
if update:
if not type(update) in (type(''), type(u'')):
args += ['-i']
else:
args += ['--single-update', update]
if filelist:
names = self.find_list(self.root, filelist)
listname = os.path.join(self.root, 'gtags.txt')
fp = open(listname, 'w')
for name in names:
fp.write(name + '\n')
fp.close()
args += ['-f', listname]
db = self.desc['db']
args += [db]
cwd = os.getcwd()
os.chdir(self.root)
self.config.execute('gtags', args)
os.chdir(cwd)
self.desc['mtime'] = self.config.timestamp()
self.desc['version'] = self.desc['version'] + 1
self.config.save(self.desc['root'], self.desc)
return 0
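	# generate the pycscope database (pycscope.out) for python sources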
def pycscope_generate (self, filelist = None, verbose = False):
if not self.check_pycscope():
return -1
if (self.desc == None) or (self.root == None):
self.abort('Project has not been selected')
return -2
if not filelist:
names = self.find_files(self.root, ['.py', '.pyw'])
else:
names = self.find_list(self.root, filelist)
		listname = os.path.join(self.db, 'pycscope.txt')
if verbose:
for fn in names:
print fn
sys.stdout.flush()
fp = open(listname, 'w')
for name in names:
fp.write(name + '\n')
fp.close()
args = ['-i', 'pycscope.txt', '-f', 'pycscope.out']
savecwd = os.getcwd()
os.chdir(self.db)
self.config.execute('pycscope', args)
os.chdir(savecwd)
self.desc['mtime'] = self.config.timestamp()
self.desc['version'] = self.desc['version'] + 1
self.config.save(self.desc['root'], self.desc)
return 0
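	# convert one line of cscope -L output into "file:line: <<symbol>> text"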
def cscope_translate (self, where, text):
text = text.rstrip('\r\n')
if text == '':
return -1
p1 = text.find(' ')
if p1 < 0:
return -2
p2 = text.find(' ', p1 + 1)
if p2 < 0:
return -3
p3 = text.find(' ', p2 + 1)
if p3 < 0:
return -4
cname = text[:p1]
csymbol = text[p1 + 1:p2]
cline = text[p2 + 1:p3]
ctext = text[p3 + 1:]
output = '%s:%s: <<%s>> %s'%(cname, cline, csymbol, ctext)
sys.stdout.write(output + '\n')
sys.stdout.flush()
return 0
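	# query the cscope database in line-oriented mode (mode is the field number)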
def cscope_find (self, mode, name):
if not self.check_cscope():
return -1
if (self.desc == None) or (self.root == None):
self.abort('Project has not been selected')
return -2
args = ['-dl', '-L', '-f', 'cscope.out', '-' + str(mode), name]
savecwd = os.getcwd()
os.chdir(self.db)
self.config.execute('cscope', args, self.cscope_translate)
os.chdir(savecwd)
return 0
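	# query the pycscope database through the cscope line-oriented interface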
def pycscope_find (self, mode, name):
if not self.check_cscope():
return -1
if (self.desc == None) or (self.root == None):
self.abort('Project has not been selected')
return -2
args = ['-dl', '-L', '-f', 'pycscope.out', '-' + str(mode), name]
savecwd = os.getcwd()
os.chdir(self.db)
self.config.execute('cscope', args, self.cscope_translate)
os.chdir(savecwd)
return 0
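	# query the GNU Global database, mapping cscope field numbers to global flags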
def gtags_find (self, mode, name):
if (self.desc == None) or (self.root == None):
self.abort('Project has not been selected')
return -1
args = ['-a', '--result', 'grep']
if mode in (0, '0', 's', 'symbol'):
self.config.execute('global', args + ['-d', '-e', name])
self.config.execute('global', args + ['-r', '-e', name])
self.config.execute('global', args + ['-s', '-e', name])
elif mode in (1, '1', 'g', 'definition'):
self.config.execute('global', args + ['-d', '-e', name])
elif mode in (3, '3', 'c', 'reference'):
self.config.execute('global', args + ['-r', '-e', name])
elif mode in (4, '4', 't', 'string', 'text'):
self.config.execute('global', args + ['-gGo', '-e', name])
elif mode in (6, '6', 'e', 'grep', 'egrep'):
self.config.execute('global', args + ['-gEo', '-e', name])
elif mode in (7, '7', 'f', 'file'):
self.config.execute('global', args + ['-P', '-e', name])
else:
			sys.stderr.write('unsupported mode: %s\n'%str(mode))
sys.stderr.flush()
return 0
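	# dispatch database generation to the chosen backend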
def generate (self, backend, parameter, update = False, filelist = None, verbose = False):
if (self.desc == None) or (self.root == None):
self.abort('Project has not been selected')
return -1
if backend in ('cscope', 'cs'):
kernel = True
if parameter.lower() in ('1', 'true', 'sys', 'system'):
kernel = False
return self.cscope_generate(None, kernel, filelist, verbose)
elif backend in ('pycscope', 'py'):
return self.pycscope_generate(filelist, verbose)
elif backend in ('gtags', 'global', 'gnu'):
return self.gtags_generate(parameter, update, filelist, verbose)
else:
			self.abort('unknown backend: %s'%backend)
return 0
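	# dispatch a query to the chosen backend, given as "name" or "name/parameter"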
def find (self, backend, mode, name):
if (self.desc == None) or (self.root == None):
self.abort('Project has not been selected')
return -1
backend = backend.split('/')
engine = backend[0]
parameter = len(backend) >= 2 and backend[1] or ''
if engine in ('cscope', 'cs'):
return self.cscope_find(mode, name)
elif engine in ('pycscope', 'py'):
return self.pycscope_find(mode, name)
elif engine in ('gtags', 'global', 'gnu'):
return self.gtags_find(mode, name)
else:
            self.abort('unknown backend: %s'%backend)
return 0
def list (self):
if self.config.database == None:
            self.abort('Initializing is required')
return -1
print 'Database:', self.config.database
print ''
print 'Hash'.ljust(16), 'Size(KB)'.rjust(11), ' Modified'.ljust(12), ' Root'
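        # add_commas renders an integer string with thousands separators,
        # e.g. '1048576' -> '1,048,576'.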
def add_commas(instr):
rng = reversed(range(1, len(instr) + (len(instr) - 1)//3 + 1))
out = [',' if j%4 == 0 else instr[-(j - j//4)] for j in rng]
return ''.join(out)
for name, root, desc in self.config.list():
db = os.path.join(self.config.database, name)
size = (self.config.get_size(db) + 1023) / 1024
size = add_commas(str(size))
print name, size.rjust(11), '', desc['mtime'][:10], ' ', desc['root']
print ''
return 0
def clean (self, days):
if self.config.database == None:
            self.abort('Initializing is required')
return -1
self.config.clear()
import datetime, time, shutil
d0 = datetime.datetime.fromtimestamp(time.time())
for name, root, desc in self.config.list():
mtime = desc['mtime']
path = os.path.join(self.config.database, name)
d1 = self.config.strptime(mtime)
dd = d0 - d1
if dd.days >= days and os.path.exists(path):
sys.stdout.write('%s ... '%name)
sys.stdout.flush()
shutil.rmtree(path)
sys.stdout.write('(removed)\n')
sys.stdout.flush()
return 0
#----------------------------------------------------------------------
# errmsg
#----------------------------------------------------------------------
def errmsg(message, abort = False):
sys.stderr.write('error: ' + message + '\n')
sys.stderr.flush()
if abort:
sys.exit(2)
return 0
#----------------------------------------------------------------------
# main
#----------------------------------------------------------------------
def main(argv = None):
argv = (argv == None) and sys.argv or argv
argv = [ n for n in argv ]
if len(argv) <= 1:
errmsg('no operation specified (use -h for help)', True)
return -1
operator = argv[1]
program = os.path.split(argv[0])[-1]
if operator in ('-h' , '--help'):
        print 'usage: %s <operation> [...]'%program
print 'operations:'
head = ' %s '%program
print head + '{-h --help}'
print head + '{-V --version}'
print head + '{-B --build} [-k backend] [-r root] [-l label] [-u] [-i] [-v] [-s]'
print head + '{-F --find} [-k backend] [-r root] -num pattern'
print head + '{-C --clean} [-d days]'
print head + '{-L --list}'
print ''
head = ' '
print '-k backend Choose backend, which can be one of: cscope, gtags or pycscope.'
print '-r root Root path of source files, use current directory by default.'
print '-i filelist Give a list of candidates of target files, - for stdin.'
print '-s System mode - use /usr/include for #include files (cscope).'
print '-l label Label of gtags which can be : native, ctags, pygments ... etc.'
print '-u Update database only (gtags backend is required).'
print '-num pattern Go to cscope input field num (counting from 0) and find pattern.'
print '-d days Clean databases modified before given days (default is 30).'
print '-v Build the cross reference database in verbose mode.'
if 0:
print '-0 pattern Find this C symbol'
print '-1 pattern Find this definition'
print '-2 pattern Find functions called by this function (cscope/pycscope)'
print '-3 pattern Find functions calling this function'
print '-4 pattern Find this text string'
print '-6 pattern Find this egrep pattern'
print '-7 pattern Find this file'
print '-8 pattern Find files #including this file'
print '-9 pattern Find places where this symbol is assigned a value'
print ''
return 0
if operator in ('-V', '--version'):
print 'escope: version 1.0.1'
return 0
if not operator in ('-B', '--build', '-F', '--find', '-L', '--list', '-C', '--clean'):
        errmsg('unknown operation: ' + operator, True)
return -1
es = escope()
es.init()
if operator in ('-L', '--list'):
es.list()
return 0
options = {}
index = 2
while index < len(argv):
opt = argv[index]
if opt in ('-k', '-r', '-l', '-d', '-i'):
if index + 1 >= len(argv):
errmsg('not enough parameter for option: ' + opt, True)
return -2
options[opt] = argv[index + 1]
index += 2
elif opt >= '-0' and opt <= '-9' and len(opt) == 2:
if index + 1 >= len(argv):
errmsg('require pattern for field: ' + opt, True)
return -2
options['num'] = int(opt[1:])
options['name'] = argv[index + 1]
index += 2
elif opt in ('-s', '-u', '-v'):
options[opt] = True
index += 1
else:
            errmsg('unknown option: ' + opt, True)
return -2
if not '-k' in options:
errmsg('require backend name, use one of cscope, gtags, pycscope after -k', True)
return -3
backend = options['-k']
if not backend in ('cscope', 'gtags', 'pycscope'):
errmsg('bad backend name, use one of cscope, gtags, pycscope after -k', True)
return -3
root = options.get('-r', os.getcwd())
if not os.path.exists(root):
errmsg('path does not exist: ' + root, True)
return -3
es.select(root)
if operator in ('-B', '--build'):
label = options.get('-l', '')
if backend != 'gtags' and label != '':
errmsg('label can only be used with gtags backend', True)
return -5
label = (label == '') and 'native' or label
system = options.get('-s') and True or False
update = options.get('-u') and True or False
verbose = options.get('-v') and True or False
filelist = options.get('-i', None)
if backend != 'cscope' and system != False:
errmsg('system mode can only be used with cscope backend', True)
return -5
if backend != 'gtags' and update != False:
errmsg('update mode can only be used with gtags backend', True)
return -5
parameter = ''
if backend == 'cscope' and system:
parameter = 'system'
elif backend == 'gtags':
parameter = label
if verbose:
            sys.stdout.write('Building %s database for: %s\n'%(backend, root))
sys.stdout.flush()
if filelist:
if filelist != '-' and (not os.path.exists(filelist)):
errmsg('cannot read file list: ' + filelist, True)
return -5
es.generate(backend, parameter, update, filelist, verbose)
return 0
if operator in ('-F', '--find'):
if not 'num' in options:
errmsg('-num pattern required', True)
return -6
num = options['num']
if num in (2, 5, 8, 9) and backend == 'gtags':
errmsg('gtags does not support -%d pattern'%num, True)
return -6
name = options['name']
es.find(backend, num, name)
return 0
if operator in ('-C', '--clean'):
count = 30
es.clean(count)
return 0
return 0
#----------------------------------------------------------------------
# testing case
#----------------------------------------------------------------------
if __name__ == '__main__':
def test1():
config = configure()
config.init()
print config.select('e:/lab/casuald/src/')
print ''
sys.stdout.flush()
for hash, root, desc in config.list():
print hash, root, desc['ctime']
config.clear()
#os.system('cmd /c start cmd')
return 0
def test2():
sc = escope()
sc.init()
sc.select('e:/lab/casuald/src/')
sc.gtags_generate(label = 'pygments', update = True, verbose = False)
sys.stdout.flush()
sc.find('gtags', 0, 'itm_send')
return 0
def test3():
os.environ['ESCOPE'] = 'd:/temp/escope'
sc = escope()
os.chdir('e:/lab/casuald/src')
sc.init()
sc.select('e:/lab/casuald/src')
sc.cscope_generate()
sc.pycscope_generate()
sc.cscope_find(3, 'itm_send')
sc.pycscope_find(0, 'vimtool')
def test4():
main([__file__, '-h'])
#main([__file__, '--version'])
#main([__file__, '--clean'])
return 0
def test5():
main([__file__, '-B', '-k', 'cscope', '-r', 'e:/lab/casuald'])
main([__file__, '-F', '-k', 'cscope', '-r', 'e:/lab/casuald', '-2', 'itm_sendudp'])
def test6():
main([__file__, '-B', '-k', 'pycscope', '-r', 'e:/lab/casuald'])
main([__file__, '-F', '-k', 'pycscope', '-r', 'e:/lab/casuald', '-2', 'plog'])
def test7():
main([__file__, '-B', '-k', 'gtags', '-r', 'e:/lab/casuald', '-l', 'pygments', '-v', '-u'])
main([__file__, '-F', '-k', 'gtags', '-r', 'e:/lab/casuald', '-1', 'plog'])
#test4()
main()
| mit | -667,623,758,801,562,200 | 28.013949 | 93 | 0.597644 | false | 2.928201 | true | false | false |
adaptive-learning/matmat-web | matmat/management/commands/export2csv.py | 1 | 8381 | from collections import defaultdict
from contextlib import closing
import csv
import zipfile
import sys
from django.db import connection
import os
from django.core.management import BaseCommand, CommandError
import re
from proso_models.models import ItemRelation, Answer, AnswerMeta
from proso_tasks.models import Task, Context, TaskInstance, TaskAnswer, Skill
from matmat import settings
import pandas as pd
import json
class Command(BaseCommand):
args = 'table name'
help = "Export data"
BATCH_SIZE = 5 * 10**5
MODELS_TO_EXPORT = [Task, Context, TaskInstance, Skill, Answer, TaskAnswer, ItemRelation, AnswerMeta]
def __init__(self):
super(Command, self).__init__()
self.tables_to_export = []
for model in self.MODELS_TO_EXPORT:
self.tables_to_export.append(model._meta.db_table)
def handle(self, *args, **options):
if len(args) > 0 and len(args) != 1:
raise CommandError('''
The command requires exactly one arguments:
- table name
or no argument.
''')
if len(args) > 0:
table_name = args[0]
self.handle_one_table(table_name)
else:
self.handle_all_tables()
def handle_all_tables(self):
if not os.path.exists(os.path.join(settings.MEDIA_ROOT, "raw")):
os.makedirs(os.path.join(settings.MEDIA_ROOT, "raw"))
for table_name in self.tables_to_export:
self.handle_one_table(table_name)
prepare_data(input_dir=os.path.join(settings.MEDIA_ROOT, "raw"), output_dir=settings.MEDIA_ROOT)
filename_zip = os.path.join(settings.MEDIA_ROOT, "matmat_export_raw.zip")
files = [os.path.join(settings.MEDIA_ROOT, "raw", f + '.csv') for f in self.tables_to_export]
zip_files(filename_zip, files)
filename_zip = os.path.join(settings.MEDIA_ROOT, "matmat_export.zip")
files = [os.path.join(settings.MEDIA_ROOT, f + '.csv') for f in ["answers", "items", "skills"]]
zip_files(filename_zip, files)
def handle_one_table(self, table_name):
if table_name not in self.tables_to_export:
raise CommandError('table "%s" is not supported' % table_name)
count = 0
with closing(connection.cursor()) as cursor:
cursor.execute('SELECT COUNT(*) FROM ' + table_name)
count, = cursor.fetchone()
print('processing %s' % table_name, ',', count, 'items')
sql = 'SELECT * FROM ' + table_name
filename_csv = settings.MEDIA_ROOT + '/raw/' + table_name + '.csv'
for offset in range(0, count, self.BATCH_SIZE):
with closing(connection.cursor()) as cursor:
cursor.execute(sql + ' LIMIT ' + str(self.BATCH_SIZE) + ' OFFSET ' + str(offset))
self.dump_cursor(
cursor,
filename_csv,
append=(offset > 0))
def dump_cursor(self, cursor, filename, append=False):
headers = [re.sub(r'_id$', '', col[0]) for col in cursor.description]
with open(filename, 'a' if append else 'w', encoding='utf-8') as csvfile:
writer = csv.writer(csvfile)
if not append:
writer.writerow(headers)
for row in cursor:
writer.writerow(row)
def zip_files(filename_zip, files):
if os.path.exists(filename_zip):
os.remove(filename_zip)
zf = zipfile.ZipFile(filename_zip, 'w', zipfile.ZIP_DEFLATED)
for filename in files:
zf.write(filename, os.path.basename(filename))
# os.remove(filename)
zf.close()
def get_skill_parents(skills, relations):
map = {}
for id, skill in skills.iterrows():
map[id] = int(skill['parent']) if not pd.isnull(skill['parent']) else None
return map
def get_skill_parent_lists(skills, relations):
map = get_skill_parents(skills, relations)
lists = defaultdict(lambda: [])
for skill in map:
s = skill
while s:
lists[skill].append(s)
s = map[s]
return lists
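# Example with hypothetical ids: given parents {3: 1, 1: None}, the ancestor
# chain lists[3] is [3, 1] (the skill itself first, then its parents).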
def parse_question(item, data):
if item["visualization"] == "pairing":
return ""
question = data["question"] if "question" in data else data["text"]
if type(question) is list and len(question) == 3 and type(question[0]) is str:
question = question[0]
if type(question) is list and len(question) == 3 and type(question[0]) is int:
question = "".join(map(str, question))
if type(question) is list and len(question) == 1:
question = question[0]
question = str(question).replace("×", "x").replace("÷", "/").replace(" ", "")
return question
def prepare_data(input_dir="data/source", output_dir="data"):
csv.field_size_limit(sys.maxsize)
answers = pd.read_csv(os.path.join(input_dir, "proso_models_answer.csv"), engine='python', index_col=0)
answers = answers.join(pd.read_csv(os.path.join(input_dir, "proso_tasks_taskanswer.csv"), engine='python', index_col=0))
answers = answers.join(pd.read_csv(os.path.join(input_dir, "proso_models_answermeta.csv"), engine='python', index_col=0), on='metainfo')
tasks = pd.read_csv(os.path.join(input_dir, "proso_tasks_task.csv"), index_col=0)
task_instances = pd.read_csv(os.path.join(input_dir, "proso_tasks_taskinstance.csv"), index_col=0)
contexts = pd.read_csv(os.path.join(input_dir, "proso_tasks_context.csv"), index_col=0)
skills = pd.read_csv(os.path.join(input_dir, "proso_tasks_skill.csv"), index_col=0)
relations = pd.read_csv(os.path.join(input_dir, "proso_models_itemrelation.csv"), index_col=0)
skills = skills.join(relations.set_index('child').parent, on='item')
for id, skill in skills.iterrows():
skill_id = skills.loc[skills['item'] == skill['parent']].index[0] if not pd.isnull(skill["parent"]) else None
skills.loc[id, 'parent'] = skill_id
skill_parents = get_skill_parent_lists(skills, relations)
items = task_instances.join(tasks, on='task', rsuffix='_task')
items = items.join(contexts, on='context', rsuffix='_context')
items["answer"] = 0
items["question"] = ""
items["skill_lvl_1"], items["skill_lvl_2"], items["skill_lvl_3"] = None, None, None
for id, item in items.iterrows():
data = json.loads(item["content"])
items.loc[id, "content"] = item["content"].replace('"', "'")
items.loc[id, "answer"] = int(data["answer"]) if item["identifier_context"] != "pairing" else None
items.loc[id, "question"] = item['identifier_task']
        skill_item = relations.loc[relations['child'] == item['item_task'], 'parent'].values[0]
skill = skills.loc[skills['item'] == skill_item].index.tolist()[0]
items.loc[id, "skill"] = skill
for i, skill in enumerate(skill_parents[skill][::-1][1:]):
items.loc[id, "skill_lvl_{}".format(i + 1)] = skill
items["skill"] = items["skill"].astype(int)
items.rename(inplace=True, columns={"identifier_context": "visualization", 'content': 'data'})
answers['correct'] = 1 * (answers['item_asked'] == answers['item_answered'])
answers = answers.join(pd.Series(data=items.index, index=items['item'], name='item_id'), on='item')
answers = answers.join(items[["answer"]], on="item_id", rsuffix="_expected")
del answers['item']
answers.rename(inplace=True, columns={"user": "student", 'content': 'log', 'item_id': 'item'})
answers = answers[["time", "item", "student", "session", "response_time", "correct", "answer", "answer_expected", "log"]]
answers['random'] = 1 * answers['log'].str.contains('random_without_options')
answers = answers.round({"response_time": 3})
skills.rename(inplace=True, columns={"note": "name_cz",})
skills = skills[['identifier', "name", "parent"]]
contexts.rename(inplace=True, columns={"note": "name_cz",})
answers.to_csv(os.path.join(output_dir, "answers.csv"), float_format="%.0f", encoding='utf-8')
items = items[["question", "answer", "visualization", "skill", "skill_lvl_1", "skill_lvl_2", "skill_lvl_3", "data"]]
items.to_csv(os.path.join(output_dir, "items.csv"), encoding='utf-8')
# contexts.to_csv(os.path.join(output_dir, "visualizations.csv"))
skills.to_csv(os.path.join(output_dir, "skills.csv"), float_format="%.0f", encoding='utf-8')
| mit | -7,513,176,340,553,469,000 | 43.110526 | 140 | 0.624031 | false | 3.419421 | false | false | false |
joajfreitas/bookmarks | marcador/marcador_lib.py | 1 | 6761 | import os
import sqlite3
from subprocess import call
class Database:
def __init__(self, filename):
self.filename = filename
self.conn = self.open_database(self.filename)
self.cursor = self.conn.cursor()
def open_db(self, filename):
return sqlite3.connect(filename)
def set_default_db(self, filename):
conn = self.open_db(filename)
c = conn.cursor()
c.execute(
"""CREATE TABLE bookmarks (
identifier INTEGER PRIMARY KEY,
url TEXT,
description TEXT,
count INTEGER,
thumbnail TEXT,
score REAL)
"""
)
c.execute(
"""CREATE TABLE tags (
identifier INTEGER PRIMARY KEY,
tag TEXT)
"""
)
c.execute(
"""CREATE TABLE bookmarks_tags (
bookmark REFERENCES bookmarks(identifier),
tag REFERENCES tags(identifier))
"""
)
conn.commit()
return conn
def open_database(self, filename):
if not os.path.isfile(filename):
return self.set_default_db(filename)
return self.open_db(filename)
def get_bookmarks(self, sorted=False):
self.cursor.execute(
"""select identifier, url, description, thumbnail, count from bookmarks""" + (" order by score DESC" if sorted else "")
)
bookmarks = self.cursor.fetchall()
for id, url, desc, thumbnail, count in bookmarks:
tags = self.get_bookmark_tags(id)
tags = [tag for tag, id in tags]
yield id, url, thumbnail, tags
def open_bookmark(self, id):
self.cursor.execute(f"select url, count from bookmarks where identifier='{id}'")
url, count = self.cursor.fetchone()
self.hit_url(url)
import webbrowser
webbrowser.open(url)
def add_bookmark(self, url, tags):
self.cursor.execute(f'insert into bookmarks (url,count,score) values ("{url}",0,1)')
book_id = self.cursor.lastrowid
for tag in tags:
self.cursor.execute(f'insert into tags (tag) values ("{tag}")')
tag_id = self.cursor.lastrowid
self.cursor.execute(
f"insert into bookmarks_tags (bookmark, tag) values ({book_id}, {tag_id})"
)
self.conn.commit()
def rm_bookmark(self, id):
        self.cursor.execute(
            f"delete from bookmarks_tags where bookmark = {id}"
        )
self.cursor.execute(f"delete from bookmarks where identifier = {id}")
self.conn.commit()
def get_url(self, id):
if id == 0:
return None
self.cursor.execute(f"select url from bookmarks where identifier={id}")
url = self.cursor.fetchone()
return url
def get_bookmark(self, id):
self.cursor.execute(
f"""select identifier, url, description, thumbnail, count
from bookmarks where identifier={id}"""
)
id, url, desc, thumbnail, count = self.cursor.fetchone()
return id, url, desc, thumbnail, count
def set_bookmark(self, id, url, tags):
self.cursor.execute(f"update bookmarks set url='{url}' where identifier={id}")
tag_set = self.bookmark_tag_list()
_tags = [tag for tag in tags if tag not in tag_set]
for tag in _tags:
self.cursor.execute(f"insert into tags (tag) values ('{tag}')")
self.cursor.execute(f"delete from bookmarks_tags as bt where bt.bookmark={id}")
for tag in tags:
tag_id = self.get_tag_id(tag)
            self.cursor.execute(
                f"insert into bookmarks_tags (bookmark, tag) values ({id},{tag_id})"
            )
self.conn.commit()
def set_thumbnail(self, id, thumbnail):
self.cursor.execute(
f"update bookmarks set thumbnail='{thumbnail}' where identifier={id}"
)
self.conn.commit()
def edit_bookmark(self, id):
id, url, desc, thumbnail, count = self.get_bookmark(id)
tags = self.get_bookmark_tags(id)
tmp_file = "/tmp/bookmarks.tmp"
with open(tmp_file, "w") as tmp:
tmp.write(url + "\n")
for tag, tag_id in tags:
tmp.write(tag + "\n")
term = os.path.expandvars("$TERM")
editor = os.path.expandvars("$EDITOR")
call([term, "-e", editor, tmp_file])
with open(tmp_file, "r") as tmp:
lines = tmp.readlines()
            lines = [l.strip("\n") for l in lines if l.strip("\n") != ""]
url = lines[0]
tags = [tag for tag in lines[1:]]
self.set_bookmark(id, url, tags)
def get_bookmark_tags(self, id):
self.cursor.execute(
f"""select tags.tag, tags.identifier from
bookmarks_tags as bt, tags where bt.bookmark={id} and bt.tag = tags.identifier"""
)
return list(self.cursor.fetchall())
def bookmark_tag_search(self, tag):
self.cursor.execute(f"select identifier from tags where tag='{tag}'")
r = self.cursor.fetchone()
        if r is None:
return []
id = r[0]
self.cursor.execute(
f"select bt.bookmark from bookmarks_tags as bt where bt.tag = {id}"
)
bookmarks = self.cursor.fetchall()
for _book in bookmarks:
book = _book[0]
self.cursor.execute(
f"""select identifier, url, description, count
from bookmarks where identifier = {book}""")
id, url, desc, count = self.cursor.fetchone()
yield id, url, desc, count
def bookmark_tag_list(self):
self.cursor.execute("select tag from tags")
tags = self.cursor.fetchall()
for tag in tags:
yield tag[0]
def get_tag_id(self, tag):
self.cursor.execute(f"select identifier from tags where tag='{tag}'")
r = self.cursor.fetchone()
        return None if r is None else r[0]
def hit_url(self, url):
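        # Frecency-style ranking: decay every other bookmark's score by 5%,
        # then increment the visited bookmark's count and score.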
self.cursor.execute(f"select identifier, count, score from bookmarks where url='{url}'")
id, count, score = self.cursor.fetchone()
count = int(count)
count += 1
score += 1
self.cursor.execute(f"update bookmarks set score = score*0.95 where identifier<>'{id}'")
self.cursor.execute(
f"update bookmarks set count = {count}, score = {score} where identifier='{id}'")
self.conn.commit()
def bookmark_to_str(bookmark):
id, url, thumbnail, tags = bookmark
output = f"{id}, {url} "
for tag in tags:
output += f"{tag},"
output = output[:-1] + "\n"
return output
| gpl-3.0 | -4,935,841,480,642,641,000 | 29.454955 | 131 | 0.560716 | false | 4.055789 | false | false | false |
shawncaojob/LC | PY/85_maximal_rectangle.py | 1 | 6263 | # 85. Maximal Rectangle QuestionEditorial Solution My Submissions
# Total Accepted: 50686
# Total Submissions: 202962
# Difficulty: Hard
# Given a 2D binary matrix filled with 0's and 1's, find the largest rectangle containing only 1's and return its area.
#
# For example, given the following matrix:
#
# 1 0 1 0 0
# 1 0 1 1 1
# 1 1 1 1 1
# 1 0 0 1 0
# Return 6.
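# Approach: reduce the problem to "largest rectangle in a histogram". Row by
# row, grow each column's run of consecutive 1s, then scan that histogram with
# a monotonic stack (getMaxHeight below) for the best rectangle: O(m*n) total.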
# 2018.02.24 Used a list comprehension
from collections import deque
class Solution(object):
def maximalRectangle(self, matrix):
"""
:type matrix: List[List[str]]
:rtype: int
"""
if not matrix: return 0
m, n = len(matrix), len(matrix[0])
res = 0
heights = [0] * n
for i in xrange(m):
heights = [ x + int(y) if y != "0" else 0 for x,y in zip(heights, matrix[i])]
res = max(res, self.getMaxHeight(heights))
return res
def getMaxHeight(self, heights):
res = 0
heights = heights + [0]
d = deque()
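        # d holds indices whose heights are non-decreasing; the 0 sentinel
        # appended above guarantees every bar is eventually popped and measured.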
for i in xrange(len(heights)):
while d and heights[i] < heights[d[-1]]:
h = heights[d.pop()]
left = d[-1] if d else -1
res = max(res, h * (i - left - 1))
d.append(i)
return res
# 2017.03.18 One stack solution
from collections import deque
class Solution(object):
def maximalRectangle(self, matrix):
"""
:type matrix: List[List[str]]
:rtype: int
"""
if not matrix: return 0
m, n = len(matrix), len(matrix[0])
heights = [ 0 for y in xrange(n + 1) ]
res = 0
for i in xrange(m):
for j in xrange(n):
heights[j] = 0 if matrix[i][j] == "0" else heights[j] + int(matrix[i][j])
res = max(res, self.maxArea(heights))
return res
def maxArea(self, heights):
res = 0
d = deque()
for i in xrange(len(heights)):
while d and heights[d[-1]] >= heights[i]:
h = heights[d.pop()]
side = d[-1] if d else -1
res = max(res, h * (i - side - 1))
d.append(i)
return res
# 12.30.2016 rewrite
from collections import deque
class Solution(object):
def maximalRectangle(self, matrix):
"""
:type matrix: List[List[str]]
:rtype: int
"""
res = 0
if not matrix: return 0
m, n = len(matrix), len(matrix[0])
heights = [ [ 0 for j in xrange(n + 1) ] for i in xrange(m) ]
for i in xrange(m):
for j in xrange(n):
if i == 0 and matrix[i][j] == "1":
heights[i][j] = 1
elif matrix[i][j] == "1":
heights[i][j] += heights[i-1][j] + 1
else:
pass
for i in xrange(m):
d = deque()
for j in xrange(n + 1):
while d and heights[i][j] < heights[i][d[-1]]:
index = d.pop()
h = heights[i][index]
l = -1 if not d else d[-1]
side = j - l - 1
res = max(res, h * side)
d.append(j)
return res
# 11.29.2016 Rewrite
class Solution(object):
def maximalRectangle(self, matrix):
"""
:type matrix: List[List[str]]
:rtype: int
"""
if not matrix: return 0
m, n = len(matrix), len(matrix[0])
heights = []
res = 0
for i in xrange(m):
if i == 0:
heights = [ int(digit) for digit in matrix[0] ]
heights.append(0)
else:
for j in xrange(n):
if matrix[i][j] == "1":
heights[j] += int(matrix[i][j])
else:
heights[j] = 0
d = []
j, l = 0, -1
while j < len(heights):
while d and heights[d[-1]] >= heights[j]:
index = d.pop()
h = heights[index]
if d:
l = d[-1]
else:
l = -1
res = max(res, h * (j - 1 - l))
d.append(j)
j += 1
return res
if __name__ == "__main__":
A = ["10100","10111","11111","10010"]
print(Solution().maximalRectangle(A))
#
#
# class Solution2(object):
# def maximalRectangle(self, matrix):
# """
# :type matrix: List[List[str]]
# :rtype: int
# """
# if not matrix:
# return 0
# res, m, n = 0, len(matrix), len(matrix[0])
#
# # Initialize first height
# H = list(matrix[0]) # Convert string to list of int
# for j in xrange(n):
# H[j] = int(H[j])
#
# for i in xrange(m):
# #initiate L, R
# L = [0 for x in xrange(n)]
# R = [0 for x in xrange(n)]
#
# # Get the height and left
# for j in xrange(n):
# if i == 0:
# pass
# elif matrix[i][j] == "1":
# H[j] += 1
# else:
# H[j] = 0
#
# # Get the left
# k = j - 1
# while k >= 0 and H[k] >= H[j]:
# L[j] = L[j] + L[k] + 1
# k = k - L[k] - 1
#
# # Get the right
# for j in reversed(xrange(n)):
# k = j + 1
# while k < n and H[j] <= H[k]:
# R[j] = R[j] + R[k] + 1
# k = k + R[k] + 1
#
# # Calculate area for each and update res if bigger
# for j in xrange(n):
# if H[j] != 0:
# res = max(res, H[j] * (L[j] + R[j] + 1))
#
# return res
| gpl-3.0 | -2,447,503,830,545,572,400 | 28.266355 | 119 | 0.406355 | false | 3.651895 | false | false | false |
lanmaster53/honeybadger | server/honeybadger/__init__.py | 1 | 2391 | from flask import Flask
from flask_bcrypt import Bcrypt
from flask_sqlalchemy import SQLAlchemy
import logging
import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-gk", "--googlekey", dest="googlekey", type=str, default='',
help="Google API Key")
parser.add_argument("-ik", "--ipstackkey", dest="ipstackkey", type=str, default='',
help="IPStack API Key")
opts = parser.parse_args()
basedir = os.path.abspath(os.path.dirname(__file__))
# configuration
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'data.db')
DEBUG = True
SECRET_KEY = 'development key'
SQLALCHEMY_TRACK_MODIFICATIONS = False
GOOGLE_API_KEY = opts.googlekey # Provide your google api key via command-line argument
IPSTACK_API_KEY = opts.ipstackkey
app = Flask(__name__)
app.config.from_object(__name__)
bcrypt = Bcrypt(app)
db = SQLAlchemy(app)
# Logger cannot be imported until the db is initialized
from honeybadger.utils import Logger
logger = Logger()
if __name__ != '__main__':
gunicorn_logger = logging.getLogger('gunicorn.error')
# only use handler if gunicorn detected, otherwise default
if gunicorn_logger.handlers:
app.logger.handlers = gunicorn_logger.handlers
app.logger.setLevel(gunicorn_logger.level)
from honeybadger import models
from honeybadger import views
def initdb(username, password):
db.create_all()
import binascii
u = models.User(email=username, password_hash=bcrypt.generate_password_hash(binascii.hexlify(password.encode())), role=0, status=1)
db.session.add(u)
db.session.commit()
print('Database initialized.')
# remove below for production
t = models.Target(name='demo', guid='aedc4c63-8d13-4a22-81c5-d52d32293867')
db.session.add(t)
db.session.commit()
b = models.Beacon(target_guid='aedc4c63-8d13-4a22-81c5-d52d32293867', agent='HTML', ip='1.2.3.4', port='80', useragent='Mac OS X', comment='this is a comment.', lat='38.2531419', lng='-85.7564855', acc='5')
db.session.add(b)
db.session.commit()
b = models.Beacon(target_guid='aedc4c63-8d13-4a22-81c5-d52d32293867', agent='HTML', ip='5.6.7.8', port='80', useragent='Mac OS X', comment='this is a comment.', lat='34.855117', lng='-82.114192', acc='1')
db.session.add(b)
db.session.commit()
def dropdb():
db.drop_all()
print('Database dropped.')
| gpl-3.0 | -8,402,595,862,418,852,000 | 36.359375 | 210 | 0.695943 | false | 3.171088 | false | false | false |
mdaal/rap | rap/sweeps/data_management/load_hf5_2.py | 1 | 1682 | from .utils import _define_sweep_data_columns
import tables
import os
import logging
import numpy as np
def load_hf5_2(metadata, hf5_database_path, tablepath):
	''' This function is for loading data taken with KIDs_DAQ_75uW. It uses the columns defined in that hf5 file to
	define the columns in sweep_data_columns.
	tablepath is the path to the table to be loaded, starting from root, e.g. load_hf5_2('/Run44b/T201312102229').
	hf5_database_path is the name of the hf5 database to be accessed for the table information.'''
if not os.path.isfile(hf5_database_path):
		logging.error('Specified h5 database does not exist. Aborting...')
return
wmode = 'a'
# use "with" context manage to ensure file is always closed. no need for fileh.close()
with tables.open_file(hf5_database_path, mode = wmode) as fileh:
table = fileh.get_node(tablepath)
Sweep_Array = table.read()
for key in table.attrs.keys:
exec('metadata.{0} = table.attrs.{0}'.format(key))
imported_sweep_data_columns = Sweep_Array.dtype
fsteps = imported_sweep_data_columns['Frequencies'].shape[0]
tpoints = imported_sweep_data_columns['Temperature_Readings'].shape[0]
sweep_data_columns_list, sweep_data_columns = _define_sweep_data_columns(fsteps, tpoints)
for name in imported_sweep_data_columns.names:
if name not in sweep_data_columns.names:
sweep_data_columns_list.append((name,imported_sweep_data_columns[name] ))
sweep_data_columns = np.dtype(sweep_data_columns_list)
Sweep_Array = np.array(Sweep_Array, dtype = sweep_data_columns)
return Sweep_Array, sweep_data_columns, sweep_data_columns_list
| mit | 3,572,133,839,036,354,000 | 41.05 | 114 | 0.70214 | false | 3.337302 | false | false | false |
ecederstrand/exchangelib | tests/test_items/test_bulk.py | 1 | 7409 | import datetime
from exchangelib.errors import ErrorItemNotFound, ErrorInvalidChangeKey, ErrorInvalidIdMalformed
from exchangelib.fields import FieldPath
from exchangelib.folders import Inbox, Folder, Calendar
from exchangelib.items import Item, Message, SAVE_ONLY, SEND_ONLY, SEND_AND_SAVE_COPY, CalendarItem
from .test_basics import BaseItemTest
class BulkMethodTest(BaseItemTest):
TEST_FOLDER = 'inbox'
FOLDER_CLASS = Inbox
ITEM_CLASS = Message
def test_fetch(self):
item = self.get_test_item()
self.test_folder.bulk_create(items=[item, item])
ids = self.test_folder.filter(categories__contains=item.categories)
items = list(self.account.fetch(ids=ids))
for item in items:
self.assertIsInstance(item, self.ITEM_CLASS)
self.assertEqual(len(items), 2)
items = list(self.account.fetch(ids=ids, only_fields=['subject']))
self.assertEqual(len(items), 2)
items = list(self.account.fetch(ids=ids, only_fields=[FieldPath.from_string('subject', self.test_folder)]))
self.assertEqual(len(items), 2)
items = list(self.account.fetch(ids=ids, only_fields=['id', 'changekey']))
self.assertEqual(len(items), 2)
def test_no_account(self):
# Test bulk operations on items with no self.account
item = self.get_test_item()
item.account = None
res = self.test_folder.bulk_create(items=[item])[0]
item.id, item.changekey = res.id, res.changekey
item.account = None
self.assertEqual(list(self.account.fetch(ids=[item]))[0].id, item.id)
item.account = None
res = self.account.bulk_update(items=[(item, ('subject',))])[0]
item.id, item.changekey = res
item.account = None
res = self.account.bulk_copy(ids=[item], to_folder=self.account.trash)[0]
item.id, item.changekey = res
item.account = None
res = self.account.bulk_move(ids=[item], to_folder=self.test_folder)[0]
item.id, item.changekey = res
item.account = None
self.assertEqual(self.account.bulk_delete(ids=[item]), [True])
item = self.get_test_item().save()
item.account = None
self.assertEqual(self.account.bulk_send(ids=[item]), [True])
def test_empty_args(self):
# We allow empty sequences for these methods
self.assertEqual(self.test_folder.bulk_create(items=[]), [])
self.assertEqual(list(self.account.fetch(ids=[])), [])
self.assertEqual(self.account.bulk_create(folder=self.test_folder, items=[]), [])
self.assertEqual(self.account.bulk_update(items=[]), [])
self.assertEqual(self.account.bulk_delete(ids=[]), [])
self.assertEqual(self.account.bulk_send(ids=[]), [])
self.assertEqual(self.account.bulk_copy(ids=[], to_folder=self.account.trash), [])
self.assertEqual(self.account.bulk_move(ids=[], to_folder=self.account.trash), [])
self.assertEqual(self.account.upload(data=[]), [])
self.assertEqual(self.account.export(items=[]), [])
def test_qs_args(self):
# We allow querysets for these methods
qs = self.test_folder.none()
self.assertEqual(list(self.account.fetch(ids=qs)), [])
with self.assertRaises(ValueError):
# bulk_create() does not allow queryset input
self.account.bulk_create(folder=self.test_folder, items=qs)
with self.assertRaises(ValueError):
# bulk_update() does not allow queryset input
self.account.bulk_update(items=qs)
self.assertEqual(self.account.bulk_delete(ids=qs), [])
self.assertEqual(self.account.bulk_send(ids=qs), [])
self.assertEqual(self.account.bulk_copy(ids=qs, to_folder=self.account.trash), [])
self.assertEqual(self.account.bulk_move(ids=qs, to_folder=self.account.trash), [])
self.assertEqual(self.account.upload(data=qs), [])
self.assertEqual(self.account.export(items=qs), [])
def test_no_kwargs(self):
self.assertEqual(self.test_folder.bulk_create([]), [])
self.assertEqual(list(self.account.fetch([])), [])
self.assertEqual(self.account.bulk_create(self.test_folder, []), [])
self.assertEqual(self.account.bulk_update([]), [])
self.assertEqual(self.account.bulk_delete([]), [])
self.assertEqual(self.account.bulk_send([]), [])
self.assertEqual(self.account.bulk_copy([], to_folder=self.account.trash), [])
self.assertEqual(self.account.bulk_move([], to_folder=self.account.trash), [])
self.assertEqual(self.account.upload([]), [])
self.assertEqual(self.account.export([]), [])
def test_invalid_bulk_args(self):
# Test bulk_create
with self.assertRaises(ValueError):
# Folder must belong to account
self.account.bulk_create(folder=Folder(root=None), items=[1])
with self.assertRaises(AttributeError):
# Must have folder on save
self.account.bulk_create(folder=None, items=[1], message_disposition=SAVE_ONLY)
# Test that we can send_and_save with a default folder
self.account.bulk_create(folder=None, items=[], message_disposition=SEND_AND_SAVE_COPY)
with self.assertRaises(AttributeError):
# Must not have folder on send-only
self.account.bulk_create(folder=self.test_folder, items=[1], message_disposition=SEND_ONLY)
# Test bulk_update
with self.assertRaises(ValueError):
# Cannot update in send-only mode
self.account.bulk_update(items=[1], message_disposition=SEND_ONLY)
def test_bulk_failure(self):
# Test that bulk_* can handle EWS errors and return the errors in order without losing non-failure results
items1 = [self.get_test_item().save() for _ in range(3)]
items1[1].changekey = 'XXX'
for i, res in enumerate(self.account.bulk_delete(items1)):
if i == 1:
self.assertIsInstance(res, ErrorInvalidChangeKey)
else:
self.assertEqual(res, True)
items2 = [self.get_test_item().save() for _ in range(3)]
items2[1].id = 'AAAA=='
for i, res in enumerate(self.account.bulk_delete(items2)):
if i == 1:
self.assertIsInstance(res, ErrorInvalidIdMalformed)
else:
self.assertEqual(res, True)
items3 = [self.get_test_item().save() for _ in range(3)]
items3[1].id = items1[0].id
for i, res in enumerate(self.account.fetch(items3)):
if i == 1:
self.assertIsInstance(res, ErrorItemNotFound)
else:
self.assertIsInstance(res, Item)
class CalendarBulkMethodTest(BaseItemTest):
TEST_FOLDER = 'calendar'
FOLDER_CLASS = Calendar
ITEM_CLASS = CalendarItem
def test_no_account(self):
# Test corner cases with bulk operations on items with no self.account
item = self.get_test_item()
item.recurrence = None
item.is_all_day = True
item.start, item.end = datetime.date(2020, 1, 1), datetime.date(2020, 1, 2)
item.account = None
res = self.test_folder.bulk_create(items=[item])[0]
item.id, item.changekey = res.id, res.changekey
item.account = None
self.account.bulk_update(items=[(item, ('start',))])
| bsd-2-clause | -3,528,518,229,622,913,500 | 46.191083 | 115 | 0.634364 | false | 3.780102 | true | false | false |
luzhuomi/collamine-client-python | webapp/webapp/settings.py | 1 | 2832 | """
Django settings for webapp project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# Django settings for webapp project.
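# Use PyMySQL as a drop-in replacement for MySQLdb when the C extension is
# unavailable (e.g. on Python 3 or a host without libmysqlclient).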
try:
import pymysql
pymysql.install_as_MySQLdb()
except ImportError:
pass
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'vw_+8()m^o3mxkqxcu%n#$^gjqx8_qn$p&#krg3(+a8cq^1ty&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'crawler'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'webapp.urls'
WSGI_APPLICATION = 'webapp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
#'ENGINE': 'django.db.backends.sqlite3',
#'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
'ENGINE': 'django.db.backends.mysql',
'NAME': 'djangocrawler',
'USER': 'root',
'PASSWORD': 'root',
'HOST': 'localhost', # Or an IP Address that your DB is hosted on
'PORT': '3306',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Singapore'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'webapp/static/'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
| apache-2.0 | 8,016,252,388,269,246,000 | 23.413793 | 75 | 0.707627 | false | 3.39976 | false | false | false |
SamHames/scikit-image | skimage/viewer/tests/test_widgets.py | 1 | 2740 |
import os
from skimage import data, img_as_float, io
from skimage.viewer import ImageViewer, viewer_available
from skimage.viewer.widgets import (
Slider, OKCancelButtons, SaveButtons, ComboBox, Text)
from skimage.viewer.plugins.base import Plugin
from skimage.viewer.qt import QtGui, QtCore
from numpy.testing import assert_almost_equal, assert_equal
from numpy.testing.decorators import skipif
def get_image_viewer():
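    # Build a viewer around the float-converted coins sample image and attach
    # an empty Plugin so each test can add widgets to it.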
image = data.coins()
viewer = ImageViewer(img_as_float(image))
viewer += Plugin()
return viewer
@skipif(not viewer_available)
def test_combo_box():
viewer = get_image_viewer()
cb = ComboBox('hello', ('a', 'b', 'c'))
viewer.plugins[0] += cb
assert_equal(str(cb.val), 'a')
assert_equal(cb.index, 0)
cb.index = 2
    assert_equal(str(cb.val), 'c')
assert_equal(cb.index, 2)
@skipif(not viewer_available)
def test_text_widget():
viewer = get_image_viewer()
txt = Text('hello', 'hello, world!')
viewer.plugins[0] += txt
assert_equal(str(txt.text), 'hello, world!')
txt.text = 'goodbye, world!'
assert_equal(str(txt.text), 'goodbye, world!')
@skipif(not viewer_available)
def test_slider_int():
viewer = get_image_viewer()
sld = Slider('radius', 2, 10, value_type='int')
viewer.plugins[0] += sld
assert_equal(sld.val, 4)
sld.val = 6
assert_equal(sld.val, 6)
sld.editbox.setText('5')
sld._on_editbox_changed()
assert_equal(sld.val, 5)
@skipif(not viewer_available)
def test_slider_float():
viewer = get_image_viewer()
sld = Slider('alpha', 2.1, 3.1, value=2.1, value_type='float',
orientation='vertical', update_on='move')
viewer.plugins[0] += sld
assert_equal(sld.val, 2.1)
sld.val = 2.5
assert_almost_equal(sld.val, 2.5, 2)
sld.editbox.setText('0.1')
sld._on_editbox_changed()
assert_almost_equal(sld.val, 2.5, 2)
@skipif(not viewer_available)
def test_save_buttons():
viewer = get_image_viewer()
sv = SaveButtons()
viewer.plugins[0] += sv
import tempfile
_, filename = tempfile.mkstemp(suffix='.png')
os.remove(filename)
timer = QtCore.QTimer()
timer.singleShot(100, lambda: QtGui.QApplication.quit())
sv.save_to_stack()
sv.save_to_file(filename)
img = img_as_float(data.imread(filename))
assert_almost_equal(img, viewer.image)
img = io.pop()
assert_almost_equal(img, viewer.image)
@skipif(not viewer_available)
def test_ok_buttons():
viewer = get_image_viewer()
ok = OKCancelButtons()
viewer.plugins[0] += ok
    ok.update_original_image()
    ok.close_plugin()
| bsd-3-clause | -3,345,162,663,781,999,600 | 24.346154 | 66 | 0.629197 | false | 3.186047 | true | false | false |
tylertian/Openstack | openstack F/glance/glance/tests/unit/common/test_exception.py | 1 | 1680 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from glance.common import exception
from glance.tests import utils as test_utils
class GlanceExceptionTestCase(test_utils.BaseTestCase):
def test_default_error_msg(self):
class FakeGlanceException(exception.GlanceException):
message = "default message"
exc = FakeGlanceException()
self.assertEquals(unicode(exc), 'default message')
def test_specified_error_msg(self):
self.assertTrue('test' in unicode(exception.GlanceException('test')))
def test_default_error_msg_with_kwargs(self):
class FakeGlanceException(exception.GlanceException):
message = "default message: %(code)s"
exc = FakeGlanceException(code=500)
self.assertEquals(unicode(exc), "default message: 500")
def test_specified_error_msg_with_kwargs(self):
self.assertTrue('test: 500' in
unicode(exception.GlanceException('test: %(code)s',
code=500)))
| apache-2.0 | 3,956,610,928,525,240,000 | 37.181818 | 78 | 0.677976 | false | 4.221106 | true | false | false |
hrahadiant/mini_py_project | basic_number_game_v0.py | 1 | 1543 | # basic number game
import random
# rules
# you must enter the integer
# for win this game, you must hit the secret number within 3 chances
print("Basic rules:")
print("You only have 3 chances to guess the number")
print("You can type the integer between 1-10 only")
print("You can choose play again when you lose")
print("Enjoy this game!")
guess_limit = 3
def play_again():
play_more = input("Do you want to play again? y/n ")
if play_more.lower() == "n":
print("Bye!")
exit()
elif play_more.lower() == "y":
main()
def check_number(number, hit):
global guess_limit
guess_limit -= 1
number = int(number)
if number == hit:
print("You hit the number!")
play_again()
elif number > hit:
print("Your guess is too high.")
print("Try another number. Remaining number of guesses is {}".format(guess_limit))
elif number < hit:
print("Your guess is too low.")
print("Try another number. Remaining number of guesses is {}". format(guess_limit))
if guess_limit == 0:
print("Sorry, you lose this game. My secret number is {}".format(hit))
play_again()
def check_hit(number):
    try:
        int(number)
        return True
    except ValueError:
        print("Please input the integer between 1 - 10")
        return False
def main():
    global guess_limit
    guess_limit = 3
    hit_number = random.randint(1, 10)
    while True:
        guess_number = input("Guess the number (1 - 10)> ")
        if not check_hit(guess_number):
            continue
        check_number(guess_number, hit_number)
main()
| apache-2.0 | -2,485,010,881,475,866,600 | 23.887097 | 91 | 0.616332 | false | 3.763415 | false | false | false |
huntzhan/magic-constraints | magic_constraints/argument.py | 1 | 2768 | # -*- coding: utf-8 -*-
from __future__ import (
division, absolute_import, print_function, unicode_literals,
)
from builtins import * # noqa
from future.builtins.disabled import * # noqa
from magic_constraints.exception import MagicSyntaxError, MagicTypeError
def transform_to_slots(constraints_package, *args, **kwargs):
class UnFill(object):
pass
plen = len(constraints_package.parameters)
if len(args) > plen:
raise MagicSyntaxError(
'argument length unmatched.',
parameters=constraints_package.parameters,
args=args,
)
slots = [UnFill] * plen
unfill_count = plen
# 1. fill args.
for i, val in enumerate(args):
slots[i] = val
unfill_count -= len(args)
# 2. fill kwargs.
for key, val in kwargs.items():
if key not in constraints_package.name_hash:
raise MagicSyntaxError(
'invalid keyword argument',
parameters=constraints_package.parameters,
key=key,
)
i = constraints_package.name_hash[key]
if slots[i] is not UnFill:
raise MagicSyntaxError(
'key reassignment error.',
parameters=constraints_package.parameters,
key=key,
)
slots[i] = val
unfill_count -= 1
# 3. fill defaults if not set.
# 3.1. deal with the case that default not exists.
default_begin = constraints_package.start_of_defaults
if default_begin < 0:
default_begin = plen
# 3.2 fill defaults.
for i in range(default_begin, plen):
parameter = constraints_package.parameters[i]
j = constraints_package.name_hash[parameter.name]
if slots[j] is UnFill:
slots[j] = parameter.default
unfill_count -= 1
# 4. test if slots contains UnFill.
if unfill_count != 0:
raise MagicSyntaxError(
'slots contains unfilled argument(s).',
parameters=constraints_package.parameters,
slots=slots,
)
return slots
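# Hypothetical example: for parameters (a, b, c=1), transform_to_slots(pkg, 1, b=2)
# fills the slots as [1, 2, 1] -- positional args first, then keywords, then defaults.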
def check_and_bind_arguments(parameters, slots, bind_callback):
plen = len(parameters)
for i in range(plen):
arg = slots[i]
parameter = parameters[i]
wrapper = parameter.wrapper_for_deferred_checking()
# defer checking by wrapping the element of slot.
if wrapper:
slots[i] = wrapper(arg)
# check now.
elif not parameter.check_instance(arg):
raise MagicTypeError(
'argument unmatched.',
parameter=parameter,
argument=arg,
)
# bind.
bind_callback(parameter.name, arg)
| mit | 3,002,848,033,951,175,000 | 26.405941 | 72 | 0.58237 | false | 4.25192 | false | false | false |
mfnch/pyrtist | pyrtist/lib2d/core_types.py | 1 | 16345 | # Copyright (C) 2017 Matteo Franchin
#
# This file is part of Pyrtist.
# Pyrtist is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 2.1 of the License, or
# (at your option) any later version.
#
# Pyrtist is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrtist. If not, see <http://www.gnu.org/licenses/>.
'''Fundamental types for the pyrtist 2D graphic library.
This module defines Point, Matrix, and other types commonly used in the
library.
'''
__all__ = ('create_enum', 'alias', 'Scalar', 'Point', 'Px', 'Py', 'Pang',
'PointTaker', 'GenericMatrix', 'Matrix', 'Close', 'Container',
'Offset', 'Scale', 'Center', 'AngleDeg', 'Radii', 'Through', 'Tri',
'View', 'Taker', 'combination', 'RejectError', 'Args')
import math
import numbers
import copy
from .base import Taker, combination, RejectError, Args
class Enum(object):
def __init__(self, name, value=None):
self.name = name
self.value = value
    def __str__(self):
        return '{}.{}'.format(self.__class__.__name__, self.name)
    def __repr__(self):
        args = ((self.name,) if self.value is None
                else (self.name, self.value))
        return '{}({})'.format(self.__class__.__name__,
                               ', '.join(map(repr, args)))
def create_enum(name, doc, *enums):
d = ({'__doc__': doc} if doc is not None else {})
new_class = type(name, (Enum,), d)
for name in enums:
setattr(new_class, name, new_class(name))
return new_class
def alias(name, target, **attrs):
return type(name, (target,), attrs)
class Scalar(float):
'''Used to identify scalars that need to be transformed (such as line
widths) in a CmdStream.
'''
class Point(object):
'''Point with 2 components.
A Point() can be created in one of the following ways:
- Point(), Point(x) or Point(x, y). Use the provided argument to set the
x and y components. Missing components are set to zero.
- Point(Point(...)) to copy the point given as first argument.
- Point(tuple) to set from a tuple.
'''
@staticmethod
def vx(delta_x=1.0):
'''Return a vector with the given x component and zero y component.'''
return Point(delta_x)
@staticmethod
def vy(delta_y=1.0):
'''Return a vector with the given y component and zero x component.'''
return Point(0.0, delta_y)
@staticmethod
def vangle(angle):
'''Return a unit vector forming the specified angle with the x axis.'''
return Point(math.cos(angle), math.sin(angle))
@staticmethod
def sum(points, default=None):
return sum(points, default or Point())
@staticmethod
def interpolate(point_list, index):
'''Interpolate a point according to the given index.
Given a list of points `point_list` return an interpolated point,
according to the given index `index`. In particular, if `index` is:
- an integer, then this function simply returns `point_list[index]`
- a floating point number, then this function returns an interpolation
of `points_list[floor(index)]` and `points_list[ceil(index)]`.
- a Point, then the result is similar to just giving a single float
`index.x`, with the additional addition of a vector `index.y * ort`
where `ort` is the vector orthogonal to the segment selected by
`index.x`.
'''
if isinstance(index, int):
return point_list[index]
elif isinstance(index, float):
index = Point(index, 0.0)
else:
index = Point(index)
n = len(point_list)
if n < 2:
if n == 0:
raise ValueError('Attempt to index empty point container')
return point_list[0]
prev_idx = math.floor(index.x)
x = index.x - prev_idx
prev = point_list[int(prev_idx) % n]
succ = point_list[(int(prev_idx) + 1) % n]
ret = prev*(1.0 - x) + succ*x
if index.y == 0.0:
return ret
ort = (succ - prev).ort()
return ret + ort * index.y
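    # Hypothetical example: interpolate([Point(0, 0), Point(2, 0)], 0.5)
    # returns Point(1.0, 0.0), the midpoint of the first (and only) segment.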
def __init__(self, *args, **kwargs):
self.x = self.y = 0.0
self.set(*args, **kwargs)
def set(self, *args, **kwargs):
if len(args) > 0:
arg0 = args[0]
if isinstance(arg0, numbers.Number):
xy = args
elif isinstance(arg0, (Point, tuple)):
xy = tuple(arg0) + args[1:]
else:
raise TypeError('Cannot handle first argument of {}()'
.format(self.__class__.__name__))
if len(xy) == 2:
self.x = float(xy[0])
self.y = float(xy[1])
elif len(xy) > 2:
raise TypeError('Too many arguments to {}()'
.format(self.__class__.__name__))
else:
assert len(xy) == 1
self.x = xy[0]
# The code below is there for compatibility reasons, but we should get
# rid of it eventually.
if 'x' in kwargs:
self.x = kwargs['x']
if 'y' in kwargs:
self.y = kwargs['y']
def __repr__(self):
return '{}({}, {})'.format(self.__class__.__name__, self.x, self.y)
def __iter__(self):
return iter((self.x, self.y))
def __neg__(self):
return type(self)(*tuple(-x for x in self))
def __pos__(self):
return self.copy()
def __add__(self, value):
if value == 0.0:
# This allows using `sum' without having to provide a `start'
# value. For example: sum([Point(...), Point(...), ...])
return self.copy()
return Point(x=self.x + value.x, y=self.y + value.y)
def __sub__(self, value):
return Point(x=self.x - value.x, y=self.y - value.y)
def __mul__(self, value):
if isinstance(value, numbers.Number):
return Point(x=self.x*value, y=self.y*value)
else:
return float(self.x*value.x + self.y*value.y)
def __rmul__(self, value):
return self.__mul__(value)
def __div__(self, value):
return Point(x=self.x/value, y=self.y/value)
__truediv__ = __div__
def copy(self):
return Point(x=self.x, y=self.y)
def dot(self, p):
'''Return the scalar product with p.'''
return self.x*p.x + self.y*p.y
def norm2(self):
'''Return the square of the norm for this vector.'''
return self.x*self.x + self.y*self.y
def norm(self):
'''Return the vector's norm.'''
return math.sqrt(self.norm2())
def angle(self):
'''Return the angle between the vector and the x axis.'''
return math.atan2(self.y, self.x)
def normalize(self):
'''Normalized this vector.'''
n = self.norm()
if n != 0.0:
self.x /= n
self.y /= n
def normalized(self):
'''Return a normalized copy of this vector.'''
p = self.copy()
p.normalize()
return p
def ort(self):
        '''Return the orthogonal vector, rotated by 90 degrees anticlockwise.'''
return Point(-self.y, self.x)
def mirror(self, axes):
'''Mirror the point with respect to the x axis of the given Axes()
object.
'''
d = self - axes.origin
u10 = (axes.one_zero - axes.origin).normalized()
d_ort = u10*u10.dot(d)
return axes.origin - d + d_ort*2.0
def mirror_x(self, p):
'''Mirror the point with respect to an horizontal axis passing
through `p`.
'''
return Point(self.x, 2.0*p.y - self.y)
def mirror_y(self, p):
'''Mirror the point with respect to a vertical axis passing
through `p`.
'''
return Point(2.0*p.x - self.x, self.y)
def mirror_xy(self, p):
'''Mirror this point with respect to the specified point.'''
return 2.0*p - self
def Px(value):
return Point.vx(value)
def Py(value):
return Point.vy(value)
def Pang(angle):
'''Return a Point of unit norm forming the specified angle with the x axis.
'''
return Point(math.cos(angle), math.sin(angle))
class PointTaker(Taker):
def __init__(self, *args):
self.points = []
super(PointTaker, self).__init__(*args)
def __iter__(self):
return iter(self.points)
def __len__(self):
return len(self.points)
def __getitem__(self, index):
return self.points[index]
@combination(Point, PointTaker)
def fn(point, point_taker):
point_taker.points.append(point)
@combination(tuple, PointTaker)
def fn(tp, point_taker):
if len(tp) != 2:
raise RejectError()
point_taker.take(Point(tp))
class GenericMatrix(object):
@classmethod
def diag(cls, *entries):
'''Construct a diagonal matrix with the given diagonal entries.'''
m, n = cls.size
num_args = min(m, n)
if len(entries) < num_args:
raise TypeError('diag takes exactly {} arguments ({} given)'
.format(num_args, len(entries)))
mx = [[(entries[i] if i == j else 0.0) for j in range(n)]
for i in range(m)]
return cls(mx)
class Matrix(GenericMatrix):
size = (2, 3)
identity = [[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0]]
@classmethod
def rotation(cls, angle):
rcos = math.cos(angle)
rsin = math.sin(angle)
return cls([[rcos, -rsin, 0.0],
[rsin, rcos, 0.0]])
@classmethod
def translation(cls, t):
return cls([[1.0, 0.0, t.x],
[0.0, 1.0, t.y]])
def __init__(self, value=None):
super(Matrix, self).__init__()
self.set(value)
def set(self, value):
'''Set the matrix to the given value.'''
if value is None:
value = Matrix.identity
elif isinstance(value, Matrix):
value = value.value
self.value = [list(value[0]), list(value[1])]
def __repr__(self):
return 'Matrix({})'.format(repr(self.value))
def __mul__(self, b):
if isinstance(b, Point):
return self.apply(b)
if isinstance(b, tuple) and len(b) == 2:
return self.apply(Point(b))
ret = self.copy()
if isinstance(b, numbers.Number):
ret.scale(b)
else:
ret.multiply(b)
return ret
def get_entries(self):
'''Get the matrix entries as a tuple of 6 scalars.'''
return tuple(self.value[0] + self.value[1])
def multiply(self, b):
(a11, a12, a13), (a21, a22, a23) = ab = self.value
(b11, b12, b13), (b21, b22, b23) = b.value
ab[0][0] = a11*b11 + a12*b21; ab[0][1] = a11*b12 + a12*b22
ab[1][0] = a21*b11 + a22*b21; ab[1][1] = a21*b12 + a22*b22
ab[0][2] = a13 + a11*b13 + a12*b23
ab[1][2] = a23 + a21*b13 + a22*b23
def __rmul__(self, b):
if isinstance(b, numbers.Number):
return self.__mul__(b)
raise NotImplementedError()
def copy(self):
'''Return a copy of the matrix.'''
return Matrix(value=self.value)
def scale(self, s):
'''Scale the matrix by the given factor (in-place).'''
v = self.value
v[0][0] *= s; v[0][1] *= s; v[0][2] *= s
v[1][0] *= s; v[1][1] *= s; v[1][2] *= s
def translate(self, p):
'''Translate the matrix by the given Point value (in-place).'''
self.value[0][2] += p.x
self.value[1][2] += p.y
def apply(self, p):
'''Apply the matrix to a Point.'''
if not isinstance(p, Point):
p = Point(p)
(a11, a12, a13), (a21, a22, a23) = self.value
return Point(a11*p.x + a12*p.y + a13,
a21*p.x + a22*p.y + a23)
def det(self):
'''Return the determinant of the matrix.'''
m = self.value
return m[0][0]*m[1][1] - m[0][1]*m[1][0]
def invert(self):
'''Invert the matrix in place.'''
(m11, m12, m13), (m21, m22, m23) = m = self.value
det = m11*m22 - m12*m21
if det == 0.0:
raise ValueError('The matrix is singular: cannot invert it')
m[0][0] = new11 = m22/det; m[0][1] = new12 = -m12/det
m[1][0] = new21 = -m21/det; m[1][1] = new22 = m11/det
m[0][2] = -new11*m13 - new12*m23
m[1][2] = -new21*m13 - new22*m23
def get_inverse(self):
'''Return the inverse of the matrix.'''
ret = self.copy()
ret.invert()
return ret
Close = create_enum('Close', 'Whether to close a path',
'no', 'yes')
class Container(object):
def __init__(self, *args):
self.args = args
def __repr__(self):
return '{name}({args})'.format(name=self.__class__.__name__,
args=', '.join(map(repr, self.args)))
def __iter__(self):
return iter(self.args)
def copy(self):
return self.__class__(*self.args)
def check(self, min_args, max_args):
if len(self.args) < min_args:
raise TypeError('{} object needs at least {} arguments'
.format(self.__class__.__name__, min_args))
if len(self.args) > max_args:
raise TypeError('{} object takes at most {} arguments'
.format(self.__class__.__name__, max_args))
class Offset(Point):
'''Alias for Point used to pass unitless offsets.'''
class Scale(Point):
'''Alias for Point used to pass scale factors.'''
def __init__(self, *args):
super(Scale, self).__init__()
self.y = None
self.set(*args)
if self.y is None:
self.y = self.x
class Center(Point):
'''Alias for Point used to pass the center for a rotation.'''
class Radii(Container):
'''Container which groups one or more radii (e.g. the x, y radii of
    an ellipsoid).
'''
class Through(list):
'''List of points that a geometric shape (e.g. a Circle) passes
through.'''
def __init__(self, *args):
super(Through, self).__init__(args)
class AngleDeg(float):
'''Floating point number representing an angle in degrees.'''
class Tri(Container):
'''Container which groups up to 3 points used to define a Bezier curve.'''
def __init__(self, *args):
n = len(args)
if n == 1:
self.args = (None, args[0], None)
elif n == 2:
self.args = (args[0], args[1], None)
elif n == 3:
self.args = args
else:
raise TypeError('Tri takes at most 3 points')
    def copy(self):
        return type(self)(*self.args)
@property
def p(self):
return self.args[1]
@property
def ip(self):
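        # Input control point: if absent, mirror the output control point
        # about the main point so the Bezier tangent stays continuous.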
p_in = self.args[0]
if p_in is not None:
return p_in
p_out = self.args[2]
if p_out is not None:
return 2.0*self.args[1] - p_out
return self.args[1]
@property
def op(self):
p_out = self.args[2]
if p_out is not None:
return p_out
p_in = self.args[0]
if p_in is not None:
return 2.0*self.args[1] - p_in
return self.args[1]
class View(object):
'''Object used to pass information to the GUI.'''
def __init__(self, bbox, origin, size):
self.bbox = bbox
self.origin = origin
self.size = size
def __repr__(self):
b1 = self.bbox.min_point
b2 = self.bbox.max_point
bbox_args = ', '.join(map(str, (b1.x, b1.y, b2.x, b2.y)))
other_args = ', '.join(map(str, (self.origin.x, self.origin.y,
self.size.x, self.size.y)))
return '{}\n{}\n'.format(bbox_args, other_args)
| lgpl-2.1 | 7,144,760,757,763,698,000 | 29.212569 | 79 | 0.542918 | false | 3.538645 | false | false | false |
yeephycho/densenet-tensorflow | data_provider/data_provider.py | 1 | 5117 | # Brief: Data provider for image classification using tfrecord
# Data: 28/Aug./2017
# E-mail: [email protected]
# License: Apache 2.0
# By: Yeephycho @ Hong Kong
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import os
import sys
import config as config
FLAGS = tf.app.flags.FLAGS
DATA_DIR = FLAGS.train_data_path
TRAINING_SET_SIZE = FLAGS.TRAINING_SET_SIZE
BATCH_SIZE = FLAGS.BATCH_SIZE
IMAGE_SIZE = FLAGS.IMAGE_SIZE
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
# image object from tfrecord
class _image_object:
def __init__(self):
self.image = tf.Variable([], dtype = tf.string, trainable=False)
self.height = tf.Variable([], dtype = tf.int64, trainable=False)
self.width = tf.Variable([], dtype = tf.int64, trainable=False)
self.filename = tf.Variable([], dtype = tf.string, trainable=False)
        self.label = tf.Variable([], dtype = tf.int64, trainable=False)
def read_and_decode(filename_queue):
with tf.name_scope('data_provider'):
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(serialized_example, features = {
"image/encoded": tf.FixedLenFeature([], tf.string),
"image/height": tf.FixedLenFeature([], tf.int64),
"image/width": tf.FixedLenFeature([], tf.int64),
"image/filename": tf.FixedLenFeature([], tf.string),
"image/class/label": tf.FixedLenFeature([], tf.int64),})
image_encoded = features["image/encoded"]
image_raw = tf.image.decode_jpeg(image_encoded, channels=3)
image_object = _image_object()
# image_object.image = tf.image.resize_image_with_crop_or_pad(image_raw, IMAGE_SIZE, IMAGE_SIZE)
image_object.image = tf.image.resize_images(image_raw, [IMAGE_SIZE, IMAGE_SIZE], method=0, align_corners=True)
image_object.height = features["image/height"]
image_object.width = features["image/width"]
image_object.filename = features["image/filename"]
image_object.label = tf.cast(features["image/class/label"], tf.int64)
return image_object
def feed_data(if_random = True, if_training = True):
with tf.name_scope('image_reader_and_preprocessor') as scope:
if(if_training):
filenames = [os.path.join(DATA_DIR, "train.tfrecord")]
else:
filenames = [os.path.join(DATA_DIR, "test.tfrecord")]
for f in filenames:
if not tf.gfile.Exists(f):
raise ValueError("Failed to find file: " + f)
filename_queue = tf.train.string_input_producer(filenames)
image_object = read_and_decode(filename_queue)
if(if_training):
image = tf.cast(tf.image.random_flip_left_right(image_object.image), tf.float32)
# image = tf.image.adjust_gamma(tf.cast(image_object.image, tf.float32), gamma=1, gain=1) # Scale image to (0, 1)
# image = tf.image.per_image_standardization(image)
else:
image = tf.cast(image_object.image, tf.float32)
# image = tf.image.per_image_standardization(image_object.image)
label = image_object.label
filename = image_object.filename
if(if_training):
num_preprocess_threads = 2
else:
num_preprocess_threads = 1
if(if_random):
min_fraction_of_examples_in_queue = 0.4
min_queue_examples = int(TRAINING_SET_SIZE * min_fraction_of_examples_in_queue)
print("Filling queue with %d images before starting to train. " "This will take some time." % min_queue_examples)
image_batch, label_batch, filename_batch = tf.train.shuffle_batch(
[image, label, filename],
batch_size = BATCH_SIZE,
num_threads = num_preprocess_threads,
capacity = min_queue_examples + 3 * BATCH_SIZE,
min_after_dequeue = min_queue_examples)
image_batch = tf.reshape(image_batch, (BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, 3))
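            # Labels in the tfrecord are 1-based; shift them to 0-based
            # before one-hot encoding into the 5 classes.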
label_offset = -tf.ones([BATCH_SIZE], dtype=tf.int64, name="label_batch_offset")
label_batch = tf.one_hot(tf.add(label_batch, label_offset), depth=5, on_value=1.0, off_value=0.0)
else:
image_batch, label_batch, filename_batch = tf.train.batch(
[image, label, filename],
batch_size = BATCH_SIZE,
num_threads = num_preprocess_threads)
image_batch = tf.reshape(image_batch, (BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, 3))
label_offset = -tf.ones([BATCH_SIZE], dtype=tf.int64, name="label_batch_offset")
label_batch = tf.one_hot(tf.add(label_batch, label_offset), depth=5, on_value=1.0, off_value=0.0)
return image_batch, label_batch, filename_batch
| apache-2.0 | -6,233,930,229,090,380,000 | 42.364407 | 125 | 0.632402 | false | 3.519257 | false | false | false |
jmrbcu/foundation | foundation/log_utils.py | 1 | 1148 | # python imports
import os
import sys
import logging
import logging.handlers
import platform
try:
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
def setup_root_logger(level=logging.DEBUG, formatter=None, log_file=None,
log_size=5242880, log_count=5):
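    """Configure the root logger with a stdout stream handler and, when
    ``log_file`` is given, a size-rotating file handler. Returns the logger."""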
logger = logging.getLogger()
logger.setLevel(level)
if formatter is None:
formatter = '"%(asctime)s - %(levelname)s - %(name)s - %(message)s"'
formatter = logging.Formatter(formatter)
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
logger.addHandler(NullHandler())
if log_file:
log_dir = os.path.dirname(log_file)
        if log_dir and not os.path.exists(log_dir):
os.makedirs(log_dir)
rotating_handler = logging.handlers.RotatingFileHandler(
filename=log_file,
maxBytes=log_size,
backupCount=log_count
)
rotating_handler.setFormatter(formatter)
logger.addHandler(rotating_handler)
return logger
| gpl-2.0 | 2,679,257,480,493,880,300 | 27 | 76 | 0.658537 | false | 4.02807 | false | false | false |
eranroz/revscoring | revscoring/languages/french.py | 1 | 1882 | import sys
import enchant
from nltk.corpus import stopwords
from nltk.stem.snowball import SnowballStemmer
from .language import Language, LanguageUtility
STEMMER = SnowballStemmer("french")
STOPWORDS = set(stopwords.words('french') + ['a'])
BADWORDS = set([
'anus',
'con', 'cul',
'fesse', 'Foutre',
'gay',
'herpes', 'hiv', 'homosexuel',
'idiot',
'lesbien',
'merde', 'merdique',
'penis', 'prostituee', 'Putain', 'putes',
'Salop', 'stupide',
])
STEMMED_BADWORDS = set(STEMMER.stem(w) for w in BADWORDS)
try:
DICTIONARY = enchant.Dict("fr")
except enchant.errors.DictNotFoundError:
raise ImportError("No enchant-compatible dictionary found for 'fr'. " +
"Consider installing 'myspell-fr'.")
def stem_word_process():
def stem_word(word):
return STEMMER.stem(word.lower())
return stem_word
stem_word = LanguageUtility("stem_word", stem_word_process, depends_on=[])
def is_badword_process(stem_word):
def is_badword(word):
return stem_word(word) in STEMMED_BADWORDS
return is_badword
is_badword = LanguageUtility("is_badword", is_badword_process,
depends_on=[stem_word])
def is_misspelled_process():
def is_misspelled(word):
return not DICTIONARY.check(word)
return is_misspelled
is_misspelled = LanguageUtility("is_misspelled", is_misspelled_process,
depends_on=[])
def is_stopword_process():
def is_stopword(word):
return word.lower() in STOPWORDS
return is_stopword
is_stopword = LanguageUtility("is_stopword", is_stopword_process, depends_on=[])
sys.modules[__name__] = Language(
__name__,
[stem_word, is_badword, is_misspelled, is_stopword]
)
"""
Implements :class:`~revscoring.languages.language.Language` for French. Comes
complete with all language utilities.
"""
| mit | -3,909,994,930,598,729,700 | 27.953846 | 80 | 0.659405 | false | 3.184433 | false | false | false |
nyu-mhealth/project-smsurvey | main/smsurvey/core/services/enrollment_service.py | 1 | 2019 | import time
import pytz
from datetime import datetime
from smsurvey.core.model.model import Model
from smsurvey.core.model.query.where import Where
class EnrollmentService:
@staticmethod
def get(enrollment_id):
enrollments = Model.repository.enrollments
return enrollments.select(Where(enrollments.id, Where.EQUAL, enrollment_id))
@staticmethod
def get_by_owner(owner_id):
enrollments = Model.repository.enrollments
return enrollments.select(Where(enrollments.owner_id, Where.EQUAL, owner_id), force_list=True)
@staticmethod
def add_enrollment(name, owner_id, open_date, close_date, expiry_date):
enrollments = Model.repository.enrollments
enrollment = enrollments.create()
enrollment.name = name
enrollment.owner_id = owner_id
enrollment.open_date = open_date
enrollment.close_date = close_date
enrollment.expiry_date = expiry_date
return enrollment.save()
@staticmethod
def delete_enrollment(enrollment_id):
enrollments = Model.repository.enrollments
enrollments.delete(Where(enrollments.id, Where.E, enrollment_id))
@staticmethod
def is_enrollment_open(enrollment_id):
enrollment = EnrollmentService.get(enrollment_id)
now = datetime.now()
return enrollment.open_date <= now < enrollment.close_date
@staticmethod
def enrollment_accessible(enrollment_id):
enrollment = EnrollmentService.get(enrollment_id)
return enrollment is not None and enrollment.expiry_date > datetime.now(tz=pytz.utc)
@staticmethod
def is_owned_by(enrollment_id, owner_id):
enrollment = EnrollmentService.get(enrollment_id)
return enrollment.owner_id == owner_id
@staticmethod
def participant_count(enrollment_id):
participants = Model.repository.participants
p = participants.select(Where(participants.enrollment_id, Where.E, enrollment_id), force_list=True)
return len(p)
| gpl-3.0 | 6,002,717,555,732,236,000 | 32.65 | 107 | 0.702823 | false | 4.120408 | false | false | false |
MazamaScience/ispaq | ispaq/utils.py | 1 | 7864 | """
Utility functions for ISPAQ.
:copyright:
Mazama Science
:license:
GNU Lesser General Public License, Version 3
(http://www.gnu.org/copyleft/lesser.html)
"""
from __future__ import (absolute_import, division, print_function)
import numpy as np
import pandas as pd
from obspy import UTCDateTime
# Utility functions ------------------------------------------------------------
def write_simple_df(df, filepath, sigfigs=6):
"""
Write a pretty dataframe with appropriate significant figures to a .csv file.
:param df: Dataframe of simpleMetrics.
:param filepath: File to be created.
:param sigfigs: Number of significant figures to use.
:return: status
"""
if df is None:
raise("Dataframe of simple metrics does not exist.")
# Sometimes 'starttime' and 'endtime' get converted from UTCDateTime to float and need to be
# converted back. Nothing happens if this column is already of type UTCDateTime.
df.starttime = df.starttime.apply(UTCDateTime, precision=0) # no milliseconds
df.endtime = df.endtime.apply(UTCDateTime, precision=0) # no milliseconds
# Get pretty values
pretty_df = format_simple_df(df, sigfigs=sigfigs)
# Reorder columns, putting non-standard columns at the end and omitting 'qualityFlag'
columns = ['snclq','starttime','endtime','metricName','value']
original_columns = pretty_df.columns
extra_columns = list( set(original_columns).difference(set(columns)) )
extra_columns.remove('qualityFlag')
columns.extend(extra_columns)
# Write out .csv file
pretty_df[columns].to_csv(filepath, index=False)
# No return value
def format_simple_df(df, sigfigs=6):
"""
Create a pretty dataframe with appropriate significant figures.
:param df: Dataframe of simpleMetrics.
:param sigfigs: Number of significant figures to use.
:return: Dataframe of simpleMetrics.
The following conversions take place:
* Round the 'value' column to the specified number of significant figures.
* Convert 'starttime' and 'endtime' to python 'date' objects.
"""
# TODO: Why is type(df.value[0]) = 'str' at this point? Because metrics are always character strings?
# First convert 'N' to missing value
N_mask = df.value.str.contains('^N$')
df.loc[N_mask,'value'] = np.nan
    # Then convert the rest of the values to float
df.value = df.value.astype(float)
format_string = "." + str(sigfigs) + "g"
df.value = df.value.apply(lambda x: format(x, format_string))
if 'starttime' in df.columns:
df.starttime = df.starttime.apply(UTCDateTime, precision=0) # no milliseconds
df.starttime = df.starttime.apply(lambda x: x.strftime("%Y-%m-%dT%H:%M:%S"))
if 'endtime' in df.columns:
df.endtime = df.endtime.apply(UTCDateTime, precision=0) # no milliseconds
df.endtime = df.endtime.apply(lambda x: x.strftime("%Y-%m-%dT%H:%M:%S"))
# NOTE: df.time from SNR metric is already a string, otherwise it is NA
#if 'time' in df.columns:
#df.time = df.time.apply(lambda x: x.format_iris_web_service())
if 'qualityFlag' in df.columns:
df.qualityFlag = df.qualityFlag.astype(int)
return df
def write_numeric_df(df, filepath, sigfigs=6):
"""
Write a pretty dataframe with appropriate significant figures to a .csv file.
:param df: PSD dataframe.
:param filepath: File to be created.
:param sigfigs: Number of significant figures to use.
:return: status
"""
# Get pretty values
pretty_df = format_numeric_df(df, sigfigs=sigfigs)
# Write out .csv file
pretty_df.to_csv(filepath, index=False)
# No return value
def format_numeric_df(df, sigfigs=6):
"""
Create a pretty dataframe with appropriate significant figures.
:param df: Dataframe with only UTCDateTimes or numeric.
:param sigfigs: Number of significant figures to use.
:return: Dataframe of simpleMetrics.
The following conversions take place:
* Round the 'value' column to the specified number of significant figures.
* Convert 'starttime' and 'endtime' to python 'date' objects.
"""
format_string = "." + str(sigfigs) + "g"
for column in df.columns:
if column == 'starttime':
df.starttime = df.starttime.apply(UTCDateTime, precision=0) # no milliseconds
df.starttime = df.starttime.apply(lambda x: x.strftime("%Y-%m-%dT%H:%M:%S"))
elif column == 'endtime':
df.endtime = df.endtime.apply(UTCDateTime, precision=0) # no milliseconds
df.endtime = df.endtime.apply(lambda x: x.strftime("%Y-%m-%dT%H:%M:%S"))
elif column == 'target':
pass # 'target' is the SNCL Id
else:
df[column] = df[column].astype(float)
df[column] = df[column].apply(lambda x: format(x, format_string))
return df
def get_slot(r_object, prop):
"""
Return a property from the R_Stream.
:param r_object: IRISSeismic Stream, Trace or TraceHeader object
:param prop: Name of slot in the R object or any child object
:return: python version value contained in the named property (aka 'slot')
This convenience function allows business logic code to easily extract
any property that is an atomic value in one of the R objects defined in
the IRISSeismic R package.
IRISSeismic slots as of 2016-04-07
stream_slots = r_stream.slotnames()
* url
* requestedStarttime
* requestedEndtime
* act_flags
* io_flags
* dq_flags
* timing_qual
* traces
trace_slots = r_stream.do_slot('traces')[0].slotnames()
* stats
* Sensor
* InstrumentSensitivity
* InputUnits
* data
stats_slots = r_stream.do_slot('traces')[0].do_slot('stats').slotnames()
* sampling_rate
* delta
* calib
* npts
* network
* location
* station
* channel
* quality
* starttime
* endtime
* processing
"""
slotnames = list(r_object.slotnames())
# R Stream object
if 'traces' in slotnames:
if prop in ['traces']:
# return traces as R objects
return r_object.do_slot(prop)
elif prop in ['requestedStarttime','requestedEndtime']:
# return times as UTCDateTime
return UTCDateTime(r_object.do_slot(prop)[0])
elif prop in slotnames:
# return atmoic types as is
return r_object.do_slot(prop)[0]
else:
# looking for a property from from lower down the hierarchy
r_object = r_object.do_slot('traces')[0]
slotnames = list(r_object.slotnames())
# R Trace object
if 'stats' in slotnames:
if prop in ['stats']:
# return stats as an R object
return r_object.do_slot(prop)
elif prop in ['data']:
# return data as an array
return list(r_object.do_slot(prop))
elif prop in slotnames:
# return atmoic types as is
return r_object.do_slot(prop)[0]
else:
# looking for a property from from lower down the hierarchy
r_object = r_object.do_slot('stats')
slotnames = list(r_object.slotnames())
# R TraceHeader object
if 'processing' in slotnames:
if prop in ['starttime','endtime']:
# return times as UTCDateTime
return UTCDateTime(r_object.do_slot(prop)[0])
else:
# return atmoic types as is
return r_object.do_slot(prop)[0]
# Should never get here
raise('"%s" is not a recognized slot name' % (prop))
if __name__ == '__main__':
import doctest
doctest.testmod(exclude_empty=True)
| gpl-3.0 | -5,262,944,125,174,644,000 | 34.107143 | 106 | 0.626907 | false | 3.862475 | false | false | false |
geodynamics/pylith | examples/2d/subduction/viz/plot_shearratio.py | 1 | 4434 | #!/usr/bin/env pvpython
# -*- Python -*- (syntax highlighting)
# ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University at Buffalo
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2021 University of California, Davis
#
# See LICENSE.md.md for license information.
#
# ----------------------------------------------------------------------
# Plot the undeformed domain as a gray wireframe and then the fault
# surfaces, colored by the magnitude of fault slip.
#
# This Python script runs using pvpython or within the ParaView Python
# shell.
# User-specified parameters.
#
# Default values for parameters. To use different values, overwrite
# them in the ParaView Python shell or on the command line. For
# example, set OUTPUT_DIR to the absolute path if not starting
# ParaView from the terminal shell where you ran PyLith:
#
# import os
# OUTPUT_DIR = os.path.join(os.environ["HOME"], "src", "pylith", "examples", "2d", "subduction", "output")
DEFAULTS = {
"OUTPUT_DIR": "output",
"SIM": "step05",
"FAULTS": ["fault-slabtop"],
"TIMESTEP": 0, # Use 0 for first, -1 for last.
}
# ----------------------------------------------------------------------
from paraview.simple import *
import os
import numpy
def visualize(parameters):
# Disable automatic camera reset on "Show"
paraview.simple._DisableFirstRenderCameraReset()
# Read fault data
dataFaults = []
for fault in parameters.faults:
filename = os.path.join(parameters.output_dir, "%s-%s.xmf" % (parameters.sim, fault))
if not os.path.isfile(filename):
raise IOError("File '%s' does not exist." % filename)
data = XDMFReader(FileNames=[filename])
RenameSource("%s-%s" % (parameters.sim, fault), data)
dataFaults.append(data)
groupFaults = GroupDatasets(Input=dataFaults)
scene = GetAnimationScene()
scene.UpdateAnimationUsingDataTimeSteps()
view = GetActiveViewOrCreate('RenderView')
# Ratio of shear to normal traction
calculatorRatio = Calculator(Input=groupFaults)
calculatorRatio.Function = '-abs(traction_X)/traction_Y'
calculatorRatio.ResultArrayName = 'shearRatio'
ratioDisplay = Show(calculatorRatio, view)
ColorBy(ratioDisplay, ('POINTS', 'shearRatio'))
ratioDisplay.RescaleTransferFunctionToDataRange(True)
ratioDisplay.SetScalarBarVisibility(view, True)
ratioDisplay.SetRepresentationType('Wireframe')
ratioDisplay.LineWidth = 8.0
# Rescale color and/or opacity maps used to exactly fit the current data range
    ratioLUT = GetColorTransferFunction('shearRatio')
ratioDisplay.RescaleTransferFunctionToDataRange(False, False)
# Update a scalar bar component title.
UpdateScalarBarsComponentTitle(ratioLUT, ratioDisplay)
# Annotate time
tstamp = AnnotateTimeFilter(groupFaults)
tstamp.Format = 'Time: %2.0f yr'
tstamp.Scale = 3.168808781402895e-08 # seconds to years
tstampDisplay = Show(tstamp, view)
tstampDisplay.FontFamily = "Courier"
tstampDisplay.FontSize = 14
view.ResetCamera()
view.Update()
Render()
class Parameters(object):
keys = ("OUTPUT_DIR", "SIM", "FAULTS")
def __init__(self):
globalVars = globals()
for key in Parameters.keys:
if key in globalVars.keys():
setattr(self, key.lower(), globalVars[key])
else:
setattr(self, key.lower(), DEFAULTS[key])
return
# ----------------------------------------------------------------------
if __name__ == "__main__":
# Running from outside the ParaView GUI via pvpython
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--sim", action="store", dest="sim", default=DEFAULTS["SIM"])
parser.add_argument("--faults", action="store", dest="faults")
args = parser.parse_args()
if args.faults:
args.faults = args.faults.split(",")
else:
args.faults = DEFAULT["FAULTS"]
    args.output_dir = DEFAULTS["OUTPUT_DIR"]
    visualize(args)
view = GetRenderView()
view.ViewSize = [960, 540]
view.Update()
Interact()
else:
# Running inside the ParaView GUI
visualize(Parameters())
# End of file
| mit | -3,442,991,986,056,270,300 | 30.899281 | 106 | 0.632837 | false | 3.812554 | false | false | false |
anshulc95/exch | exch/cli.py | 1 | 2590 | #!/usr/bin/env python3
""""
exch
=====
A CLI application built using python to see currency exchange rates.
:copyright: (c) 2017 by Anshul Chauhan
"""
import json
import click
import pkg_resources
from exch.helpers import fixer, fixer_sync
from exch.file_handling import get_default_base, get_default_target,\
set_default_base, set_default_target
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
RATES_FIXER_JSON_FILE = pkg_resources.resource_filename('exch', 'data/fixer_rates.json')
DEFAULT_JSON_FILE = pkg_resources.resource_filename('exch', 'data/defaults.json')
@click.group(invoke_without_command=True)
@click.pass_context
@click.option('--base', '-b', default=get_default_base(DEFAULT_JSON_FILE),
type=str, show_default=True,
help='Currency you are converting from.')
@click.option('--target', '-t', default=get_default_target(DEFAULT_JSON_FILE),
type=str, show_default=True, help='Currency you\'re converting to.')
@click.option('--amount', '-a', default=1.0, type=float, show_default=True,
help='Amount to convert.')
@click.option('--set_base', '-sb', is_flag=True, default=False,
help='Set new default base.')
@click.option('--set_target', '-st', is_flag=True, default=False,
help='Set new default target.')
def cli(ctx, base, target, amount, set_base, set_target):
"""
Get the latetst currency exchange rates from:
\b
- fixer.io
"""
if ctx.invoked_subcommand is None:
output = fixer(base, target, amount, RATES_FIXER_JSON_FILE)
if isinstance(output, float):
# 2:.2f for two decimal values, manually specified
output = "{0} {1} = {2:.2f} {3}".format(amount, base, output, target)
if set_base:
set_default_base(base, DEFAULT_JSON_FILE)
if set_target:
set_default_target(target, DEFAULT_JSON_FILE)
click.echo(output)
# subcommands
@cli.command()
def currencies():
""" prints the list of currencies available """
with open(RATES_FIXER_JSON_FILE) as rates_json_file:
json_rates = json.load(rates_json_file)
list_of_currencies = []
list_of_currencies.append(json_rates['base'])
for key in json_rates['rates']:
list_of_currencies.append(key)
list_of_currencies.sort()
click.echo(', '.join(list_of_currencies))
@cli.command()
def sync():
""" download the latest rates """
if fixer_sync(RATES_FIXER_JSON_FILE) in range(200, 300):
click.echo("New rates have been saved.")
| mit | 850,260,063,519,048,700 | 32.636364 | 88 | 0.642085 | false | 3.394495 | false | false | false |
devpixelwolf/Pixelboard | src/money/service.py | 1 | 1135 | # -*- coding: utf-8 -*-
from datetime import datetime
from urllib2 import urlopen
import json
from django.http.response import JsonResponse
from django.shortcuts import render_to_response
def get_embed():
info = get_info()
return render_to_response('money/widget.html', {'info': info}).content
def get_info():
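    # Fetch the latest currency quotes from the promasters API and format
    # each value with a comma decimal separator (pt-BR convention).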
url = 'http://api.promasters.net.br/cotacao/v1/valores'
resposta = urlopen(url).read()
data = json.loads(resposta.decode('utf-8'))
    # Access the values/keys of the dictionary/JSON.
ARS = (data['valores']['ARS']['valor'])
BTC = (data['valores']['BTC']['valor'])
EUR = (data['valores']['EUR']['valor'])
GBP = (data['valores']['GBP']['valor'])
USD = (data['valores']['USD']['valor'])
return {'ars': {'value': ('%.2f' % ARS).replace('.', ','), 'char': '$'},
'btc': {'value': ('%.2f' % BTC).replace('.', ','), 'char': '฿'},
'eur': {'value': ('%.2f' % EUR).replace('.', ','), 'char': '€'},
'gbp': {'value': ('%.2f' % GBP).replace('.', ','), 'char': '£'},
'usd': {'value': ('%.4f' % USD).replace('.', ','), 'char': 'US$'}}
| gpl-3.0 | 7,660,595,988,941,571,000 | 34.3125 | 78 | 0.537168 | false | 2.997347 | false | false | false |
wangjiezhe/FetchNovels | novel/base.py | 1 | 3729 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from urllib.error import HTTPError
from urllib.parse import urlparse
import pypinyin
from lxml.etree import XMLSyntaxError
from pyquery import PyQuery
from requests import ConnectionError
from novel.config import get_headers, update_and_save_novel_list
from novel.decorators import retry
from novel.utils import Tool
class BaseNovel(object):
def __init__(self, url,
headers=None, proxies=None,
encoding='UTF-8', tool=None,
tid=None, cache=False):
self.url = url
self._headers = headers or get_headers()
self._proxies = proxies
self.encoding = encoding
self.tool = tool or Tool
self._tid = tid
self.cache = cache
self.running = False
self.overwrite = True
self.refine = self.doc = None
self.title = self.author = ''
@property
def tid(self):
if self._tid is not None:
return str(self._tid)
else:
tp = pypinyin.slug(self.title, errors='ignore', separator='_')
ap = pypinyin.slug(self.author, errors='ignore', separator='_')
tid = '{} {}'.format(tp, ap)
return tid
@classmethod
def get_source_from_class(cls):
return cls.__name__.lower()
def get_source_from_url(self):
        source = urlparse(self.url).netloc
        # str.lstrip('www.') would strip characters, not the 'www.' prefix
        if source.startswith('www.'):
            source = source[len('www.'):]
        return source.replace('.', '_')
@property
def source(self):
return self.get_source_from_class()
def run(self, refresh=False):
if self.running and not refresh:
return
self.refine = self.tool().refine
self.doc = self.get_doc()
self.running = True
def close(self):
return
def update_novel_list(self):
update_and_save_novel_list(self.source, self.tid)
@retry((HTTPError, XMLSyntaxError, ConnectionError))
def get_doc(self):
return PyQuery(url=self.url, headers=self.headers,
proxies=self.proxies, encoding=self.encoding)
@property
def headers(self):
return self._headers
@headers.setter
def headers(self, value):
self._headers = value or {}
@property
def proxies(self):
return self._proxies
@proxies.setter
def proxies(self, value):
self._proxies = value or {}
def dump(self):
raise NotImplementedError('dump')
def dump_and_close(self):
self.run()
self.dump()
self.close()
class SinglePage(BaseNovel):
def __init__(self, url, selector,
headers=None, proxies=None,
encoding='UTF-8', tool=None,
tid=None, cache=False):
super().__init__(url, headers, proxies, encoding, tool, tid, cache)
self.selector = selector
self.content = ''
def run(self, refresh=False):
super().run(refresh=refresh)
if not self.title:
self.title = self.get_title()
if not self.cache:
self.content = self.get_content()
def get_content(self):
if not self.selector:
return ''
content = self.doc(self.selector).html() or ''
content = self.refine(content)
return content
def get_title(self):
if self.title:
return self.title
else:
raise NotImplementedError('get_title')
def dump(self):
filename = '{self.title}.txt'.format(self=self)
print(self.title)
with open(filename, 'w') as fp:
fp.write(self.title)
fp.write('\n\n\n\n')
fp.write(self.content)
fp.write('\n')
| gpl-3.0 | -2,477,648,472,728,676,000 | 25.635714 | 75 | 0.574685 | false | 4.013994 | false | false | false |
pkgw/pwkit | pwkit/lsqmdl.py | 1 | 35401 | # -*- mode: python; coding: utf-8 -*-
# Copyright 2012-2018 Peter Williams <[email protected]> and collaborators.
# Licensed under the MIT License.
"""Model data with least-squares fitting
This module provides tools for fitting models to data using least-squares
optimization.
"""
from __future__ import absolute_import, division, print_function
__all__ = 'ModelBase Model ComposedModel PolynomialModel ScaleModel'.split()
import numpy as np
try:
# numpy 1.7
import numpy.polynomial.polynomial as npoly
except ImportError:
import numpy.polynomial as npoly
from six import get_function_code
from six.moves import range, reduce
from . import binary_type, text_type
class Parameter(object):
"""Information about a parameter in a least-squares model.
These data may only be obtained after solving least-squares problem. These
objects reference information from their parent objects, so changing the
parent will alter the apparent contents of these objects.
"""
def __init__(self, owner, index):
self._owner = owner
self._index = index
def __repr__(self):
return '<Parameter "%s" (#%d) of %s>' % (self.name, self._index, self._owner)
@property
def index(self): # make this read-only
"The parameter's index in the Model's arrays."
return self._index
@property
def name(self):
"The parameter's name."
return self._owner.pnames[self._index]
@property
def value(self):
"The parameter's value."
return self._owner.params[self._index]
@property
def uncert(self):
"The uncertainty in :attr:`value`."
return self._owner.puncerts[self._index]
@property
def uval(self):
"Accesses :attr:`value` and :attr:`uncert` as a :class:`pwkit.msmt.Uval`."
from .msmt import Uval
return Uval.from_norm(self.value, self.uncert)
class ModelBase(object):
"""An abstract base class holding data and a model for least-squares fitting.
The models implemented in this module all derive from this class and so
inherit the attributes and methods described below.
A :class:`Parameter` data structure may be obtained by indexing this
object with either the parameter's numerical index or its name. I.e.::
m = Model(...).solve(...)
p = m['slope']
print(p.name, p.value, p.uncert, p.uval)
"""
data = None
"The data to be modeled; an *n*-dimensional Numpy array."
invsigma = None
"Data weights: 1/σ for each data point."
params = None
"After fitting, a Numpy ndarray of solved model parameters."
puncerts = None
"After fitting, a Numpy ndarray of 1σ uncertainties on the model parameters."
pnames = None
"A list of textual names for the parameters."
covar = None
"""After fitting, the variance-covariance matrix representing the parameter
uncertainties.
"""
mfunc = None
"""After fitting, a callable function evaluating the model fixed at best params.
The resulting function may or may not take arguments depending on the particular
kind of model being evaluated.
"""
mdata = None
"After fitting, the modeled data at the best parameters."
chisq = None
"After fitting, the χ² of the fit."
rchisq = None
"After fitting, the reduced χ² of the fit, or None if there are no degrees of freedom."
resids = None
"After fitting, the residuals: ``resids = data - mdata``."
def __init__(self, data, invsigma=None):
self.set_data(data, invsigma)
def set_data(self, data, invsigma=None):
"""Set the data to be modeled.
Returns *self*.
"""
self.data = np.array(data, dtype=np.float, ndmin=1)
if invsigma is None:
self.invsigma = np.ones(self.data.shape)
else:
i = np.array(invsigma, dtype=np.float)
self.invsigma = np.broadcast_arrays(self.data, i)[1] # allow scalar invsigma
if self.invsigma.shape != self.data.shape:
raise ValueError('data values and inverse-sigma values must have same shape')
return self
def print_soln(self):
"""Print information about the model solution."""
lmax = reduce(max,(len(x) for x in self.pnames), len('r chi sq'))
if self.puncerts is None:
for pn, val in zip(self.pnames, self.params):
print('%s: %14g' % (pn.rjust(lmax), val))
else:
for pn, val, err in zip(self.pnames, self.params, self.puncerts):
frac = abs(100. * err / val)
print('%s: %14g +/- %14g (%.2f%%)' % (pn.rjust(lmax), val, err, frac))
if self.rchisq is not None:
print('%s: %14g' % ('r chi sq'.rjust(lmax), self.rchisq))
elif self.chisq is not None:
print('%s: %14g' % ('chi sq'.rjust(lmax), self.chisq))
else:
print('%s: unknown/undefined' % ('r chi sq'.rjust(lmax)))
return self
def make_frozen_func(self, params):
"""Return a data-generating model function frozen at the specified parameters.
As with the :attr:`mfunc` attribute, the resulting function may or may
not take arguments depending on the particular kind of model being
evaluated.
"""
raise NotImplementedError()
def __getitem__(self, key):
if isinstance(key, binary_type):
# If you're not using the unicode_literals __future__, things get
# annoying really quickly without this.
key = text_type(key)
if isinstance(key, int):
idx = key
if idx < 0 or idx >= len(self.pnames):
raise ValueError('illegal parameter number %d' % key)
elif isinstance(key, text_type):
try:
idx = self.pnames.index(key)
except ValueError:
raise ValueError('no such parameter named "%s"' % key)
else:
raise ValueError('illegal parameter key %r' % key)
return Parameter(self, idx)
def plot(self, modelx, dlines=False, xmin=None, xmax=None,
ymin=None, ymax=None, **kwargs):
"""Plot the data and model (requires `omega`).
This assumes that `data` is 1D and that `mfunc` takes one argument
that should be treated as the X variable.
"""
import omega as om
modelx = np.asarray(modelx)
if modelx.shape != self.data.shape:
raise ValueError('modelx and data arrays must have same shape')
modely = self.mfunc(modelx)
sigmas = self.invsigma**-1 # TODO: handle invsigma = 0
vb = om.layout.VBox(2)
vb.pData = om.quickXYErr(modelx, self.data, sigmas,
'Data', lines=dlines, **kwargs)
vb[0] = vb.pData
vb[0].addXY(modelx, modely, 'Model')
vb[0].setYLabel('Y')
vb[0].rebound(False, True)
vb[0].setBounds(xmin, xmax, ymin, ymax)
vb[1] = vb.pResid = om.RectPlot()
        vb[1].defaultField.xaxis = vb[0].defaultField.xaxis
vb[1].addXYErr(modelx, self.resids, sigmas, None, lines=False)
vb[1].setLabels('X', 'Residuals')
vb[1].rebound(False, True)
# ignore Y values since residuals are on different scale:
vb[1].setBounds(xmin, xmax)
vb.setWeight(0, 3)
return vb
def show_cov(self):
"Show the parameter covariance matrix with `pwkit.ndshow_gtk3`."
# would be nice: labels with parameter names (hard because this is
# ndshow, not omegaplot)
from .ndshow_gtk3 import view
view(self.covar, title='Covariance Matrix')
def show_corr(self):
"Show the parameter correlation matrix with `pwkit.ndshow_gtk3`."
from .ndshow_gtk3 import view
d = np.diag(self.covar) ** -0.5
corr = self.covar * d[np.newaxis,:] * d[:,np.newaxis]
view(corr, title='Correlation Matrix')
class Model(ModelBase):
"""Models data with a generic nonlinear optimizer
Basic usage is::
def func(p1, p2, x):
simulated_data = p1 * x + p2
return simulated_data
x = [1, 2, 3]
data = [10, 14, 15.8]
mdl = Model(func, data, args=(x,)).solve(guess).print_soln()
The :class:`Model` constructor can take an optional argument ``invsigma``
after ``data``; it specifies *inverse sigmas*, **not** inverse *variances*
(the usual statistical weights), for the data points. Since most
applications deal in sigmas, take care to write::
m = Model(func, data, 1. / uncerts) # right!
not::
m = Model(func, data, uncerts) # WRONG
    If you have zero uncertainty on a measurement, you must find a way to
express that constraint without including that measurement as part of the
``data`` vector.
"""
lm_prob = None
"""A :class:`pwkit.lmmin.Problem` instance describing the problem to be solved.
After setting up the data-generating function, you can access this item to
tune the solver.
"""
def __init__(self, simple_func, data, invsigma=None, args=()):
if simple_func is not None:
self.set_simple_func(simple_func, args)
if data is not None:
self.set_data(data, invsigma)
def set_func(self, func, pnames, args=()):
"""Set the model function to use an efficient but tedious calling convention.
The function should obey the following convention::
def func(param_vec, *args):
modeled_data = { do something using param_vec }
return modeled_data
This function creates the :class:`pwkit.lmmin.Problem` so that the
caller can futz with it before calling :meth:`solve`, if so desired.
Returns *self*.
"""
from .lmmin import Problem
self.func = func
self._args = args
self.pnames = list(pnames)
self.lm_prob = Problem(len(self.pnames))
return self
def set_simple_func(self, func, args=()):
"""Set the model function to use a simple but somewhat inefficient calling
convention.
The function should obey the following convention::
def func(param0, param1, ..., paramN, *args):
modeled_data = { do something using the parameters }
return modeled_data
Returns *self*.
"""
code = get_function_code(func)
npar = code.co_argcount - len(args)
pnames = code.co_varnames[:npar]
def wrapper(params, *args):
return func(*(tuple(params) + args))
return self.set_func(wrapper, pnames, args)
def make_frozen_func(self, params):
"""Returns a model function frozen to the specified parameter values.
Any remaining arguments are left free and must be provided when the
function is called.
For this model, the returned function is the application of
:func:`functools.partial` to the :attr:`func` property of this object.
"""
params = np.array(params, dtype=np.float, ndmin=1)
from functools import partial
return partial(self.func, params)
def solve(self, guess):
"""Solve for the parameters, using an initial guess.
This uses the Levenberg-Marquardt optimizer described in
:mod:`pwkit.lmmin`.
Returns *self*.
"""
guess = np.array(guess, dtype=np.float, ndmin=1)
f = self.func
args = self._args
def lmfunc(params, vec):
vec[:] = f(params, *args).flatten()
self.lm_prob.set_residual_func(self.data.flatten(),
self.invsigma.flatten(),
lmfunc, None)
self.lm_soln = soln = self.lm_prob.solve(guess)
self.params = soln.params
self.puncerts = soln.perror
self.covar = soln.covar
self.mfunc = self.make_frozen_func(soln.params)
# fvec = resids * invsigma = (data - mdata) * invsigma
self.resids = soln.fvec.reshape(self.data.shape) / self.invsigma
self.mdata = self.data - self.resids
# lm_soln.fnorm can be unreliable ("max(fnorm, fnorm1)" branch)
self.chisq = (self.lm_soln.fvec**2).sum()
if soln.ndof > 0:
self.rchisq = self.chisq / soln.ndof
return self
class PolynomialModel(ModelBase):
"""Least-squares polynomial fit.
Because this is a very specialized kind of problem, we don't need an
initial guess to solve, and we can use fast built-in numerical routines.
The output parameters are named "a0", "a1", ... and are stored in that
order in PolynomialModel.params[]. We have ``y = sum(x**i * a[i])``, so
"a2" = "params[2]" is the quadratic term, etc.
This model does *not* give uncertainties on the derived coefficients. The
as_nonlinear() method can be use to get a `Model` instance with
uncertainties.
Methods:
as_nonlinear - Return a (lmmin-based) `Model` equivalent to self.
"""
def __init__(self, maxexponent, x, data, invsigma=None):
self.maxexponent = maxexponent
self.x = np.array(x, dtype=np.float, ndmin=1, copy=False, subok=True)
self.set_data(data, invsigma)
def make_frozen_func(self, params):
return lambda x: npoly.polyval(x, params)
def solve(self):
self.pnames = ['a%d' % i for i in range(self.maxexponent + 1)]
self.params = npoly.polyfit(self.x, self.data, self.maxexponent,
w=self.invsigma)
self.puncerts = None # does anything provide this? could farm out to lmmin ...
self.covar = None
self.mfunc = self.make_frozen_func(self.params)
self.mdata = self.mfunc(self.x)
self.resids = self.data - self.mdata
self.chisq = ((self.resids * self.invsigma)**2).sum()
if self.x.size > self.maxexponent + 1:
self.rchisq = self.chisq / (self.x.size - (self.maxexponent + 1))
return self
def as_nonlinear(self, params=None):
"""Return a `Model` equivalent to this object. The nonlinear solver is less
efficient, but lets you freeze parameters, compute uncertainties, etc.
If the `params` argument is provided, solve() will be called on the
returned object with those parameters. If it is `None` and this object
    has parameters in `self.params`, those will be used. Otherwise, solve()
will not be called on the returned object.
"""
if params is None:
params = self.params
nlm = Model(None, self.data, self.invsigma)
nlm.set_func(lambda p, x: npoly.polyval(x, p),
self.pnames,
args=(self.x,))
if params is not None:
nlm.solve(params)
return nlm
class ScaleModel(ModelBase):
"""Solve `data = m * x` for `m`."""
def __init__(self, x, data, invsigma=None):
self.x = np.array(x, dtype=np.float, ndmin=1, copy=False, subok=True)
self.set_data(data, invsigma)
def make_frozen_func(self, params):
return lambda x: params[0] * x
def solve(self):
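        # Weighted least squares for data = m*x has the closed form
        #   m = sum(w^2 x y) / sum(w^2 x^2),  sigma_m = 1/sqrt(sum(w^2 x^2))
        # with w = invsigma for each point.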
w2 = self.invsigma**2
sxx = np.dot(self.x**2, w2)
sxy = np.dot(self.x * self.data, w2)
m = sxy / sxx
uc_m = 1. / np.sqrt(sxx)
self.pnames = ['m']
self.params = np.asarray([m])
self.puncerts = np.asarray([uc_m])
self.covar = self.puncerts.reshape((1, 1))
self.mfunc = lambda x: m * x
self.mdata = m * self.x
self.resids = self.data - self.mdata
self.chisq = ((self.resids * self.invsigma)**2).sum()
self.rchisq = self.chisq / (self.x.size - 1)
return self
# lmmin-based model-fitting when the model is broken down into composable
# components.
class ModelComponent(object):
npar = 0
name = None
pnames = ()
nmodelargs = 0
setguess = None
setvalue = None
setlimit = None
_accum_mfunc = None
def __init__(self, name=None):
self.name = name
def _param_names(self):
"""Overridable in case the list of parameter names needs to be
generated on the fly."""
return self.pnames
def finalize_setup(self):
"""If the component has subcomponents, this should set their `name`,
`setguess`, `setvalue`, and `setlimit` properties. It should also
set `npar` (on self) to the final value."""
pass
def prep_params(self):
"""This should make any necessary calls to `setvalue` or `setlimit`,
though in straightforward cases it should just be up to the user to
do this. If the component has subcomponents, their `prep_params`
functions should be called."""
pass
def model(self, pars, mdata):
"""Modify `mdata` based on `pars`."""
pass
def deriv(self, pars, jac):
"""Compute the Jacobian. `jac[i]` is d`mdata`/d`pars[i]`."""
pass
def extract(self, pars, perr, cov):
"""Extract fit results into the object for ease of inspection."""
self.covar = cov
def _outputshape(self, *args):
"""This is a helper for evaluating the model function at fixed parameters. To
work in the ComposedModel paradigm, we have to allocate an empty array
to hold the model output before we can fill it via the _accum_mfunc
functions. We can't do that without knowing what size it will be. That
size has to be a function of the "free" parameters to the model
function that are implicit/fixed during the fitting process. Given these "free"
parameters, _outputshape returns the shape that the output will have."""
raise NotImplementedError()
def mfunc(self, *args):
if len(args) != self.nmodelargs:
raise TypeError('model function expected %d arguments, got %d' %
(self.nmodelargs, len(args)))
result = np.zeros(self._outputshape(*args))
self._accum_mfunc(result, *args)
return result
class ComposedModel(ModelBase):
def __init__(self, component, data, invsigma=None):
if component is not None:
self.set_component(component)
if data is not None:
self.set_data(data, invsigma)
def _component_setguess(self, vals, ofs=0):
vals = np.asarray(vals)
if ofs < 0 or ofs + vals.size > self.component.npar:
raise ValueError('ofs %d, vals.size %d, npar %d' %
(ofs, vals.size, self.component.npar))
self.force_guess[ofs:ofs+vals.size] = vals
def _component_setvalue(self, cidx, val, fixed=False):
if cidx < 0 or cidx >= self.component.npar:
raise ValueError('cidx %d, npar %d' % (cidx, self.component.npar))
self.lm_prob.p_value(cidx, val, fixed=fixed)
self.force_guess[cidx] = val
def _component_setlimit(self, cidx, lower=-np.inf, upper=np.inf):
if cidx < 0 or cidx >= self.component.npar:
raise ValueError('cidx %d, npar %d' % (cidx, self.component.npar))
self.lm_prob.p_limit(cidx, lower, upper)
def set_component(self, component):
self.component = component
component.setguess = self._component_setguess
component.setvalue = self._component_setvalue
component.setlimit = self._component_setlimit
component.finalize_setup()
from .lmmin import Problem
self.lm_prob = Problem(component.npar)
self.force_guess = np.empty(component.npar)
self.force_guess.fill(np.nan)
self.pnames = list(component._param_names())
component.prep_params()
def solve(self, guess=None):
if guess is None:
guess = self.force_guess
else:
guess = np.array(guess, dtype=np.float, ndmin=1, copy=True)
for i in range(self.force_guess.size):
if np.isfinite(self.force_guess[i]):
guess[i] = self.force_guess[i]
def model(pars, outputs):
outputs.fill(0)
self.component.model(pars, outputs)
self.lm_model = model
self.lm_deriv = self.component.deriv
self.lm_prob.set_residual_func(self.data, self.invsigma, model,
self.component.deriv)
self.lm_soln = soln = self.lm_prob.solve(guess)
self.params = soln.params
self.puncerts = soln.perror
self.covar = soln.covar
# fvec = resids * invsigma = (data - mdata) * invsigma
self.resids = self.lm_soln.fvec.reshape(self.data.shape) / self.invsigma
self.mdata = self.data - self.resids
# lm_soln.fnorm can be unreliable ("max(fnorm, fnorm1)" branch)
self.chisq = (self.lm_soln.fvec**2).sum()
if soln.ndof > 0:
self.rchisq = self.chisq / soln.ndof
self.component.extract(soln.params, soln.perror, soln.covar)
return self
def make_frozen_func(self):
return self.component.mfunc
def mfunc(self, *args):
return self.component.mfunc(*args)
def debug_derivative(self, guess):
"""returns (explicit, auto)"""
from .lmmin import check_derivative
return check_derivative(self.component.npar, self.data.size,
self.lm_model, self.lm_deriv, guess)
# Now specific components useful in the above framework. The general strategy
# is to err on the side of having additional parameters in the individual
# classes, and the user can call setvalue() to fix them if they're not needed.
class AddConstantComponent(ModelComponent):
npar = 1
pnames = ('value', )
nmodelargs = 0
def model(self, pars, mdata):
mdata += pars[0]
def deriv(self, pars, jac):
jac[0] = 1.
def _outputshape(self):
        return ()
def extract(self, pars, perr, cov):
def _accum_mfunc(res):
res += pars[0]
self._accum_mfunc = _accum_mfunc
self.covar = cov
self.f_value = pars[0]
self.u_value = perr[0]
class AddValuesComponent(ModelComponent):
"""XXX terminology between this and AddConstant is mushy."""
nmodelargs = 0
def __init__(self, nvals, name=None):
super(AddValuesComponent, self).__init__(name)
self.npar = nvals
def _param_names(self):
for i in range(self.npar):
yield 'v%d' % i
def model(self, pars, mdata):
mdata += pars
def deriv(self, pars, jac):
jac[:,:] = np.eye(self.npar)
def _outputshape(self):
        return (self.npar,)
def extract(self, pars, perr, cov):
def _accum_mfunc(res):
res += pars
self._accum_mfunc = _accum_mfunc
self.covar = cov
self.f_vals = pars
self.u_vals = perr
class AddPolynomialComponent(ModelComponent):
nmodelargs = 1
def __init__(self, maxexponent, x, name=None):
super(AddPolynomialComponent, self).__init__(name)
self.npar = maxexponent + 1
self.x = np.array(x, dtype=np.float, ndmin=1, copy=False, subok=True)
def _param_names(self):
for i in range(self.npar):
yield 'c%d' % i
def model(self, pars, mdata):
mdata += npoly.polyval(self.x, pars)
def deriv(self, pars, jac):
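        # d(model)/d(c_i) = x**i, so fill the Jacobian rows with successive
        # powers of x (a Vandermonde matrix built incrementally).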
w = np.ones_like(self.x)
for i in range(self.npar):
jac[i] = w
w *= self.x
def _outputshape(self, x):
return x.shape
def extract(self, pars, perr, cov):
def _accum_mfunc(res, x):
res += npoly.polyval(x, pars)
self._accum_mfunc = _accum_mfunc
self.covar = cov
self.f_coeffs = pars
self.u_coeffs = perr
def _broadcast_shapes(s1, s2):
"""Given array shapes `s1` and `s2`, compute the shape of the array that would
result from broadcasting them together."""
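    # e.g. _broadcast_shapes((3, 1), (4,)) == (3, 4), matching numpy's rules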
n1 = len(s1)
n2 = len(s2)
n = max(n1, n2)
res = [1] * n
for i in range(n):
if i >= n1:
c1 = 1
else:
c1 = s1[n1-1-i]
if i >= n2:
c2 = 1
else:
c2 = s2[n2-1-i]
if c1 == 1:
rc = c2
elif c2 == 1 or c1 == c2:
rc = c1
else:
raise ValueError('array shapes %r and %r are not compatible' % (s1, s2))
res[n-1-i] = rc
return tuple(res)
class SeriesComponent(ModelComponent):
"""Apply a set of subcomponents in series, isolating each from the other. This
is only valid if every subcomponent except the first is additive --
otherwise, the Jacobian won't be right."""
def __init__(self, components=(), name=None):
super(SeriesComponent, self).__init__(name)
self.components = list(components)
def add(self, component):
"""This helps, but direct manipulation of self.components should be
supported."""
self.components.append(component)
return self
def _param_names(self):
for c in self.components:
pfx = c.name + '.' if c.name is not None else ''
for p in c._param_names():
yield pfx + p
def _offset_setguess(self, ofs, npar, vals, subofs=0):
vals = np.asarray(vals)
if subofs < 0 or subofs + vals.size > npar:
raise ValueError('subofs %d, vals.size %d, npar %d' %
(subofs, vals.size, npar))
return self.setguess(vals, ofs + subofs)
def _offset_setvalue(self, ofs, npar, cidx, value, fixed=False):
if cidx < 0 or cidx >= npar:
raise ValueError('cidx %d, npar %d' % (cidx, npar))
return self.setvalue(ofs + cidx, value, fixed)
def _offset_setlimit(self, ofs, npar, cidx, lower=-np.inf, upper=np.inf):
if cidx < 0 or cidx >= npar:
raise ValueError('cidx %d, npar %d' % (cidx, npar))
return self.setlimit(ofs + cidx, lower, upper)
def finalize_setup(self):
from functools import partial
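        # Bind each subcomponent's setters to its own slice of the parameter
        # vector so that subcomponents never need to know their global offset.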
ofs = 0
self.nmodelargs = 0
for i, c in enumerate(self.components):
if c.name is None:
c.name = 'c%d' % i
c.setguess = partial(self._offset_setguess, ofs, c.npar)
c.setvalue = partial(self._offset_setvalue, ofs, c.npar)
c.setlimit = partial(self._offset_setlimit, ofs, c.npar)
c.finalize_setup()
ofs += c.npar
self.nmodelargs += c.nmodelargs
self.npar = ofs
def prep_params(self):
for c in self.components:
c.prep_params()
def model(self, pars, mdata):
ofs = 0
for c in self.components:
p = pars[ofs:ofs+c.npar]
c.model(p, mdata)
ofs += c.npar
def deriv(self, pars, jac):
ofs = 0
for c in self.components:
p = pars[ofs:ofs+c.npar]
j = jac[ofs:ofs+c.npar]
c.deriv(p, j)
ofs += c.npar
def extract(self, pars, perr, cov):
ofs = 0
for c in self.components:
n = c.npar
spar = pars[ofs:ofs+n]
serr = perr[ofs:ofs+n]
scov = cov[ofs:ofs+n,ofs:ofs+n]
c.extract(spar, serr, scov)
ofs += n
def _outputshape(self, *args):
s = ()
ofs = 0
for c in self.components:
cargs = args[ofs:ofs+c.nmodelargs]
s = _broadcast_shapes(s, c._outputshape(*cargs))
ofs += c.nmodelargs
return s
def _accum_mfunc(self, res, *args):
ofs = 0
for c in self.components:
cargs = args[ofs:ofs+c.nmodelargs]
c._accum_mfunc(res, *cargs)
ofs += c.nmodelargs
class MatMultComponent(ModelComponent):
"""Given a component yielding k**2 data points and k additional components,
each yielding n data points. The result is [A]×[B], where A is the square
matrix formed from the first component's output, and B is the (k, n)
matrix of stacked output from the final k components.
Parameters are ordered in same way as the components named above.
"""
def __init__(self, k, name=None):
super(MatMultComponent, self).__init__(name)
self.k = k
self.acomponent = None
self.bcomponents = [None] * k
def _param_names(self):
pfx = self.acomponent.name + '.' if self.acomponent.name is not None else ''
for p in self.acomponent._param_names():
yield pfx + p
for c in self.bcomponents:
pfx = c.name + '.' if c.name is not None else ''
for p in c._param_names():
yield pfx + p
def _offset_setguess(self, ofs, npar, vals, subofs=0):
vals = np.asarray(vals)
if subofs < 0 or subofs + vals.size > npar:
raise ValueError('subofs %d, vals.size %d, npar %d' %
(subofs, vals.size, npar))
return self.setguess(vals, ofs + subofs)
def _offset_setvalue(self, ofs, npar, cidx, value, fixed=False):
if cidx < 0 or cidx >= npar:
raise ValueError('cidx %d, npar %d' % (cidx, npar))
return self.setvalue(ofs + cidx, value, fixed)
def _offset_setlimit(self, ofs, npar, cidx, lower=-np.inf, upper=np.inf):
if cidx < 0 or cidx >= npar:
raise ValueError('cidx %d, npar %d' % (cidx, npar))
return self.setlimit(ofs + cidx, lower, upper)
def finalize_setup(self):
from functools import partial
c = self.acomponent
if c.name is None:
c.name = 'a'
c.setguess = partial(self._offset_setguess, 0, c.npar)
c.setvalue = partial(self._offset_setvalue, 0, c.npar)
c.setlimit = partial(self._offset_setlimit, 0, c.npar)
c.finalize_setup()
ofs = c.npar
self.nmodelargs = c.nmodelargs
for i, c in enumerate(self.bcomponents):
if c.name is None:
c.name = 'b%d' % i
c.setguess = partial(self._offset_setguess, ofs, c.npar)
c.setvalue = partial(self._offset_setvalue, ofs, c.npar)
c.setlimit = partial(self._offset_setlimit, ofs, c.npar)
c.finalize_setup()
ofs += c.npar
self.nmodelargs += c.nmodelargs
self.npar = ofs
def prep_params(self):
self.acomponent.prep_params()
for c in self.bcomponents:
c.prep_params()
def _sep_model(self, pars, nd):
k = self.k
ma = np.zeros((k, k))
mb = np.zeros((k, nd))
c = self.acomponent
c.model(pars[:c.npar], ma.reshape(k**2))
pofs = c.npar
for i, c in enumerate(self.bcomponents):
p = pars[pofs:pofs+c.npar]
c.model(p, mb[i])
pofs += c.npar
return ma, mb
def model(self, pars, mdata):
k = self.k
nd = mdata.size // k
ma, mb = self._sep_model(pars, nd)
np.dot(ma, mb, mdata.reshape((k, nd)))
def deriv(self, pars, jac):
k = self.k
nd = jac.shape[1] // k
npar = self.npar
ma, mb = self._sep_model(pars, nd)
ja = np.zeros((npar, k, k))
jb = np.zeros((npar, k, nd))
c = self.acomponent
c.deriv(pars[:c.npar], ja[:c.npar].reshape((c.npar, k**2)))
pofs = c.npar
for i, c in enumerate(self.bcomponents):
p = pars[pofs:pofs+c.npar]
c.deriv(p, jb[pofs:pofs+c.npar,i,:])
pofs += c.npar
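        # Product rule: d(A.B)/dp = (dA/dp).B + A.(dB/dp) for each parameter.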
for i in range(self.npar):
jac[i] = (np.dot(ja[i], mb) + np.dot(ma, jb[i])).reshape(k * nd)
def extract(self, pars, perr, cov):
c = self.acomponent
c.extract(pars[:c.npar], perr[:c.npar], cov[:c.npar,:c.npar])
ofs = c.npar
for c in self.bcomponents:
n = c.npar
spar = pars[ofs:ofs+n]
serr = perr[ofs:ofs+n]
scov = cov[ofs:ofs+n,ofs:ofs+n]
c.extract(spar, serr, scov)
ofs += n
def _outputshape(self, *args):
aofs = self.acomponent.nmodelargs
sb = ()
for c in self.bcomponents:
a = args[aofs:aofs+c.nmodelargs]
sb = _broadcast_shapes(sb, c._outputshape(*a))
aofs += c.nmodelargs
return (self.k,) + sb
def _accum_mfunc(self, res, *args):
k = self.k
nd = res.shape[1]
ma = np.zeros((k, k))
mb = np.zeros((k, nd))
c = self.acomponent
c._accum_mfunc(ma.reshape(k**2), *(args[:c.nmodelargs]))
aofs = c.nmodelargs
for i, c in enumerate(self.bcomponents):
a = args[aofs:aofs+c.nmodelargs]
c._accum_mfunc(mb[i], *a)
aofs += c.nmodelargs
np.dot(ma, mb, res)
class ScaleComponent(ModelComponent):
npar = 1
def __init__(self, subcomp=None, name=None):
super(ScaleComponent, self).__init__(name)
self.setsubcomp(subcomp)
def setsubcomp(self, subcomp):
self.subcomp = subcomp
return self
def _param_names(self):
yield 'factor'
pfx = self.subcomp.name + '.' if self.subcomp.name is not None else ''
for p in self.subcomp._param_names():
yield pfx + p
def _sub_setguess(self, npar, cidx, vals, ofs=0):
vals = np.asarray(vals)
if ofs < 0 or ofs + vals.size > npar:
raise ValueError('ofs %d, vals.size %d, npar %d' %
(ofs, vals.size, npar))
return self.setguess(vals, ofs + 1)
def _sub_setvalue(self, npar, cidx, value, fixed=False):
if cidx < 0 or cidx >= npar:
raise ValueError('cidx %d, npar %d' % (cidx, npar))
return self.setvalue(1 + cidx, value, fixed)
def _sub_setlimit(self, npar, cidx, lower=-np.inf, upper=np.inf):
if cidx < 0 or cidx >= npar:
raise ValueError('cidx %d, npar %d' % (cidx, npar))
return self.setlimit(1 + cidx, lower, upper)
def finalize_setup(self):
if self.subcomp.name is None:
self.subcomp.name = 'c'
from functools import partial
self.subcomp.setvalue = partial(self._sub_setvalue, self.subcomp.npar)
        self.subcomp.setlimit = partial(self._sub_setlimit, self.subcomp.npar)
self.subcomp.finalize_setup()
self.npar = self.subcomp.npar + 1
self.nmodelargs = self.subcomp.nmodelargs
def prep_params(self):
self.subcomp.prep_params()
def model(self, pars, mdata):
self.subcomp.model(pars[1:], mdata)
mdata *= pars[0]
def deriv(self, pars, jac):
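        # By the product rule: d(model)/d(factor) is the unscaled sub-model,
        # and the sub-parameter derivatives are scaled by the factor pars[0].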
self.subcomp.model(pars[1:], jac[0])
self.subcomp.deriv(pars[1:], jac[1:])
jac[1:] *= pars[0]
def extract(self, pars, perr, cov):
self.f_factor = pars[0]
self.u_factor = perr[0]
self.c_factor = cov[0]
self.subcomp.extract(pars[1:], perr[1:], cov[1:,1:])
def _outputshape(self, *args):
return self.subcomp._outputshape(*args)
def _accum_mfunc(self, res, *args):
self.subcomp._accum_mfunc(res, *args)
| mit | -420,189,310,659,053,200 | 29.303082 | 91 | 0.581426 | false | 3.538692 | false | false | false |
codito/pomito | tests/test_dispatcher.py | 1 | 2529 | # -*- coding: utf-8 -*-
"""Tests for message dispatcher."""
import unittest
from unittest.mock import Mock
import blinker
from pomito import main
class MessageTests(unittest.TestCase):
def test_send_calls_signal_send_with_kwargs(self):
mock_signal = Mock(blinker.Signal)
msg = main.Message(mock_signal, arg1="arg1", arg2=1)
msg.send()
mock_signal.send.assert_called_once_with(arg1="arg1", arg2=1)
class MessageDispatcherTests(unittest.TestCase):
def setUp(self):
dummy_signal = blinker.signal('dummy_signal')
self.test_message = main.Message(dummy_signal, arg1="arg1", arg2=1)
self.dispatcher = main.MessageDispatcher()
self.mock_callback = Mock()
def tearDown(self):
if self.dispatcher.is_alive():
self.dispatcher.stop()
self.dispatcher.join()
def test_queue_message_throws_for_invalid_message(self):
self.assertRaises(TypeError, self.dispatcher.queue_message, None)
def test_queue_message_doesnt_queue_message_if_there_are_no_receivers(self):
self.dispatcher.queue_message(self.test_message)
assert self.dispatcher._message_queue.qsize() == 0
def test_queue_message_queues_message_if_there_are_receivers(self):
self.test_message.signal.connect(Mock(), weak=False)
self.dispatcher.queue_message(self.test_message)
assert self.dispatcher._message_queue.qsize() == 1
def test_start_should_start_the_dispatcher_thread(self):
self.dispatcher.start()
assert self.dispatcher.is_alive()
assert self.dispatcher._stop_event.is_set() is False
def test_start_should_throw_if_dispatcher_is_already_started(self):
self.dispatcher.start()
self.assertRaises(RuntimeError, self.dispatcher.start)
def test_started_dispatcher_should_process_messages_in_queue(self):
self.test_message.signal.connect(self.mock_callback, weak=False)
self.dispatcher.start()
self.dispatcher.queue_message(self.test_message)
self.dispatcher._message_queue.join()
self.mock_callback.assert_called_once_with(None, arg1="arg1", arg2=1)
def test_stopped_dispatcher_shouldnt_process_messages_in_queue(self):
self.test_message.signal.connect(self.mock_callback, weak=False)
self.dispatcher.start()
self.dispatcher.stop()
self.dispatcher.join()
self.dispatcher.queue_message(self.test_message)
assert self.mock_callback.called is False
| mit | -1,461,367,914,239,270,100 | 31.844156 | 80 | 0.685251 | false | 3.702782 | true | false | false |
Ircam-RnD/xmm | doc/doc-misc/python_example.py | 1 | 1686 | import numpy as np
import mhmm
# Load Training Data
training_motion_1 = np.genfromtxt('training_motion_1.txt')
training_motion_2 = np.genfromtxt('training_motion_2.txt')
training_sound_1 = np.genfromtxt('training_sound_1.txt')
training_sound_2 = np.genfromtxt('training_sound_2.txt')
dim_gesture = training_motion_1.shape[1]
dim_sound = training_sound_1.shape[1]
# Create a multimodal training set
training_set = mhmm.TrainingSet(mhmm.BIMODAL)
training_set.set_dimension(dim_gesture + dim_sound)
training_set.set_dimension_input(dim_gesture)
# Record First Phrase
for frame_motion, frame_sound in zip(training_motion_1, training_sound_1):
training_set.recordPhrase_input (0, frame_motion)
training_set.recordPhrase_output(0, frame_sound)
training_set.setPhraseLabel(0, mhmm.Label('one'))
# Record Second Phrase
for frame_motion, frame_sound in zip(training_motion_2, training_sound_2):
training_set.recordPhrase_input (1, frame_motion)
training_set.recordPhrase_output(1, frame_sound)
training_set.setPhraseLabel(1, mhmm.Label('two'))
# Instantiate and Train a Hierarchical Multimodal HMM
xmm = mhmm.HierarchicalHMM(mhmm.BIMODAL, training_set)
xmm.set_nbStates(10)
xmm.set_nbMixtureComponents(1)
xmm.set_varianceOffset(0.1, 0.01)
xmm.train()
# Perform joint recognition and Mapping
test_motion = np.genfromtxt('test_motion.txt')
predicted_sound = np.zeros((len(test_motion), dim_sound))
log_likelihoods = np.zeros((len(test_motion), xmm.size()))
xmm.performance_init()
for t, frame_motion in enumerate(test_motion):
    xmm.performance_update(frame_motion)
predicted_sound[t, :] = xmm.results_predicted_output
log_likelihoods[t, :] = xmm.results_log_likelihoods
| gpl-3.0 | 5,467,951,583,736,574,000 | 34.87234 | 74 | 0.758007 | false | 2.96831 | false | false | false |
google/ml-fairness-gym | agents/recommenders/utils.py | 1 | 8346 | # coding=utf-8
# Copyright 2020 The ML Fairness Gym Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utilities for RecSim agent.
Defines a few functions used by the RecSim RNNAgent.
"""
import itertools
import os
import tempfile
from absl import flags
import file_util
from agents.recommenders import model
import numpy as np
import tensorflow.compat.v1 as tf
FLAGS = flags.FLAGS
def accumulate_rewards(rewards, gamma):
"""Computes the discounted reward for the entire episode."""
reversed_rewards = rewards[::-1] # list reversal
acc = list(itertools.accumulate(reversed_rewards, lambda x, y: x*gamma + y))
return np.array(acc[::-1])
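# Illustrative check (not part of the original file): with gamma = 0.5,
# accumulate_rewards([1.0, 1.0, 1.0], 0.5) returns array([1.75, 1.5, 1.0]),
# since each entry equals reward_t + gamma * (accumulated reward at t + 1).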
def format_data(data_history, gamma, constant_baseline=0.0):
"""The function formats the data into input, output format for keras."""
inp_rec_seq, inp_reward_seq, output_recs, reward_weights = [], [], [], []
for curr_recs, curr_rewards in zip(data_history['recommendation_seqs'],
data_history['reward_seqs']):
inp_rec_seq.append(curr_recs[:-1])
inp_reward_seq.append(curr_rewards[:-1])
output_recs.append(np.expand_dims(curr_recs[1:], axis=-1))
output_rewards = accumulate_rewards(curr_rewards[1:] - constant_baseline,
gamma)
reward_weights.append(output_rewards)
return {'input': [np.array(inp_rec_seq), np.array(inp_reward_seq)],
'output': np.array(output_recs),
'sample_weights_temporal': np.array(reward_weights)}
def format_data_safe_rl(data_history, gamma, constant_baseline=0.0):
"""The function formats the data into input, output format for keras.
This function is specific to the implementation of CVaR safety constraint.
See https://braintex.goog/read/zyprpgsjbtww for more details.
Args:
data_history: dict with recommendation_seqs, reward_seqs, safety_costs
fields.
gamma: Gamma for reward accumulation over the time horizon.
constant_baseline: Baseline to subtract from each reward to reduce variance.
Returns:
A dictionary with input, output and sample weights_temporal fields
that are input into a keras model.
"""
inp_rec_seq, inp_reward_seq, output_recs, reward_weights = [], [], [], []
trajectories_cost = []
for curr_recs, curr_rewards, curr_safety_costs in zip(
data_history['recommendation_seqs'],
data_history['reward_seqs'],
data_history['safety_costs']):
inp_rec_seq.append(np.array(curr_recs[:-1]))
inp_reward_seq.append(np.array(curr_rewards[:-1]))
output_recs.append(np.expand_dims(np.array(curr_recs[1:]), axis=-1))
output_rewards = accumulate_rewards(curr_rewards[1:] - constant_baseline,
gamma)
reward_weights.append(output_rewards)
cost_trajectory = np.mean(curr_safety_costs)
trajectories_cost.append(cost_trajectory)
return {
'input': [np.array(inp_rec_seq),
np.array(inp_reward_seq)],
'output': np.array(output_recs),
'reward_weights': np.array(reward_weights),
'trajectory_costs': np.array(trajectories_cost)
}
def format_data_movielens(data_history, gamma, constant_baseline=0.0,
mask_already_recommended=False, user_id_input=True,
**kwargs):
"""Format data for movielens RNN agent update step."""
inp_rec_seq, inp_reward_seq, output_recs, reward_weights = [], [], [], []
user_id_seq = []
trajectories_cost = []
  # TODO(): Change argument to repeat_movies to be consistent.
  # The masks are built unconditionally below; they are only appended to the
  # model inputs when mask_already_recommended is True.
  masks_for_softmax = []
for user_id, curr_recs, curr_rewards, curr_safety_costs in zip(
data_history['user_id'],
data_history['recommendation_seqs'],
data_history['reward_seqs'],
data_history['safety_costs']):
inp_rec_seq.append(np.array(curr_recs[:-1]))
inp_reward_seq.append(np.array(curr_rewards[:-1]))
output_recs.append(np.expand_dims(np.array(curr_recs[1:]), axis=-1))
output_rewards = accumulate_rewards(curr_rewards[1:] - constant_baseline,
gamma)
user_id_seq.append(np.array([user_id] * len(curr_recs[:-1])))
reward_weights.append(output_rewards)
cost_trajectory = np.mean(curr_safety_costs)
trajectories_cost.append(cost_trajectory)
masks_for_softmax.append(get_mask_for_softmax(curr_recs[1:-1],
kwargs['action_space_size']))
input_list = [np.array(inp_rec_seq),
np.array(inp_reward_seq)]
if user_id_input:
input_list.append(np.array(user_id_seq))
if mask_already_recommended:
input_list.append(np.array(masks_for_softmax))
return {
'input': input_list,
'output': np.array(output_recs),
'reward_weights': np.array(reward_weights),
'trajectory_costs': np.array(trajectories_cost)
}
def format_data_batch_movielens(data_history,
gamma,
constant_baseline=0.0,
mask_already_recommended=False,
user_id_input=True,
**kwargs):
"""Format data for movielens RNN agent update step."""
inp_rec_seq, inp_reward_seq, output_recs, reward_weights = [], [], [], []
user_id_seq = []
trajectories_cost = []
  # TODO(): Change argument to repeat_movies to be consistent.
  # The masks are built unconditionally below; they are only appended to the
  # model inputs when mask_already_recommended is True.
  masks_for_softmax = []
for user_id, curr_recs, curr_rewards, curr_safety_costs in zip(
data_history['users'], data_history['recommendations'],
data_history['rewards'], data_history['safety_costs']):
inp_rec_seq.append(np.array(curr_recs[:-1]))
inp_reward_seq.append(np.array(curr_rewards[:-1]))
output_recs.append(np.expand_dims(np.array(curr_recs[1:]), axis=-1))
output_rewards = accumulate_rewards(curr_rewards[1:] - constant_baseline,
gamma)
user_id_seq.append(user_id[:-1])
reward_weights.append(output_rewards)
cost_trajectory = np.mean(curr_safety_costs)
trajectories_cost.append(cost_trajectory)
masks_for_softmax.append(
get_mask_for_softmax(curr_recs[1:-1], kwargs['action_space_size']))
input_list = [
np.array(inp_rec_seq),
np.array(inp_reward_seq),
]
if user_id_input:
input_list.append(np.array(user_id_seq))
if mask_already_recommended:
input_list.append(np.array(masks_for_softmax))
return {
'input': input_list,
'output': np.array(output_recs),
'reward_weights': np.array(reward_weights),
'trajectory_costs': np.array(trajectories_cost)
}
def get_mask_for_softmax(current_recommendations, action_space_size):
mask = np.ones((len(current_recommendations) + 1, action_space_size),
dtype=np.int)
for i in range(len(current_recommendations)):
mask[i+1, current_recommendations[:i+1]] = 0
# TODO(): Add a test to test whether the mask works as expected.
return mask
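# Illustrative example (not part of the original file): with
# current_recommendations=[3, 1] and action_space_size=5, row 0 of the mask is
# all ones, row 1 zeroes out item 3, and row 2 zeroes out items 3 and 1, so
# previously recommended items are masked out of the softmax at later steps.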
def load_model(filepath,
optimizer_name,
learning_rate=None,
momentum=None,
gradient_clip_value=None,
gradient_clip_norm=None):
"""Loads RNNAgent model from the path."""
tmp_model_file_path = os.path.join(tempfile.gettempdir(), 'tmp_model.h5')
file_util.copy(filepath, tmp_model_file_path, overwrite=True)
loaded_model = tf.keras.models.load_model(tmp_model_file_path)
file_util.remove(tmp_model_file_path)
optimizer = model.construct_optimizer(optimizer_name, learning_rate, momentum,
gradient_clip_value, gradient_clip_norm)
loaded_model.compile(
loss='sparse_categorical_crossentropy',
optimizer=optimizer,
sample_weight_mode='temporal')
return loaded_model
| apache-2.0 | -1,753,165,117,362,242,800 | 38.933014 | 80 | 0.649653 | false | 3.557545 | false | false | false |
qszhuan/raspberry-pi | logger2.py | 1 | 1054 | import logging
import logging.handlers
from logging.config import dictConfig
logger = logging.getLogger(__name__)
DEFAULT_LOGGING = {
'version': 1,
'disable_existing_loggers': False,
}
def configure_logging(logfile_path):
dictConfig(DEFAULT_LOGGING)
default_formatter = logging.Formatter(
"[%(asctime)s] [%(levelname)s] [%(name)s] [%(funcName)s():%(lineno)s] [PID:%(process)d TID:%(thread)d] %(message)s",
"%d/%m/%Y %H:%M:%S")
file_handler = logging.handlers.RotatingFileHandler(logfile_path, maxBytes=10485760, backupCount=300,
encoding='utf-8')
file_handler.setLevel(logging.INFO)
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(default_formatter)
console_handler.setFormatter(default_formatter)
logging.root.setLevel(logging.DEBUG)
logging.root.addHandler(file_handler)
logging.root.addHandler(console_handler)
configure_logging('/tmp/celery.log')
| mit | 3,055,693,595,506,481,700 | 29.114286 | 128 | 0.66888 | false | 3.805054 | false | false | false |
GoodiesHQ/PyPad | pypad/iso_10126.py | 1 | 1166 | """
Implementation of the ISO 10126 algorithm.
"""
import struct
from .utils import random_bytes
from .exceptions import InvalidBlockSize, InvalidMessage
__all__ = ["pad", "unpad", "MAX_BLOCK_SIZE"]
MAX_BLOCK_SIZE = 0x100
def pad(buf, block_size=MAX_BLOCK_SIZE):
"""Padded with random bytes followed by the number of bytes padded."""
if not isinstance(buf, bytes):
raise TypeError("Buffer must be in bytes")
if block_size > MAX_BLOCK_SIZE:
raise InvalidBlockSize("Maximum block size for ISO 10126 is {}".format(MAX_BLOCK_SIZE))
pad_size = block_size - (len(buf) % block_size)
return buf + random_bytes(pad_size - 1) + struct.pack("B", pad_size & 0xff)
def unpad(buf):
"""Extract the last byte and truncate the padded bytes"""
if not isinstance(buf, bytes):
raise TypeError("Buffer must be in bytes")
bufsize = len(buf)
if bufsize == 0:
raise InvalidMessage("The buffer cannot be empty")
pad_size = ord(buf[-1:])
pad_size = pad_size or MAX_BLOCK_SIZE
if bufsize < pad_size:
raise InvalidMessage("The buffer does not match the pad length.")
return buf[:-pad_size]
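# Minimal usage sketch (hypothetical values, not part of the original module):
#   padded = pad(b"hello", block_size=16)  # 10 random bytes + b"\x0b" appended
#   assert unpad(padded) == b"hello"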
| mit | -5,746,526,448,586,905,000 | 28.15 | 95 | 0.664666 | false | 3.701587 | false | false | false |
Catch-up-TV-and-More/plugin.video.catchuptvandmore | resources/lib/favourites.py | 1 | 9702 | # -*- coding: utf-8 -*-
# Copyright: (c) 2016, SylvainCecchetto
# GNU General Public License v2.0+ (see LICENSE.txt or https://www.gnu.org/licenses/gpl-2.0.txt)
# This file is part of Catch-up TV & More
from __future__ import unicode_literals
from builtins import range, str
from hashlib import md5
import json
import os
from codequick import Script, utils
from kodi_six import xbmc, xbmcgui, xbmcvfs
from resources.lib.addon_utils import get_item_label, get_item_media_path
from resources.lib.kodi_utils import get_selected_item_art, get_selected_item_label, get_selected_item_params, get_selected_item_stream, get_selected_item_info
import resources.lib.mem_storage as mem_storage
FAV_JSON_FP = os.path.join(Script.get_info('profile'), "favourites.json")
FAV_FORMAT_VERSION = 1
def migrate_fav_format(current_fav_format, fav_dict):
"""Migrate favourites dict in last format version
Args:
current_fav_format (int): Current format version of the favourites json file
fav_dict (dict): Favourites dict in old format
Returns:
dict: Updated favourites dict in latest format version
"""
Script.log('Migrate favourites dict in last format version')
new_dict = fav_dict
if current_fav_format == 0:
items = fav_dict
new_dict = {
'items': items,
'format_version': 1
}
current_fav_format = 1
return new_dict
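# Illustrative example (not from the original source): a version-0 file stored
# the items dict directly, so migrate_fav_format(0, {'<hash>': {...}}) returns
# {'items': {'<hash>': {...}}, 'format_version': 1}.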
def get_fav_dict_from_json():
"""Get favourites dict from favourites.json
Returns:
dict: Favourites dict
"""
def get_fresh_dict():
return {
'items': {},
'format_version': FAV_FORMAT_VERSION
}
if not xbmcvfs.exists(FAV_JSON_FP):
return get_fresh_dict()
try:
with open(FAV_JSON_FP) as f:
fav_dict = json.load(f)
current_fav_format = fav_dict.get('format_version', 0)
if current_fav_format < FAV_FORMAT_VERSION:
fav_dict = migrate_fav_format(current_fav_format, fav_dict)
return fav_dict
except Exception:
Script.log('Failed to load favourites json data')
xbmcvfs.delete(FAV_JSON_FP)
return get_fresh_dict()
def save_fav_dict_in_json(fav_dict):
"""Dump favourites dict in favourites.json
Args:
fav_dict (dict): Favourites dict to save
"""
with open(FAV_JSON_FP, 'w') as f:
json.dump(fav_dict, f, indent=4)
def guess_fav_prefix(item_id):
"""Keep in memory the current main category (e.g. Live TV, Catch-up TV, ...)
    This category label will be used as a prefix when the user adds a favourite
"""
prefixes = {
'root': '',
'live_tv': Script.localize(30030),
'replay': Script.localize(30031),
'websites': Script.localize(30032)
}
if item_id in prefixes:
s = mem_storage.MemStorage('fav')
s['prefix'] = prefixes[item_id]
@Script.register
def add_item_to_favourites(plugin, is_playable=False, item_infos={}):
"""Callback function of the 'Add to add-on favourites' item context menu
Args:
plugin (codequick.script.Script)
is_playable (bool): If 'item' is playable
item_infos (dict)
"""
# Need to use same keywords as
# https://scriptmodulecodequick.readthedocs.io/en/latest/_modules/codequick/listing.html#Listitem.from_dict
# in order to be able to directly use `Listitem.from_dict` later
item_dict = {}
# --> callback (string)
item_dict['callback'] = xbmc.getInfoLabel('ListItem.Path').replace(
'plugin://plugin.video.catchuptvandmore', '')
# --> label (string)
item_dict['label'] = get_selected_item_label()
# --> art (dict)
item_dict['art'] = get_selected_item_art()
# --> info (dict)
item_dict['info'] = get_selected_item_info()
# --> stream (dict)
item_dict['stream'] = get_selected_item_stream()
# --> context (list) (TODO)
item_dict['context'] = []
# --> properties (dict) (TODO)
item_dict['properties'] = {}
# --> params (dict)
item_dict['params'] = get_selected_item_params()
# --> subtitles (list) (TODO)
item_dict['subtitles'] = []
if item_infos:
# This item comes from tv_guide_menu
# We need to remove guide TV related
# elements
item_id = item_dict['params']['item_id']
item_dict['label'] = get_item_label(item_id, item_infos)
item_dict['art']["thumb"] = ''
if 'thumb' in item_infos:
item_dict['art']["thumb"] = get_item_media_path(
item_infos['thumb'])
item_dict['art']["fanart"] = ''
if 'fanart' in item_infos:
item_dict['art']["fanart"] = get_item_media_path(
item_infos['fanart'])
item_dict['info']['plot'] = ''
s = mem_storage.MemStorage('fav')
try:
prefix = s['prefix']
except KeyError:
prefix = ''
label_proposal = item_dict['label']
if prefix != '':
label_proposal = prefix + ' - ' + label_proposal
# Ask the user to edit the label
label = utils.keyboard(
plugin.localize(30801), label_proposal)
    # If the user aborted, do not add this item to the favourites
if label == '':
return False
item_dict['label'] = label
item_dict['params']['_title_'] = label
item_dict['info']['title'] = label
item_dict['params']['is_playable'] = is_playable
item_dict['params']['is_folder'] = not is_playable
# Compute fav hash
item_hash = md5(str(item_dict).encode('utf-8')).hexdigest()
# Add this item to favourites json file
fav_dict = get_fav_dict_from_json()
item_dict['params']['order'] = len(fav_dict)
fav_dict['items'][item_hash] = item_dict
# Save json file with new fav_dict
save_fav_dict_in_json(fav_dict)
Script.notify(Script.localize(30033), Script.localize(30805), display_time=7000)
@Script.register
def rename_favourite_item(plugin, item_hash):
"""Callback function of the 'Rename' favourite item context menu
Args:
plugin (codequick.script.Script)
item_hash (str): Item hash of the favourite item to rename
"""
item_label = utils.keyboard(plugin.localize(30801),
xbmc.getInfoLabel('ListItem.Label'))
    # If the user aborted, do not edit this item
if item_label == '':
return False
fav_dict = get_fav_dict_from_json()
fav_dict['items'][item_hash]['label'] = item_label
fav_dict['items'][item_hash]['params']['_title_'] = item_label
fav_dict['items'][item_hash]['info']['title'] = item_label
save_fav_dict_in_json(fav_dict)
xbmc.executebuiltin('Container.Refresh()')
@Script.register
def remove_favourite_item(plugin, item_hash):
"""Callback function of the 'Remove' favourite item context menu
Args:
plugin (codequick.script.Script)
item_hash (str): Item hash of the favourite item to remove
"""
fav_dict = get_fav_dict_from_json()
del fav_dict['items'][item_hash]
# We need to fix the order param
# in order to not break the move up/down action
menu = []
for item_hash, item_dict in list(fav_dict['items'].items()):
item = (item_dict['params']['order'], item_hash)
menu.append(item)
menu = sorted(menu, key=lambda x: x[0])
for k in range(0, len(menu)):
item = menu[k]
item_hash = item[1]
fav_dict['items'][item_hash]['params']['order'] = k
save_fav_dict_in_json(fav_dict)
xbmc.executebuiltin('Container.Refresh()')
@Script.register
def move_favourite_item(plugin, direction, item_hash):
"""Callback function of the 'Move Up/Down' favourite item context menu
Args:
plugin (codequick.script.Script)
direction (str): 'down' or 'up'
item_hash (str): Item hash of the favourite item to move
"""
if direction == 'down':
offset = 1
elif direction == 'up':
offset = -1
fav_dict = get_fav_dict_from_json()
item_to_move_id = item_hash
item_to_move_order = fav_dict['items'][item_hash]['params']['order']
menu = []
for item_hash, item_dict in list(fav_dict['items'].items()):
item = (item_dict['params']['order'], item_hash, item_dict)
menu.append(item)
menu = sorted(menu, key=lambda x: x[0])
for k in range(0, len(menu)):
item = menu[k]
item_hash = item[1]
if item_to_move_id == item_hash:
item_to_swap = menu[k + offset]
item_to_swap_order = item_to_swap[0]
item_to_swap_id = item_to_swap[1]
fav_dict['items'][item_to_move_id]['params']['order'] = item_to_swap_order
fav_dict['items'][item_to_swap_id]['params']['order'] = item_to_move_order
save_fav_dict_in_json(fav_dict)
xbmc.executebuiltin('Container.Refresh()')
break
return False
def ask_to_delete_error_fav_item(item_hash):
"""Callback function if a favourite item trigger an error
Suggest user to delete
the fav item that trigger an error
Args:
item_hash (str): Item hash that trigger an error
"""
r = xbmcgui.Dialog().yesno(Script.localize(30600),
Script.localize(30807))
if r:
remove_favourite_item(plugin=None, item_hash=item_hash)
@Script.register
def delete_favourites(plugin):
"""Callback function of 'Delete favourites' setting button
Args:
plugin (codequick.script.Script)
"""
Script.log('Delete favourites db')
xbmcvfs.delete(os.path.join(Script.get_info('profile'), 'favourites.json'))
Script.notify(Script.localize(30374), '')
| gpl-2.0 | -8,269,652,303,382,676,000 | 29.037152 | 159 | 0.612039 | false | 3.430693 | false | false | false |
DolphinDream/sverchok | nodes/quaternion/quaternion_out_mk2.py | 1 | 6879 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
from bpy.props import BoolProperty, FloatVectorProperty, EnumProperty
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode
from sverchok.utils.sv_transform_helper import AngleUnits, SvAngleHelper
from mathutils import Quaternion
from math import pi
mode_items = [
("WXYZ", "WXYZ", "Convert quaternion into components", 0),
("SCALARVECTOR", "Scalar Vector", "Convert quaternion into Scalar & Vector", 1),
("EULER", "Euler Angles", "Convert quaternion into Euler angles", 2),
("AXISANGLE", "Axis Angle", "Convert quaternion into Axis & Angle", 3),
("MATRIX", "Matrix", "Convert quaternion into Rotation Matrix", 4),
]
output_sockets = {
"WXYZ": ["W", "X", "Y", "Z"],
"SCALARVECTOR": ["Scalar", "Vector"],
"EULER": ["Angle X", "Angle Y", "Angle Z"],
"AXISANGLE": ["Angle", "Axis"],
"MATRIX": ["Matrix"]
}
class SvQuaternionOutNodeMK2(bpy.types.Node, SverchCustomTreeNode, SvAngleHelper):
"""
Triggers: Quaternions, Out
Tooltip: Convert quaternions into various quaternion components
"""
bl_idname = 'SvQuaternionOutNodeMK2'
bl_label = 'Quaternion Out'
sv_icon = 'SV_QUATERNION_OUT'
def update_sockets(self):
# hide all output sockets
for k, names in output_sockets.items():
for name in names:
self.outputs[name].hide_safe = True
# show mode specific output sockets
for name in output_sockets[self.mode]:
self.outputs[name].hide_safe = False
def update_mode(self, context):
self.update_sockets()
updateNode(self, context)
mode : EnumProperty(
name='Mode', description='The output component format of the quaternion',
items=mode_items, default="WXYZ", update=update_mode)
quaternion : FloatVectorProperty(
name="Quaternion", description="Quaternion to convert",
size=4, subtype="QUATERNION", default=(0.0, 0.0, 0.0, 0.0),
update=updateNode)
normalize : BoolProperty(
name='Normalize', description='Normalize the input quaternion',
default=False, update=updateNode)
def migrate_from(self, old_node):
self.angle_units = old_node.angle_units
def migrate_props_pre_relink(self, old_node):
self.update_sockets()
def sv_init(self, context):
self.inputs.new('SvQuaternionSocket', "Quaternions").prop_name = "quaternion"
# component outputs
self.outputs.new('SvStringsSocket', "W")
self.outputs.new('SvStringsSocket', "X")
self.outputs.new('SvStringsSocket', "Y")
self.outputs.new('SvStringsSocket', "Z")
# scalar-vector output
self.outputs.new('SvStringsSocket', "Scalar")
self.outputs.new('SvVerticesSocket', "Vector")
        # euler angle outputs
self.outputs.new('SvStringsSocket', "Angle X")
self.outputs.new('SvStringsSocket', "Angle Y")
self.outputs.new('SvStringsSocket', "Angle Z")
# axis-angle output
self.outputs.new('SvVerticesSocket', "Axis")
self.outputs.new('SvStringsSocket', "Angle")
        # matrix output
self.outputs.new('SvMatrixSocket', "Matrix")
self.update_mode(context)
def draw_buttons(self, context, layout):
layout.prop(self, "mode", expand=False, text="")
if self.mode == "EULER":
self.draw_angle_euler_buttons(context, layout)
if self.mode in {"WXYZ", "SCALARVECTOR"}:
layout.prop(self, "normalize", toggle=True)
def draw_buttons_ext(self, context, layout):
if self.mode in {"EULER", "AXISANGLE"}:
self.draw_angle_units_buttons(context, layout)
def process(self):
outputs = self.outputs
if not any(s.is_linked for s in outputs):
return
input_Q = self.inputs['Quaternions'].sv_get()
quaternion_list = [Quaternion(q) for q in input_Q]
if self.mode == "WXYZ":
if self.normalize:
quaternion_list = [q.normalized() for q in quaternion_list]
for i, name in enumerate("WXYZ"):
if outputs[name].is_linked:
outputs[name].sv_set([[q[i] for q in quaternion_list]])
elif self.mode == "SCALARVECTOR":
if self.normalize:
quaternion_list = [q.normalized() for q in quaternion_list]
if outputs['Scalar'].is_linked:
scalar_list = [q[0] for q in quaternion_list]
outputs['Scalar'].sv_set([scalar_list])
if outputs['Vector'].is_linked:
vector_list = [tuple(q[1:]) for q in quaternion_list]
outputs['Vector'].sv_set([vector_list])
elif self.mode == "EULER":
# conversion factor from radians to the current angle units
au = self.angle_conversion_factor(AngleUnits.RADIANS, self.angle_units)
for i, name in enumerate("XYZ"):
if outputs["Angle " + name].is_linked:
angles = [q.to_euler(self.euler_order)[i] * au for q in quaternion_list]
outputs["Angle " + name].sv_set([angles])
elif self.mode == "AXISANGLE":
if outputs['Axis'].is_linked:
axis_list = [tuple(q.axis) for q in quaternion_list]
outputs['Axis'].sv_set([axis_list])
if outputs['Angle'].is_linked:
# conversion factor from radians to the current angle units
au = self.angle_conversion_factor(AngleUnits.RADIANS, self.angle_units)
angle_list = [q.angle * au for q in quaternion_list]
outputs['Angle'].sv_set([angle_list])
elif self.mode == "MATRIX":
if outputs['Matrix'].is_linked:
matrix_list = [q.to_matrix().to_4x4() for q in quaternion_list]
outputs['Matrix'].sv_set(matrix_list)
def register():
bpy.utils.register_class(SvQuaternionOutNodeMK2)
def unregister():
bpy.utils.unregister_class(SvQuaternionOutNodeMK2)
| gpl-3.0 | -1,892,818,721,566,083,000 | 36.796703 | 92 | 0.624655 | false | 3.74062 | false | false | false |
MediaKraken/MediaKraken_Deployment | source/database/db_base_metadata_tvmaze.py | 1 | 2561 | """
Copyright (C) 2015 Quinn D Granfor <[email protected]>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
version 2, as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License version 2 for more details.
You should have received a copy of the GNU General Public License
version 2 along with this program; if not, write to the Free
Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.
"""
import uuid
def db_meta_tvmaze_changed_uuid(self, maze_uuid):
"""
# metadata changed date by uuid
"""
self.db_cursor.execute('SELECT mm_metadata_tvshow_json->>\'updated\''
' from mm_metadata_tvshow'
' where mm_metadata_media_tvshow_id->\'tvmaze\' ? %s',
(maze_uuid,))
try:
return self.db_cursor.fetchone()['mm_metadata_tvshow_json']
except:
return None
def db_meta_tvmaze_insert(self, series_id_json, tvmaze_name, show_detail,
image_json):
"""
Insert tv series into db
"""
new_uuid = uuid.uuid4()
self.db_cursor.execute('insert into mm_metadata_tvshow (mm_metadata_tvshow_guid,'
' mm_metadata_media_tvshow_id,'
' mm_metadata_tvshow_name,'
' mm_metadata_tvshow_json,'
' mm_metadata_tvshow_localimage_json)'
' values (%s,%s,%s,%s,%s)',
(new_uuid, series_id_json, tvmaze_name, show_detail, image_json))
self.db_commit()
return new_uuid
def db_meta_tvmaze_update(self, series_id_json, tvmaze_name, show_detail,
tvmaze_id):
"""
Update tv series in db
"""
self.db_cursor.execute('update mm_metadata_tvshow'
' set mm_metadata_media_tvshow_id = %s,'
'mm_metadata_tvshow_name = %s,'
' mm_metadata_tvshow_json = %s '
'where mm_metadata_media_tvshow_id->\'tvmaze\'::text = %s',
(series_id_json, tvmaze_name, show_detail, str(tvmaze_id)))
self.db_commit()
| gpl-3.0 | 5,325,643,444,591,288,000 | 38.015625 | 92 | 0.56189 | false | 3.856928 | false | false | false |
Featuretools/featuretools | featuretools/primitives/base/aggregation_primitive_base.py | 1 | 5863 | import copy
import functools
import inspect
from featuretools.primitives.base.primitive_base import PrimitiveBase
from featuretools.primitives.base.utils import inspect_function_args
class AggregationPrimitive(PrimitiveBase):
stack_on = None # whitelist of primitives that can be in input_types
stack_on_exclude = None # blacklist of primitives that can be insigniture
base_of = None # whitelist of primitives this prim can be input for
base_of_exclude = None # primitives this primitive can't be input for
stack_on_self = True # whether or not it can be in input_types of self
def generate_name(self, base_feature_names, relationship_path_name,
parent_entity_id, where_str, use_prev_str):
base_features_str = ", ".join(base_feature_names)
return u"%s(%s.%s%s%s%s)" % (
self.name.upper(),
relationship_path_name,
base_features_str,
where_str,
use_prev_str,
self.get_args_string(),
)
def make_agg_primitive(function, input_types, return_type, name=None,
stack_on_self=True, stack_on=None,
stack_on_exclude=None, base_of=None,
base_of_exclude=None, description=None,
cls_attributes=None, uses_calc_time=False,
default_value=None, commutative=False,
number_output_features=1):
'''Returns a new aggregation primitive class. The primitive infers default
values by passing in empty data.
Args:
function (function): Function that takes in a series and applies some
transformation to it.
input_types (list[Variable]): Variable types of the inputs.
return_type (Variable): Variable type of return.
name (str): Name of the function. If no name is provided, the name
of `function` will be used.
stack_on_self (bool): Whether this primitive can be in input_types of self.
stack_on (list[PrimitiveBase]): Whitelist of primitives that
can be input_types.
stack_on_exclude (list[PrimitiveBase]): Blacklist of
primitives that cannot be input_types.
        base_of (list[PrimitiveBase]): Whitelist of primitives that
can have this primitive in input_types.
base_of_exclude (list[PrimitiveBase]): Blacklist of
primitives that cannot have this primitive in input_types.
description (str): Description of primitive.
cls_attributes (dict[str -> anytype]): Custom attributes to be added to
class. Key is attribute name, value is the attribute value.
uses_calc_time (bool): If True, the cutoff time the feature is being
calculated at will be passed to the function as the keyword
argument 'time'.
default_value (Variable): Default value when creating the primitive to
            avoid the inference step. If no default value is provided, the
            inference step will happen.
commutative (bool): If True, will only make one feature per unique set
of base features.
number_output_features (int): The number of output features (columns in
the matrix) associated with this feature.
Example:
.. ipython :: python
from featuretools.primitives import make_agg_primitive
from featuretools.variable_types import DatetimeTimeIndex, Numeric
def time_since_last(values, time=None):
time_since = time - values.iloc[-1]
return time_since.total_seconds()
TimeSinceLast = make_agg_primitive(
function=time_since_last,
input_types=[DatetimeTimeIndex],
return_type=Numeric,
description="Time since last related instance",
uses_calc_time=True)
'''
if description is None:
default_description = 'A custom primitive'
doc = inspect.getdoc(function)
description = doc if doc is not None else default_description
cls = {"__doc__": description}
if cls_attributes is not None:
cls.update(cls_attributes)
name = name or function.__name__
new_class = type(name, (AggregationPrimitive,), cls)
new_class.name = name
new_class.input_types = input_types
new_class.return_type = return_type
new_class.stack_on = stack_on
new_class.stack_on_exclude = stack_on_exclude
new_class.stack_on_self = stack_on_self
new_class.base_of = base_of
new_class.base_of_exclude = base_of_exclude
new_class.commutative = commutative
new_class.number_output_features = number_output_features
new_class, default_kwargs = inspect_function_args(new_class,
function,
uses_calc_time)
if len(default_kwargs) > 0:
new_class.default_kwargs = default_kwargs
def new_class_init(self, **kwargs):
self.kwargs = copy.deepcopy(self.default_kwargs)
self.kwargs.update(kwargs)
self.partial = functools.partial(function, **self.kwargs)
self.partial.__name__ = name
new_class.__init__ = new_class_init
new_class.get_function = lambda self: self.partial
else:
# creates a lambda function that returns function every time
new_class.get_function = lambda self, f=function: f
if default_value is None:
# infers default_value by passing empty data
try:
new_class.default_value = function(*[[]] * len(input_types))
except Exception:
pass
else:
# avoiding the inference step
new_class.default_value = default_value
return new_class
| bsd-3-clause | -8,215,395,603,012,441,000 | 38.348993 | 83 | 0.622889 | false | 4.342963 | false | false | false |
trhongbinwang/data_science_journey | deep_learning/pytorch/tutorials/09 - Image Captioning/data.py | 1 | 3501 | import torch
import torchvision.transforms as transforms
import torch.utils.data as data
import os
import pickle
import numpy as np
import nltk
from PIL import Image
from vocab import Vocabulary
from pycocotools.coco import COCO
class CocoDataset(data.Dataset):
"""COCO Custom Dataset compatible with torch.utils.data.DataLoader."""
def __init__(self, root, json, vocab, transform=None):
"""Set the path for images, captions and vocabulary wrapper.
Args:
root: image directory.
json: coco annotation file path.
vocab: vocabulary wrapper.
transform: image transformer
"""
self.root = root
self.coco = COCO(json)
self.ids = list(self.coco.anns.keys())
self.vocab = vocab
self.transform = transform
def __getitem__(self, index):
"""Returns one data pair (image and caption)."""
coco = self.coco
vocab = self.vocab
ann_id = self.ids[index]
caption = coco.anns[ann_id]['caption']
img_id = coco.anns[ann_id]['image_id']
path = coco.loadImgs(img_id)[0]['file_name']
image = Image.open(os.path.join(self.root, path)).convert('RGB')
if self.transform is not None:
image = self.transform(image)
# Convert caption (string) to word ids.
tokens = nltk.tokenize.word_tokenize(str(caption).lower())
caption = []
caption.append(vocab('<start>'))
caption.extend([vocab(token) for token in tokens])
caption.append(vocab('<end>'))
target = torch.Tensor(caption)
return image, target
def __len__(self):
return len(self.ids)
def collate_fn(data):
"""Creates mini-batch tensors from the list of tuples (image, caption).
Args:
data: list of tuple (image, caption).
- image: torch tensor of shape (3, 256, 256).
- caption: torch tensor of shape (?); variable length.
Returns:
images: torch tensor of shape (batch_size, 3, 256, 256).
targets: torch tensor of shape (batch_size, padded_length).
lengths: list; valid length for each padded caption.
"""
# Sort a data list by caption length
data.sort(key=lambda x: len(x[1]), reverse=True)
images, captions = zip(*data)
# Merge images (from tuple of 3D tensor to 4D tensor)
images = torch.stack(images, 0)
# Merge captions (from tuple of 1D tensor to 2D tensor)
lengths = [len(cap) for cap in captions]
targets = torch.zeros(len(captions), max(lengths)).long()
for i, cap in enumerate(captions):
end = lengths[i]
targets[i, :end] = cap[:end]
return images, targets, lengths
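# Illustrative shapes (not part of the original file): for a batch of two
# samples with caption lengths 5 and 3, `targets` is a (2, 5) LongTensor whose
# second row keeps its 3 caption ids and is zero-padded in the last two slots,
# and `lengths` == [5, 3].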
def get_data_loader(root, json, vocab, transform, batch_size, shuffle, num_workers):
"""Returns torch.utils.data.DataLoader for custom coco dataset."""
# COCO dataset
coco = CocoDataset(root=root,
json=json,
                       vocab=vocab,
transform=transform)
# Data loader for COCO dataset
data_loader = torch.utils.data.DataLoader(dataset=coco,
batch_size=batch_size,
shuffle=shuffle,
num_workers=num_workers,
collate_fn=collate_fn)
return data_loader | apache-2.0 | 3,336,840,735,813,640,000 | 34.373737 | 84 | 0.579834 | false | 4.143195 | false | false | false |
amontefusco/ledpanel-utils | textopt.py | 1 | 1845 | #!/usr/bin/python
# Show a sliding text on RGB led panel
# (c) 2014 Sergio Tanzilli - [email protected]
# Multiple panel capability added by A.Montefusco 2017,
# requires ledpanel.ko 2.0
# All the images are computed in advance in order to improve speed
# in case of lengthy string
#
import time
import sys
import os
from datetime import datetime
from PIL import ImageFont
from PIL import Image
from PIL import ImageDraw
import StringIO
import probe
if len(sys.argv)<6 or len(sys.argv)>6:
print "Syntax:"
print " %s text r g b loop" % (sys.argv[0])
print
print "loop=0 forever loop"
quit()
print "Panel size: %d x %d\n" % (probe.panel_w, probe.panel_h)
font = ImageFont.truetype('fonts/Ubuntu-B.ttf', 32)
width, height = font.getsize(sys.argv[1])
text=sys.argv[1]
r=int(sys.argv[2])
g=int(sys.argv[3])
b=int(sys.argv[4])
loops=int(sys.argv[5])
#
# compute all images
#
print "Computing all the images, please wait...."
x = probe.panel_w
imgs = []
while True:
x=x-1
if x < -(width): break
im = Image.new("RGB", (probe.panel_w, probe.panel_h), "black")
draw = ImageDraw.Draw(im)
draw.fontmode="1" #No antialias
draw.rectangle((0, 0, probe.panel_w - 1, height), outline=0, fill=0)
draw.text((x, -1), text, (r,g,b), font=font)
imgs.append(im)
print "All images generated (%d), stream starts..." % len(imgs)
# setup driver access
out_file = open("/sys/class/ledpanel/rgb_buffer","w")
output = StringIO.StringIO()
x = probe.panel_w
i = 0
while True:
x = x - 1
if x < -(width):
if loops==0:
x = probe.panel_w
i = 0
continue
else:
if loops==1:
break
else:
loops=loops-1
x = probe.panel_w
i = 0
continue
output.truncate(0)
imgs[i].save(output, format='PPM')
buf=output.getvalue()
out_file.seek(0)
out_file.write(buf[13:])
i = i + 1
out_file.close()
| gpl-2.0 | 866,967,959,076,484,500 | 16.912621 | 69 | 0.659079 | false | 2.602257 | false | false | false |
th0mmeke/toyworld | chemistry_model/default_chemistry.py | 1 | 5522 | """
Created on 14 Aug 2013
@author: thom
"""
from rdkit.Chem import AllChem as Chem
class DefaultChemistry(object):
"""A simple Chemistry based on real-world chemistry."""
def __init__(self, parameters=None):
""":param parameters: Parameters object"""
# Single : 77.7 = 777.1/10 = 104.2 + 83 + 38.4 + 35 + 99 + 93 + 111 + 73 + 85.5 + 55
# Double : 148.2 = 889/6 = 185 + 146 + 149 + 119 + 147 + 143
# Triple : 224.3 = 897/4 = 258 + 200 + 226 + 213
default_data = {
'H1H': 104.2,
'C1C': 83,
'N1N': 38.4,
'O1O': 35,
'H1C': 99, 'C1H': 99,
'H1N': 93, 'N1H': 93,
'H1O': 111, 'O1H': 111,
'C1N': 73, 'N1C': 73,
'C1O': 85.5, 'O1C': 85.5,
'N1O': 55, 'O1N': 55,
'C2O': 185, 'O2C': 185, # rough average of range
'C2C': 146,
'N2N': 149,
'O2O': 119,
'C2N': 147, 'N2C': 147,
'N2O': 143, 'O2N': 143,
'C3O': 258, 'O3C': 258,
'C3C': 200,
'N3N': 226,
'C3N': 213, 'N3C': 213,
'C4C': 200 # theoretically possible from valences, but in nature forms a C2C bond instead
}
count = {}
default_bond_energies = {}
for bond, energy in default_data.iteritems():
key = int(bond[1])
try:
count[key] += 1
default_bond_energies[key] += energy
            except KeyError:
count[key] = 1
default_bond_energies[key] = energy
for i in (1, 2, 3):
default_bond_energies[i] = default_bond_energies[i] / count[i]
self._atoms = ['C', 'N', 'O', 'H']
self._bond_formation_energies = {}
self._bond_break_energies = {}
formation_energies = None
break_energies = None
if parameters is not None: # Parameters object
atoms = parameters.get('Atoms')
if atoms is not None:
self._atoms = []
for atom in atoms.findall('Atom'):
self._atoms.append(atom.text)
formation_energies = parameters.get('BondFormationEnergies')
break_energies = parameters.get('BondBreakEnergies')
for atom_1 in self._atoms:
for atom_2 in self._atoms:
for bond_type, xml_key in {1: 'Single', 2: 'Double', 3: 'Triple'}.iteritems():
key = "{}{}{}".format(atom_1, bond_type, atom_2)
if formation_energies is None:
if key in default_data.keys():
self._bond_formation_energies[key] = default_data[key]
else:
self._bond_formation_energies[key] = default_bond_energies[bond_type]
else:
self._bond_formation_energies[key] = float(formation_energies.find(xml_key).text)
if break_energies is None:
self._bond_break_energies[key] = self._bond_formation_energies[key]
else:
self._bond_break_energies[key] = float(break_energies.find(xml_key).text)
def get_bond_potential(self, atom):
"""Requires Explicit Hs!
Simple method based on standard Lewis dot-structures e.g., http://library.thinkquest.org/C006669/data/Chem/bonding/lewis.html
        Bond potential calculation (octet rule): potential = 8 - (V + B + FC),
        where V is the valence electron count, B counts bonded electrons
        (single bond = 1, double = 2, etc.) and FC is the formal charge.
"""
if atom.GetAtomicNum() == 1:
if len(atom.GetBonds()) == 0: # if not already bound...
return 1
else:
return 0
else:
bonded_electrons = 0
for bond in atom.GetBonds():
bonded_electrons += bond.GetBondType() # relies on Chem.BondType mapping to int...
valence_electrons = Chem.GetPeriodicTable().GetNOuterElecs(atom.GetAtomicNum())
# logging.info("Bond potential: 8 - ({} + {} + {})".format(valence_electrons, bonded_electrons, atom.GetFormalCharge()))
return 8 - (valence_electrons + bonded_electrons + atom.GetFormalCharge())
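        # Worked example (illustrative, not from the original source): a
        # neutral, unbound carbon atom has 4 valence electrons and no bonded
        # electrons, so its bond potential is 8 - (4 + 0 + 0) = 4, i.e. it can
        # still form four single bonds.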
def get_bond_energy(self, atom_1, atom_2, end_bond_type=0, start_bond_type=0):
"""Returns the energy REQUIRED to make the bond change from start_bond_type (or existing type if not provided) to end_bond_type.
Creation of a bond requires -e; breaking the bond +e
Energies taken from http://www.cem.msu.edu/~reusch/OrgPage/bndenrgy.htm - Average Bond Dissociation Enthalpies in kcal per mole
:param atom_1: One end of the bond
:type atom_1: Chem.Atom
:param atom_2: Other end of the bond
:type atom_2: Chem.Atom
:param bond_type: Type of the bond, corresponding to index into Chem.BondType.values
:type bond_type: int
:rtype: int
"""
# Energy to release current bond state
if start_bond_type == 0:
start_energy = 0
else:
start_energy = self._bond_break_energies[atom_1.GetSymbol() + str(min(3, start_bond_type)) + atom_2.GetSymbol()]
# Energy to create desired bond state
if end_bond_type == 0:
end_energy = 0
else:
end_energy = self._bond_formation_energies[atom_1.GetSymbol() + str(min(3, end_bond_type)) + atom_2.GetSymbol()]
return start_energy - end_energy
| gpl-3.0 | 2,791,581,899,556,919,300 | 39.602941 | 136 | 0.528251 | false | 3.512723 | false | false | false |
meppe/ros-ort | src/frcnn/src/frcnn/detector.py | 1 | 7209 | import time
import errno
import sys
ros_slam_path = "/opt/ros-ort"
sys.path.insert(0, ros_slam_path+"/src/frcnn/src/py-faster-rcnn")
sys.path.insert(0, ros_slam_path+"/src/frcnn/src/py-faster-rcnn/caffe-fast-rcnn/python")
sys.path.insert(0, ros_slam_path+"/src/frcnn/src/py-faster-rcnn/lib")
import rospy
from ort_msgs.msg import Object_bb_list
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
from fast_rcnn.config import cfg
from fast_rcnn.test import im_detect
from utils.timer import Timer
import numpy as np
import caffe
from threading import Thread
import os
class Detector:
DETECT_RUNNING = False
def __init__(self, classes, prototxt_file, caffemodel_file, args, class_properties=None):
self.classes = classes
self.current_scores = []
self.current_boxes = []
self.current_frame = None
self.current_frame_timestamp = None
self.current_frame_header = None
self.frames_detected = 0
self.detection_start = time.time()
self.args = args
self.CONF_THRESH = args.conf_threshold
# print ("THRESH" + str(self.CONF_THRESH))
self.cls_score_factors = {}
self.set_cls_score_factors(class_properties)
rospy.init_node("frcnn_detector")
print("node initialized")
cfg.TEST.HAS_RPN = True # Use RPN for proposals
prototxt = prototxt_file
caffemodel = caffemodel_file
if not os.path.isfile(caffemodel):
raise IOError(('{:s} not found.\nDid you run ./download_caffemodels.sh?').format(caffemodel))
if not os.path.isfile(prototxt):
raise IOError(("{:s} not found.\nMaybe this model is incompatible with the "
"respective network you chose.").format(caffemodel))
if args.cpu_mode:
caffe.set_mode_cpu()
print("Set caffe to CPU mode")
else:
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
cfg.GPU_ID = args.gpu_id
print("Set caffe to GPU mode, running on GPU {}".format(cfg.GPU_ID))
self.net = caffe.Net(prototxt, caffemodel, caffe.TEST)
print '\n\nLoaded network {:s}'.format(caffemodel)
# Warmup on a dummy image
im = 128 * np.ones((300, 500, 3), dtype=np.uint8)
for i in xrange(2):
_, _ = im_detect(self.net, im)
# Create bounding box publisher
self.bb_pub = rospy.Publisher('frcnn/bb', Object_bb_list, queue_size=10)
# self.bb_img_pub = rospy.Publisher('frcnn/bb_img', Image, queue_size=1)
self.detection_start = time.time()
self.sub_frames = rospy.Subscriber("/frcnn_input/image_raw", Image, self.cb_frame_rec, queue_size=10)
rospy.spin()
def set_cls_score_factors(self, class_properties):
'''
This sets the factor to multiply the score with, depending on the object property type (e.g., shape, color, class)
:param class_properties:
:return:
'''
if class_properties == None:
return
for prop in class_properties.keys():
score_factor = class_properties[prop][0]
for cls in class_properties[prop][1]:
self.cls_score_factors[cls] = float(score_factor)
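        # Shape sketch (hypothetical values, not from the original source):
        # class_properties = {'shape': (0.5, ['ball', 'cube'])} multiplies the
        # detection scores of the 'ball' and 'cube' classes by 0.5.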
def pub_detections(self):
is_keyframe = False
timestamp = self.current_frame_header.seq
# print("Publishing bb with timestamp {}".format(timestamp))
frame_id = self.current_frame_header.frame_id
bb_ul_xs = []
bb_ul_ys = []
bb_lr_xs = []
bb_lr_ys = []
bb_scores = []
obj_labels = []
class_names = []
for cls_ind, cls in enumerate(self.classes[1:]):
cls_ind += 1 # because we skipped background
cls_boxes = self.current_boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
cls_scores = self.current_scores[:, cls_ind]
for i, b in enumerate(cls_boxes):
score = cls_scores[i]
if cls in self.cls_score_factors.keys():
cls_score_factor = self.cls_score_factors[cls]
score *= cls_score_factor
if float(score) < float(self.CONF_THRESH):
continue
b_ul_x = b[0]
b_ul_y = b[1]
b_lr_x = b[2]
b_lr_y = b[3]
bb_ul_xs.append(b_ul_x)
bb_ul_ys.append(b_ul_y)
bb_lr_xs.append(b_lr_x)
bb_lr_ys.append(b_lr_y)
bb_scores.append(score)
obj_labels.append(cls+"_"+str(i))
class_names.append(cls)
bb_msg = Object_bb_list(frame_id, timestamp, is_keyframe, bb_ul_xs, bb_ul_ys, bb_lr_xs, bb_lr_ys, class_names,
obj_labels, bb_scores)
print("Publishing {} detections.".format(len(obj_labels)))
self.bb_pub.publish(bb_msg)
def frame_detect(self, net, im):
if self.args.cpu_mode:
caffe.set_mode_cpu()
# print("Set caffe to CPU mode")
else:
caffe.set_mode_gpu()
caffe.set_device(self.args.gpu_id)
cfg.GPU_ID = self.args.gpu_id
# print("Set caffe to GPU mode, running on GPU {}".format(cfg.GPU_ID))
# Detect all object classes and regress object bounds
timer = Timer()
timer.tic()
self.current_scores, self.current_boxes = im_detect(net, im)
timer.toc()
print ('Detection took {:.3f}s for '
'{:d} object proposals').format(timer.total_time, self.current_boxes.shape[0])
def deserialize_and_detect_thread(self, msg):
'''
Start object detection. Parse image message and start frame_detect
:param msg:
:return:
'''
# If detection is not already running start a new detection
if not Detector.DETECT_RUNNING:
Detector.DETECT_RUNNING = True
self.current_frame_header = msg.header
print("Starting detection of frame {}.".format(msg.header.seq))
self.frames_detected += 1
bridge = CvBridge()
cv_image = bridge.imgmsg_to_cv2(msg, msg.encoding)
img = np.asarray(cv_image)
if len(img.shape) == 2:
img = np.asarray([img, img, img])
img = np.swapaxes(img, 0, 2)
img = np.swapaxes(img, 1, 0)
self.current_frame = img
            assert self.net is not None, "No network selected"
if self.net is not None:
self.frame_detect(self.net, img)
self.pub_detections()
now = time.time()
detection_time = now - self.detection_start
fps = self.frames_detected / detection_time
print("Running for {} sec., detection with {} fps.".format(detection_time, fps))
Detector.DETECT_RUNNING = False
# Skip detection if another detection is running already
else:
pass
def cb_frame_rec(self, msg):
t = Thread(target=self.deserialize_and_detect_thread, args=[msg])
t.start()
| gpl-3.0 | -3,395,597,467,868,489,700 | 36.159794 | 122 | 0.569843 | false | 3.600899 | false | false | false |