| repo_name (string, 6-61 chars) | path (string, 4-230 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | text (string, 1.01k-850k chars) | license (string, 15 classes) | hash (int64, -9,220,477,234,079,998,000 to 9,219,060,020B) | line_mean (float64, 11.6-96.6) | line_max (int64, 32-939) | alpha_frac (float64, 0.26-0.9) | autogenerated (bool, 1 class) | ratio (float64, 1.62-6.1) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Kraymer/keroaek | keroaek/__init__.py | 1 | 1888 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2015, Fabrice Laporte
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Uses the `keroaek` program to add tag fields to lyrics
"""
import io
import argparse
import sys
from keroaek.editor import Editor
def main(argv=None):
if argv is None:
argv = sys.argv
parser = argparse.ArgumentParser(
        description='Lrc files creator. '
        'See https://github.com/Kraymer/keroaek for more info.')
parser.add_argument('audio', metavar='MP3_FILE',
help='Music file', default='')
parser.add_argument('-l', '--lyrics', dest='src',
                        help='src lyrics. '
                        'DEFAULT: consider audio embedded lyrics', default='')
parser.add_argument('-o', '--output', dest='dest',
help='output filename. '
'DEFAULT: edit input file in-place', default='')
args = vars(parser.parse_args())
if not args['dest']:
args['dest'] = args['src']
if not args['src']:
pass
else:
with io.open(args['src'], 'r', encoding='utf-8') as f:
lyrics = f.read()
lrc = Editor().sync(args['audio'], lyrics)
with io.open(args['dest'], 'w', encoding='utf-8') as f:
f.write(lrc)
if __name__ == "__main__":
sys.exit(main())
| mit | 8,338,894,956,068,001,000 | 32.122807 | 78 | 0.622881 | false | 3.892784 | false | false | false |
aaronkaplan/intelmq-old | intelmq/bots/parsers/hpfeeds/parser.py | 5 | 1028 | from intelmq.lib.bot import Bot, sys
from intelmq.lib.message import Event
from intelmq.bots import utils
import redis
import json
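# Parser bot that copies the fields of an incoming hpfeeds report into an IntelMQ event and forwards it.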
class HPFeedsBot(Bot):
def process(self):
report = self.receive_message()
self.logger.info(report)
if report:
#m = json.loads(report)
m = report
event = Event()
for k in m.keys():
event.add(k, m.value(k))
event.add('feed', 'hpfeed')
event.add('feed_url', m.value("sensorname"))
event = utils.generate_source_time(event, "source_time")
event = utils.generate_observation_time(event, "observation_time")
event = utils.generate_reported_fields(event)
self.send_message(event)
self.acknowledge_message()
if __name__ == "__main__":
bot = HPFeedsBot(sys.argv[1])
bot.start()
| agpl-3.0 | -4,485,831,432,475,425,000 | 29.151515 | 82 | 0.500973 | false | 4.161943 | false | false | false |
QuantumGhost/env_conf | env_conf/provider.py | 1 | 2387 | import os
from .errors import Missing
from .types import FieldType
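# Descriptor representing a single environment variable; it parses the raw value with its FieldType and caches the result.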
class Field:
def __init__(
self, type_, desc=None, name=None,
optional=False, default=None):
self.name = name
self.desc = desc
self.optional = optional
self.default = default
self._cache = None
if isinstance(type_, type):
if issubclass(type_, FieldType):
self.type_ = type_()
else:
raise TypeError("Invalid field type for {}.".format(self.name))
elif isinstance(type_, FieldType):
self.type_ = type_
else:
raise TypeError("Invalid value for {}.".format(self.name))
def resolve(self, env):
if self._cache is not None:
return self._cache
value = env.get(self.name)
if value is None:
if self.default is not None:
return self.default
elif self.optional:
return None
else:
raise Missing("Required variable {} not set".format(self.name))
self._cache = self.type_.parse(value)
return self._cache
def __get__(self, instance, owner):
if instance is None:
return self
return self.resolve(instance.env)
def __str__(self):
return "<Field(name={}, type={})>".format(self.name, self.type_)
def __repr__(self):
return str(self)
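# Metaclass that gathers all Field attributes declared on a provider class into a 'fields' tuple.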
class ProviderMeta(type):
def __new__(mcs, name, bases, attrs):
fields = []
for key, value in attrs.items():
if isinstance(value, Field):
if value.name is None:
value.name = key
fields.append(value)
attrs['fields'] = tuple(fields)
return super().__new__(mcs, name, bases, attrs)
class EnvProvider(metaclass=ProviderMeta):
def __init__(self, eager=True):
if eager:
self.load()
@property
def env(self):
return os.environ
def load(self):
for i in self.fields: # pylint: disable=no-member
i.resolve(self.env)
def __str__(self):
frags = []
for i in self.fields: # pylint: disable=no-member
item = '{}={}'.format(i.name, repr(i.resolve(self.env)))
frags.append(item)
return '<Env({})>'.format(', '.join(frags))
| bsd-3-clause | -3,506,847,169,531,147,300 | 28.109756 | 79 | 0.532468 | false | 4.217314 | false | false | false |
jsmesami/naovoce | src/fruit/signals.py | 1 | 2817 | from django.core.mail import mail_managers
from django.dispatch import receiver
from django.utils.translation import ugettext_noop, ugettext_lazy as _
from comments.signals import comment_created
from gallery.signals import image_created
@receiver(comment_created)
def somebody_commented_your_fruit(comment, comment_type, object_id, **kwargs):
"""
Notify users that there is a new comment under their marker.
"""
if comment_type.model == 'fruit':
fruit = comment_type.get_object_for_this_type(pk=object_id)
if fruit.user != comment.author:
if not comment.complaint:
msg_template = ugettext_noop(
'User <a href="{user_url}">{user_name}</a> '
'posted a <a href="{comment_url}">comment</a> '
'under your <a href="{fruit_url}">marker</a>.'
)
else:
msg_template = ugettext_noop(
'User <a href="{user_url}">{user_name}</a> '
'<strong>posted a <a href="{comment_url}">complaint</a></strong> '
'under your <a href="{fruit_url}">marker</a>.'
)
context = dict(
user_name=comment.author.username,
user_url=comment.author.get_absolute_url(),
comment_url=comment.get_absolute_url(),
fruit_url=fruit.get_absolute_url(),
)
fruit.user.send_message(msg_template, context=context, system=True)
@receiver(comment_created)
def complaint_notification(comment, *args, **kwargs):
"""
    Notify managers that a complaint has been sent.
"""
if comment.complaint:
subject = _('A complaint has been made.')
body = _('Please review the situation: https://na-ovoce.cz{url}').format(
url=comment.get_absolute_url(),
)
mail_managers(subject, body)
@receiver(image_created)
def somebody_added_image_to_your_fruit(image, gallery_ct, gallery_id, **kwargs):
"""
Notify users that somebody added an image to their marker.
"""
if gallery_ct.model == 'fruit':
fruit = gallery_ct.get_object_for_this_type(pk=gallery_id)
if fruit.user != image.author:
msg_template = ugettext_noop(
'User <a href="{user_url}">{user_name}</a> '
'added a <a href="{image_url}">photo</a> '
'under your <a href="{fruit_url}">marker</a>.'
)
context = dict(
user_name=image.author.username,
user_url=image.author.get_absolute_url(),
image_url=image.get_absolute_url(),
fruit_url=fruit.get_absolute_url(),
)
fruit.user.send_message(msg_template, context=context, system=True)
| bsd-3-clause | -4,929,217,873,916,473,000 | 39.242857 | 86 | 0.57082 | false | 3.756 | false | false | false |
JacoRuit/tgbot | tgbot/reply_markup.py | 1 | 2761 | import json
class ReplyMarkup(object):
def dict(self):
raise NotImplementedError()
def json(self):
return json.dumps(self.dict())
def __str__(self):
return str(self.dict())
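# Custom reply keyboard: rows of KeyboardButton objects shown in place of the standard letter keyboard.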
class ReplyKeyboardMarkup(ReplyMarkup):
def __init__(self, buttons = [], resize_keyboard = False, one_time_keyboard = False, selective = False):
self.buttons = buttons
self.resize_keyboard = resize_keyboard
self.one_time_keyboard = one_time_keyboard
self.selective = selective
def dict(self):
return {
"keyboard": [[button.dict() for button in row] for row in self.buttons],
"resize_keyboard": self.resize_keyboard,
"one_time_keyboard": self.one_time_keyboard,
"selective": self.selective
}
class KeyboardButton(object):
def __init__(self, text = "", request_contact = False, request_location = False):
self.text = text
self.request_contact = request_contact
self.request_location = request_location
def dict(self):
return {
"text": self.text,
"request_contact": self.request_contact,
"request_location": self.request_location
}
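# Markup asking clients to hide the currently displayed custom keyboard.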
class ReplyKeyboardHide(ReplyMarkup):
def __init__(self, selective = False):
self.selective = selective
def dict(self):
return {
"hide_keyboard": True,
"selective": self.selective
}
class InlineKeyboardMarkup(ReplyMarkup):
def __init__(self, buttons = []):
self.buttons = buttons
def dict(self):
return {
"inline_keyboard": [[button.dict() for button in row] for row in self.buttons]
}
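# Single button of an inline keyboard; at least one of url, callback_data or switch_inline_query must be set.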
class InlineKeyboardButton(object):
def __init__(self, text = "", url = None, callback_data = None, switch_inline_query = None):
self.text = text
self.url = url
self.callback_data = callback_data
self.switch_inline_query = switch_inline_query
def dict(self):
obj = {
"text": self.text
}
if self.url == None and self.callback_data == None and self.switch_inline_query == None:
raise Exception("At least one of url, callback_data or switch_inline_query should be set")
if self.url != None: obj["url"] = self.url
if self.callback_data != None: obj["callback_data"] = self.callback_data
if self.switch_inline_query != None: obj["switch_inline_query"] = self.switch_inline_query
return obj
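# Markup that forces the client to display a reply interface to the user.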
class ForceReply(ReplyMarkup):
def __init__(self, selective = False):
self.selective = selective
def dict(self):
return {
"force_reply": True,
"selective": self.selective
}
| mit | -5,428,615,211,913,268,000 | 31.104651 | 108 | 0.59761 | false | 4.030657 | false | false | false |
pymedusa/SickRage | ext/cloudscraper/captcha/deathbycaptcha.py | 2 | 7187 | from __future__ import absolute_import
import json
import requests
try:
import polling
except ImportError:
raise ImportError(
"Please install the python module 'polling' via pip or download it from "
"https://github.com/justiniso/polling/"
)
from ..exceptions import (
reCaptchaException,
reCaptchaServiceUnavailable,
reCaptchaAccountError,
reCaptchaTimeout,
reCaptchaParameter,
reCaptchaBadJobID,
reCaptchaReportError
)
from . import reCaptcha
class captchaSolver(reCaptcha):
def __init__(self):
super(captchaSolver, self).__init__('deathbycaptcha')
self.host = 'http://api.dbcapi.me/api'
self.session = requests.Session()
# ------------------------------------------------------------------------------- #
@staticmethod
def checkErrorStatus(response):
errors = dict(
[
(400, "DeathByCaptcha: 400 Bad Request"),
(403, "DeathByCaptcha: 403 Forbidden - Invalid credentails or insufficient credits."),
# (500, "DeathByCaptcha: 500 Internal Server Error."),
(503, "DeathByCaptcha: 503 Service Temporarily Unavailable.")
]
)
if response.status_code in errors:
raise reCaptchaServiceUnavailable(errors.get(response.status_code))
# ------------------------------------------------------------------------------- #
def login(self, username, password):
self.username = username
self.password = password
def _checkRequest(response):
if response.ok:
if response.json().get('is_banned'):
raise reCaptchaAccountError('DeathByCaptcha: Your account is banned.')
if response.json().get('balanace') == 0:
raise reCaptchaAccountError('DeathByCaptcha: insufficient credits.')
return response
self.checkErrorStatus(response)
return None
response = polling.poll(
lambda: self.session.post(
'{}/user'.format(self.host),
headers={'Accept': 'application/json'},
data={
'username': self.username,
'password': self.password
}
),
check_success=_checkRequest,
step=10,
timeout=120
)
self.debugRequest(response)
# ------------------------------------------------------------------------------- #
def reportJob(self, jobID):
if not jobID:
raise reCaptchaBadJobID(
"DeathByCaptcha: Error bad job id to report failed reCaptcha."
)
def _checkRequest(response):
if response.status_code == 200:
return response
self.checkErrorStatus(response)
return None
response = polling.poll(
lambda: self.session.post(
'{}/captcha/{}/report'.format(self.host, jobID),
headers={'Accept': 'application/json'},
data={
'username': self.username,
'password': self.password
}
),
check_success=_checkRequest,
step=10,
timeout=180
)
if response:
return True
else:
raise reCaptchaReportError(
"DeathByCaptcha: Error report failed reCaptcha."
)
# ------------------------------------------------------------------------------- #
def requestJob(self, jobID):
if not jobID:
raise reCaptchaBadJobID(
"DeathByCaptcha: Error bad job id to request reCaptcha."
)
def _checkRequest(response):
if response.ok and response.json().get('text'):
return response
self.checkErrorStatus(response)
return None
response = polling.poll(
lambda: self.session.get(
'{}/captcha/{}'.format(self.host, jobID),
headers={'Accept': 'application/json'}
),
check_success=_checkRequest,
step=10,
timeout=180
)
if response:
return response.json().get('text')
else:
raise reCaptchaTimeout(
"DeathByCaptcha: Error failed to solve reCaptcha."
)
# ------------------------------------------------------------------------------- #
def requestSolve(self, url, siteKey):
def _checkRequest(response):
if response.ok and response.json().get("is_correct") and response.json().get('captcha'):
return response
self.checkErrorStatus(response)
return None
response = polling.poll(
lambda: self.session.post(
'{}/captcha'.format(self.host),
headers={'Accept': 'application/json'},
data={
'username': self.username,
'password': self.password,
'type': '4',
'token_params': json.dumps({
'googlekey': siteKey,
'pageurl': url
})
},
allow_redirects=False
),
check_success=_checkRequest,
step=10,
timeout=180
)
if response:
return response.json().get('captcha')
else:
raise reCaptchaBadJobID(
'DeathByCaptcha: Error no job id was returned.'
)
# ------------------------------------------------------------------------------- #
def getCaptchaAnswer(self, captchaType, url, siteKey, reCaptchaParams):
jobID = None
for param in ['username', 'password']:
if not reCaptchaParams.get(param):
raise reCaptchaParameter(
"DeathByCaptcha: Missing '{}' parameter.".format(param)
)
setattr(self, param, reCaptchaParams.get(param))
if captchaType == 'hCaptcha':
raise reCaptchaException(
'Provider does not support hCaptcha.'
)
if reCaptchaParams.get('proxy'):
self.session.proxies = reCaptchaParams.get('proxies')
try:
jobID = self.requestSolve(url, siteKey)
return self.requestJob(jobID)
except polling.TimeoutException:
try:
if jobID:
self.reportJob(jobID)
except polling.TimeoutException:
                raise reCaptchaTimeout(
                    "DeathByCaptcha: reCaptcha solve took too long and also failed reporting the job id {}.".format(jobID)
)
            raise reCaptchaTimeout(
                "DeathByCaptcha: reCaptcha solve took too long to execute job id {}, aborting.".format(jobID)
)
# ------------------------------------------------------------------------------- #
captchaSolver()
| gpl-3.0 | 2,024,386,768,089,290,800 | 29.845494 | 121 | 0.484347 | false | 5.029391 | false | false | false |
brianrodri/oppia | python_utils.py | 2 | 21060 | # coding: utf-8
#
# Copyright 2019 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Feature detection utilities for Python 2 and Python 3."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import print_function # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import inspect
import io
import itertools
import os
import sys
_THIRD_PARTY_PATH = os.path.join(os.getcwd(), 'third_party', 'python_libs')
sys.path.insert(0, _THIRD_PARTY_PATH)
_YAML_PATH = os.path.join(os.getcwd(), '..', 'oppia_tools', 'pyyaml-5.1.2')
sys.path.insert(0, _YAML_PATH)
_CERTIFI_PATH = os.path.join(
os.getcwd(), '..', 'oppia_tools', 'certifi-2020.12.5')
sys.path.insert(0, _CERTIFI_PATH)
import yaml # isort:skip pylint: disable=wrong-import-position, wrong-import-order
import builtins # isort:skip pylint: disable=wrong-import-position, wrong-import-order
import future.utils # isort:skip pylint: disable=wrong-import-position, wrong-import-order
import past.builtins # isort:skip pylint: disable=wrong-import-position, wrong-import-order
import past.utils # isort:skip pylint: disable=wrong-import-position, wrong-import-order
import six # isort:skip pylint: disable=wrong-import-position, wrong-import-order
import certifi # isort:skip pylint: disable=wrong-import-position, wrong-import-order
import ssl # isort:skip pylint: disable=wrong-import-position, wrong-import-order
BASESTRING = past.builtins.basestring
INPUT = builtins.input
MAP = builtins.map
NEXT = builtins.next
OBJECT = builtins.object
PRINT = print
RANGE = builtins.range
ROUND = builtins.round
UNICODE = builtins.str
ZIP = builtins.zip
def SimpleXMLRPCServer( # pylint: disable=invalid-name
addr, requestHandler=None, logRequests=True, allow_none=False,
encoding=None, bind_and_activate=True):
"""Returns SimpleXMLRPCServer from SimpleXMLRPCServer module if run under
Python 2 and from xmlrpc module if run under Python 3.
Args:
addr: tuple(str, int). The host and port of the server.
requestHandler: callable. A factory for request handler instances.
Defaults to SimpleXMLRPCRequestHandler.
logRequests: bool. Whether to log the requests sent to the server.
allow_none: bool. Permits None in the XML-RPC responses that will be
returned from the server.
encoding: str|None. The encoding used by the XML-RPC responses that will
be returned from the server.
bind_and_activate: bool. Whether server_bind() and server_activate() are
called immediately by the constructor; defaults to true. Setting it
to false allows code to manipulate the allow_reuse_address class
variable before the address is bound.
Returns:
SimpleXMLRPCServer. The SimpleXMLRPCServer object.
"""
try:
from xmlrpc.server import SimpleXMLRPCServer as impl # pylint: disable=import-only-modules
except ImportError:
from SimpleXMLRPCServer import SimpleXMLRPCServer as impl # pylint: disable=import-only-modules
if requestHandler is None:
try:
from xmlrpc.server import SimpleXMLRPCRequestHandler # pylint: disable=import-only-modules
except ImportError:
from SimpleXMLRPCServer import SimpleXMLRPCRequestHandler # pylint: disable=import-only-modules
requestHandler = SimpleXMLRPCRequestHandler
return impl(
addr, requestHandler=requestHandler, logRequests=logRequests,
allow_none=allow_none, encoding=encoding,
bind_and_activate=bind_and_activate)
def redirect_stdout(new_target):
"""Returns redirect_stdout from contextlib2 if run under Python 2 and from
contextlib if run under Python 3.
Args:
new_target: FileLike. The file-like object all messages printed to
stdout will be redirected to.
Returns:
contextlib.redirect_stdout or contextlib2.redirect_stdout. The
redirect_stdout object.
"""
try:
from contextlib import redirect_stdout as impl # pylint: disable=import-only-modules
except ImportError:
from contextlib2 import redirect_stdout as impl # pylint: disable=import-only-modules
return impl(new_target)
def nullcontext(enter_result=None):
"""Returns nullcontext from contextlib2 if run under Python 2 and from
contextlib if run under Python 3.
Args:
enter_result: *. The object returned by the nullcontext when entered.
Returns:
contextlib.nullcontext or contextlib2.nullcontext. The nullcontext
object.
"""
try:
from contextlib import nullcontext as impl # pylint: disable=import-only-modules
except ImportError:
from contextlib2 import nullcontext as impl # pylint: disable=import-only-modules
return impl(enter_result=enter_result)
def ExitStack(): # pylint: disable=invalid-name
"""Returns ExitStack from contextlib2 if run under Python 2 and from
contextlib if run under Python 3.
Returns:
contextlib.ExitStack or contextlib2.ExitStack. The ExitStack object.
"""
try:
from contextlib import ExitStack as impl # pylint: disable=import-only-modules
except ImportError:
from contextlib2 import ExitStack as impl # pylint: disable=import-only-modules
return impl()
def string_io(buffer_value=b''):
"""Returns StringIO from StringIO module if run under Python 2 and from io
module if run under Python 3.
Args:
buffer_value: str. A string that is to be converted to in-memory text
stream.
Returns:
StringIO.StringIO or io.StringIO. The StringIO object.
"""
try:
from StringIO import StringIO # pylint: disable=import-only-modules
except ImportError:
from io import StringIO # pylint: disable=import-only-modules
return StringIO(buffer_value) # pylint: disable=disallowed-function-calls
def get_args_of_function_node(function_node, args_to_ignore):
"""Extracts the arguments from a function definition.
Args:
function_node: ast.FunctionDef. Represents a function.
args_to_ignore: list(str). Ignore these arguments in a function
definition.
Returns:
list(str). The args for a function as listed in the function
definition.
"""
try:
return [
a.arg
for a in function_node.args.args
if a.arg not in args_to_ignore
]
except AttributeError:
return [
a.id for a in function_node.args.args if a.id not in args_to_ignore
]
def open_file(filename, mode, encoding='utf-8', newline=None):
"""Open file and return a corresponding file object.
Args:
filename: str. The file to be opened.
mode: str. Mode in which the file is opened.
encoding: str. Encoding in which the file is opened.
newline: None|str. Controls how universal newlines work.
Returns:
_io.TextIOWrapper. The file object.
Raises:
IOError. The file cannot be opened.
"""
# The try/except is needed here to unify the errors because io.open in
# Python 3 throws FileNotFoundError while in Python 2 it throws an IOError.
# This should be removed after we fully migrate to Python 3.
try:
return io.open(filename, mode, encoding=encoding, newline=newline)
except:
raise IOError('Unable to open file: %s' % filename)
def url_join(base_url, relative_url):
"""Construct a full URL by combining a 'base URL' with another URL using
urlparse.urljoin if run under Python 2 and urllib.parse.urljoin if run under
Python 3.
Args:
base_url: str. The base URL.
relative_url: str. The other URL.
Returns:
str. The full URL.
"""
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
return urlparse.urljoin(base_url, relative_url) # pylint: disable=disallowed-function-calls
def url_split(urlstring):
"""Splits a URL using urlparse.urlsplit if run under Python 2 and
urllib.parse.urlsplit if run under Python 3.
Args:
urlstring: str. The URL.
Returns:
tuple(str). The components of a URL.
"""
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
return urlparse.urlsplit(urlstring) # pylint: disable=disallowed-function-calls
def url_parse(urlstring):
"""Parse a URL into six components using urlparse.urlparse if run under
Python 2 and urllib.parse.urlparse if run under Python 3. This corresponds
to the general structure of a URL:
scheme://netloc/path;parameters?query#fragment.
Args:
urlstring: str. The URL.
Returns:
tuple(str). The components of a URL.
"""
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
return urlparse.urlparse(urlstring) # pylint: disable=disallowed-function-calls
def url_unsplit(url_parts):
"""Combine the elements of a tuple as returned by urlsplit() into a complete
URL as a string using urlparse.urlunsplit if run under Python 2 and
urllib.parse.urlunsplit if run under Python 3.
Args:
url_parts: tuple(str). The components of a URL.
Returns:
str. The complete URL.
"""
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
return urlparse.urlunsplit(url_parts) # pylint: disable=disallowed-function-calls
def parse_query_string(query_string):
"""Parse a query string given as a string argument
(data of type application/x-www-form-urlencoded) using urlparse.parse_qs if
run under Python 2 and urllib.parse.parse_qs if run under Python 3.
Args:
query_string: str. The query string.
Returns:
dict. The keys are the unique query variable names and the values are
lists of values for each name.
"""
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
return urlparse.parse_qs(query_string) # pylint: disable=disallowed-function-calls
def urllib_unquote(content):
"""Replace %xx escapes by their single-character equivalent using
urllib.unquote if run under Python 2 and urllib.parse.unquote if run under
Python 3.
Args:
content: str. The string to be unquoted.
Returns:
str. The unquoted string.
"""
try:
import urllib.parse as urlparse
except ImportError:
import urllib as urlparse
return urlparse.unquote(content)
def url_quote(content):
"""Quotes a string using urllib.quote if run under Python 2 and
urllib.parse.quote if run under Python 3.
Args:
content: str. The string to be quoted.
Returns:
str. The quoted string.
"""
try:
import urllib.parse as urlparse
except ImportError:
import urllib as urlparse
return urlparse.quote(content)
def url_unquote_plus(content):
"""Unquotes a string and replace plus signs by spaces, as required for
unquoting HTML form values using urllib.unquote_plus if run under Python 2
and urllib.parse.unquote_plus if run under Python 3.
Args:
content: str. The string to be unquoted.
Returns:
str. The unquoted string.
"""
try:
import urllib.parse as urlparse
except ImportError:
import urllib as urlparse
return urlparse.unquote_plus(content)
def url_encode(query, doseq=False):
"""Convert a mapping object or a sequence of two-element tuples to a
'url-encoded' string using urllib.urlencode if run under Python 2 and
urllib.parse.urlencode if run under Python 3.
Args:
query: dict or tuple. The query to be encoded.
doseq: bool. If true, individual key=value pairs separated by '&' are
generated for each element of the value sequence for the key.
Returns:
str. The 'url-encoded' string.
"""
try:
import urllib.parse as urlparse
except ImportError:
import urllib as urlparse
return urlparse.urlencode(query, doseq)
def url_retrieve(source_url, filename=None):
"""Copy a network object denoted by a URL to a local file using
urllib.urlretrieve if run under Python 2 and urllib.request.urlretrieve if
run under Python 3.
Args:
source_url: str. The URL.
filename: str. The file location to copy to.
Returns:
urlretrieve. The 'urlretrieve' object.
"""
context = ssl.create_default_context(cafile=certifi.where())
try:
import urllib.request as urlrequest
except ImportError:
import urllib as urlrequest
# Change the User-Agent to prevent servers from blocking requests.
# See https://support.cloudflare.com/hc/en-us/articles/360029779472-Troubleshooting-Cloudflare-1XXX-errors#error1010. # pylint: disable=line-too-long
urlrequest.URLopener.version = (
'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0) '
'Gecko/20100101 Firefox/47.0'
)
return urlrequest.urlretrieve(
source_url, filename=filename, context=context)
def url_open(source_url):
"""Open a network object denoted by a URL for reading using
urllib2.urlopen if run under Python 2 and urllib.request.urlopen if
run under Python 3.
Args:
source_url: str. The URL.
Returns:
urlopen. The 'urlopen' object.
"""
context = ssl.create_default_context(cafile=certifi.where())
try:
import urllib.request as urlrequest
except ImportError:
import urllib2 as urlrequest
return urlrequest.urlopen(source_url, context=context)
def url_request(source_url, data, headers):
"""This function provides an abstraction of a URL request. It uses
urllib2.Request if run under Python 2 and urllib.request.Request if
run under Python 3.
Args:
source_url: str. The URL.
data: str. Additional data to send to the server.
headers: dict. The request headers.
Returns:
Request. The 'Request' object.
"""
try:
import urllib.request as urlrequest
except ImportError:
import urllib2 as urlrequest
return urlrequest.Request(source_url, data, headers)
def divide(number1, number2):
"""This function divides number1 by number2 in the Python 2 way, i.e it
performs an integer division.
Args:
number1: int. The dividend.
number2: int. The divisor.
Returns:
        int. The quotient.
"""
return past.utils.old_div(number1, number2)
def with_metaclass(meta, *bases):
"""Python 2 & 3 helper for installing metaclasses.
Example:
class BaseForm(python_utils.OBJECT):
pass
class FormType(type):
pass
class Form(with_metaclass(FormType, BaseForm)):
pass
Args:
meta: type. The metaclass to install on the derived class.
*bases: tuple(class). The base classes to install on the derived class.
When empty, `object` will be the sole base class.
Returns:
class. A proxy class that mutates the classes which inherit from it to
install the input meta class and inherit from the input base classes.
The proxy class itself does not actually become one of the base classes.
"""
if not bases:
bases = (OBJECT,)
return future.utils.with_metaclass(meta, *bases)
def convert_to_bytes(string_to_convert):
"""Converts the string to bytes.
Args:
string_to_convert: unicode|str. Required string to be converted into
bytes.
Returns:
bytes. The encoded string.
"""
if isinstance(string_to_convert, UNICODE):
return string_to_convert.encode('utf-8')
return bytes(string_to_convert)
def _recursively_convert_to_str(value):
"""Convert all builtins.bytes and builtins.str elements in a data structure
to bytes and unicode respectively. This is required for the
yaml.safe_dump() function to work as it only works for unicode and bytes and
not builtins.bytes nor builtins.str(UNICODE). See:
https://stackoverflow.com/a/1950399/11755830
Args:
value: list|dict|BASESTRING. The data structure to convert.
Returns:
list|dict|bytes|unicode. The data structure in bytes and unicode.
"""
if isinstance(value, list):
return [_recursively_convert_to_str(e) for e in value]
elif isinstance(value, dict):
return {
_recursively_convert_to_str(k): _recursively_convert_to_str(v)
for k, v in value.items()
}
# We are using 'type' here instead of 'isinstance' because we need to
# clearly distinguish the builtins.str and builtins.bytes strings.
elif type(value) == future.types.newstr: # pylint: disable=unidiomatic-typecheck
temp = str(value.encode('utf-8')) # pylint: disable=disallowed-function-calls
# Remove the b'' prefix from the string.
return temp[2:-1].decode('utf-8')
elif type(value) == future.types.newbytes: # pylint: disable=unidiomatic-typecheck
temp = bytes(value)
# Remove the b'' prefix from the string.
return temp[2:-1]
else:
return value
def yaml_from_dict(dictionary, width=80):
"""Gets the YAML representation of a dict.
Args:
dictionary: dict. Dictionary for conversion into yaml.
width: int. Width for the yaml representation, default value
is set to be of 80.
Returns:
str. Converted yaml of the passed dictionary.
"""
dictionary = _recursively_convert_to_str(dictionary)
return yaml.safe_dump(dictionary, default_flow_style=False, width=width)
def reraise_exception():
"""Reraise exception with complete stacktrace."""
    # TODO(#11547): This method can be replaced by 'raise e' after we migrate
# to Python 3.
# This code is needed in order to reraise the error properly with
# the stacktrace. See https://stackoverflow.com/a/18188660/3688189.
exec_info = sys.exc_info()
six.reraise(exec_info[0], exec_info[1], tb=exec_info[2])
def is_string(value):
"""Returns whether value has a string type."""
return isinstance(value, six.string_types)
def get_args_of_function(func):
"""Returns the argument names of the function.
Args:
func: function. The function to inspect.
Returns:
list(str). The names of the function's arguments.
Raises:
TypeError. The input argument is not a function.
"""
try:
# Python 3.
return [p.name for p in inspect.signature(func).parameters
if p.kind in (p.POSITIONAL_ONLY, p.POSITIONAL_OR_KEYWORD)]
except AttributeError:
# Python 2.
return inspect.getargspec(func).args
def create_enum(*sequential):
"""Creates a enumerated constant.
Args:
*sequential: *. Sequence List to generate the enumerations.
Returns:
dict. Dictionary containing the enumerated constants.
"""
enum_values = dict(ZIP(sequential, sequential))
try:
from enum import Enum # pylint: disable=import-only-modules
# The type() of argument 1 in Enum must be str, not unicode.
return Enum(str('Enum'), enum_values) # pylint: disable=disallowed-function-calls
except ImportError:
_enums = {}
for name, value in enum_values.items():
_value = {
'name': name,
'value': value
}
_enums[name] = type(b'Enum', (), _value)
return type(b'Enum', (), _enums)
def zip_longest(*args, **kwargs):
"""Creates an iterator that aggregates elements from each of the iterables.
If the iterables are of uneven length, missing values are
filled-in with fillvalue.
Args:
*args: list(*). Iterables that needs to be aggregated into an iterable.
**kwargs: dict. It contains fillvalue.
Returns:
        iterable(iterable). A sequence of aggregated elements
from each of the iterables.
"""
fillvalue = kwargs.get('fillvalue')
try:
return itertools.zip_longest(*args, fillvalue=fillvalue)
except AttributeError:
return itertools.izip_longest(*args, fillvalue=fillvalue)
| apache-2.0 | 8,958,891,144,418,246,000 | 32.165354 | 157 | 0.676211 | false | 4.141593 | false | false | false |
mydongistiny/external_chromium_org | tools/telemetry/telemetry/timeline/trace_event_importer.py | 33 | 18358 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
''' TraceEventImporter imports TraceEvent-formatted data
into the provided model.
This is a port of the trace event importer from
https://code.google.com/p/trace-viewer/
'''
import copy
import json
import re
import telemetry.timeline.async_slice as tracing_async_slice
import telemetry.timeline.flow_event as tracing_flow_event
from telemetry.timeline import importer
from telemetry.timeline import tracing_timeline_data
class TraceBufferOverflowException(Exception):
pass
class TraceEventTimelineImporter(importer.TimelineImporter):
def __init__(self, model, timeline_data):
super(TraceEventTimelineImporter, self).__init__(
model, timeline_data, import_priority=1)
event_data = timeline_data.EventData()
self._events_were_from_string = False
self._all_async_events = []
self._all_object_events = []
self._all_flow_events = []
if type(event_data) is str:
# If the event data begins with a [, then we know it should end with a ].
# The reason we check for this is because some tracing implementations
# cannot guarantee that a ']' gets written to the trace file. So, we are
# forgiving and if this is obviously the case, we fix it up before
# throwing the string at JSON.parse.
if event_data[0] == '[':
event_data = re.sub(r'[\r|\n]*$', '', event_data)
event_data = re.sub(r'\s*,\s*$', '', event_data)
if event_data[-1] != ']':
event_data = event_data + ']'
self._events = json.loads(event_data)
self._events_were_from_string = True
else:
self._events = event_data
# Some trace_event implementations put the actual trace events
# inside a container. E.g { ... , traceEvents: [ ] }
# If we see that, just pull out the trace events.
if 'traceEvents' in self._events:
container = self._events
self._events = self._events['traceEvents']
for field_name in container:
if field_name == 'traceEvents':
continue
# Any other fields in the container should be treated as metadata.
self._model.metadata.append({
'name' : field_name,
'value' : container[field_name]})
@staticmethod
def CanImport(timeline_data):
''' Returns whether obj is a TraceEvent array. '''
if not isinstance(timeline_data,
tracing_timeline_data.TracingTimelineData):
return False
event_data = timeline_data.EventData()
# May be encoded JSON. But we dont want to parse it fully yet.
# Use a simple heuristic:
# - event_data that starts with [ are probably trace_event
# - event_data that starts with { are probably trace_event
# May be encoded JSON. Treat files that start with { as importable by us.
if isinstance(event_data, str):
return len(event_data) > 0 and (event_data[0] == '{'
or event_data[0] == '[')
# Might just be an array of events
if (isinstance(event_data, list) and len(event_data)
and 'ph' in event_data[0]):
return True
# Might be an object with a traceEvents field in it.
if 'traceEvents' in event_data:
trace_events = event_data.get('traceEvents', None)
return (type(trace_events) is list and
len(trace_events) > 0 and 'ph' in trace_events[0])
return False
def _GetOrCreateProcess(self, pid):
return self._model.GetOrCreateProcess(pid)
def _DeepCopyIfNeeded(self, obj):
if self._events_were_from_string:
return obj
return copy.deepcopy(obj)
def _ProcessAsyncEvent(self, event):
'''Helper to process an 'async finish' event, which will close an
open slice.
'''
thread = (self._GetOrCreateProcess(event['pid'])
.GetOrCreateThread(event['tid']))
self._all_async_events.append({
'event': event,
'thread': thread})
def _ProcessCounterEvent(self, event):
'''Helper that creates and adds samples to a Counter object based on
'C' phase events.
'''
if 'id' in event:
ctr_name = event['name'] + '[' + str(event['id']) + ']'
else:
ctr_name = event['name']
ctr = (self._GetOrCreateProcess(event['pid'])
.GetOrCreateCounter(event['cat'], ctr_name))
# Initialize the counter's series fields if needed.
if len(ctr.series_names) == 0:
#TODO: implement counter object
for series_name in event['args']:
ctr.series_names.append(series_name)
if len(ctr.series_names) == 0:
self._model.import_errors.append('Expected counter ' + event['name'] +
' to have at least one argument to use as a value.')
# Drop the counter.
del ctr.parent.counters[ctr.full_name]
return
# Add the sample values.
ctr.timestamps.append(event['ts'] / 1000.0)
for series_name in ctr.series_names:
if series_name not in event['args']:
ctr.samples.append(0)
continue
ctr.samples.append(event['args'][series_name])
def _ProcessObjectEvent(self, event):
thread = (self._GetOrCreateProcess(event['pid'])
.GetOrCreateThread(event['tid']))
self._all_object_events.append({
'event': event,
'thread': thread})
def _ProcessDurationEvent(self, event):
thread = (self._GetOrCreateProcess(event['pid'])
.GetOrCreateThread(event['tid']))
if not thread.IsTimestampValidForBeginOrEnd(event['ts'] / 1000.0):
self._model.import_errors.append(
'Timestamps are moving backward.')
return
if event['ph'] == 'B':
thread.BeginSlice(event['cat'],
event['name'],
event['ts'] / 1000.0,
event['tts'] / 1000.0 if 'tts' in event else None,
event['args'])
elif event['ph'] == 'E':
thread = (self._GetOrCreateProcess(event['pid'])
.GetOrCreateThread(event['tid']))
if not thread.IsTimestampValidForBeginOrEnd(event['ts'] / 1000.0):
self._model.import_errors.append(
'Timestamps are moving backward.')
return
if not thread.open_slice_count:
self._model.import_errors.append(
'E phase event without a matching B phase event.')
return
new_slice = thread.EndSlice(
event['ts'] / 1000.0,
event['tts'] / 1000.0 if 'tts' in event else None)
for arg_name, arg_value in event.get('args', {}).iteritems():
if arg_name in new_slice.args:
self._model.import_errors.append(
'Both the B and E phases of ' + new_slice.name +
' provided values for argument ' + arg_name + '. ' +
'The value of the E phase event will be used.')
new_slice.args[arg_name] = arg_value
def _ProcessCompleteEvent(self, event):
thread = (self._GetOrCreateProcess(event['pid'])
.GetOrCreateThread(event['tid']))
thread.PushCompleteSlice(
event['cat'],
event['name'],
event['ts'] / 1000.0,
event['dur'] / 1000.0 if 'dur' in event else None,
event['tts'] / 1000.0 if 'tts' in event else None,
event['tdur'] / 1000.0 if 'tdur' in event else None,
event['args'])
def _ProcessMetadataEvent(self, event):
if event['name'] == 'thread_name':
thread = (self._GetOrCreateProcess(event['pid'])
.GetOrCreateThread(event['tid']))
thread.name = event['args']['name']
elif event['name'] == 'process_name':
process = self._GetOrCreateProcess(event['pid'])
process.name = event['args']['name']
elif event['name'] == 'trace_buffer_overflowed':
process = self._GetOrCreateProcess(event['pid'])
process.SetTraceBufferOverflowTimestamp(event['args']['overflowed_at_ts'])
else:
self._model.import_errors.append(
'Unrecognized metadata name: ' + event['name'])
def _ProcessInstantEvent(self, event):
# Treat an Instant event as a duration 0 slice.
# SliceTrack's redraw() knows how to handle this.
thread = (self._GetOrCreateProcess(event['pid'])
.GetOrCreateThread(event['tid']))
thread.BeginSlice(event['cat'],
event['name'],
event['ts'] / 1000.0,
args=event.get('args'))
thread.EndSlice(event['ts'] / 1000.0)
def _ProcessSampleEvent(self, event):
thread = (self._GetOrCreateProcess(event['pid'])
.GetOrCreateThread(event['tid']))
thread.AddSample(event['cat'],
event['name'],
event['ts'] / 1000.0,
event.get('args'))
def _ProcessFlowEvent(self, event):
thread = (self._GetOrCreateProcess(event['pid'])
.GetOrCreateThread(event['tid']))
self._all_flow_events.append({
'event': event,
'thread': thread})
def ImportEvents(self):
''' Walks through the events_ list and outputs the structures discovered to
model_.
'''
for event in self._events:
phase = event.get('ph', None)
if phase == 'B' or phase == 'E':
self._ProcessDurationEvent(event)
elif phase == 'X':
self._ProcessCompleteEvent(event)
elif phase == 'S' or phase == 'F' or phase == 'T':
self._ProcessAsyncEvent(event)
# Note, I is historic. The instant event marker got changed, but we
# want to support loading old trace files so we have both I and i.
elif phase == 'I' or phase == 'i':
self._ProcessInstantEvent(event)
elif phase == 'P':
self._ProcessSampleEvent(event)
elif phase == 'C':
self._ProcessCounterEvent(event)
elif phase == 'M':
self._ProcessMetadataEvent(event)
elif phase == 'N' or phase == 'D' or phase == 'O':
self._ProcessObjectEvent(event)
elif phase == 's' or phase == 't' or phase == 'f':
self._ProcessFlowEvent(event)
else:
self._model.import_errors.append('Unrecognized event phase: ' +
phase + '(' + event['name'] + ')')
return self._model
def FinalizeImport(self):
'''Called by the Model after all other importers have imported their
events.'''
self._model.UpdateBounds()
# We need to reupdate the bounds in case the minimum start time changes
self._model.UpdateBounds()
self._CreateAsyncSlices()
self._CreateFlowSlices()
self._SetBrowserProcess()
self._CreateExplicitObjects()
self._CreateImplicitObjects()
self._CreateTabIdsToThreadsMap()
def _CreateAsyncSlices(self):
if len(self._all_async_events) == 0:
return
self._all_async_events.sort(
cmp=lambda x, y: int(x['event']['ts'] - y['event']['ts']))
async_event_states_by_name_then_id = {}
all_async_events = self._all_async_events
for async_event_state in all_async_events:
event = async_event_state['event']
name = event.get('name', None)
if name is None:
self._model.import_errors.append(
            'Async events (ph: S, T or F) require a name parameter.')
continue
event_id = event.get('id')
if event_id is None:
self._model.import_errors.append(
'Async events (ph: S, T or F) require an id parameter.')
continue
# TODO(simonjam): Add a synchronous tick on the appropriate thread.
if event['ph'] == 'S':
if not name in async_event_states_by_name_then_id:
async_event_states_by_name_then_id[name] = {}
if event_id in async_event_states_by_name_then_id[name]:
self._model.import_errors.append(
'At %d, a slice of the same id %s was already open.' % (
event['ts'], event_id))
continue
async_event_states_by_name_then_id[name][event_id] = []
async_event_states_by_name_then_id[name][event_id].append(
async_event_state)
else:
if name not in async_event_states_by_name_then_id:
self._model.import_errors.append(
'At %d, no slice named %s was open.' % (event['ts'], name,))
continue
if event_id not in async_event_states_by_name_then_id[name]:
self._model.import_errors.append(
'At %d, no slice named %s with id=%s was open.' % (
event['ts'], name, event_id))
continue
events = async_event_states_by_name_then_id[name][event_id]
events.append(async_event_state)
if event['ph'] == 'F':
# Create a slice from start to end.
async_slice = tracing_async_slice.AsyncSlice(
events[0]['event']['cat'],
name,
events[0]['event']['ts'] / 1000.0)
async_slice.duration = ((event['ts'] / 1000.0)
- (events[0]['event']['ts'] / 1000.0))
async_slice.start_thread = events[0]['thread']
async_slice.end_thread = async_event_state['thread']
if async_slice.start_thread == async_slice.end_thread:
if 'tts' in event and 'tts' in events[0]['event']:
async_slice.thread_start = events[0]['event']['tts'] / 1000.0
async_slice.thread_duration = ((event['tts'] / 1000.0)
- (events[0]['event']['tts'] / 1000.0))
async_slice.id = event_id
async_slice.args = events[0]['event']['args']
# Create sub_slices for each step.
for j in xrange(1, len(events)):
sub_name = name
if events[j - 1]['event']['ph'] == 'T':
sub_name = name + ':' + events[j - 1]['event']['args']['step']
sub_slice = tracing_async_slice.AsyncSlice(
events[0]['event']['cat'],
sub_name,
events[j - 1]['event']['ts'] / 1000.0)
sub_slice.parent_slice = async_slice
sub_slice.duration = ((events[j]['event']['ts'] / 1000.0)
- (events[j - 1]['event']['ts'] / 1000.0))
sub_slice.start_thread = events[j - 1]['thread']
sub_slice.end_thread = events[j]['thread']
if sub_slice.start_thread == sub_slice.end_thread:
if 'tts' in events[j]['event'] and \
'tts' in events[j - 1]['event']:
sub_slice.thread_duration = \
((events[j]['event']['tts'] / 1000.0)
- (events[j - 1]['event']['tts'] / 1000.0))
sub_slice.id = event_id
sub_slice.args = events[j - 1]['event']['args']
async_slice.AddSubSlice(sub_slice)
# The args for the finish event go in the last sub_slice.
last_slice = async_slice.sub_slices[-1]
for arg_name, arg_value in event['args'].iteritems():
last_slice.args[arg_name] = arg_value
# Add |async_slice| to the start-thread's async_slices.
async_slice.start_thread.AddAsyncSlice(async_slice)
del async_event_states_by_name_then_id[name][event_id]
def _CreateExplicitObjects(self):
# TODO(tengs): Implement object instance parsing
pass
def _CreateImplicitObjects(self):
# TODO(tengs): Implement object instance parsing
pass
def _CreateFlowSlices(self):
if len(self._all_flow_events) == 0:
return
self._all_flow_events.sort(
cmp=lambda x, y: int(x['event']['ts'] - y['event']['ts']))
flow_id_to_event = {}
for data in self._all_flow_events:
event = data['event']
thread = data['thread']
if 'name' not in event:
self._model.import_errors.append(
'Flow events (ph: s, t or f) require a name parameter.')
continue
if 'id' not in event:
self._model.import_errors.append(
'Flow events (ph: s, t or f) require an id parameter.')
continue
flow_event = tracing_flow_event.FlowEvent(
event['cat'],
event['id'],
event['name'],
event['ts'] / 1000.0,
event['args'])
thread.AddFlowEvent(flow_event)
if event['ph'] == 's':
if event['id'] in flow_id_to_event:
self._model.import_errors.append(
'event id %s already seen when encountering start of'
'flow event.' % event['id'])
continue
flow_id_to_event[event['id']] = flow_event
elif event['ph'] == 't' or event['ph'] == 'f':
if not event['id'] in flow_id_to_event:
self._model.import_errors.append(
'Found flow phase %s for id: %s but no flow start found.' % (
event['ph'], event['id']))
continue
flow_position = flow_id_to_event[event['id']]
self._model.flow_events.append([flow_position, flow_event])
if event['ph'] == 'f':
del flow_id_to_event[event['id']]
else:
# Make this event the next start event in this flow.
flow_id_to_event[event['id']] = flow_event
def _SetBrowserProcess(self):
for thread in self._model.GetAllThreads():
if thread.name == 'CrBrowserMain':
self._model.browser_process = thread.parent
def _CheckTraceBufferOverflow(self):
for process in self._model.GetAllProcesses():
if process.trace_buffer_did_overflow:
raise TraceBufferOverflowException(
'Trace buffer of process with pid=%d overflowed at timestamp %d. '
'Raw trace data:\n%s' %
(process.pid, process.trace_buffer_overflow_event.start,
repr(self._events)))
def _CreateTabIdsToThreadsMap(self):
# Since _CreateTabIdsToThreadsMap() relies on markers output on timeline
    # tracing data, it may not work in case we have trace events dropped due to
# trace buffer overflow.
self._CheckTraceBufferOverflow()
tab_ids_list = []
for metadata in self._model.metadata:
if metadata['name'] == 'tabIds':
tab_ids_list = metadata['value']
break
for tab_id in tab_ids_list:
timeline_markers = self._model.FindTimelineMarkers(tab_id)
assert(len(timeline_markers) == 1)
assert(timeline_markers[0].start_thread ==
timeline_markers[0].end_thread)
self._model.AddMappingFromTabIdToRendererThread(
tab_id, timeline_markers[0].start_thread)
| bsd-3-clause | -855,325,235,855,090,400 | 36.618852 | 80 | 0.59342 | false | 3.764971 | false | false | false |
tdaylan/pcat | pcat/comp_rtag.py | 1 | 2871 | from __init__ import *
def comp(nameplot):
if nameplot == 'DS_Store':
return
nameplot = nameplot[:-4]
print 'comp() working on ' + nameplot + '...'
cmnd = 'mkdir -p ' + pathimag + 'comprtag/' + nameplot + '/'
#print cmnd
os.system(cmnd)
#print
strgplot = nameplot.split('/')[-1]
cmndconv = 'convert -density 300'
for line in listline:
namedest = pathimag + 'comprtag/' + nameplot + '/' + strgplot + '_' + line + '.pdf'
if not os.path.isfile(namedest):
strgorig = '%s' % pathimag + line + '/' + nameplot + '.pdf'
cmnd = 'cp %s %s' % (strgorig, namedest)
#print cmnd
os.system(cmnd)
#print
else:
pass
print 'File already exists...'
#print
cmndconv += ' ' + namedest
cmndconv += ' ' + pathimag + 'comprtag/' + nameplot + '/merg.pdf'
os.system(cmndconv)
#print
print 'comp_rtag() initialized...'
pathimag = os.environ["PCAT_DATA_PATH"] + '/imag/'
cmnd = 'mkdir -p %s' % (os.environ["PCAT_DATA_PATH"] + '/imag/comprtag')
os.system(cmnd)
print 'Listing the available runs...'
cmnd = 'ls %s | xargs -n 1 basename > %s/listrtag.txt' % (pathimag, pathimag)
print cmnd
os.system(cmnd)
pathlist = pathimag + 'listrtag.txt'
with open(pathlist) as thisfile:
listline = thisfile.readlines()
listline = [x.strip() for x in listline]
if len(sys.argv) > 2:
listline = sys.argv[1:]
else:
strgsrch = sys.argv[1]
listline = fnmatch.filter(listline, strgsrch)
print 'listline'
for line in listline:
print line
namerefr = listline[0]
pathrefr = os.environ["PCAT_DATA_PATH"] + '/imag/' + namerefr + '/'
print 'Iterating over folders of the reference run...'
for namefrst in os.listdir(pathrefr):
namefrstprim = os.path.join(pathrefr, namefrst)
if os.path.isdir(namefrstprim):
for nameseco in os.listdir(namefrstprim):
if nameseco == 'fram':
continue
namesecoprim = os.path.join(namefrstprim, nameseco)
if os.path.isdir(namesecoprim):
for namethrd in os.listdir(namesecoprim):
namethrdprim = os.path.join(namesecoprim, namethrd)
if os.path.isdir(namethrdprim):
for namefrth in os.listdir(namethrdprim):
namefrthprim = os.path.join(namethrdprim, namefrth)
comp(namefrthprim.split(pathrefr)[-1])
else:
comp(namethrdprim.split(pathrefr)[-1])
elif nameseco.endswith('pdf') or nameseco.endswith('gif'):
comp(namesecoprim.split(pathrefr)[-1])
elif namefrst.endswith('pdf') or namefrst.endswith('gif'):
comp(namefrstprim.split(pathrefr)[-1])
| mit | 4,369,272,736,650,746,400 | 29.542553 | 91 | 0.568443 | false | 3.281143 | false | false | false |
Cerberus98/lightshowpi | tools/sync_file_generator.py | 4 | 7604 | # sync file generator for lightshowpi
# run usage
#
# python sync_file_generator.py
#
# Enter y to confirm that you wish to run this
# Enter the path to the folder containing your audio files
# along with the sync files it will also generate a playlist file
# enter the path to this playlist file in your overrides.cfg and
# lightshowpi will use this as your new playlist
import decoder
import glob
import mutagen
import numpy as np
import os
import sys
import wave
HOME_DIR = os.getenv("SYNCHRONIZED_LIGHTS_HOME")
if not HOME_DIR:
print("Need to setup SYNCHRONIZED_LIGHTS_HOME environment variable, "
"see readme")
sys.exit()
# hack to get the configuration_manager and fft modules to load
# from a different directory
path = list(sys.path)
# insert script location and configuration_manager location into path
sys.path.insert(0, HOME_DIR + "/py")
# import the configuration_manager and fft now that we can
import fft
import configuration_manager as cm
#### reusing code from synchronized_lights.py
#### no need to reinvent the wheel
_CONFIG = cm.CONFIG
GPIOLEN = len([int(pin) for pin in _CONFIG.get('hardware',
'gpio_pins').split(',')])
_MIN_FREQUENCY = _CONFIG.getfloat('audio_processing', 'min_frequency')
_MAX_FREQUENCY = _CONFIG.getfloat('audio_processing', 'max_frequency')
try:
_CUSTOM_CHANNEL_MAPPING = \
[int(channel) for channel in _CONFIG.get('audio_processing',\
'custom_channel_mapping').split(',')]
except:
_CUSTOM_CHANNEL_MAPPING = 0
try:
_CUSTOM_CHANNEL_FREQUENCIES = [int(channel) for channel in
_CONFIG.get('audio_processing',
'custom_channel_frequencies').split(',')]
except:
_CUSTOM_CHANNEL_FREQUENCIES = 0
CHUNK_SIZE = 2048 # Use a multiple of 8 (move this to config)
def calculate_channel_frequency(min_frequency,
max_frequency,
custom_channel_mapping,
custom_channel_frequencies):
"""
Calculate frequency values
Calculate frequency values for each channel,
taking into account custom settings.
"""
# How many channels do we need to calculate the frequency for
if custom_channel_mapping != 0 and len(custom_channel_mapping) == GPIOLEN:
channel_length = max(custom_channel_mapping)
else:
channel_length = GPIOLEN
octaves = (np.log(max_frequency / min_frequency)) / np.log(2)
octaves_per_channel = octaves / channel_length
frequency_limits = []
frequency_store = []
frequency_limits.append(min_frequency)
if custom_channel_frequencies != 0 and (len(custom_channel_frequencies)
>= channel_length + 1):
frequency_limits = custom_channel_frequencies
else:
for i in range(1, GPIOLEN + 1):
frequency_limits.append(frequency_limits[-1]
* 10 ** (3 /
(10 * (1 / octaves_per_channel))))
for i in range(0, channel_length):
frequency_store.append((frequency_limits[i], frequency_limits[i + 1]))
# we have the frequencies now lets map them if custom mapping is defined
if custom_channel_mapping != 0 and len(custom_channel_mapping) == GPIOLEN:
frequency_map = []
for i in range(0, GPIOLEN):
mapped_channel = custom_channel_mapping[i] - 1
mapped_frequency_set = frequency_store[mapped_channel]
mapped_frequency_set_low = mapped_frequency_set[0]
mapped_frequency_set_high = mapped_frequency_set[1]
frequency_map.append(mapped_frequency_set)
return frequency_map
else:
return frequency_store
def cache_song(song_filename):
"""Play the next song from the play list (or --file argument)."""
# Initialize FFT stats
matrix = [0 for _ in range(GPIOLEN)] # get length of gpio and assign it to a variable
# Set up audio
if song_filename.endswith('.wav'):
musicfile = wave.open(song_filename, 'r')
else:
musicfile = decoder.open(song_filename)
sample_rate = musicfile.getframerate()
num_channels = musicfile.getnchannels()
song_filename = os.path.abspath(song_filename)
# create empty array for the cache_matrix
cache_matrix = np.empty(shape=[0, GPIOLEN])
cache_filename = \
os.path.dirname(song_filename) + "/." + os.path.basename(song_filename) + ".sync"
# The values 12 and 1.5 are good estimates for first time playing back
# (i.e. before we have the actual mean and standard deviations
# calculated for each channel).
mean = [12.0 for _ in range(GPIOLEN)]
std = [1.5 for _ in range(GPIOLEN)]
# Process audio song_filename
row = 0
data = musicfile.readframes(CHUNK_SIZE) # move chunk_size to configuration_manager
frequency_limits = calculate_channel_frequency(_MIN_FREQUENCY,
_MAX_FREQUENCY,
_CUSTOM_CHANNEL_MAPPING,
_CUSTOM_CHANNEL_FREQUENCIES)
while data != '':
# No cache - Compute FFT in this chunk, and cache results
matrix = fft.calculate_levels(data, CHUNK_SIZE, sample_rate, frequency_limits, GPIOLEN)
# Add the matrix to the end of the cache
cache_matrix = np.vstack([cache_matrix, matrix])
# Read next chunk of data from music song_filename
data = musicfile.readframes(CHUNK_SIZE)
row = row + 1
# Compute the standard deviation and mean values for the cache
for i in range(0, GPIOLEN):
std[i] = np.std([item for item in cache_matrix[:, i] if item > 0])
mean[i] = np.mean([item for item in cache_matrix[:, i] if item > 0])
# Add mean and std to the top of the cache
cache_matrix = np.vstack([mean, cache_matrix])
cache_matrix = np.vstack([std, cache_matrix])
# Save the cache using numpy savetxt
np.savetxt(cache_filename, cache_matrix)
#### end reuse
def main():
    print "Do you want to generate sync files"
print
print "This could take a while if you have a lot of songs"
question = raw_input("Would you like to proceed? (Y to continue) :")
    if question not in ["y", "Y"]:
sys.exit(0)
location = raw_input("Enter the path to the folder of songs:")
location += "/"
sync_list = list()
audio_file_types = ["*.mp3", "*.mp4",
"*.m4a", "*.m4b",
"*.aac", "*.ogg",
"*.flac", "*.oga",
"*.wma", "*.wav"]
for file_type in audio_file_types:
sync_list.extend(glob.glob(location + file_type))
playlistFile = open(location + "playlist", "w")
for song in sync_list:
print "Generating sync file for",song
cache_song(song)
print "cached"
metadata = mutagen.File(song, easy=True)
if "title" in metadata:
title = metadata["title"][0]
else:
title = os.path.splitext(os.path.basename(song))[0].strip()
title = title.replace("_", " ")
title = title.replace("-", " - ")
playlistFile.write(title + "\t" + song + "\n")
playlistFile.close()
print "All Finished."
print "A playlist was also generated"
print location + "playlist"
sys.path[:] = path
if __name__ == "__main__":
main()
| bsd-2-clause | 2,666,866,845,903,510,000 | 34.203704 | 95 | 0.604945 | false | 3.974909 | true | false | false |
tlangerak/Multi-Agent-Systems | build/lib.win32-2.7/spade/Envelope.py | 3 | 10208 | # -*- coding: utf-8 -*-
import types
import AID
try:
import json
except ImportError:
import simplejson as json
class Envelope:
"""
FIPA envelope
"""
def __init__(self, to=None, _from=None, comments=None, aclRepresentation=None, payloadLength=None, payloadEncoding=None, date=None, encrypted=None, intendedReceiver=None, received=None, transportBehaviour=None, userDefinedProperties=None, jsonstring=None):
self.to = list()
if to is not None:
for i in to:
if isinstance(i, AID.aid):
self.to.append(i) # aid
self._from = None
if _from is not None and isinstance(_from, AID.aid):
self._from = _from # aid
if comments is not None:
self.comments = comments # str
else:
self.comments = None
if aclRepresentation is not None:
self.aclRepresentation = aclRepresentation # str
else:
self.aclRepresentation = None
if payloadLength is not None:
self.payloadLength = payloadLength # int
else:
self.payloadLength = None
if payloadEncoding is not None:
self.payloadEncoding = payloadEncoding # str
else:
self.payloadEncoding = None
if date is not None:
self.date = date # list(datetime)
else:
self.date = None
if encrypted is not None:
self.encrypted = encrypted # list(str)
else:
self.encrypted = list()
if intendedReceiver is not None:
self.intendedReceiver = intendedReceiver # list(aid)
else:
self.intendedReceiver = list()
if received is not None:
self.received = received # list(ReceivedObject)
else:
self.received = None
if transportBehaviour is not None:
self.transportBehaviour = transportBehaviour # list(?)
else:
self.transportBehaviour = list()
if userDefinedProperties is not None:
self.userDefinedProperties = userDefinedProperties # list(properties)
else:
self.userDefinedProperties = list()
if jsonstring:
self.loadJSON(jsonstring)
def getTo(self):
return self.to
def addTo(self, to):
self.to.append(to)
self.addIntendedReceiver(to)
def getFrom(self):
return self._from
def setFrom(self, _from):
self._from = _from
def getComments(self):
return self.comments
def setComments(self, comments):
self.comments = comments
def getAclRepresentation(self):
return self.aclRepresentation
def setAclRepresentation(self, acl):
self.aclRepresentation = acl
def getPayloadLength(self):
return self.payloadLength
def setPayloadLength(self, pl):
self.payloadLength = pl
def getPayloadEncoding(self):
return self.payloadEncoding
def setPayloadEncoding(self, pe):
self.payloadEncoding = pe
def getDate(self):
return self.date
def setDate(self, date):
self.date = date
def getEncryted(self):
return self.encrypted
def setEncryted(self, encrypted):
self.encrypted = encrypted
def getIntendedReceiver(self):
return self.intendedReceiver
def addIntendedReceiver(self, intended):
if not intended in self.intendedReceiver:
self.intendedReceiver.append(intended)
def getReceived(self):
return self.received
def setReceived(self, received):
self.received = received
def __str__(self):
return self.asXML()
def asXML(self):
"""
returns a printable version of the envelope in XML
"""
r = '<?xml version="1.0"?>' + "\n"
r = r + "\t\t<envelope> \n"
r = r + '\t\t\t<params index="1">' + "\n"
r = r + "\t\t\t\t<to>\n"
for aid in self.to:
r = r + "\t\t\t\t\t<agent-identifier> \n"
r = r + "\t\t\t\t\t\t<name>" + aid.getName() + "</name> \n"
r = r + "\t\t\t\t\t\t<addresses>\n"
for addr in aid.getAddresses():
r = r + "\t\t\t\t\t\t\t<url>" + addr + "</url>\n"
r = r + "\t\t\t\t\t\t</addresses> \n"
r = r + "\t\t\t\t\t</agent-identifier>\n"
r = r + "\t\t\t\t</to> \n"
if self._from:
r = r + "\t\t\t\t<from> \n"
r = r + "\t\t\t\t\t<agent-identifier> \n"
r = r + "\t\t\t\t\t\t<name>" + self._from.getName() + "</name> \n"
r = r + "\t\t\t\t\t\t<addresses>\n"
for addr in self._from.getAddresses():
r = r + "\t\t\t\t\t\t\t<url>" + addr + "</url>\n"
r = r + "\t\t\t\t\t\t</addresses> \n"
r = r + "\t\t\t\t\t</agent-identifier> \n"
r = r + "\t\t\t\t</from>\n"
if self.aclRepresentation:
r = r + "\t\t\t\t<acl-representation>" + self.aclRepresentation + "</acl-representation>\n"
if self.payloadLength:
r = r + "\t\t\t\t<payload-length>" + str(self.payloadLength) + "</payload-length>\n"
if self.payloadEncoding:
r = r + "\t\t\t\t<payload-encoding>" + self.payloadEncoding + "</payload-encoding>\n"
if self.date:
r = r + "\t\t\t\t<date>" + str(self.date) + "</date>\n"
if self.intendedReceiver:
r = r + "\t\t\t\t<intended-receiver>\n"
for aid in self.intendedReceiver:
r = r + "\t\t\t\t\t<agent-identifier> \n"
r = r + "\t\t\t\t\t\t<name>" + aid.getName() + "</name> \n"
r = r + "\t\t\t\t\t\t<addresses>\n"
for addr in aid.getAddresses():
r = r + "\t\t\t\t\t\t\t<url>" + addr + "</url>\n"
r = r + "\t\t\t\t\t\t</addresses> \n"
r = r + "\t\t\t\t\t</agent-identifier>\n"
r = r + "\t\t\t\t</intended-receiver> \n"
if self.received:
r = r + "\t\t\t\t<received>\n"
if self.received.getBy():
r = r + '\t\t\t\t\t<received-by value="' + self.received.getBy() + '"/>\n'
if self.received.getDate():
r = r + '\t\t\t\t\t<received-date value="' + str(self.received.getDate()) + '"/>\n'
if self.received.getId():
r = r + '\t\t\t\t\t<received-id value="' + self.received.getId() + '"/>\n'
r = r + "\t\t\t\t</received>\n"
r = r + "\t\t\t</params> \n"
r = r + "\t\t</envelope>\n"
return r
def asJSON(self):
"""
returns a printable version of the envelope in JSON
"""
r = "{"
r = r + '"to":['
for aid in self.to:
r = r + '{'
r = r + '"name":"' + aid.getName() + '",'
r = r + '"addresses":['
for addr in aid.getAddresses():
r = r + '"' + addr + '",'
if r[-1:] == ",": r = r[:-1]
r = r + "]"
r = r + "},"
if r[-1:] == ",": r = r[:-1]
r = r + "],"
if self._from:
r = r + '"from":{'
r = r + '"name":"' + self._from.getName() + '",'
r = r + '"addresses":['
for addr in self._from.getAddresses():
r = r + '"' + addr + '",'
if r[-1:] == ",": r = r[:-1]
r = r + "]},"
if self.aclRepresentation:
r = r + '"acl-representation":"' + self.aclRepresentation + '",'
if self.payloadLength:
r = r + '"payload-length":"' + str(self.payloadLength) + '",'
if self.payloadEncoding:
r = r + '"payload-encoding":"' + self.payloadEncoding + '",'
if self.date:
r = r + '"date":"' + str(self.date) + '",'
if self.intendedReceiver:
r = r + '"intended-receiver":['
for aid in self.intendedReceiver:
r = r + "{"
r = r + '"name":"' + aid.getName() + '",'
r = r + '"addresses":['
for addr in aid.getAddresses():
r = r + '"' + addr + '",'
if r[-1:] == ",": r = r[:-1]
r = r + "],"
if r[-1:] == ",": r = r[:-1]
r = r + "},"
if r[-1:] == ",": r = r[:-1]
r = r + "],"
if self.received:
r = r + '"received":{'
if self.received.getBy():
r = r + '"received-by":"' + self.received.getBy() + '",'
if self.received.getDate():
r = r + '"received-date":"' + str(self.received.getDate()) + '",'
if self.received.getId():
r = r + '"received-id":"' + self.received.getId() + '"'
if r[-1:] == ",": r = r[:-1]
r = r + "}"
if r[-1:] == ",": r = r[:-1]
r = r + "}"
return r
def loadJSON(self, jsonstring):
"""
loads a JSON string in the envelope
"""
r = json.loads(jsonstring)
if "to" in r:
for a in r["to"]:
aid = AID.aid()
aid.setName(a["name"])
for addr in a["addresses"]:
aid.addAddress(addr)
self.addTo(aid)
if "from" in r:
aid = AID.aid()
aid.setName(r["from"]["name"])
for addr in r["from"]["addresses"]:
aid.addAddress(addr)
self.setFrom(aid)
if "acl-representation" in r:
self.setAclRepresentation(r["acl-representation"])
if "payload-length" in r:
self.setPayloadLength(r["payload-length"])
if "payload-encoding" in r:
self.setPayloadEncoding(r["payload-encoding"])
if "date" in r:
self.setDate(r["date"])
if "intended-receiver" in r:
for ag in r["intended-receiver"]:
aid = AID.aid()
aid.setName(ag["name"])
for addr in ag["addresses"]:
aid.addAddress(addr)
self.addIntendedReceiver(aid)
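# Minimal usage sketch (assumption, not part of the original module); the aid
# calls mirror those used in loadJSON() above:
#
#   receiver = AID.aid()
#   receiver.setName("agent@host")
#   receiver.addAddress("http://host:8080/acc")
#   env = Envelope(to=[receiver])
#   env.setAclRepresentation("fipa.acl.rep.string.std")
#   print env.asJSON()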
| lgpl-2.1 | 9,053,603,676,128,585,000 | 33.958904 | 260 | 0.480701 | false | 3.564246 | false | false | false |
flapjax/FlapJack-Cogs | wordclouds/wordclouds.py | 1 | 11985 | import aiohttp
import asyncio
import discord
import functools
import re
from io import BytesIO
import numpy as np
import os
from PIL import Image
from redbot.core import Config, checks, commands
from redbot.core.utils.chat_formatting import box, pagify
from redbot.core.data_manager import cog_data_path
from wordcloud import WordCloud as WCloud
from wordcloud import ImageColorGenerator
# Special thanks to co-author aikaterna for pressing onward
# with this cog when I had lost motivation!
URL_RE = re.compile(
r"([\w+]+\:\/\/)?([\w\d-]+\.)*[\w-]+[\.\:]\w+([\/\?\=\&\#]?[\w-]+)*\/?", flags=re.I
)
# https://stackoverflow.com/questions/6038061/regular-expression-to-find-urls-within-a-string
class WordClouds(commands.Cog):
"""Word Clouds"""
def __init__(self, bot):
self.bot = bot
self.session = aiohttp.ClientSession()
self.conf = Config.get_conf(self, identifier=3271169074)
default_guild_settings = {
"bg_color": "black",
"maxwords": 200,
"excluded": [],
"mask": None,
"colormask": False,
}
self.conf.register_guild(**default_guild_settings)
self.mask_folder = str(cog_data_path(raw_name="WordClouds")) + "/masks"
if not os.path.exists(self.mask_folder):
os.mkdir(self.mask_folder)
# Clouds can really just be stored in memory at some point
def cog_unload(self):
self.bot.loop.create_task(self.session.close())
async def red_delete_data_for_user(self, **kwargs):
"""Nothing to delete."""
return
async def _list_masks(self, ctx):
masks = sorted(os.listdir(self.mask_folder))
if len(masks) == 0:
return await ctx.send(
"No masks found. Place masks in the bot's data folder for WordClouds or add one with `{}wcset maskfile`.".format(
ctx.prefix
)
)
msg = "Here are the image masks you have installed:\n"
for mask in masks:
msg += f"{mask}\n"
for page in pagify(msg, delims=["\n"]):
await ctx.send(box(page, lang="ini"))
@commands.guild_only()
@commands.command(name="wordcloud", aliases=["wc"])
@commands.cooldown(1, 15, commands.BucketType.guild)
async def wordcloud(self, ctx, *argv):
"""Generate a wordcloud. Optional arguments are channel, user, and
message limit (capped at 10,000)."""
author = ctx.author
channel = ctx.channel
user = None
limit = 10000
# a bit clunky, see if Red has already implemented converters
channel_converter = commands.TextChannelConverter()
member_converter = commands.MemberConverter()
for arg in argv:
try:
channel = await channel_converter.convert(ctx, arg)
continue
except discord.ext.commands.BadArgument:
pass
try:
user = await member_converter.convert(ctx, arg)
continue
except discord.ext.commands.BadArgument:
pass
if arg.isdecimal() and int(arg) <= 10000:
limit = int(arg)
guild = channel.guild
# Verify that wordcloud requester is not being a sneaky snek
if not channel.permissions_for(author).read_messages or guild != ctx.guild:
await ctx.send("\N{SMIRKING FACE} Nice try.")
return
# Default settings
mask = None
coloring = None
width = 800
height = 600
mode = "RGB"
bg_color = await self.conf.guild(guild).bgcolor()
if bg_color == "clear":
mode += "A"
bg_color = None
max_words = await self.conf.guild(guild).maxwords()
if max_words == 0:
max_words = 200
excluded = await self.conf.guild(guild).excluded()
if not excluded:
excluded = None
mask_name = await self.conf.guild(guild).mask()
if mask_name is not None:
mask_file = f"{self.mask_folder}/{mask_name}"
try:
mask = np.array(Image.open(mask_file))
except FileNotFoundError:
await ctx.send(
"I could not load your mask file. It may "
"have been deleted. `{}wcset clearmask` "
"may resolve this.".format(ctx.prefix)
)
return
if await self.conf.guild(guild).colormask():
coloring = ImageColorGenerator(mask)
kwargs = {
"mask": mask,
"color_func": coloring,
"mode": mode,
"background_color": bg_color,
"max_words": max_words,
"stopwords": excluded,
"width": width,
"height": height,
}
msg = "Generating wordcloud for **" + guild.name + "/" + channel.name
if user is not None:
msg += "/" + user.display_name
msg += "** using the last {} messages. (this might take a while)".format(limit)
await ctx.send(msg)
text = ""
try:
async for message in channel.history(limit=limit):
if not message.author.bot:
if user is None or user == message.author:
text += message.clean_content + " "
text = URL_RE.sub("", text)
except discord.errors.Forbidden:
await ctx.send("Wordcloud creation failed. I can't see that channel!")
return
if not text or text.isspace():
await ctx.send(
"Wordcloud creation failed. I couldn't find "
"any words. You may have entered a very small "
"message limit, or I may not have permission "
"to view message history in that channel."
)
return
task = functools.partial(self.generate, text, **kwargs)
task = self.bot.loop.run_in_executor(None, task)
try:
image = await asyncio.wait_for(task, timeout=45)
except asyncio.TimeoutError:
await ctx.send("Wordcloud creation timed out.")
return
await ctx.send(file=discord.File(image))
@staticmethod
def generate(text, **kwargs):
# Designed to be run in executor to avoid blocking
wc = WCloud(**kwargs)
wc.generate(text)
file = BytesIO()
file.name = "wordcloud.png"
wc.to_file(file)
file.seek(0)
return file
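    # Example (assumption, not part of the original cog): generate() can also be
    # used standalone with the same kwargs the command builds above:
    #
    #   buf = WordClouds.generate("some sample text", width=800, height=600,
    #                             background_color="black", max_words=200)
    #   with open("wordcloud.png", "wb") as f:
    #       f.write(buf.read())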
@commands.guild_only()
@commands.group(name="wcset")
@checks.mod_or_permissions(administrator=True)
async def wcset(self, ctx):
"""WordCloud image settings"""
pass
@wcset.command(name="listmask")
async def _wcset_listmask(self, ctx):
"""List image files available for masking"""
await self._list_masks(ctx)
@wcset.command(name="maskfile")
async def _wcset_maskfile(self, ctx, filename: str):
"""Set local image file for masking
- place masks in the cog's data folder/masks/"""
guild = ctx.guild
mask_path = f"{self.mask_folder}/{filename}"
if not os.path.isfile(mask_path):
print(mask_path)
await ctx.send("That's not a valid filename.")
return await self._list_masks(ctx)
await self.conf.guild(guild).mask.set(filename)
await ctx.send("Mask set to {}.".format(filename))
@wcset.command(name="upload")
@checks.is_owner()
async def _wcset_upload(self, ctx, url: str = None):
"""Upload an image mask through Discord"""
user = ctx.author
guild = ctx.guild
attachments = ctx.message.attachments
emoji = ("\N{WHITE HEAVY CHECK MARK}", "\N{CROSS MARK}")
if len(attachments) > 1 or (attachments and url):
await ctx.send("Please add one image at a time.")
return
if attachments:
filename = attachments[0].filename
filepath = f"{self.mask_folder}/{filename}"
try:
await attachments[0].save(filepath)
            except Exception:
                await ctx.send("Saving attachment failed.")
                return
elif url:
filename = url.split("/")[-1].replace("%20", "_")
filepath = f"{self.mask_folder}/{filename}"
async with self.session.get(url) as new_image:
# Overwrite file if it exists
f = open(str(filepath), "wb")
f.write(await new_image.read())
f.close()
else:
await ctx.send(
"You must provide either a Discord attachment " "or a direct link to an image"
)
return
msg = await ctx.send(
"Mask {} added. Set as current mask for this server?".format(filename)
)
await msg.add_reaction(emoji[0])
await asyncio.sleep(0.5)
await msg.add_reaction(emoji[1])
def check(r, u):
return u == user and r.message.id == msg.id and r.emoji == emoji[0]
try:
await self.bot.wait_for("reaction_add", timeout=60.0, check=check)
await self.conf.guild(guild).mask.set(filename)
await ctx.send("Mask for this server set to uploaded file.")
except asyncio.TimeoutError:
# Can add an timeout message, but not really necessary
# as clearing the reactions is sufficient feedback
pass
finally:
await msg.clear_reactions()
@wcset.command(name="clearmask")
async def _wcset_clearmask(self, ctx):
"""Clear image file for masking"""
guild = ctx.guild
await self.conf.guild(guild).mask.set(None)
await ctx.send("Mask set to None.")
@wcset.command(name="colormask")
async def _wcset_colormask(self, ctx, on_off: bool = None):
"""Turn color masking on/off"""
guild = ctx.guild
if await self.conf.guild(guild).colormask():
await self.conf.guild(guild).colormask.set(False)
await ctx.send("Color masking turned off.")
else:
await self.conf.guild(guild).colormask.set(True)
await ctx.send("Color masking turned on.")
@wcset.command(name="bgcolor")
async def _wcset_bgcolor(self, ctx, color: str):
"""Set background color. Use 'clear' for transparent."""
# No checks for bad colors yet
guild = ctx.guild
await self.conf.guild(guild).bgcolor.set(color)
await ctx.send("Background color set to {}.".format(color))
@wcset.command(name="maxwords")
async def _wcset_maxwords(self, ctx, count: int):
"""Set maximum number of words to appear in the word cloud
        Set to 0 for the default (200)."""
# No checks for bad values yet
guild = ctx.guild
await self.conf.guild(guild).maxwords.set(count)
await ctx.send("Max words set to {}.".format(str(count)))
@wcset.command(name="exclude")
async def _wcset_exclude(self, ctx, word: str):
"""Add a word to the excluded list.
This overrides the default excluded list!"""
guild = ctx.guild
excluded = await self.conf.guild(guild).excluded()
if word in excluded:
await ctx.send("'{}' is already in the excluded words.".format(word))
return
excluded.append(word)
await self.conf.guild(guild).excluded.set(excluded)
await ctx.send("'{}' added to excluded words.".format(word))
@wcset.command(name="clearwords")
async def _wcset_clearwords(self, ctx):
"""Clear the excluded word list.
Default excluded list will be used."""
guild = ctx.guild
await self.conf.guild(guild).excluded.set([])
await ctx.send("Cleared the excluded word list.")
| mit | -1,817,683,374,265,702,000 | 34.45858 | 129 | 0.570296 | false | 4.054465 | false | false | false |
arthurbarnard/Dirac_simulation | plot_dirac.py | 1 | 3286 | import matplotlib.pyplot as plt
import numpy as np
import time
import pickle
from matplotlib.path import Path
from dirac_sheet import dirac_sheet
def main():
fig = plt.figure()
    filename = 'E:/pyDirac/collimator_output_full_absorb'
# make these smaller to increase the resolution
dx = .25
dt = 0.1
Ngrid = int(1802)
X_offset=112.5
# generate 2 2d grids for the x & y bounds
#y, x = np.ogrid[slice(-5, 5 + dy, dy),
# slice(-3, 3 + dx, dx)]
#y, x = np.mgrid[slice((0-round(Ngrid/2))*dy,(Ngrid-round(Ngrid/2))*dy,dy),
# slice((0-round(Ngrid/2))*dx+112.25,(Ngrid-round(Ngrid/2))*dx+112.25,dx)]
plt.ion()
plt.clf()
ax = plt.plot()
# tic = time.time()
myDirac=dirac_sheet(0,901,dt,dx,X_offset,0)
myDirac.set_p(.5,np.pi/4.0)
# x, y = myDirac.get_pos_mat()
# NoPropMat = np.zeros(x.shape,dtype=np.uint8)
# print myDirac.p0
#define part of injector shape
#poly_verts = np.array([[0,37],[70,12.5],[70,1001],[0,1001],[0,37]])
#NoPropMat[inPolygon(poly_verts,x,y)]=1
# mirror across x-axis and make CCW
# poly_verts[:,1]*=-1
# poly_verts[:,1]-=.125
# poly_verts[:,:]=poly_verts[::-1,:]
# NoPropMat[inPolygon(poly_verts,x,y)]=1
# NoPropMat[((x<140.26)&(x>140)&(y>12.5))]=1
# NoPropMat[((x<140.26)&(x>140)&(y<-12.5))]=1
# AbsMat = np.zeros(x.shape)
# AbsMat[x>205]=.99
# AbsMat[(x>70)&(x<141)&(y>40)]=.99
# AbsMat[(x>70)&(x<141)&(y<-40)]=.99
# DriveMat=np.zeros(x.shape)
# DriveMat[(x>0)&(x<1)&(y>-36)&(y<36)]=1
# myDirac.set_No_prop_mat(NoPropMat)
# myDirac.set_Absorb_mat(AbsMat)
# myDirac.set_Drive_mat(DriveMat)
data=np.load(filename)
u1=data['u1']
# u2=data['u2']
v1=data['v1']
# v2=data['v2']
# plt.clf()
#z limits
z_min, z_max = -0.3, 0.3 #np.min(np.real(myDirac.u1)), np.max(np.real(myDirac.u1))
print np.max(np.real((myDirac.u10*np.conj(myDirac.v10))+(myDirac.v10*np.conj(myDirac.u10))).T)
print np.max(np.imag((myDirac.u10*np.conj(myDirac.v10))-(myDirac.v10*np.conj(myDirac.u10))).T)
print myDirac.theta
plt.imshow(np.real(u1),cmap='RdBu', vmin=z_min, vmax=z_max,
#plt.imshow(np.real(v1*np.conj(v1)).T+np.real(u2*np.conj(u2)).T+0*np.real(v1*np.conj(v1)).T+0*np.real(v2*np.conj(v2)).T, cmap='hot', vmin=z_min, vmax=z_max,
#plt.imshow(np.imag((myDirac.u10*np.conj(myDirac.v10))-(myDirac.v10*np.conj(myDirac.u10))).T, cmap='RdBu', vmin=z_min, vmax=z_max,
#plt.imshow(np.real(((u1+u2)*np.conj(v1+v2))+((v1+v2)*np.conj(u1+u2))).T, cmap='RdBu', vmin=z_min, vmax=z_max,
#plt.imshow(np.imag(np.gradient(u2)[1]*(np.conj(u2))-u2*np.gradient(np.conj(u2))[1]).T, cmap='RdBu', vmin=z_min, vmax=z_max,
#extent=[x.min(), x.max(), y.min(), y.max()],
interpolation='nearest', origin='lower')
#plt.quiver(np.imag(np.gradient(u2)[0]*(np.conj(u2))-u2*np.gradient(np.conj(u2))[0]).T,np.imag(np.gradient(u2)[1]*(np.conj(u2))-u2*np.gradient(np.conj(u2))[1]).T)
#plt.title('image (interp. nearest)')
#plt.colorbar()
fig.canvas.draw()
plt.ioff()
# This line was moved up <----
plt.draw()
plt.show()
def inPolygon(poly_verts,x,y):
ss=x.shape
x1, y1 = x.flatten(), y.flatten()
points = np.vstack((x1,y1)).T
path = Path(poly_verts)
grid = path.contains_points(points, radius=.01)
grid = grid.reshape(ss)
return grid
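# Illustrative check (assumption, not part of the original script): inPolygon()
# returns a boolean mask with the same shape as the coordinate grids, e.g.
#
#   yy, xx = np.mgrid[0:3, 0:3]
#   square = np.array([[0.5, 0.5], [1.5, 0.5], [1.5, 1.5], [0.5, 1.5], [0.5, 0.5]])
#   mask = inPolygon(square, xx, yy)   # True only at grid point (1, 1)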
if __name__ == '__main__':
main() | gpl-3.0 | 6,279,819,993,380,902,000 | 25.296 | 163 | 0.62538 | false | 2.102367 | false | false | false |
feliposz/learning-stuff | python/ps2a.py | 1 | 1119 | # Problem Set 2 - Problem 3
# Author: Felipo Soranz
# Time: ~ 30 Minutes
# For the Diophantine equation (i.e. a, b, c, n are all positive integers):
# a * 6 + b * 9 + c * 20 = n
# Try to find the largest possible value of n for which the equation has no solution
count_possibles = 0
max_impossible = 0
for i in range(1,150):
c = 0
impossible = True
while c * 20 <= i and impossible:
b = 0
while b * 9 <= i and impossible:
a = 0
while a * 6 <= i and impossible:
total = a * 6 + b * 9 + c * 20
if total == i:
impossible = False
a += 1
b += 1
c += 1
if impossible:
max_impossible = i
count_possibles = 0
else:
count_possibles += 1
    # If six consecutive values of n are all solvable, every larger n is
    # solvable too: adding one more pack of 6 reaches n + 6, n + 7, and so on.
    # Ex: if 50..55 are all solvable, then 56 = 50 + 6, 57 = 51 + 6, etc.
if count_possibles == 6:
break
print("Largest number of McNuggest that cannot be bought in exact quantity:",
max_impossible)
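# Sanity check (not part of the original solution): for packs of 6, 9 and 20
# the well-known answer is 43, so the script should report 43 above.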
| mit | -5,938,834,110,778,184,000 | 28.447368 | 77 | 0.520107 | false | 3.586538 | false | false | false |
Toto-Azero/Wikipedia | pywikibot/ipday.py | 1 | 1805 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (C) Toto Azéro, 2011-2015
#
# Distribué sous licence GNU GPLv3
# Distributed under the terms of the GNU GPLv3 license
# http://www.gnu.org/licenses/gpl.html
#
import pywikibot, ip
from pywikibot import pagegenerators
import os, datetime, shutil, locale, codecs
locale.setlocale(locale.LC_ALL, 'fr_FR.utf8')
site = pywikibot.Site()
#lesMois = {
#1 : u"janvier",
#2 : u"février",
#3 : u"mars",
#4 : u"avril",
#5 : u"mai",
#6 : u"juin",
#7 : u"juillet",
#8 : u"août",
#9 : u"septembre",
#10 : u"octobre",
#11 : u"novembre",
#12 : u"décembre"
#}
dateDuJour = datetime.date.today()
## Date d'il y a un an et un jour
dateFichierDuJour = dateDuJour + datetime.timedelta(days=-366)
#annee = int(dateFichierDuJour.strftime("%Y"))
#mois = int(dateFichierDuJour.strftime("%B"))
#jour = int(dateFichierDuJour.strftime("%d"))
cwd = os.getcwd()
os.chdir('/data/project/totoazero/pywikibot')
nomFichierDuJour = u"Dates messages IP/%s/%i" % (dateFichierDuJour.strftime("%Y/%m").decode('utf-8'), int(dateFichierDuJour.strftime('%d')))
if int(dateFichierDuJour.strftime('%d')) == 1:
nomFichierDuJour += u"er"
## Copy the file to be sure not to lose it
## by not processing the original
#nomFichierSauvegarde = u"%s - sauvegarde" % nomFichierDuJour
#os.chdir('/home/totoazero/')
#shutil.copy(nomFichierDuJour, nomFichierSauvegarde)
fichierDuJour = codecs.open(nomFichierDuJour, 'r')
listeIP = fichierDuJour.read().split()
fichierDuJour.close()
os.chdir(cwd)
listePdDIP = []
for num_ip in listeIP:
listePdDIP.append(pywikibot.Page(site, u"Discussion utilisateur:%s" % num_ip))
#print listePdDIP
pywikibot.output(u"Nombre de pages à traiter : %i" % len(listePdDIP))
gen = pagegenerators.PreloadingGenerator(listePdDIP)
IPBot = ip.IPBot(gen, False)
IPBot.run()
| gpl-3.0 | 4,224,437,962,589,099,000 | 24.309859 | 140 | 0.706733 | false | 2.280457 | false | false | false |
skunkwerks/netinf | python/nilib/niforward.py | 1 | 13774 | #!/usr/bin/env python
"""
@package nilib
@file niforward.py
@brief CL-independent NetInf routing module
@version Copyright (C) 2013 SICS Swedish ICT AB
This is an adjunct to the NI URI library developed as
part of the SAIL project. (http://sail-project.eu)
Specification(s) - note, versions may change
- http://tools.ietf.org/html/draft-farrell-decade-ni-10
- http://tools.ietf.org/html/draft-hallambaker-decade-ni-params-03
- http://tools.ietf.org/html/draft-kutscher-icnrg-netinf-proto-00
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===========================================================================
@code
Revision History
================
Version Date Author Notes
0.1 08/11/2013 Bengt Ahlgren Implemented default route as a start
@endcode
"""
#==============================================================================#
#=== Standard modules for Python 2.[567].x distributions ===
import os
import stat
import sys
import socket
import threading
#import itertools
import logging
#import shutil
import json
import random
import tempfile
#import re
#import time
#import datetime
#import textwrap
# try:
# from cStringIO import StringIO
# except ImportError:
# from StringIO import StringIO
# import cgi
import urllib
import urllib2
# import hashlib
# import xml.etree.ElementTree as ET
# import base64
import email.parser
import email.message
# import magic
# import DNS
# import qrcode
#=== Local package modules ===
from netinf_ver import NETINF_VER, NISERVER_VER
from ni import NIname, NIdigester, NIproc, NI_SCHEME, NIH_SCHEME, ni_errs, ni_errs_txt
from metadata import NetInfMetaData
DEBUG = True
def dprint(who, string):
"""
@brief Debug print function
"""
if DEBUG:
print "DEBUG({}): {}".format(who, string)
return
#==============================================================================#
# CONSTANTS
# Enumerate convergence layers (CLs) - used for NextHop.cl_type
NICLHTTP = 1
#NICLDTN = 2
#NICLUDP = 3
# Enumerate router features - used in set NetInfRouterCore.features
NIFWDNAME = 1 # ni-name-based forwarding
NIFWDLOOKUPHINTS = 2 # perform additional routing hint lookup
NIFWDHINT = 3 # hint-based forwarding
NIFWDDEFAULT = 4 # default forwarding
#==============================================================================#
# CLASSES
class NextHop:
"""
@brief Class for one nexthop entry
"""
def __init__(self, cl_type, nexthop_address):
self.cl_type = cl_type
self.cl_address = nexthop_address
# May want other info here, for example, pointers to methods
# for queuing a message for output, or a pointer to a CL class
# that has methods for the CL
return
class NextHopTable(dict):
"""
@brief Class for a table with nexthops, mapping an index to a nexthop entry
"""
# Choosing the dictionary type for now - may not be the best wrt
# performance? convert to list?
def __setitem__(self, index, entry):
"""
@brief add an entry to the nexthop table
@param index integer index of the entry to add
@param entry NextHop next hop entry to add
@return (none)
"""
if not isinstance(index, int):
raise TypeError("'index' needs to be of type 'int'")
if not isinstance(entry, NextHop):
raise TypeError("'entry' needs to be of type 'NextHop'")
if index in self:
dprint("NextHopTable.__setitem__", "Overwriting index {}".format(index))
dict.__setitem__(self, index, entry)
return
# Note: the type of a routing hint is assumed to be an ascii string
# There might be reasons to change to integer for the lookup in the
# forwarding table, but for now it seems simplest to just use the
# ASCII string from the GET message directly without any conversion
class HintForwardTable(dict):
"""
@brief Class for a routing hint forwarding table
"""
def __setitem__(self, hint, nexthop_index):
"""
@brief add a forwarding entry
@param hint string the routing hint to add
@param nexthop_index integer the index of the next hop to use
@return (none)
"""
if not isinstance(hint, str):
raise TypeError("'hint' needs to be of type 'str'")
if not isinstance(nexthop_index, int):
raise TypeError("'nexthop_index' needs to be of type 'int'")
if hint in self:
dprint("HintForwardTable.__setitem__",
"Overwriting entry for hint {}".format(str(hint)))
dict.__setitem__(self, hint, nexthop_index)
return
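# Minimal configuration sketch (assumption, not part of the original module):
#
#   nh_table = NextHopTable()
#   nh_table[0] = NextHop(NICLHTTP, "nexthop.example.org:8080")
#   hint_table = HintForwardTable()
#   hint_table["hint-a"] = 0
#   next_hop = nh_table[hint_table["hint-a"]]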
class NetInfRouterCore:
def __init__(self, config, logger, features):
self.logger = logger
self.features = features # Set of features
# Initialise next hop table and default
# TODO: get info from config instead of letting parent set things up
self.nh_table = NextHopTable()
self.nh_default = -1
return
# These lookup functions could instead for flexibility be
# implemented as part of separate classes that are configured as
# some sort of plug-ins.
def do_name_forward_lookup(self, message, meta, incoming_handle):
return [] # XXX
def do_lookup_hints(self, message, meta, incoming_handle):
pass # XXX
def do_hint_forward_lookup(self, message, meta, incoming_handle):
return [] # XXX
# This method has the main forwarding logic
def do_forward_nexthop(self, msgid, uri, ext, incoming_handle = None):
"""
@brief perform forwarding functions to select next hop(s) for the
@brief message and call CL to forward to the selected next hop(s)
@param msgid str message id of NetInf message
@param uri str uri format ni name for NDO
@param ext str ext field of NetInf message
        @param incoming_handle object XXX - handle to the connection
               receiving the message
        @return bool success status
        @return NetInfMetaData metadata of the response - to be returned
                to the source of the message
        @return str filename of a file with any response content
"""
next_hops = []
# XXX - all but NIFWDDEFAULT very sketchy...
if NIFWDNAME in self.features: # if ni-name forwarding
next_hops = self.do_name_forward_lookup(uri, ext, incoming_handle)
# XXX - should extract hints from ext, and then add possible new hints
if (next_hops == []) and (NIFWDLOOKUPHINTS in self.features):
# can do_lookup_hints just add hints to the meta
# variable??? or should it add to the message itself (ext
# parameter)???
self.do_lookup_hints(uri, ext, incoming_handle)
if (next_hops == []) and (NIFWDHINT in self.features):
next_hops = self.do_hint_forward(uri, ext, incoming_handle)
if (next_hops == []) and (NIFWDDEFAULT in self.features):
if self.nh_default != -1:
next_hops = [ self.nh_table[self.nh_default] ]
if next_hops != []:
# we have some next hops - call appropriate CL to send
# outgoing message; need to go through some next hop
# structure that is initialised by niserver at startup
status, metadata, filename = do_get_fwd(self.logger, next_hops,
uri, ext)
return status, metadata, filename
else:
            return False, None, None
#--------------------------------------------------------------------------#
# copied and adapted from nifwd.py. / bengta
#
# the actual forwarding should be independent from how the next hops
# are computed so that different schemes can be accomodated.
#
# TODO (later...):
# - next-hop state to reuse existing next-hop connection
# - composing and sending a message should be extracted to another
# library function (common to other code), also the router code
# needs some sort of output queues
#
def do_get_fwd(logger,nexthops,uri,ext):
"""
@brief fwd a request and wait for a response (with timeout)
@param nexthops list a list with next hops to try forwarding to
@param uri str the ni name from the GET message
@param ext str the ext field from the GET message
@return 3-tuple (bool - True if successful,
NetInfMetaData instance with object metadata
str - filename of file with NDO content)
"""
logger.info("Inside do_fwd");
metadata=None
fname=""
for nexthop in nexthops:
# send form along
logger.info("checking via %s" % nexthop.cl_address)
# Only http CL for now...
if nexthop.cl_type != NICLHTTP:
continue
# Generate NetInf form access URL
http_url = "http://%s/netinfproto/get" % nexthop.cl_address
try:
# Set up HTTP form data for get request
new_msgid = random.randint(1, 32000) # need new msgid!
form_data = urllib.urlencode({ "URI": uri,
"msgid": new_msgid,
"ext": ext})
except Exception, e:
logger.info("do_get_fwd: to %s form encoding exception: %s"
% (nexthop.cl_address,str(e)));
continue
# Send POST request to destination server
try:
# Set up HTTP form data for netinf fwd'd get request
http_object = urllib2.urlopen(http_url, form_data, 30)
except Exception, e:
logger.info("do_fwd: to %s http POST exception: %s" %
(nexthop.cl_address,str(e)));
continue
# Get HTTP result code
http_result = http_object.getcode()
# Get message headers - an instance of email.Message
http_info = http_object.info()
obj_length_str = http_info.getheader("Content-Length")
if (obj_length_str != None):
obj_length = int(obj_length_str)
else:
obj_length = None
# Read results into buffer
# Would be good to try and do this better...
# if the object is large we will run into problems here
payload = http_object.read()
http_object.close()
# The results may be either:
        # - a single application/json MIME item carrying metadata of the object
        # - a two-part multipart/mixed object with the metadata and the content (of whatever type)
# Parse the MIME object
# Verify length and digest if HTTP result code was 200 - Success
if (http_result != 200):
logger.info("do_fwd: weird http status code %d" % http_result)
continue
if ((obj_length != None) and (len(payload) != obj_length)):
logger.info("do_fwd: weird lengths payload=%d and obj=%d" %
(len(payload),obj_length))
continue
buf_ct = "Content-Type: %s\r\n\r\n" % http_object.headers["content-type"]
buf = buf_ct + payload
msg = email.parser.Parser().parsestr(buf)
parts = msg.get_payload()
if msg.is_multipart():
if len(parts) != 2:
logger.info("do_fwd: funny number of parts: %d" % len(parts))
continue
json_msg = parts[0]
ct_msg = parts[1]
try:
temp_fd,fname=tempfile.mkstemp();
f = os.fdopen(temp_fd, "w")
f.write(ct_msg.get_payload())
f.close()
except Exception,e:
logger.info("do_fwd: file crap: %s" % str(e))
return True,metadata,fname
else:
json_msg = msg
ct_msg = None
# Extract JSON values from message
# Check the message is a application/json
if json_msg.get("Content-type") != "application/json":
logger.info("do_fwd: weird content type: %s" %
json_msg.get("Content-type"))
continue
# Extract the JSON structure
try:
json_report = json.loads(json_msg.get_payload())
except Exception, e:
logger.info("do_fwd: can't decode json: %s" % str(e));
continue
curi=NIname(uri)
curi.validate_ni_url()
metadata = NetInfMetaData(curi.get_canonical_ni_url())
logger.info("Metadata I got: %s" % str(json_report))
metadata.insert_resp_metadata(json_report) # will do json.loads again...
# removed GET_RES handling present in do_get_fwd in nifwd.py / bengta
# all good break out of loop
break
# make up stuff to return
# print "do_fwd: success"
if metadata is None:
return False, metadata, fname
return True,metadata,fname
#----------------------------------------------------------------------
#
# main program for testing
if __name__ == "__main__":
print len(sys.argv)
if len(sys.argv) > 1:
print int(sys.argv[1])
# well, doesn't do anything...
| apache-2.0 | -1,378,632,209,066,269,400 | 33.17866 | 94 | 0.588936 | false | 4.147546 | false | false | false |
caioariede/django-location-field | tests/test.py | 1 | 1682 | from django.test import TestCase
from django.conf import settings
from location_field.apps import DefaultConfig
from tests.models import Place
from tests.forms import LocationForm
from pyquery import PyQuery as pq
import json
import location_field
class LocationFieldTest(TestCase):
def test_plain(self):
vals = {
'city': 'Bauru',
'location': '-22.2878573,-49.0905487',
}
obj = Place.objects.create(**vals)
self.assertEqual(obj.city, 'Bauru')
self.assertEqual(obj.location, '-22.2878573,-49.0905487')
def test_settings(self):
with self.settings(LOCATION_FIELD={'map.provider': 'foobar'}):
app_config = DefaultConfig('location_field', location_field)
app_config.patch_settings()
self.assertEqual(settings.LOCATION_FIELD.get('map.provider'),
'foobar')
def test_field_options(self):
form = LocationForm(initial={})
d = pq(str(form))
opts = json.loads(d('[data-location-field-options]').attr(
'data-location-field-options'))
location_field_opts = settings.LOCATION_FIELD
for key, value in location_field_opts.items():
self.assertEqual(value, opts[key])
def test_custom_resources(self):
form = LocationForm(initial={})
self.assertIn('form.js', str(form.media))
with self.settings(LOCATION_FIELD={
'resources.media': {'js': ['foo.js', 'bar.js']}}):
self.assertIn('foo.js', str(form.media))
self.assertIn('bar.js', str(form.media))
if settings.TEST_SPATIAL:
from . import spatial_test
| mit | 3,618,242,750,608,257,500 | 27.508475 | 73 | 0.617717 | false | 3.875576 | true | false | false |
lantianlz/zx | common/capty.py | 1 | 2800 | # -*- coding: utf-8 -*-
"""This tries to do more or less the same thing as CutyCapt, but as a
python module.
This is a derived work from CutyCapt: http://cutycapt.sourceforge.net/
////////////////////////////////////////////////////////////////////
//
// CutyCapt - A Qt WebKit Web Page Rendering Capture Utility
//
// Copyright (C) 2003-2010 Bjoern Hoehrmann <[email protected]>
//
// This program is free software; you can redistribute it and/or
// modify it under the terms of the GNU General Public License
// as published by the Free Software Foundation; either version 2
// of the License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// $Id$
//
////////////////////////////////////////////////////////////////////
"""
import sys
from PyQt4 import QtCore, QtGui, QtWebKit
class Capturer(object):
"""A class to capture webpages as images"""
def __init__(self, url, filename):
self.url = url
self.filename = filename
self.saw_initial_layout = False
self.saw_document_complete = False
def loadFinishedSlot(self):
self.saw_document_complete = True
if self.saw_initial_layout and self.saw_document_complete:
self.doCapture()
def initialLayoutSlot(self):
self.saw_initial_layout = True
if self.saw_initial_layout and self.saw_document_complete:
self.doCapture()
def capture(self):
"""Captures url as an image to the file specified"""
self.wb = QtWebKit.QWebPage()
self.wb.mainFrame().setScrollBarPolicy(
QtCore.Qt.Horizontal, QtCore.Qt.ScrollBarAlwaysOff)
self.wb.mainFrame().setScrollBarPolicy(
QtCore.Qt.Vertical, QtCore.Qt.ScrollBarAlwaysOff)
self.wb.loadFinished.connect(self.loadFinishedSlot)
self.wb.mainFrame().initialLayoutCompleted.connect(
self.initialLayoutSlot)
self.wb.mainFrame().load(QtCore.QUrl(self.url))
def doCapture(self):
#print "Capturando"
self.wb.setViewportSize(self.wb.mainFrame().contentsSize())
img = QtGui.QImage(self.wb.viewportSize(), QtGui.QImage.Format_ARGB32)
#print self.wb.viewportSize()
painter = QtGui.QPainter(img)
self.wb.mainFrame().render(painter)
painter.end()
img.save(self.filename)
QtCore.QCoreApplication.instance().quit()
if __name__ == "__main__":
"""Run a simple capture"""
print sys.argv
app = QtGui.QApplication(sys.argv)
c = Capturer(sys.argv[1], sys.argv[2])
c.capture()
app.exec_()
| gpl-2.0 | -537,168,439,815,580,860 | 32.333333 | 78 | 0.635 | false | 3.835616 | false | false | false |
malvikasharan/APRICOT | apricotlib/rna_related_cdd_domain.py | 1 | 5632 | #!/usr/bin/env python
# Description = RNA related CDD domain
import re
class RnaRelatedCDDSelection(object):
'''classification of data'''
def __init__(self, keywords_file,
cdd_whole_data_file,
interpro_mapped_cdd,
domain_data_path):
self._keywords_file = keywords_file
self._cdd_whole_data_file = cdd_whole_data_file
self._interpro_mapped_cdd = interpro_mapped_cdd
self._domain_data_path = domain_data_path
self.cdd_whole_data_list = []
self._cdd_dict = {}
self._mapped_cdd_members = {}
def select_cdd_domains(self):
        '''Run the RNA-related CDD domain selection workflow.'''
self.read_keyword_file()
self.read_interpro_mapped_cdd_file()
self.read_cdd_whole_data_file()
self.create_rna_related_domain_file()
def read_keyword_file(self):
'''reads keywords for domain selection'''
self._keyword_list = [rna_keyword.strip()
for rna_keyword in open(
self._keywords_file, 'r')]
return self._keyword_list
def read_interpro_mapped_cdd_file(self):
'''Parses interpro cdd mapped file to extract common information'''
with open(self._interpro_mapped_cdd, 'r') as in_fh:
for entry in in_fh:
ipr_id = entry.strip().split('\t')[0]
ipr_members = entry.strip().split('\t')[1]
cdd_id = entry.strip().split('\t')[2]
cdd_member = entry.strip().split('\t')[3]
domain_length = entry.strip().split('\t')[4]
self._mapped_cdd_members[cdd_member] = ipr_members
        return self._mapped_cdd_members
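    # Expected input format for the mapped-CDD file (assumed example line,
    # inferred from the parsing above; columns are tab separated):
    #   <ipr_id> <ipr_members> <cdd_id> <cdd_member> <domain_length>
    #   e.g. IPR000001  PF00051,SM00130  cd00108  KR  88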
def read_cdd_whole_data_file(self):
'''Parses CDD annotation data from table'''
with open(self._cdd_whole_data_file, 'r') as cdd_data_fh:
for cdd_entry in cdd_data_fh:
if 'smart' in cdd_entry.split('\t')[1]:
cdd_entry = cdd_entry.replace(
cdd_entry.split('\t')[1], 'SM'+cdd_entry.split(
'\t')[1].split('smart')[-1])
elif 'pfam' in cdd_entry.split('\t')[1]:
cdd_entry = cdd_entry.replace(
cdd_entry.split('\t')[1], 'PF'+cdd_entry.split(
'\t')[1].split('pfam')[-1])
cdd_domain = cdd_entry.split('\t')[1]
if cdd_domain in self._mapped_cdd_members.keys():
members = self._mapped_cdd_members[cdd_domain]
else:
members = 'NA'
self.cdd_whole_data_list.append(
cdd_entry.strip())
self._cdd_dict[cdd_entry.strip()] = cdd_entry.strip(
).split('\t')[3]
return self.cdd_whole_data_list, self._cdd_dict
def create_rna_related_domain_file(self):
'''Creates RNA related domain list'''
self._keyword_annotation_dict = {}
self._rna_related_domain = []
for cdd_entry in self.cdd_whole_data_list:
for keyword in self._keyword_list:
if ' ' in keyword:
key_list = []
for each_key in keyword.split(' '):
key_list.append(each_key)
match = re.search(r'\b%s*|\W%s\b'%(key_list[0].lower(),
key_list[1].lower()),
self._cdd_dict[cdd_entry])
if match:
self._keyword_annotation_dict.setdefault(
keyword, []).append(cdd_entry)
else:
match = re.search(r'\b%s\b'%keyword, self._cdd_dict[cdd_entry])
if match:
self._keyword_annotation_dict.setdefault(
keyword, []).append(cdd_entry)
        for keyword in self._keyword_list:
            fkeyword = keyword.replace(' ', '_')
            with open(self._domain_data_path+'/'+fkeyword+'_related_cdd_ids.tab',
                      'w') as keyword_specific_domain:
                if self._keyword_annotation_dict.get(keyword):
                    for each_entry in self._keyword_annotation_dict[keyword]:
each_entry = each_entry.replace(
each_entry.split('\t')[3], " ".join(
each_entry.split('\t')[3].split())).replace(';', ',')
cdd_domain = each_entry.split('\t')[1]
if cdd_domain in set(self._mapped_cdd_members.keys()):
members = self._mapped_cdd_members[cdd_domain]
else:
members = 'NA'
keyword_specific_domain.write('%s\t%s\t%s\n'%(
'\t'.join(each_entry.split('\t')[0:-1]), members,
each_entry.split('\t')[-1]))
self._rna_related_domain.append(('%s\t%s\t%s'%(
'\t'.join(each_entry.split('\t')[0:-1]), members,
each_entry.split('\t')[-1])))
uniq_rna_related_domains = list(set(self._rna_related_domain))
with open(self._domain_data_path+'/all_rna_related_cdd_data.tab',
'w') as rna_related_domain_file:
for domain_entry in uniq_rna_related_domains:
rna_related_domain_file.write('%s\n'%str(domain_entry))
| isc | -6,527,694,381,438,963,000 | 46.728814 | 85 | 0.47745 | false | 3.897578 | false | false | false |
foursquare/pants | contrib/node/src/python/pants/contrib/node/subsystems/node_distribution.py | 1 | 8456 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import filecmp
import logging
import os
import shutil
from pants.base.deprecated import deprecated_conditional
from pants.base.exceptions import TaskError
from pants.binaries.binary_tool import NativeTool
from pants.option.custom_types import dir_option, file_option
from pants.util.dirutil import safe_mkdir, safe_rmtree
from pants.util.memo import memoized_method, memoized_property
from pants.contrib.node.subsystems.command import command_gen
from pants.contrib.node.subsystems.package_managers import (PACKAGE_MANAGER_NPM,
PACKAGE_MANAGER_YARNPKG,
PACKAGE_MANAGER_YARNPKG_ALIAS,
VALID_PACKAGE_MANAGERS,
PackageManagerNpm,
PackageManagerYarnpkg)
from pants.contrib.node.subsystems.yarnpkg_distribution import YarnpkgDistribution
logger = logging.getLogger(__name__)
class NodeDistribution(NativeTool):
"""Represents a self-bootstrapping Node distribution."""
options_scope = 'node-distribution'
name = 'node'
default_version = 'v6.9.1'
archive_type = 'tgz'
@classmethod
def subsystem_dependencies(cls):
# Note that we use a YarnpkgDistribution scoped to the NodeDistribution, which may itself
# be scoped to a task.
return (super(NodeDistribution, cls).subsystem_dependencies() +
(YarnpkgDistribution.scoped(cls), ))
@classmethod
def register_options(cls, register):
super(NodeDistribution, cls).register_options(register)
register('--package-manager', advanced=True, default='npm', fingerprint=True,
choices=VALID_PACKAGE_MANAGERS,
help='Default package manager config for repo. Should be one of {}'.format(
VALID_PACKAGE_MANAGERS))
register('--eslint-setupdir', advanced=True, type=dir_option, fingerprint=True,
help='Find the package.json and yarn.lock under this dir '
'for installing eslint and plugins.')
register('--eslint-config', advanced=True, type=file_option, fingerprint=True,
help='The path to the global eslint configuration file specifying all the rules')
register('--eslint-ignore', advanced=True, type=file_option, fingerprint=True,
help='The path to the global eslint ignore path')
register('--eslint-version', default='4.15.0', fingerprint=True,
help='Use this ESLint version.')
@memoized_method
def _get_package_managers(self):
npm = PackageManagerNpm([self._install_node])
yarnpkg = PackageManagerYarnpkg([self._install_node, self._install_yarnpkg])
return {
PACKAGE_MANAGER_NPM: npm,
PACKAGE_MANAGER_YARNPKG: yarnpkg,
PACKAGE_MANAGER_YARNPKG_ALIAS: yarnpkg, # Allow yarn to be used as an alias for yarnpkg
}
def get_package_manager(self, package_manager=None):
package_manager = package_manager or self.get_options().package_manager
package_manager_obj = self._get_package_managers().get(package_manager)
if not package_manager_obj:
      raise TaskError(
        'Unknown package manager: {}.\nValid values are {}.'.format(
          package_manager, VALID_PACKAGE_MANAGERS))
return package_manager_obj
@memoized_method
def version(self, context=None):
# The versions reported by node and embedded in distribution package names are 'vX.Y.Z'.
# TODO: After the deprecation cycle is over we'll expect the values of the version option
# to already include the 'v' prefix, so there will be no need to normalize, and we can
# delete this entire method override.
version = super(NodeDistribution, self).version(context)
deprecated_conditional(
lambda: not version.startswith('v'), entity_description='', removal_version='1.7.0.dev0',
hint_message='value of --version in scope {} must be of the form '
'vX.Y.Z'.format(self.options_scope))
return version if version.startswith('v') else 'v' + version
@classmethod
def _normalize_version(cls, version):
# The versions reported by node and embedded in distribution package names are 'vX.Y.Z' and not
# 'X.Y.Z'.
return version if version.startswith('v') else 'v' + version
@memoized_property
def eslint_setupdir(self):
return self.get_options().eslint_setupdir
@memoized_property
def eslint_version(self):
return self.get_options().eslint_version
@memoized_property
def eslint_config(self):
return self.get_options().eslint_config
@memoized_property
def eslint_ignore(self):
return self.get_options().eslint_ignore
@memoized_method
def _install_node(self):
"""Install the Node distribution from pants support binaries.
:returns: The Node distribution bin path.
:rtype: string
"""
node_package_path = self.select()
# Todo: https://github.com/pantsbuild/pants/issues/4431
# This line depends on repacked node distribution.
# Should change it from 'node/bin' to 'dist/bin'
node_bin_path = os.path.join(node_package_path, 'node', 'bin')
return node_bin_path
@memoized_method
def _install_yarnpkg(self):
"""Install the Yarnpkg distribution from pants support binaries.
:returns: The Yarnpkg distribution bin path.
:rtype: string
"""
yarnpkg_package_path = YarnpkgDistribution.scoped_instance(self).select()
yarnpkg_bin_path = os.path.join(yarnpkg_package_path, 'dist', 'bin')
return yarnpkg_bin_path
def node_command(self, args=None, node_paths=None):
"""Creates a command that can run `node`, passing the given args to it.
:param list args: An optional list of arguments to pass to `node`.
:param list node_paths: An optional list of paths to node_modules.
:returns: A `node` command that can be run later.
:rtype: :class:`NodeDistribution.Command`
"""
# NB: We explicitly allow no args for the `node` command unlike the `npm` command since running
# `node` with no arguments is useful, it launches a REPL.
return command_gen([self._install_node], 'node', args=args, node_paths=node_paths)
def _configure_eslinter(self, bootstrapped_support_path):
logger.debug('Copying {setupdir} to bootstrapped dir: {support_path}'
.format(setupdir=self.eslint_setupdir,
support_path=bootstrapped_support_path))
safe_rmtree(bootstrapped_support_path)
shutil.copytree(self.eslint_setupdir, bootstrapped_support_path)
return True
_eslint_required_files = ['yarn.lock', 'package.json']
def eslint_supportdir(self, task_workdir):
""" Returns the path where the ESLint is bootstrapped.
:param string task_workdir: The task's working directory
:returns: The path where ESLint is bootstrapped and whether or not it is configured
:rtype: (string, bool)
"""
bootstrapped_support_path = os.path.join(task_workdir, 'eslint')
# TODO(nsaechao): Should only have to check if the "eslint" dir exists in the task_workdir
# assuming fingerprinting works as intended.
# If the eslint_setupdir is not provided or missing required files, then
# clean up the directory so that Pants can install a pre-defined eslint version later on.
# Otherwise, if there is no configurations changes, rely on the cache.
# If there is a config change detected, use the new configuration.
if self.eslint_setupdir:
configured = all(os.path.exists(os.path.join(self.eslint_setupdir, f))
for f in self._eslint_required_files)
else:
configured = False
if not configured:
safe_mkdir(bootstrapped_support_path, clean=True)
else:
try:
installed = all(filecmp.cmp(
os.path.join(self.eslint_setupdir, f), os.path.join(bootstrapped_support_path, f))
for f in self._eslint_required_files)
except OSError:
installed = False
if not installed:
self._configure_eslinter(bootstrapped_support_path)
return bootstrapped_support_path, configured
| apache-2.0 | 5,834,333,962,453,197,000 | 41.492462 | 99 | 0.680345 | false | 3.955098 | true | false | false |
moloney/pathmap | tests/test_pathmap.py | 1 | 8162 | import nose
from nose.tools import *
from tempfile import mkdtemp
import shutil, os, sys
from os.path import join, split
#Make sure we test the local source code rather than the installed copy
test_dir = os.path.dirname(__file__)
src_dir = os.path.normpath(os.path.join(test_dir, '..'))
sys.path.insert(0, src_dir)
import pathmap
class TestMakeRegexRule():
known_results = {'.+':
{'hello': ['hello'],
'something.txt': ['something.txt'],
'': pathmap.NoMatch,
},
'(.+)\.(.+)':
{'something.txt': ['something.txt',
'something',
'txt'
],
'image_001.dcm': ['image_001.dcm',
'image_001',
'dcm'
],
'something': pathmap.NoMatch,
},
'image_([0-9]+)\.dcm':
{'image_001.dcm': ['image_001.dcm',
'001'
],
'image_1.dcm': ['image_1.dcm',
'1'
],
'image_one.dcm': pathmap.NoMatch,
'image_001.dc': pathmap.NoMatch,
}
}
def test_known_results(self):
for match_regex, tests in self.known_results.iteritems():
match_rule = pathmap.make_regex_rule(match_regex)
for input_str, results in tests.iteritems():
assert(match_rule(input_str, None) == results)
def build_dir(base_dir, paths_at_level):
for level in paths_at_level:
for path in level:
if split(path)[1].split('-')[-1].startswith('dir'):
os.mkdir(join(base_dir, path))
else:
tmpfile = open(join(base_dir, path), 'a')
tmpfile.close()
class TestSimpleRules():
paths_at_level = [['level0-dir'],
[join('level0-dir', 'level1-file1'),
join('level0-dir', 'level1-file2'),
join('level0-dir', 'level1-dir1'),
join('level0-dir', 'level1-dir2'),
],
[join('level0-dir', 'level1-dir1', 'level2-file1'),
join('level0-dir', 'level1-dir1', 'level2-file2'),
join('level0-dir', 'level1-dir1', 'level2-dir1'),
join('level0-dir', 'level1-dir2', 'level2-dir2'),
join('level0-dir', 'level1-dir2', 'level2-file3')
],
[join('level0-dir', 'level1-dir1', 'level2-dir1',
'level3-file1'),
join('level0-dir', 'level1-dir1', 'level2-dir1',
'level3-dir1'),
],
[join('level0-dir', 'level1-dir1', 'level2-dir1',
'level3-dir1', 'level4-file1'),
],
]
def setup(self):
self.init_dir = os.getcwd()
self.test_dir = mkdtemp()
build_dir(self.test_dir, self.paths_at_level)
os.chdir(self.test_dir)
def tearDown(self):
os.chdir(self.init_dir)
shutil.rmtree(self.test_dir)
def test_min_depth(self):
for i in range(len(self.paths_at_level)):
pm = pathmap.PathMap(depth=(i, None))
matches = list(pm.matches('level0-dir'))
total_paths = 0
for j in range(i, len(self.paths_at_level)):
total_paths += len(self.paths_at_level[j])
for path in self.paths_at_level[j]:
assert(any(path == m.path for m in matches))
if len(matches) != total_paths:
print i
print [m.path for m in matches]
assert(len(matches) == total_paths)
def test_max_depth(self):
for i in range(len(self.paths_at_level)):
pm = pathmap.PathMap(depth=(0, i))
matches = list(pm.matches('level0-dir'))
total_paths = 0
for j in range(0, i+1):
total_paths += len(self.paths_at_level[j])
for path in self.paths_at_level[j]:
assert(any(path == m.path for m in matches))
assert(len(matches) == total_paths)
def test_match_regex(self):
for i in range(len(self.paths_at_level)):
pm = pathmap.PathMap('level' + str(i))
matches = list(pm.matches('level0-dir'))
for j in range(i, len(self.paths_at_level)):
for path in self.paths_at_level[j]:
path = os.path.normpath(path)
assert(any(['level' + str(i)] == m.match_info
for m in matches)
)
def test_ignore_regex(self):
pm = pathmap.PathMap(ignore_rules=['level0'])
matches = list(pm.matches('level0-dir'))
assert(len(matches) == 0)
for i in range(1, len(self.paths_at_level)):
pm = pathmap.PathMap(ignore_rules=['level' + str(i)])
matches = list(pm.matches('level0-dir'))
for j in range(0, i):
for path in self.paths_at_level[j]:
path = os.path.normpath(path)
assert(any(path == m.path for m in matches))
def test_ignore_regexes(self):
ignore_rules = ['level2-file1', '.+'+os.sep+'level3-dir1$']
pm = pathmap.PathMap(ignore_rules=ignore_rules)
for match_result in pm.matches('level0-dir'):
assert(not os.path.basename(match_result.path) in
['level2-file1', 'level3-dir1'])
def test_prune_regex(self):
pm = pathmap.PathMap(prune_rules=['level0-dir'])
matches = list(pm.matches('level0-dir'))
assert(len(matches) == 1)
assert(matches[0].path == 'level0-dir')
prune_rule = 'level2-dir1'
pm = pathmap.PathMap(prune_rules=[prune_rule])
for match_result in pm.matches('level0-dir'):
idx = match_result.path.find(prune_rule)
if idx != -1:
assert(all(x != os.sep for x in match_result.path[idx:]))
def test_prune_regexes(self):
prune_rules = ['level1-dir2', 'level3-dir1']
pm = pathmap.PathMap(prune_rules=prune_rules)
for match_result in pm.matches('level0-dir'):
for rule in prune_rules:
idx = match_result.path.find(rule)
if idx != -1:
assert(all(x != os.sep for x in match_result.path[idx:]))
class TestSorting():
paths_at_level = [['c-dir', 'a-dir', 'd-file', 'b-file'],
[join('c-dir', 'g-file'),
join('c-dir', 'f-file'),
join('a-dir', 'y-file'),
join('a-dir', 'x-file'),
],
]
dfs_sorted = ['.',
join('.', 'a-dir'),
join('.', 'b-file'),
join('.', 'c-dir'),
join('.', 'd-file'),
join('.', 'a-dir', 'x-file'),
join('.', 'a-dir', 'y-file'),
join('.', 'c-dir', 'f-file'),
join('.', 'c-dir', 'g-file'),
]
def setup(self):
self.init_dir = os.getcwd()
self.test_dir = mkdtemp()
build_dir(self.test_dir, self.paths_at_level)
os.chdir(self.test_dir)
def tearDown(self):
os.chdir(self.init_dir)
shutil.rmtree(self.test_dir)
def test_sorting(self):
pm = pathmap.PathMap(sort=True)
matched_paths = [m.path for m in pm.matches('.')]
assert matched_paths == self.dfs_sorted
| mit | 4,489,761,024,084,811,300 | 37.140187 | 77 | 0.451237 | false | 3.890372 | true | false | false |
ojengwa/grr | lib/worker.py | 1 | 15836 | #!/usr/bin/env python
"""Module with GRRWorker implementation."""
import pdb
import time
import traceback
import logging
from grr.lib import aff4
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import flow
from grr.lib import master
from grr.lib import queue_manager as queue_manager_lib
from grr.lib import queues as queues_config
from grr.lib import rdfvalue
from grr.lib import registry
# pylint: disable=unused-import
from grr.lib import server_stubs
# pylint: enable=unused-import
from grr.lib import stats
from grr.lib import threadpool
from grr.lib import utils
class Error(Exception):
"""Base error class."""
class FlowProcessingError(Error):
"""Raised when flow requests/responses can't be processed."""
class GRRWorker(object):
"""A GRR worker."""
# time to wait before polling when no jobs are currently in the
# task scheduler (sec)
POLLING_INTERVAL = 2
SHORT_POLLING_INTERVAL = 0.3
SHORT_POLL_TIME = 30
# target maximum time to spend on RunOnce
RUN_ONCE_MAX_SECONDS = 300
# A class global threadpool to be used for all workers.
thread_pool = None
# This is a timed cache of locked flows. If this worker encounters a lock
# failure on a flow, it will not attempt to grab this flow until the timeout.
queued_flows = None
def __init__(self, queues=queues_config.WORKER_LIST,
threadpool_prefix="grr_threadpool",
threadpool_size=None, token=None):
"""Constructor.
Args:
queues: The queues we use to fetch new messages from.
threadpool_prefix: A name for the thread pool used by this worker.
threadpool_size: The number of workers to start in this thread pool.
token: The token to use for the worker.
Raises:
RuntimeError: If the token is not provided.
"""
logging.info("started worker with queues: " + str(queues))
self.queues = queues
self.queued_flows = utils.TimeBasedCache(max_size=10, max_age=60)
if token is None:
raise RuntimeError("A valid ACLToken is required.")
# Make the thread pool a global so it can be reused for all workers.
if GRRWorker.thread_pool is None:
if threadpool_size is None:
threadpool_size = config_lib.CONFIG["Threadpool.size"]
GRRWorker.thread_pool = threadpool.ThreadPool.Factory(
threadpool_prefix, min_threads=2, max_threads=threadpool_size)
GRRWorker.thread_pool.Start()
self.token = token
self.last_active = 0
# Well known flows are just instantiated.
self.well_known_flows = flow.WellKnownFlow.GetAllWellKnownFlows(token=token)
self.flow_lease_time = config_lib.CONFIG["Worker.flow_lease_time"]
self.well_known_flow_lease_time = config_lib.CONFIG[
"Worker.well_known_flow_lease_time"]
def Run(self):
"""Event loop."""
try:
while 1:
if master.MASTER_WATCHER.IsMaster():
processed = self.RunOnce()
else:
processed = 0
if processed == 0:
if time.time() - self.last_active > self.SHORT_POLL_TIME:
interval = self.POLLING_INTERVAL
else:
interval = self.SHORT_POLLING_INTERVAL
time.sleep(interval)
else:
self.last_active = time.time()
except KeyboardInterrupt:
logging.info("Caught interrupt, exiting.")
self.thread_pool.Join()
def RunOnce(self):
"""Processes one set of messages from Task Scheduler.
The worker processes new jobs from the task master. For each job
we retrieve the session from the Task Scheduler.
Returns:
Total number of messages processed by this call.
"""
start_time = time.time()
processed = 0
queue_manager = queue_manager_lib.QueueManager(token=self.token)
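    # The outer loop below runs once per notification shard; presumably this
    # lets a single RunOnce pass pick up notifications from every shard of
    # every queue (GetNotificationsByPriority is called once per queue per
    # pass).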
for _ in range(0, queue_manager.num_notification_shards):
for queue in self.queues:
# Freezeing the timestamp used by queue manager to query/delete
# notifications to avoid possible race conditions.
queue_manager.FreezeTimestamp()
fetch_messages_start = time.time()
notifications_by_priority = queue_manager.GetNotificationsByPriority(
queue)
stats.STATS.RecordEvent("worker_time_to_retrieve_notifications",
time.time() - fetch_messages_start)
# Process stuck flows first
stuck_flows = notifications_by_priority.pop(
queue_manager.STUCK_PRIORITY, [])
if stuck_flows:
self.ProcessStuckFlows(stuck_flows, queue_manager)
notifications_available = []
for priority in sorted(notifications_by_priority, reverse=True):
for notification in notifications_by_priority[priority]:
# Filter out session ids we already tried to lock but failed.
if notification.session_id not in self.queued_flows:
notifications_available.append(notification)
try:
# If we spent too much time processing what we have so far, the
# active_sessions list might not be current. We therefore break here
# so we can re-fetch a more up to date version of the list, and try
# again later. The risk with running with an old active_sessions list
# is that another worker could have already processed this message,
# and when we try to process it, there is nothing to do - costing us a
# lot of processing time. This is a tradeoff between checking the data
# store for current information and processing out of date
# information.
processed += self.ProcessMessages(notifications_available,
queue_manager,
self.RUN_ONCE_MAX_SECONDS -
(time.time() - start_time))
# We need to keep going no matter what.
except Exception as e: # pylint: disable=broad-except
logging.error("Error processing message %s. %s.", e,
traceback.format_exc())
stats.STATS.IncrementCounter("grr_worker_exceptions")
if flags.FLAGS.debug:
pdb.post_mortem()
queue_manager.UnfreezeTimestamp()
# If we have spent too much time, stop.
if (time.time() - start_time) > self.RUN_ONCE_MAX_SECONDS:
return processed
return processed
def ProcessStuckFlows(self, stuck_flows, queue_manager):
stats.STATS.IncrementCounter("grr_flows_stuck", len(stuck_flows))
for stuck_flow in stuck_flows:
try:
flow.GRRFlow.TerminateFlow(
stuck_flow.session_id, reason="Stuck in the worker",
status=rdfvalue.GrrStatus.ReturnedStatus.WORKER_STUCK,
force=True, token=self.token)
except Exception: # pylint: disable=broad-except
logging.exception("Error terminating stuck flow: %s", stuck_flow)
finally:
# Remove notifications for this flow. This will also remove the
# "stuck flow" notification itself.
queue_manager.DeleteNotification(stuck_flow.session_id)
def ProcessMessages(self, active_notifications, queue_manager, time_limit=0):
"""Processes all the flows in the messages.
Precondition: All tasks come from the same queue.
Note that the server actually completes the requests in the
flow when receiving the messages from the client. We do not really
look at the messages here at all any more - we just work from the
completed messages in the flow RDFValue.
Args:
active_notifications: The list of notifications.
queue_manager: QueueManager object used to manage notifications,
requests and responses.
time_limit: If set return as soon as possible after this many seconds.
Returns:
The number of processed flows.
"""
now = time.time()
processed = 0
for notification in active_notifications:
if notification.session_id not in self.queued_flows:
if time_limit and time.time() - now > time_limit:
break
processed += 1
self.queued_flows.Put(notification.session_id, 1)
self.thread_pool.AddTask(target=self._ProcessMessages,
args=(notification,
queue_manager.Copy()),
name=self.__class__.__name__)
return processed
def _ProcessRegularFlowMessages(self, flow_obj, notification):
"""Processes messages for a given flow."""
session_id = notification.session_id
if not isinstance(flow_obj, flow.GRRFlow):
logging.warn("%s is not a proper flow object (got %s)", session_id,
type(flow_obj))
stats.STATS.IncrementCounter("worker_bad_flow_objects",
fields=[str(type(flow_obj))])
raise FlowProcessingError("Not a GRRFlow.")
runner = flow_obj.GetRunner()
if runner.schedule_kill_notifications:
# Create a notification for the flow in the future that
      # indicates that this flow is in progress. We'll delete this
# notification when we're done with processing completed
# requests. If we're stuck for some reason, the notification
# will be delivered later and the stuck flow will get
# terminated.
stuck_flows_timeout = rdfvalue.Duration(
config_lib.CONFIG["Worker.stuck_flows_timeout"])
kill_timestamp = (rdfvalue.RDFDatetime().Now() +
stuck_flows_timeout)
with queue_manager_lib.QueueManager(token=self.token) as manager:
manager.QueueNotification(session_id=session_id,
in_progress=True,
timestamp=kill_timestamp)
# kill_timestamp may get updated via flow.HeartBeat() calls, so we
# have to store it in the runner context.
runner.context.kill_timestamp = kill_timestamp
try:
runner.ProcessCompletedRequests(notification, self.thread_pool)
# Something went wrong - log it in the flow.
except Exception as e: # pylint: disable=broad-except
runner.context.state = rdfvalue.Flow.State.ERROR
runner.context.backtrace = traceback.format_exc()
logging.error("Flow %s: %s", flow_obj, e)
raise FlowProcessingError(e)
finally:
# Delete kill notification as the flow got processed and is not
# stuck.
with queue_manager_lib.QueueManager(token=self.token) as manager:
if runner.schedule_kill_notifications:
manager.DeleteNotification(
session_id, start=runner.context.kill_timestamp,
end=runner.context.kill_timestamp)
runner.context.kill_timestamp = None
if (runner.process_requests_in_order and
notification.last_status and
(runner.context.next_processed_request <=
notification.last_status)):
# We are processing requests in order and have received a
# notification for a specific request but could not process
# that request. This might be a race condition in the data
# store so we reschedule the notification in the future.
delay = config_lib.CONFIG[
"Worker.notification_retry_interval"]
manager.QueueNotification(
notification, timestamp=notification.timestamp + delay)
def _ProcessMessages(self, notification, queue_manager):
"""Does the real work with a single flow."""
flow_obj = None
session_id = notification.session_id
try:
# Take a lease on the flow:
flow_name = session_id.FlowName()
if flow_name in self.well_known_flows:
# Well known flows are not necessarily present in the data store so
# we need to create them instead of opening.
expected_flow = self.well_known_flows[flow_name].__class__.__name__
flow_obj = aff4.FACTORY.CreateWithLock(
session_id, expected_flow,
lease_time=self.well_known_flow_lease_time,
blocking=False, token=self.token)
else:
flow_obj = aff4.FACTORY.OpenWithLock(
session_id, lease_time=self.flow_lease_time,
blocking=False, token=self.token)
now = time.time()
logging.debug("Got lock on %s", session_id)
# If we get here, we now own the flow. We can delete the notifications
# we just retrieved but we need to make sure we don't delete any that
# came in later.
queue_manager.DeleteNotification(session_id, end=notification.timestamp)
if flow_name in self.well_known_flows:
stats.STATS.IncrementCounter("well_known_flow_requests",
fields=[str(session_id)])
# We remove requests first and then process them in the thread pool.
# On one hand this approach increases the risk of losing requests in
# case the worker process dies. On the other hand, it doesn't hold
# the lock while requests are processed, so other workers can
# process well known flows requests as well.
with flow_obj:
responses = flow_obj.FetchAndRemoveRequestsAndResponses(session_id)
flow_obj.ProcessResponses(responses, self.thread_pool)
else:
with flow_obj:
self._ProcessRegularFlowMessages(flow_obj, notification)
elapsed = time.time() - now
logging.debug("Done processing %s: %s sec", session_id, elapsed)
stats.STATS.RecordEvent("worker_flow_processing_time", elapsed,
fields=[flow_obj.Name()])
# Everything went well -> session can be run again.
self.queued_flows.ExpireObject(session_id)
except aff4.LockError:
# Another worker is dealing with this flow right now, we just skip it.
# We expect lots of these when there are few messages (the system isn't
# highly loaded) but it is interesting when the system is under load to
# know if we are pulling the optimal number of messages off the queue.
# A high number of lock fails when there is plenty of work to do would
# indicate we are wasting time trying to process work that has already
# been completed by other workers.
stats.STATS.IncrementCounter("worker_flow_lock_error")
except FlowProcessingError:
# Do nothing as we expect the error to be correctly logged and accounted
# already.
pass
except Exception as e: # pylint: disable=broad-except
# Something went wrong when processing this session. In order not to spin
# here, we just remove the notification.
logging.exception("Error processing session %s: %s", session_id, e)
stats.STATS.IncrementCounter("worker_session_errors",
fields=[str(type(e))])
queue_manager.DeleteNotification(session_id)
class WorkerInit(registry.InitHook):
"""Registers worker stats variables."""
pre = ["StatsInit"]
def RunOnce(self):
"""Exports the vars.."""
stats.STATS.RegisterCounterMetric("grr_flows_stuck")
stats.STATS.RegisterCounterMetric("worker_bad_flow_objects",
fields=[("type", str)])
stats.STATS.RegisterCounterMetric("worker_session_errors",
fields=[("type", str)])
stats.STATS.RegisterCounterMetric(
"worker_flow_lock_error", docstring=("Worker lock failures. We expect "
"these to be high when the system"
"is idle."))
stats.STATS.RegisterEventMetric("worker_flow_processing_time",
fields=[("flow", str)])
stats.STATS.RegisterEventMetric("worker_time_to_retrieve_notifications")
| apache-2.0 | -2,781,089,918,829,109,000 | 38.393035 | 80 | 0.64606 | false | 4.311462 | true | false | false |
frhumanes/consulting | web/src/private_messages/templatetags/messages.py | 1 | 2744 | # -*- encoding: utf-8 -*-
from django.core.urlresolvers import reverse
from django import template
from private_messages.models import Message
from django.utils.translation import ugettext_lazy as _
register = template.Library()
@register.filter('unread_messages')
def get_unread_messages(user):
if user is None:
unread_messages = 0
else:
unread_messages = Message.objects.get_inbox_for_user(user)\
.filter(unread=True).count()
return unread_messages
@register.simple_tag
def get_header(request):
if reverse('private_messages_inbox') in request.path:
return _('Bandeja de entrada')
elif reverse('private_messages_outbox') in request.path:
return _('Enviados')
else:
return ''
@register.simple_tag
def is_sent_url(request):
    return reverse('private_messages_outbox') == request.path
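# Presumed template usage (inferred from the parser below, not documented in
# the original file): {% match_url 'private_messages_inbox' as is_inbox %}
# stores True in ``is_inbox`` when that named URL equals request.path.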
@register.tag('match_url')
def do_match_url(parser, token):
try:
# split_contents() knows not to split quoted strings.
tag_name, page_url, _as_, var_name = token.split_contents()
except ValueError:
        raise template.TemplateSyntaxError(
            "%r tag requires arguments in the form: 'url name' as var_name"
            % token.contents.split()[0])
if not (page_url[0] == page_url[-1] and page_url[0] in ('"', "'")):
        raise template.TemplateSyntaxError(
            "%r tag's argument should be in quotes" % tag_name)
return MatchUrl(page_url[1:-1], var_name)
class MatchUrl(template.Node):
def __init__(self, page_url, var_name):
self.page_url = page_url
self.var_name = var_name
def render(self, context):
request = context['request']
context[self.var_name] = reverse(self.page_url) == request.path
return ''
@register.tag('match_referer')
def do_match_referer(parser, token):
try:
# split_contents() knows not to split quoted strings.
tag_name, page_url, _as_, var_name = token.split_contents()
except ValueError:
        raise template.TemplateSyntaxError(
            "%r tag requires arguments in the form: 'referer substring' as var_name"
            % token.contents.split()[0])
if not (page_url[0] == page_url[-1] and page_url[0] in ('"', "'")):
        raise template.TemplateSyntaxError(
            "%r tag's argument should be in quotes" % tag_name)
return MatchReferer(page_url[1:-1], var_name)
class MatchReferer(template.Node):
def __init__(self, referer_name, var_name):
self.referer_name = referer_name
self.var_name = var_name
def render(self, context):
request = context['request']
        referer_url = request.META.get('HTTP_REFERER') or ''  # header may be absent
context[self.var_name] = self.referer_name in referer_url
return ''
| apache-2.0 | 669,657,954,398,505,200 | 29.153846 | 79 | 0.638848 | false | 3.73842 | false | false | false |
AmesianX/amoco | amoco/arch/x64/formats.py | 5 | 1941 | # -*- coding: utf-8 -*-
from amoco.arch.core import Formatter
def pfx(i):
if i.misc['pfx'] is None: return ''
pfxgrp0 = i.misc['pfx'][0]
if pfxgrp0 is None: return ''
return '%s '%pfxgrp0
def mnemo(i):
mnemo = i.mnemonic.replace('cc','')
if hasattr(i,'cond'): mnemo += i.cond[0].split('/')[0]
return '{: <12}'.format(mnemo.lower())
def deref(op):
if not op._is_mem: return str(op)
d = '%+d'%op.a.disp if op.a.disp else ''
    s = {8:'byte ptr ',16:'word ptr ',32:'dword ptr ',64:'qword ptr ', 128:'xmmword ptr '}.get(op.size,'')
    s += '%s:'%op.a.seg if op.a.seg != '' else ''
s += '[%s%s]'%(op.a.base,d)
return s
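# Illustrative only (operand names made up): a 32-bit memory operand with
# base rax and displacement 8 renders as 'dword ptr [rax+8]', optionally
# prefixed by 'seg:' when a segment is set.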
def opers(i):
s = []
for op in i.operands:
if op._is_mem:
s.append(deref(op))
continue
elif op._is_cst:
if i.misc['imm_ref'] is not None:
s.append(str(i.misc['imm_ref']))
continue
elif op.sf:
s.append('%+d'%op.value)
continue
# default:
s.append(str(op))
return ', '.join(s)
def oprel(i):
to = i.misc['to']
if to is not None: return '*'+str(to)
if (i.address is not None) and i.operands[0]._is_cst:
v = i.address + i.operands[0].signextend(64) + i.length
i.misc['to'] = v
return '*'+str(v)
return '.%+d'%i.operands[0].value
# main intel formats:
format_intel_default = (mnemo,opers)
format_intel_ptr = (mnemo,opers)
format_intel_str = (pfx,mnemo,opers)
format_intel_rel = (mnemo,oprel)
# formats:
IA32e_Intel_formats = {
'ia32_strings' : format_intel_str,
'ia32_mov_adr' : format_intel_ptr,
'ia32_ptr_ib' : format_intel_ptr,
'ia32_ptr_iwd' : format_intel_ptr,
'ia32_rm8' : format_intel_ptr,
'ia32_rm32' : format_intel_ptr,
'ia32_imm_rel' : format_intel_rel,
}
IA32e_Intel = Formatter(IA32e_Intel_formats)
IA32e_Intel.default = format_intel_default
| gpl-2.0 | -5,184,224,687,456,694,000 | 26.338028 | 90 | 0.551777 | false | 2.733803 | false | false | false |
ChrisLR/Python-Roguelike-Template | combat/attacks/unarmed/base.py | 1 | 2256 | import abilities
from combat import targets
from combat.attacks.base import Attack
from combat.enums import DamageType
from echo import functions
from stats.enums import StatsEnum
from util import check_roller, dice
class Punch(Attack):
name = "Punch"
target_type = targets.Single
description = "Basic unarmed attack."
actor_message = "You swing your fist at {defender}"
observer_message = "{attacker} swings {attacker_his} fist at {defender}"
@classmethod
def can_execute(cls, attack_context):
attacker = attack_context.attacker
if attack_context.distance_to <= 1:
attacker_body = attacker.body
if attacker_body:
return bool(attacker_body.get_ability(abilities.Punch))
return False
@classmethod
def execute(cls, attack_context):
attacker = attack_context.attacker
defender = attack_context.defender
hit_modifier = attacker.stats.strength.modifier
attack_result = cls.make_hit_roll(attack_context, hit_modifier)
attack_result.attack_message = cls.get_message(attacker, defender)
attack_result.context.attacker_weapon = "fist"
cls.make_damage_roll(attack_result, hit_modifier)
return attack_result,
@classmethod
def make_damage_roll(cls, attack_result, str_modifier):
melee_damage_dice = cls.get_melee_damage_dice(attack_result.context.attacker)
total_damage = check_roller.roll_damage(
dice_stacks=(melee_damage_dice,),
modifiers=str_modifier,
critical=attack_result.critical
)
attack_result.total_damage = total_damage
attack_result.separated_damage = [(total_damage, DamageType.Blunt)]
return attack_result
@classmethod
def get_melee_damage_dice(cls, actor):
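        # A bare fist rolls a single d1, so the base damage is always 1; the
        # attacker's strength modifier (added in make_damage_roll above) does
        # most of the work for unarmed strikes.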
return dice.DiceStack(1, dice.D1)
@classmethod
def get_message(cls, actor, target):
if actor.is_player:
return cls.actor_message.format(defender=target.name)
else:
return cls.observer_message.format(
attacker=actor.name,
attacker_his=functions.his_her_it(actor),
defender=functions.name_or_you(target)
)
| mit | 7,263,794,132,409,718,000 | 33.181818 | 85 | 0.657358 | false | 3.772575 | false | false | false |
brianjmiller/TinCanPython | test/activitydefinition_test.py | 2 | 9485 | # Copyright 2014 Rustici Software
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
if __name__ == '__main__':
from main import setup_tincan_path
setup_tincan_path()
from tincan import (
ActivityDefinition,
LanguageMap,
InteractionComponentList,
)
class ActivityDefinitionTest(unittest.TestCase):
def test_InitEmpty(self):
adef = ActivityDefinition()
self.assertEqual(vars(adef), {
'_choices': None,
'_correct_responses_pattern': None,
'_description': None,
'_extensions': None,
'_interaction_type': None,
'_more_info': None,
'_name': None,
'_scale': None,
'_source': None,
'_steps': None,
'_target': None,
'_type': None
})
self.assertIsInstance(adef, ActivityDefinition)
def test_InitAll(self):
adef = ActivityDefinition({
'name': {'en-US': 'test'},
'description': {'en-US': 'test'},
'type': 'test',
'more_info': 'test',
'interaction_type': 'choice',
'correct_responses_pattern': ['test'],
'choices': InteractionComponentList(),
'scale': InteractionComponentList(),
'source': InteractionComponentList(),
'target': InteractionComponentList(),
'steps': InteractionComponentList(),
'extensions': {'test': 'test'}
})
self.definitionVerificationHelper(adef)
def test_InitExceptionType(self):
with self.assertRaises(ValueError):
ActivityDefinition(type='')
def test_InitExceptionMoreInfo(self):
with self.assertRaises(ValueError):
ActivityDefinition(more_info='')
def test_InitExceptionInteractionType(self):
with self.assertRaises(ValueError):
ActivityDefinition(interaction_type='notvalidinteraction')
def test_InitExceptionCorrectResponsesPattern(self):
with self.assertRaises(TypeError):
ActivityDefinition(correct_responses_pattern='notlist')
def test_InitExceptionChoices(self):
with self.assertRaises(TypeError):
ActivityDefinition(choices='notlist')
def test_InitExceptionChoicesNotComponentList(self):
with self.assertRaises(TypeError):
ActivityDefinition(choices=['not component'])
def test_InitExceptionScale(self):
with self.assertRaises(TypeError):
ActivityDefinition(scale='notlist')
def test_InitExceptionScaleNotComponentList(self):
with self.assertRaises(TypeError):
ActivityDefinition(scale=['not component'])
def test_InitExceptionSource(self):
with self.assertRaises(TypeError):
ActivityDefinition(source='notlist')
def test_InitExceptionSourceNotComponentList(self):
with self.assertRaises(TypeError):
ActivityDefinition(source=['not component'])
def test_InitExceptionTarget(self):
with self.assertRaises(TypeError):
ActivityDefinition(target='notlist')
def test_InitExceptionTargetNotComponentList(self):
with self.assertRaises(TypeError):
ActivityDefinition(target=['not component'])
def test_InitExceptionSteps(self):
with self.assertRaises(TypeError):
ActivityDefinition(steps='notlist')
def test_InitExceptionStepsNotComponentList(self):
with self.assertRaises(TypeError):
ActivityDefinition(steps=['not component'])
def test_InitUnpack(self):
obj = {
'name': {'en-US': 'test'},
'description': {'en-US': 'test'},
'type': 'test',
'more_info': 'test',
'interaction_type': 'choice',
'correct_responses_pattern': ['test'],
'choices': InteractionComponentList(),
'scale': InteractionComponentList(),
'source': InteractionComponentList(),
'target': InteractionComponentList(),
'steps': InteractionComponentList(),
'extensions': {'test': 'test'}
}
adef = ActivityDefinition(**obj)
self.definitionVerificationHelper(adef)
def test_FromJSONExceptionBadJSON(self):
with self.assertRaises(ValueError):
ActivityDefinition.from_json('{"bad JSON"}')
def test_FromJSONExceptionMalformedJSON(self):
with self.assertRaises(AttributeError):
ActivityDefinition.from_json('{"test": "invalid property"}')
def test_FromJSONExceptionPartiallyMalformedJSON(self):
with self.assertRaises(AttributeError):
ActivityDefinition.from_json('{"test": "invalid property", "id": \
"valid property"}')
def test_FromJSONExceptionEmpty(self):
with self.assertRaises(ValueError):
ActivityDefinition.from_json('')
def test_FromJSON(self):
json_str = '{"name":{"en-US":"test"},\
"description":{"en-US":"test"},\
"type":"test",\
"more_info":"test",\
"interaction_type":"choice",\
"correct_responses_pattern": ["test"],\
"choices": [], "scale": [], "source": [], "target": [], "steps": [],\
"extensions": {"test": "test"}}'
adef = ActivityDefinition.from_json(json_str)
self.definitionVerificationHelper(adef)
def test_AsVersionEmpty(self):
adef = ActivityDefinition()
adef2 = adef.as_version()
self.assertEqual(adef2, {})
def test_AsVersion(self):
adef = ActivityDefinition({
'description': {'en-US': 'test'},
'name': {'en-US': 'test'},
'type': 'test',
'more_info': 'test',
'interaction_type': 'choice',
'correct_responses_pattern': ['test'],
'choices': InteractionComponentList(),
'scale': InteractionComponentList(),
'source': InteractionComponentList(),
'target': InteractionComponentList(),
'steps': InteractionComponentList(),
'extensions': {'test': 'test'}
})
adef2 = adef.as_version()
self.assertEqual(adef2, {
"name": {"en-US": "test"},
"correctResponsesPattern": ["test"],
"scale": [],
"description": {"en-US": "test"},
"choices": [],
"source": [],
"steps": [],
"moreInfo": "test",
"extensions": {"test": "test"},
"interactionType": "choice",
"target": [],
"type": "test",
})
def test_AsVersionIgnoreNone(self):
adef = ActivityDefinition({
'description': {'en-US': 'test'},
'more_info': None
})
self.assertEqual(adef.description, {'en-US': 'test'})
self.assertIsNone(adef.more_info)
adef2 = adef.as_version()
self.assertEqual(adef2, {'description': {'en-US': 'test'}})
def test_ToJSONIgnoreNone(self):
adef = ActivityDefinition({
'description': {'en-US': 'test'},
'more_info': None
})
self.assertEqual(adef.to_json(), '{"description": {"en-US": "test"}}')
def test_ToJSONEmpty(self):
adef = ActivityDefinition()
self.assertEqual(adef.to_json(), '{}')
def definitionVerificationHelper(self, definition):
check_map = LanguageMap({'en-US': 'test'})
check_string = 'test'
self.assertIsInstance(definition.name, LanguageMap)
self.assertEqual(definition.name, check_map)
self.assertIsInstance(definition.description, LanguageMap)
self.assertEqual(definition.description, check_map)
self.assertEqual(definition.type, check_string)
self.assertEqual(definition.more_info, check_string)
self.assertEqual(definition.interaction_type, 'choice')
self.assertIn(definition.interaction_type, ActivityDefinition._interaction_types)
self.assertEqual(definition.correct_responses_pattern, ['test'])
self.assertEqual(definition.choices, [])
self.assertIsInstance(definition.choices, InteractionComponentList)
self.assertEqual(definition.scale, [])
self.assertIsInstance(definition.scale, InteractionComponentList)
self.assertEqual(definition.source, [])
self.assertIsInstance(definition.source, InteractionComponentList)
self.assertEqual(definition.target, [])
self.assertIsInstance(definition.target, InteractionComponentList)
self.assertEqual(definition.steps, [])
self.assertIsInstance(definition.steps, InteractionComponentList)
self.assertEqual(definition.extensions, {"test": "test"})
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(ActivityDefinitionTest)
unittest.TextTestRunner(verbosity=2).run(suite)
| apache-2.0 | 6,283,205,923,333,592,000 | 36.788845 | 89 | 0.610648 | false | 4.495261 | true | false | false |
vascotenner/holoviews | holoviews/plotting/mpl/hooks.py | 1 | 9173 | import copy
import numpy as np
from matplotlib import pyplot as plt
try:
from mpld3 import plugins
except:
plugins = None
import param
from ...core import NdOverlay, Overlay
from ...element import HeatMap, Raster, Scatter, Curve, Points, Bars, Histogram
from . import CurvePlot, PointPlot, OverlayPlot, RasterPlot, HistogramPlot, BarPlot
class PlottingHook(param.ParameterizedFunction):
"""
PlottingHooks can be used to extend the default functionality
of HoloViews. Each ElementPlot type can be provided with a list of
hooks to apply at the end of plotting. The PlottingHook is provided
with ElementPlot instance, which gives access to the figure, axis
and artists via the handles, and the Element currently displayed
by the Plot. Since each Plot can be associated with multiple
Element plot types the Element types are validated against the
Elements in the types parameter.
"""
types = param.List([], doc="""List of types processed by the hook.""")
__abstract = True
def _applies(self, plot, view):
return type(view) in self.types
class MplD3Plugin(PlottingHook):
"""
The mpld3 library available as an optional backend
for HoloViews provides the option of adding
interactivity to the plot through various plugins.
    Subclasses of this PlottingHook can enable individual mpld3
    plugins such as HTML tooltips and interactive legends.
"""
css = param.String(doc="""CSS applied to HTML mpld3 Plugins.""", default="""
table {border-collapse: collapse;}
th {color: #ffffff; background-color: #000000;}
td {background-color: #cccccc;}
table, th, td {font-family:Arial, Helvetica, sans-serif;
border: 1px solid black; text-align: right;}""")
hoffset = param.Integer(default=10, doc="Vertical offset of the labels.")
voffset = param.Integer(default=10, doc="Horizontal offset of the labels.")
__abstract = True
def _applies(self, plot, view):
from ..ipython.magics import OutputMagic
types_match = super(MplD3Plugin, self)._applies(plot, view)
axes3d = plot.projection == '3d'
mpld3_backend = OutputMagic.options['backend'] == 'd3'
return types_match and mpld3_backend and not axes3d
class PointPlugin(MplD3Plugin):
"Labels each point with a table of its values."
types = param.List([Points, Scatter])
def __call__(self, plot, view):
if not self._applies(plot, view): return
fig = plot.handles['fig']
df = view.dframe()
labels = []
for i in range(len(df)):
label = df.ix[[i], :].T
label.columns = [view.label]
labels.append(str(label.to_html(header=len(view.label)>0)))
tooltip = plugins.PointHTMLTooltip(plot.handles['paths'], labels,
voffset=self.voffset, hoffset=self.hoffset,
css=self.css)
plugins.connect(fig, tooltip)
class CurvePlugin(MplD3Plugin):
"Labels each line with the Curve objects label"
format_string = param.String(default='<h4>{label}</h4>', doc="""
Defines the HTML representation of the Element label""")
types = param.List([Curve])
def __call__(self, plot, view):
if not self._applies(plot, view): return
fig = plot.handles['fig']
labels = [self.format_string.format(label=view.label)]
tooltip = plugins.LineHTMLTooltip(plot.handles['line_segment'], labels,
voffset=self.voffset, hoffset=self.hoffset,
css=self.css)
plugins.connect(fig, tooltip)
class BarPlugin(MplD3Plugin):
types = param.List([Bars])
def __call__(self, plot, view):
if not self._applies(plot, view): return
fig = plot.handles['fig']
for key, bar in plot.handles['bars'].items():
handle = bar.get_children()[0]
selection = [(d.name,{k}) for d, k in zip(plot.bar_dimensions, key)
if d is not None]
label_data = view.select(**dict(selection)).dframe().ix[0].to_frame()
label = str(label_data.to_html(header=len(view.label)>0))
tooltip = plugins.LineHTMLTooltip(handle, label, voffset=self.voffset,
hoffset=self.hoffset, css=self.css)
plugins.connect(fig, tooltip)
class HistogramPlugin(MplD3Plugin):
"Labels each bar with a table of its values."
types = param.List([Histogram])
def __call__(self, plot, view):
if not self._applies(plot, view): return
fig = plot.handles['fig']
df = view.dframe()
labels = []
for i in range(len(df)):
label = df.ix[[i], :].T
label.columns = [view.label]
labels.append(str(label.to_html(header=len(view.label)>0)))
for i, (bar, label) in enumerate(zip(plot.handles['bars'].get_children(), labels)):
tooltip = plugins.LineHTMLTooltip(bar, label, voffset=self.voffset,
hoffset=self.hoffset, css=self.css)
plugins.connect(fig, tooltip)
class RasterPlugin(MplD3Plugin):
"""
Replaces the imshow based Raster image with a
pcolormesh, allowing each pixel to be labelled.
"""
types = param.List(default=[Raster, HeatMap])
def __call__(self, plot, view):
if not self._applies(plot, view): return
fig = plot.handles['fig']
ax = plot.handles['axis']
valid_opts = ['cmap']
        opts = {k: v for k, v in plot.style.options.items()
                if k in valid_opts}
data = view.data
rows, cols = view.data.shape
if isinstance(view, HeatMap):
data = np.ma.array(data, mask=np.isnan(data))
cmap = copy.copy(plt.cm.get_cmap(opts.get('cmap', 'gray')))
cmap.set_bad('w', 1.)
opts['cmap'] = cmap
df = view.dframe(True).fillna(0)
df = df.sort([d.name for d in view.dimensions()[1:2]])[::-1]
l, b, r, t = (0, 0, 1, 1)
data = np.flipud(data)
else:
df = view.dframe().sort(['y','x'], ascending=(1,1))[::-1]
l, b, r, t = (0, 0, cols, rows)
for k, ann in plot.handles.get('annotations', {}).items():
ann.remove()
plot.handles['annotations'].pop(k)
# Generate color mesh to label each point
cols+=1; rows+=1
cmin, cmax = view.range(2)
x, y = np.meshgrid(np.linspace(l, r, cols), np.linspace(b, t, rows))
plot.handles['im'].set_visible(False)
mesh = ax.pcolormesh(x, y, data, vmin=cmin, vmax=cmax, **opts)
ax.invert_yaxis() # Doesn't work uninverted
df.index = range(len(df))
labels = []
for i in range(len(df)):
label = df.ix[[i], :].T
label.columns = [' '.join([view.label, view.group])]
labels.append(str(label.to_html(header=len(view.label)>0)))
tooltip = plugins.PointHTMLTooltip(mesh, labels[::-1], hoffset=self.hoffset,
voffset=self.voffset, css=self.css)
plugins.connect(fig, tooltip)
class LegendPlugin(MplD3Plugin):
"""
Provides an interactive legend allowing selecting
and unselecting of different elements.
"""
alpha_unsel = param.Number(default=0.2, doc="""
The alpha level of the unselected elements""")
alpha_sel = param.Number(default=2.0, doc="""
The alpha level of the unselected elements""")
types = param.List([Overlay, NdOverlay])
def __call__(self, plot, view):
if not self._applies(plot, view): return
fig = plot.handles['fig']
if 'legend' in plot.handles:
plot.handles['legend'].set_visible(False)
line_segments, labels = [], []
keys = view.keys()
for idx, subplot in enumerate(plot.subplots.values()):
if isinstance(subplot, PointPlot):
line_segments.append(subplot.handles['paths'])
if isinstance(view, NdOverlay):
labels.append(str(keys[idx]))
else:
labels.append(subplot.hmap.last.label)
elif isinstance(subplot, CurvePlot):
line_segments.append(subplot.handles['line_segment'])
if isinstance(view, NdOverlay):
labels.append(str(keys[idx]))
else:
labels.append(subplot.hmap.last.label)
tooltip = plugins.InteractiveLegendPlugin(line_segments, labels,
alpha_sel=self.alpha_sel,
alpha_unsel=self.alpha_unsel)
plugins.connect(fig, tooltip)
if plugins is not None:
OverlayPlot.finalize_hooks = [LegendPlugin]
RasterPlot.finalize_hooks = [RasterPlugin]
CurvePlot.finalize_hooks = [CurvePlugin]
PointPlot.finalize_hooks = [PointPlugin]
HistogramPlot.finalize_hooks = [HistogramPlugin]
BarPlot.finalize_hooks = [BarPlugin]
| bsd-3-clause | -1,523,589,428,948,259,600 | 35.114173 | 91 | 0.587594 | false | 3.857443 | false | false | false |
tiborsimko/analysis-preservation.cern.ch | cap/modules/fixtures/utils.py | 1 | 4290 | # -*- coding: utf-8 -*-
#
# This file is part of CERN Analysis Preservation Framework.
# Copyright (C) 2018 CERN.
#
# CERN Analysis Preservation Framework is free software; you can redistribute
# it and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# CERN Analysis Preservation Framework is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CERN Analysis Preservation Framework; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Util methods for fixtures."""
import json
import uuid
from elasticsearch import helpers
from elasticsearch_dsl import Q
from flask import current_app
from invenio_access.models import Role
from invenio_db import db
from invenio_pidstore.errors import PIDDoesNotExistError
from invenio_pidstore.models import PersistentIdentifier
from invenio_search import RecordsSearch
from invenio_search.proxies import current_search_client as es
from cap.modules.deposit.api import CAPDeposit
from cap.modules.deposit.errors import DepositDoesNotExist
from cap.modules.deposit.fetchers import cap_deposit_fetcher
from cap.modules.deposit.minters import cap_deposit_minter
from cap.modules.user.utils import get_existing_or_register_user
def construct_draft_obj(schema, data):
    """Constructs a draft object."""
entry = {
'$schema': 'https://{}/schemas/deposits/records/{}.json'.format(
current_app.config.get('JSONSCHEMAS_HOST'),
schema)
}
entry.update(data)
return entry
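# For illustration (values are made up): construct_draft_obj('cms-analysis',
# {'title': 'test'}) yields
# {'$schema': 'https://<JSONSCHEMAS_HOST>/schemas/deposits/records/cms-analysis.json',
#  'title': 'test'}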
def get_entry_uuid_by_unique_field(index, dict_unique_field_value):
    """Return the uuid of the record matching the given unique field."""
rs = RecordsSearch(index=index)
res = rs.query(Q('match',
**dict_unique_field_value)).execute().hits.hits
if not res:
raise DepositDoesNotExist
else:
return res[0]['_id']
def add_read_permission_for_egroup(deposit, egroup):
"""Add read permission for egroup."""
role = Role.query.filter_by(name=egroup).one()
deposit._add_egroup_permissions(role,
['deposit-read'],
db.session)
deposit.commit()
db.session.commit()
def add_drafts_from_file(file_path, schema,
egroup=None, usermail=None, limit=None):
"""Add drafts from a specified file.
    Drafts that already specify a pid will be registered under it.
For drafts without pid, new pids will be minted.
"""
if usermail:
user = get_existing_or_register_user(usermail)
else:
user = None
with open(file_path, 'r') as fp:
entries = json.load(fp)
for entry in entries[0:limit]:
data = construct_draft_obj(schema, entry)
pid = cap_deposit_fetcher(None, data)
pid_value = pid.pid_value if pid else None
try:
PersistentIdentifier.get('depid', pid_value)
print('Draft with id {} already exist!'.format(pid_value))
except PIDDoesNotExistError:
record_uuid = uuid.uuid4()
pid = cap_deposit_minter(record_uuid, data)
deposit = CAPDeposit.create(data, record_uuid, user)
deposit.commit()
if egroup:
add_read_permission_for_egroup(deposit, egroup)
print('Draft {} added.'.format(pid.pid_value))
db.session.commit()
def bulk_index_from_source(index_name, doc_type, source):
"""Indexes from source."""
actions = [{
"_index": index_name,
"_type": doc_type,
"_id": idx,
"_source": obj
} for idx, obj in enumerate(source)]
helpers.bulk(es, actions)
| gpl-2.0 | 5,375,609,865,573,419,000 | 32 | 78 | 0.663403 | false | 3.939394 | false | false | false |
Silmathoron/NNGT | nngt/simulation/nest_activity.py | 1 | 23441 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
#
# This file is part of the NNGT project to generate and analyze
# neuronal networks and their activity.
# Copyright (C) 2015-2019 Tanguy Fardet
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
""" Analyze the activity of a network """
from collections import namedtuple
from copy import deepcopy
import weakref
import nest
import numpy as np
from nngt.lib import InvalidArgument, nonstring_container
from .nest_utils import nest_version, _get_nest_gids
__all__ = [
"ActivityRecord",
"activity_types",
"analyze_raster",
"get_recording",
]
# ------------------------------ #
# Finding the various activities #
# ------------------------------ #
class ActivityRecord:
'''
Class to record the properties of the simulated activity.
'''
def __init__(self, spike_data, phases, properties, parameters=None):
'''
Initialize the instance using `spike_data` (store proxy to an optional
`network`) and compute the properties of provided data.
Parameters
----------
spike_data : 2D array
Array of shape (num_spikes, 2), containing the senders on the 1st
row and the times on the 2nd row.
phases : dict
Limits of the different phases in the simulated period.
properties : dict
Values of the different properties of the activity (e.g.
"firing_rate", "IBI"...).
parameters : dict, optional (default: None)
Parameters used to compute the phases.
Note
----
The firing rate is computed as num_spikes / total simulation time,
the period is the sum of an IBI and a bursting period.
'''
self._data = spike_data
self._phases = phases.copy()
self._properties = properties.copy()
self.parameters = parameters
    def simplify(self):
raise NotImplementedError("Will be implemented soon.")
@property
def data(self):
'''
Returns the (N, 2) array of (senders, spike times).
'''
return self._data
@property
def phases(self):
'''
Return the phases detected:
- "bursting" for periods of high activity where a large fraction
of the network is recruited.
- "quiescent" for periods of low activity
- "mixed" for firing rate in between "quiescent" and "bursting".
- "localized" for periods of high activity but where only a small
fraction of the network is recruited.
Note
----
See `parameters` for details on the conditions used to
        differentiate these phases.
'''
return self._phases
@property
def properties(self):
'''
Returns the properties of the activity.
Contains the following entries:
- "firing_rate": average value in Hz for 1 neuron in the network.
- "bursting": True if there were bursts of activity detected.
- "burst_duration", "IBI", "ISI", and "period" in ms, if
"bursting" is True.
- "SpB" (Spikes per Burst): average number of spikes per neuron
during a burst.
'''
return self._properties
# ---------------- #
# Analyse activity #
# ---------------- #
def get_recording(network, record, recorder=None, nodes=None):
'''
Return the evolution of some recorded values for each neuron.
Parameters
----------
network : :class:`nngt.Network`
Network for which the activity was simulated.
record : str or list
Name of the record(s) to obtain.
recorder : tuple of ints, optional (default: all multimeters)
GID of the "spike_detector" objects recording the network activity.
nodes : array-like, optional (default: all nodes)
NNGT ids of the nodes for which the recording should be returned.
Returns
-------
values : dict of dict of arrays
        Dictionary containing, for each `record`, a dictionary in which the
        array of recorded values for the n-th neuron is stored under entry
        `n` (integer).
A `times` entry is also added; it should be the same size for all
records, otherwise an error will be raised.
Examples
--------
After the creation of a :class:`~nngt.Network` called ``net``, use the
following code: ::
import nest
rec, _ = monitor_nodes(
net.nest_gids, "multimeter", {"record_from": ["V_m"]}, net)
nest.Simulate(100.)
recording = nngt.simulation.get_recording(net, "V_m")
# access the membrane potential of first neuron + the times
V_m = recording["V_m"][0]
times = recording["times"]
'''
if nodes is None:
nodes = [network.id_from_nest_gid(n) for n in network.nest_gids]
gids = _get_nest_gids([network.nest_gids[n] for n in nodes])
if not nonstring_container(record):
record = [record]
values = {rec: {} for rec in record}
if recorder is None:
if nest_version == 3:
recorder = nest.GetNodes(properties={'model': 'multimeter'})
else:
recorder = nest.GetNodes((0,), properties={'model': 'multimeter'})
times = None
for rec in recorder:
events = nest.GetStatus(rec, "events")[0]
senders = events["senders"]
if times is not None:
            assert np.array_equal(times, events["times"]), \
                "Different times between the recorders; check the params."
times = events["times"]
values["times"] = times[senders == senders[0]]
for rec_name in record:
for idx, gid in zip(nodes, gids):
            ids = (senders == gid)
values[rec_name][idx] = events[rec_name][ids]
return values
def activity_types(spike_detector, limits, network=None,
phase_coeff=(0.5, 10.), mbis=0.5, mfb=0.2, mflb=0.05,
skip_bursts=0, simplify=False, fignums=[], show=False):
'''
Analyze the spiking pattern of a neural network.
@todo:
think about inserting t=0. and t=simtime at the beginning and at the
end of ``times``.
Parameters
----------
spike_detector : NEST node(s) (tuple or list of tuples)
The recording device that monitored the network's spikes.
limits : tuple of floats
Time limits of the simulation region which should be studied (in ms).
network : :class:`~nngt.Network`, optional (default: None)
Neural network that was analyzed
    phase_coeff : tuple of floats, optional (default: (0.5, 10.))
        A phase is considered 'bursting' when the interspike interval between
        all spikes that compose it is smaller than
        ``phase_coeff[0] / avg_rate`` (where ``avg_rate`` is the average
        firing rate), 'quiescent' when it is greater than
        ``phase_coeff[1] / avg_rate``, 'mixed' otherwise.
mbis : float, optional (default: 0.5)
Maximum interspike interval allowed for two spikes to be considered in
the same burst (in ms).
mfb : float, optional (default: 0.2)
Minimal fraction of the neurons that should participate for a burst to
be validated (i.e. if the interspike is smaller that the limit BUT the
number of participating neurons is too small, the phase will be
considered as 'localized').
mflb : float, optional (default: 0.05)
Minimal fraction of the neurons that should participate for a local
        burst to be validated (i.e. if the interspike is smaller than the limit
BUT the number of participating neurons is too small, the phase will be
considered as 'mixed').
skip_bursts : int, optional (default: 0)
Skip the `skip_bursts` first bursts to consider only the permanent
regime.
simplify: bool, optional (default: False)
If ``True``, 'mixed' phases that are contiguous to a burst are
incorporated to it.
return_steps : bool, optional (default: False)
If ``True``, a second dictionary, `phases_steps` will also be returned.
@todo: not implemented yet
fignums : list, optional (default: [])
Indices of figures on which the periods can be drawn.
show : bool, optional (default: False)
Whether the figures should be displayed.
Note
----
Effects of `skip_bursts` and `limits[0]` are cumulative: the `limits[0]`
first milliseconds are ignored, then the `skip_bursts` first bursts of the
remaining activity are ignored.
Returns
-------
phases : dict
Dictionary containing the time intervals (in ms) for all four phases
(`bursting', `quiescent', `mixed', and `localized`) as lists.
E.g: ``phases["bursting"]`` could give ``[[123.5,334.2],
[857.1,1000.6]]``.
'''
# check if there are several recorders
senders, times = [], []
if True in nest.GetStatus(spike_detector, "to_file"):
for fpath in nest.GetStatus(spike_detector, "record_to"):
data = _get_data(fpath)
times.extend(data[:, 1])
senders.extend(data[:, 0])
else:
for events in nest.GetStatus(spike_detector, "events"):
times.extend(events["times"])
senders.extend(events["senders"])
idx_sort = np.argsort(times)
times = np.array(times)[idx_sort]
senders = np.array(senders)[idx_sort]
# compute phases and properties
data = np.array((senders, times))
phases, fr = _analysis(times, senders, limits, network=network,
phase_coeff=phase_coeff, mbis=mbis, mfb=mfb, mflb=mflb,
simplify=simplify)
properties = _compute_properties(data, phases, fr, skip_bursts)
kwargs = {
"limits": limits,
"phase_coeff": phase_coeff,
"mbis": mbis,
"mfb": mfb,
"mflb": mflb,
"simplify": simplify
}
# plot if required
if show:
_plot_phases(phases, fignums)
return ActivityRecord(data, phases, properties, kwargs)
def analyze_raster(raster=None, limits=None, network=None,
phase_coeff=(0.5, 10.), mbis=0.5, mfb=0.2, mflb=0.05,
skip_bursts=0, skip_ms=0., simplify=False, fignums=[],
show=False):
'''
Return the activity types for a given raster.
Parameters
----------
raster : array-like (N, 2) or str
Either an array containing the ids of the spiking neurons on the first
column, then the corresponding times on the second column, or the path
to a NEST .gdf recording.
limits : tuple of floats
        Time limits of the simulation region which should be studied (in ms).
network : :class:`~nngt.Network`, optional (default: None)
Network on which the recorded activity was simulated.
    phase_coeff : tuple of floats, optional (default: (0.5, 10.))
        A phase is considered 'bursting' when the interspike interval between
        all spikes that compose it is smaller than
        ``phase_coeff[0] / avg_rate`` (where ``avg_rate`` is the average
        firing rate), 'quiescent' when it is greater than
        ``phase_coeff[1] / avg_rate``, 'mixed' otherwise.
mbis : float, optional (default: 0.5)
Maximum interspike interval allowed for two spikes to be considered in
the same burst (in ms).
mfb : float, optional (default: 0.2)
Minimal fraction of the neurons that should participate for a burst to
be validated (i.e. if the interspike is smaller that the limit BUT the
number of participating neurons is too small, the phase will be
considered as 'localized').
mflb : float, optional (default: 0.05)
Minimal fraction of the neurons that should participate for a local
        burst to be validated (i.e. if the interspike is smaller than the limit
BUT the number of participating neurons is too small, the phase will be
considered as 'mixed').
skip_bursts : int, optional (default: 0)
Skip the `skip_bursts` first bursts to consider only the permanent
regime.
simplify: bool, optional (default: False)
If ``True``, 'mixed' phases that are contiguous to a burst are
incorporated to it.
fignums : list, optional (default: [])
Indices of figures on which the periods can be drawn.
show : bool, optional (default: False)
Whether the figures should be displayed.
Note
----
Effects of `skip_bursts` and `limits[0]` are cumulative: the
`limits[0]` first milliseconds are ignored, then the `skip_bursts`
first bursts of the remaining activity are ignored.
Returns
-------
activity : ActivityRecord
Object containing the phases and the properties of the activity
from these phases.
'''
data = _get_data(raster) if isinstance(raster, str) else raster
if data.any():
if limits is None:
limits = [np.min(data[:, 1]), np.max(data[:, 1])]
kwargs = {
"limits": limits,
"phase_coeff": phase_coeff,
"mbis": mbis,
"mfb": mfb,
"mflb": mflb,
"simplify": simplify
}
# compute phases and properties
phases, fr = _analysis(data[:, 1], data[:, 0], limits, network=network,
phase_coeff=phase_coeff, mbis=mbis, mfb=mfb, mflb=mflb,
simplify=simplify)
properties = _compute_properties(data.T, phases, fr, skip_bursts)
# plot if required
if show:
import matplotlib.pyplot as plt
if fignums:
_plot_phases(phases, fignums)
else:
fig, ax = plt.subplots()
ax.scatter(data[:, 1], data[:, 0])
_plot_phases(phases, [fig.number])
return ActivityRecord(data, phases, properties, kwargs)
return ActivityRecord(data, {}, {})
# ----- #
# Tools #
# ----- #
def _get_data(source):
'''
Returns the (times, senders) array.
Parameters
----------
source : list or str
Indices of spike detectors or path to the .gdf files.
Returns
-------
data : 2D array of shape (N, 2)
'''
data = [[],[]]
is_string = isinstance(source, str)
if is_string:
source = [source]
elif nonstring_container(source) and isinstance(source[0], str):
is_string = True
if is_string:
for path in source:
tmp = np.loadtxt(path)
data[0].extend(tmp[:, 0])
data[1].extend(tmp[:, 1])
else:
source_shape = np.shape(np.squeeze(source))
if len(source_shape) == 2:
# source is directly the data
if source_shape[0] == 2 and source_shape[1] != 2:
return np.array(source).T
else:
return np.array(source)
else:
# source contains gids
source = _get_nest_gids(source)
events = None
if nonstring_container(source[0]):
events = [nest.GetStatus(gid, "events")[0] for gid in source]
else:
events = nest.GetStatus(source, "events")
for ev in events:
data[0].extend(ev["senders"])
data[1].extend(ev["times"])
idx_sort = np.argsort(data[1])
return np.array(data)[:, idx_sort].T
def _find_phases(times, phases, lim_burst, lim_quiet, simplify):
'''
Find the time limits of the different phases.
'''
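    # Rough idea (as implemented below): walk the inter-spike intervals in
    # order; an interval shorter than lim_burst extends/creates a "bursting"
    # span, one longer than lim_quiet a "quiescent" span, anything in between
    # a "mixed" span. Contiguous spans of the same kind are merged, and with
    # `simplify` a mixed span adjacent to a burst is absorbed into the burst.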
diff = np.diff(times).tolist()[::-1]
i = 0
previous = {"bursting": -2, "mixed": -2, "quiescent": -2}
while diff:
tau = diff.pop()
while True:
if tau < lim_burst: # bursting phase
if previous["bursting"] == i-1:
phases["bursting"][-1][1] = times[i+1]
else:
if simplify and previous["mixed"] == i-1:
start_mixed = phases["mixed"][-1][0]
phases["bursting"].append([start_mixed, times[i+1]])
del phases["mixed"][-1]
else:
phases["bursting"].append([times[i], times[i+1]])
previous["bursting"] = i
i+=1
break
elif tau > lim_quiet:
if previous["quiescent"] == i-1:
phases["quiescent"][-1][1] = times[i+1]
else:
phases["quiescent"].append([times[i], times[i+1]])
previous["quiescent"] = i
i+=1
break
else:
if previous["mixed"] == i-1:
phases["mixed"][-1][1] = times[i+1]
previous["mixed"] = i
else:
if simplify and previous["bursting"] == i-1:
phases["bursting"][-1][1] = times[i+1]
previous["bursting"] = i
else:
phases["mixed"].append([times[i], times[i+1]])
previous["mixed"] = i
i+=1
break
def _check_burst_size(phases, senders, times, network, mflb, mfb):
'''
Check that bursting periods involve at least a fraction mfb of the neurons.
'''
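    # Bursts recruiting less than a fraction mflb of the neurons are demoted
    # to "mixed", those recruiting less than mfb to "localized"; afterwards
    # contiguous "mixed" spans are merged together.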
transfer, destination = [], {}
n = len(set(senders)) if network is None else network.node_nb()
for i,burst in enumerate(phases["bursting"]):
idx_start = np.where(times==burst[0])[0][0]
idx_end = np.where(times==burst[1])[0][0]
participating_frac = len(set(senders[idx_start:idx_end])) / float(n)
if participating_frac < mflb:
transfer.append(i)
destination[i] = "mixed"
elif participating_frac < mfb:
transfer.append(i)
destination[i] = "localized"
for i in transfer[::-1]:
phase = phases["bursting"].pop(i)
phases[destination[i]].insert(0, phase)
remove = []
i = 0
while i < len(phases['mixed']):
mixed = phases['mixed'][i]
j=i+1
for span in phases['mixed'][i+1:]:
if span[0] == mixed[1]:
mixed[1] = span[1]
remove.append(j)
elif span[1] == mixed[0]:
mixed[0] = span[0]
remove.append(j)
j+=1
i+=1
remove = list(set(remove))
remove.sort()
for i in remove[::-1]:
del phases["mixed"][i]
def _analysis(times, senders, limits, network=None,
phase_coeff=(0.5, 10.), mbis=0.5, mfb=0.2, mflb=0.05,
simplify=False):
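    # Helper shared by activity_types() and analyze_raster(): returns the
    # detected phases plus the average firing rate (in Hz, per neuron).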
# prepare the phases and check the validity of the data
phases = {
"bursting": [],
"mixed": [],
"quiescent": [],
"localized": []
}
num_spikes, avg_rate = len(times), 0.
if num_spikes:
num_neurons = (len(np.unique(senders)) if network is None
else network.node_nb())
# set the studied region
if limits[0] >= times[0]:
idx_start = np.where(times >= limits[0])[0][0]
times = times[idx_start:]
senders = senders[idx_start:]
if limits[1] <= times[-1]:
idx_end = np.where(times <= limits[1])[0][-1]
times = times[:idx_end]
senders = senders[:idx_end]
        # get the average firing rate to differentiate the phases
simtime = limits[1] - limits[0]
lim_burst, lim_quiet = 0., 0.
avg_rate = num_spikes / float(simtime)
lim_burst = max(phase_coeff[0] / avg_rate, mbis)
lim_quiet = min(phase_coeff[1] / avg_rate, 10.)
# find the phases
_find_phases(times, phases, lim_burst, lim_quiet, simplify)
_check_burst_size(phases, senders, times, network, mflb, mfb)
avg_rate *= 1000. / float(num_neurons)
return phases, avg_rate
def _compute_properties(data, phases, fr, skip_bursts):
'''
Compute the properties from the spike times and phases.
Parameters
----------
data : 2D array, shape (N, 2)
Spike times and senders.
phases : dict
The phases.
fr : double
Firing rate.
Returns
-------
prop : dict
Properties of the activity. Contains the following pairs:
- "firing_rate": average value in Hz for 1 neuron in the network.
- "bursting": True if there were bursts of activity detected.
- "burst_duration", "ISI", and "IBI" in ms, if "bursting" is True.
- "SpB": average number of spikes per burst for one neuron.
'''
prop = {}
times = data[1, :]
# firing rate (in Hz, normalized for 1 neuron)
prop["firing_rate"] = fr
num_bursts = len(phases["bursting"])
init_val = 0. if num_bursts > skip_bursts else np.NaN
if num_bursts:
prop["bursting"] = True
prop.update({
"burst_duration": init_val,
"IBI": init_val,
"ISI": init_val,
"SpB": init_val,
"period": init_val})
else:
prop["bursting"] = False
for i, burst in enumerate(phases["bursting"]):
if i >= skip_bursts:
# burst_duration
prop["burst_duration"] += burst[1] - burst[0]
# IBI
if i > 0:
end_older_burst = phases["bursting"][i-1][1]
prop["IBI"] += burst[0]-end_older_burst
# get num_spikes inside the burst, divide by num_neurons
idxs = np.where((times >= burst[0])*(times <= burst[1]))[0]
num_spikes = len(times[idxs])
num_neurons = len(set(data[0, :][idxs]))
if num_neurons:
prop["SpB"] += num_spikes / float(num_neurons)
# ISI
if num_spikes:
prop["ISI"] += num_neurons * (burst[1] - burst[0])\
/ float(num_spikes)
for key in prop.keys():
if key not in ("bursting", "firing_rate") and num_bursts > skip_bursts:
prop[key] /= float(num_bursts - skip_bursts)
if num_bursts > skip_bursts:
prop["period"] = prop["IBI"] + prop["burst_duration"]
if num_bursts and prop["SpB"] < 2.:
prop["ISI"] = np.NaN
return prop
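# --- illustrative sketch (not part of the original module) -------------------
# A minimal, made-up example of the inputs `_compute_properties` expects:
# `data` carries senders in row 0 and spike times (ms) in row 1, `phases`
# maps phase names to [start, end] intervals and `fr` is the average rate in
# Hz.  All numbers below are invented purely for illustration.
def _example_compute_properties():
    import numpy as np
    senders = np.array([1, 2, 1, 2, 1, 2])
    times = np.array([10., 12., 15., 300., 305., 600.])
    data = np.vstack((senders, times))
    phases = {
        "bursting": [[9., 16.], [299., 306.]],
        "mixed": [],
        "quiescent": [[16., 299.]],
        "localized": [],
    }
    # returns the properties dict described in the docstring above
    return _compute_properties(data, phases, fr=5., skip_bursts=0)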
def _plot_phases(phases, fignums):
import matplotlib.pyplot as plt
colors = ('r', 'orange', 'g', 'b')
names = ('bursting', 'mixed', 'localized', 'quiescent')
for fignum in fignums:
fig = plt.figure(fignum)
for ax in fig.axes:
for phase, color in zip(names, colors):
for span in phases[phase]:
ax.axvspan(span[0], span[1], facecolor=color,
alpha=0.2)
plt.show()
| gpl-3.0 | -1,356,721,159,638,273,800 | 34.091317 | 79 | 0.570709 | false | 3.884176 | false | false | false |
Tinkerforge/brickv | src/brickv/plugin_system/plugins/red/config_parser.py | 1 | 2024 | # -*- coding: utf-8 -*-
"""
RED Plugin
Copyright (C) 2014 Olaf Lüke <[email protected]>
Copyright (C) 2014 Ishraq Ibne Ashraf <[email protected]>
config_parser.py: Parses key=value configs from RED Brick
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program; if not, write to the
Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.
"""
from io import StringIO
import configparser
def parse(data):
if isinstance(data, list):
string = bytes(data).decode('utf-8')
elif isinstance(data, str):
string = data
else:
return None
config = configparser.ConfigParser()
config.read_file(['[fake_section]'] + [l for l in string.splitlines() if not l.find('=') < 0])
try:
config = dict(config.items('fake_section'))
except:
return None
return config
def parse_no_fake(data):
if isinstance(data, list):
string = bytes(data).decode('utf-8')
elif isinstance(data, str):
string = data
else:
return None
config = configparser.ConfigParser()
config.read_string(string)
return config
def to_string(data):
config = configparser.ConfigParser()
config.add_section('fake_section')
for key, value in data.items():
config.set('fake_section', key, value)
s = StringIO()
config.write(s)
return s.getvalue().replace('[fake_section]\n', '')
def to_string_no_fake(data):
s = StringIO()
data.write(s)
return s.getvalue()
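# --- illustrative sketch (not part of the original module) -------------------
# Round trip through the helpers above: RED Brick configs are bare "key=value"
# lines, so parse() prepends a fake section header to satisfy configparser and
# to_string() strips it again.  The key names are made up for the example.
def _example_roundtrip():
    raw = "green_led=heartbeat\nred_led=off"
    settings = parse(raw)          # {'green_led': 'heartbeat', 'red_led': 'off'}
    settings['red_led'] = 'on'
    return to_string(settings)     # roughly "green_led = heartbeat\nred_led = on\n"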
| gpl-2.0 | -2,293,106,811,281,669,000 | 26.337838 | 98 | 0.687098 | false | 3.802632 | true | false | false |
JeanJoskin/Traffique | src/tracker.py | 1 | 2591 | # Traffique: live visitor statistics on App Engine
# Copyright (C) 2011 Jean Joskin <jeanjoskin.com>
#
# Traffique is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Traffique is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Traffique. If not, see <http://www.gnu.org/licenses/>.
import model
from datetime import datetime, timedelta
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.api import channel
from google.appengine.api import memcache
PIXEL_GIF = "GIF87a\x01\x00\x01\x00\x80\x00\x00\x0A\x00\x00\x00\x00\x00\x21\xF9" + \
"\x04\x01\x00\x00\x01\x00\x2C\x00\x00\x00\x00\x01\x00\x01\x00\x00\x02" + \
"\x02\x4C\x01\x00\x3B"
class Tracker(webapp.RequestHandler):
@staticmethod
def get_active_channel_tokens():
# Fetch active clients
channel_tokens = memcache.get("channel_tokens")
if channel_tokens is None:
q = model.Session.all()
q.filter("last_activity > ", datetime.utcnow() - timedelta(minutes = 5))
q.order("-last_activity")
clients = q.fetch(limit = 20)
channel_tokens = []
for client in clients:
channel_tokens.append(client.key().name())
memcache.set("channel_tokens", channel_tokens, 3600)
return channel_tokens
def get(self):
try:
# Notify all sessions
tokens = Tracker.get_active_channel_tokens()
msg = '{"i":"' + self.request.remote_addr + '"}';
for token in tokens:
channel.send_message(token, msg)
finally:
# Return pixel to user
self.response.headers["Content-Type"] = "image/gif"
self.response.out.write(PIXEL_GIF)
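# --- illustrative note (not part of the original module) ---------------------
# Pages embed the tracking pixel (e.g. <img src="/t.gif">, see the URL mapping
# below); every hit pushes a JSON message of the form {"i": "<visitor ip>"} to
# each active channel token before the 1x1 GIF is returned.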
application = webapp.WSGIApplication(
[
("/t.gif", Tracker)
],
debug=False)
def main():
run_wsgi_app(application)
if __name__ == "__main__":
main()
| gpl-3.0 | 5,526,692,384,639,389,000 | 36.014286 | 86 | 0.60633 | false | 3.810294 | false | false | false |
iamarf/terminal-quest | linux_story/story/challenges/challenge_22.py | 1 | 4393 | #!/usr/bin/env python
# coding: utf-8
# Copyright (C) 2014, 2015 Kano Computing Ltd.
# License: http://www.gnu.org/licenses/gpl-2.0.txt GNU GPL v2
#
# A chapter of the story
from linux_story.step_helper_functions import unblock_cd_commands
from linux_story.story.terminals.terminal_mkdir import TerminalMkdir
from linux_story.helper_functions import play_sound
from linux_story.story.challenges.challenge_23 import Step1 as NextStep
class StepTemplateMkdir(TerminalMkdir):
challenge_number = 22
class Step1(StepTemplateMkdir):
story = [
"{{gb:Bravo, sembra che ci siano tutti!}}",
"\nRomina: {{Bb:Grazie davvero!}}",
"{{Bb:Staremo qui al sicuro. Ti sono così grata per tutto quello "
"che hai fatto.}}",
"\nUsa {{lb:cat}} per controllare se gli animali stanno "
"bene qui dentro"
]
start_dir = "~/fattoria/fienile/.riparo"
end_dir = "~/fattoria/fienile/.riparo"
commands = [
"cat Violetta",
"cat Trogolo",
"cat Gelsomino"
]
hints = [
"{{rb:Usa}} {{lb:cat}} {{rb:per controlare un animale, tipo}} "
"{{yb:cat Violetta}}{{rb:.}}"
]
# Remove all the food
deleted_items = [
"~/paese/.riparo-nascosto/basket",
"~/paese/.riparo-nascosto/apple"
]
def next(self):
play_sound("bell")
Step2()
class Step2(StepTemplateMkdir):
story = [
"{{pb:Ding. Dong.}}",
"Romina: {{Bb:Cosa?? Ho sentito la campanella! Che significa?}}",
"\nSvelto! {{lb:Guarda attorno}} per controllare se manca qualcuno."
]
start_dir = "~/fattoria/fienile/.riparo"
end_dir = "~/fattoria/fienile/.riparo"
commands = [
"ls",
"ls -a"
]
hints = [
"{{rb:guarda attorno con}} {{yb:ls}}{{rb:.}}"
]
# Remove Edith
deleted_items = [
"~/paese/.riparo-nascosto/Edith"
]
def next(self):
play_sound("bell")
Step3()
class Step3(StepTemplateMkdir):
story = [
"Sembra che qui ci siano tutti...",
"\n{{pb:Ding. Dong.}}",
"\nRomina: {{Bb:Ancora! L'ho sentita! Ma è questa che hai sentito quando "
"è sparito il mio marito?}}",
"Dai un'altra {{lb:occhiata}} veloce."
]
start_dir = "~/fattoria/fienile/.riparo"
end_dir = "~/fattoria/fienile/.riparo"
commands = [
"ls",
"ls -a"
]
hints = [
"{{rb:Guardati attorno con}} {{yb:ls}}{{rb:.}}"
]
# Remove Edoardo
deleted_items = [
"~/paese/.riparo-nascosto/Edoardo"
]
def next(self):
play_sound("bell")
Step4()
# TODO: FIX THIS STEP
class Step4(StepTemplateMkdir):
story = [
"Romina: {{Bb:Meno male. Siamo al sicuro, qui ci sono tutti. "
"Ma perché suona?}}",
"\nForse dovremmo indagare su queste ultime suonate. Chi altro "
"si conosceva?",
"Si potrebbe rincontrollare quella famiglia rimpiattata nel "
"{{lb:.riparo-nascosto}} e parlare loro, ora che hai la voce.",
"\nInizia a tornare indietro per andare al {{lb:.riparo-nascosto}} con {{lb:cd}}"
]
start_dir = "~/fattoria/fienile/.riparo"
end_dir = "~/paese/.riparo-nascosto"
hints = [
"{{rb:Possiamo andare direttamente al}} {{lb:.riparo-nascosto}} "
"{{rb:usando}} {{yb:cd ~/paese/.riparo-nascosto/}}"
]
# Remove the cane
deleted_items = [
"~/paese/.riparo-nascosto/cane"
]
def block_command(self):
return unblock_cd_commands(self.last_user_input)
def check_command(self):
# If the command passes, then print a nice hint.
if self.last_user_input.startswith("cd") and \
not self.get_command_blocked() and \
not self.current_path == self.end_dir:
hint = "\n{{gb:Ottimo! Continua così!}}"
self.send_text(hint)
else:
return StepTemplateMkdir.check_command(self)
def next(self):
Step5()
class Step5(StepTemplateMkdir):
story = [
"Guarda {{lb:attorno}}."
]
start_dir = "~/paese/.riparo-nascosto"
end_dir = "~/paese/.riparo-nascosto"
commands = [
"ls",
"ls -a"
]
hints = [
"{{rb:Usa}} {{yb:ls}} {{rb:per guardare attorno.}}"
]
last_step = True
def next(self):
NextStep(self.xp)
| gpl-2.0 | 5,963,692,810,422,163,000 | 25.119048 | 89 | 0.568596 | false | 2.879265 | false | false | false |
dannysellers/django_orders | tracker/templatetags/num_filters.py | 1 | 1433 | from django import template
register = template.Library()
@register.filter
def length (value, length=2):
"""
    Truncates the decimal part of `value` to `length` digits, padding short
    decimals with zeros; falsy values yield a zero string such as '0.00'.
:type value: float
:type length: int
:rtype: str
"""
if value:
_length = int(length)
_string = str(value).split('.')
if len(_string[1]) == 1:
_string[1] += '0'.rjust(_length - 1, '0')
return _string[0] + '.' + _string[1][:_length]
else:
return '0.{}'.format('0' * int(length))
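# --- illustrative sketch (not part of the original module) -------------------
# A few concrete inputs/outputs for the `length` filter above; the values are
# arbitrary and only meant to show the truncate/pad behaviour.
def _example_length_filter():
    assert length(3.14159, 3) == '3.141'   # decimal part truncated, not rounded
    assert length(5.5) == '5.50'           # single decimal digit padded to two
    assert length(0) == '0.00'             # falsy values fall back to zeros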
@register.filter
def storage_fee_total (item_list, stored = True):
"""
Sums the storage fees of a given item list
:param item_list: List of items to process
:param stored: Whether to consider only items still in storage (default True)
:return: Storage fee sum
"""
_sum = float(0)
for item in item_list:
if stored:
if item.status != '4':
_sum += item.storage_fees
else:
_sum += item.storage_fees
return length(_sum, 2)
@register.filter
def stored_count (unit_list):
"""
Returns count of items (Inventory or Shipment) that are still in storage (status < 4)
:param unit_list: Set of items to filter
:type unit_list: QuerySet
:return: Number of shipments with status < 4
:rtype: Int
"""
_count = int(0)
for unit in unit_list:
if int(unit.status) < 4:
_count += 1
return _count
| gpl-2.0 | -1,343,144,368,386,720,800 | 24.589286 | 89 | 0.573622 | false | 3.609572 | false | false | false |
Samuel-Phillips/python-graph-theory | graphs.py | 1 | 1379 | #!/usr/bin/python3
import collections
import sys
Conn = collections.namedtuple('Conn', ['node', 'weight'])
def info(*tt):
print(*tt, file=sys.stderr)
class Node:
finf = float('inf')
def __init__(self):
self.connected = []
self.origin = None
self.dist = self.finf
info("Created node")
def wipe_dists(self):
info("Wiping own distance")
if self.dist != self.finf:
self.dist = self.finf
            # `connected` holds Conn namedtuples, so recurse via conn.node
            for conn in self.connected:
                conn.node.wipe_dists()
def connect_to(self, node, path_weight):
info("Connecting up")
self.connected.append(Conn(node, path_weight))
node.connected.append(Conn(self, path_weight))
def set_origin(self):
info("Setting origin")
self.wipe_dists()
self.build_dist(0, self)
def build_dist(self, dd, obj):
info("Building distance")
self.origin = obj
self.dist = dd
needs_build = []
for conn in self.connected:
if conn.node.dist > self.dist + conn.weight:
conn.node.build_dist(self.dist + conn.weight, obj)
def shortest_path_to(self, nodes):
self.set_origin()
return [node.dist for node in nodes]
def __repr__(self):
return 'Node(dist={}, conn={})'.format(
repr(self.dist), repr(self.connected))
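# --- illustrative sketch (not part of the original module) -------------------
# Wiring up a small weighted graph and querying shortest-path distances from
# one node; the weights are arbitrary.
def _example_shortest_paths():
    a, b, c = Node(), Node(), Node()
    a.connect_to(b, 2)
    b.connect_to(c, 3)
    a.connect_to(c, 10)
    # distances from `a` to [a, b, c] -> [0, 2, 5]
    return a.shortest_path_to([a, b, c])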
| mit | 15,872,365,635,721,076 | 26.039216 | 66 | 0.565627 | false | 3.716981 | false | false | false |
Jailander/COSMOS | kriging_exploration/src/kriging_exploration/topological_map.py | 1 | 1803 | import utm
import yaml
import numpy as np
from map_coords import MapCoords
def coord_in_poly(point, limits):
x=point.easting
y=point.northing
n = len(limits)
inside = False
#
p1x = limits[0].easting
p1y = limits[0].northing
for i in range(n+1):
p2x = limits[i % n].easting
p2y = limits[i % n].northing
if y > min(p1y,p2y):
if y <= max(p1y,p2y):
if x <= max(p1x,p2x):
if p1y != p2y:
xints = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x == p2x or x <= xints:
inside = not inside
p1x,p1y = p2x,p2y
return inside
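# --- illustrative sketch (not part of the original module) -------------------
# coord_in_poly() only relies on the .easting/.northing attributes, so a small
# stand-in tuple is enough to demonstrate the ray-casting test; the square and
# test points below are made up.
def _example_coord_in_poly():
    from collections import namedtuple
    P = namedtuple('P', ['easting', 'northing'])
    square = [P(0, 0), P(10, 0), P(10, 10), P(0, 10)]
    return coord_in_poly(P(5, 5), square), coord_in_poly(P(20, 5), square)  # (True, False)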
class TopoNode(object):
def __init__(self, name, coord, ind):
self.name = name #Node Name
self.coord = coord #Node coordinates
self.ind = ind #Grid Indexes
self.visited = False
def __repr__(self):
a = dir(self)
b = []
s = ''
for i in a:
if not i.startswith('_'):
b.append(str(i))
for i in b:
s = s + str(i) + ': ' + str(self.__getattribute__(i)) + '\n'
return s
class TopoMap(object):
def __init__(self, grid):
self.waypoints=[]
self._calculate_waypoints(grid)
def _calculate_waypoints(self, grid):
ind = 0
for i in range(0, len(grid.cells)):
for j in range(0, len(grid.cells[0])):
if coord_in_poly(grid.cells[i][j], grid.limits):
name = "WayPoint%03d" %ind
d = TopoNode(name, grid.cells[i][j], (i, j))
self.waypoints.append(d)
ind+=1
#print name
#print len(self.waypoints)
| mit | -3,500,028,241,945,313,300 | 25.514706 | 76 | 0.4731 | false | 3.338889 | false | false | false |
joelsmith/openshift-tools | scripts/cloud/aws/ops-ec2-trim-ebs-snapshots.py | 5 | 4809 | #!/usr/bin/env python
# vim: expandtab:tabstop=4:shiftwidth=4
"""
 This is a script that trims (deletes) expired EBS snapshots in a given account,
 keeping a configurable number of hourly, daily, weekly and monthly snapshots
 and reporting the results to Zabbix.

 This assumes that your AWS credentials are loaded in the ENV variables:
 AWS_ACCESS_KEY_ID=xxxx
 AWS_SECRET_ACCESS_KEY=xxxx

 Usage (retention counts are examples):
 ops-ec2-trim-ebs-snapshots.py --keep-hourly 24 --keep-daily 7 --keep-weekly 4 --keep-monthly 12
"""
# Ignoring module name
# pylint: disable=invalid-name,import-error
import os
import argparse
from openshift_tools.cloud.aws import ebs_snapshotter
# Reason: disable pylint import-error because our libs aren't loaded on jenkins.
# Status: temporary until we start testing in a container where our stuff is installed.
# pylint: disable=import-error
from openshift_tools.monitoring.metric_sender import MetricSender
EXPIRED_SNAPSHOTS_KEY = 'aws.ebs.snapshotter.expired_snapshots'
DELETED_SNAPSHOTS_KEY = 'aws.ebs.snapshotter.deleted_snapshots'
DELETION_ERRORS_KEY = 'aws.ebs.snapshotter.deletion_errors'
class TrimmerCli(object):
""" Responsible for parsing cli args and running the trimmer. """
def __init__(self):
""" initialize the class """
self.args = None
self.parse_args()
def parse_args(self):
""" parse the args from the cli """
parser = argparse.ArgumentParser(description='EBS Snapshot Trimmer')
parser.add_argument('--keep-hourly', required=True, type=int,
help='The number of hourly snapshots to keep. 0 is infinite.')
parser.add_argument('--keep-daily', required=True, type=int,
help='The number of daily snapshots to keep. 0 is infinite.')
parser.add_argument('--keep-weekly', required=True, type=int,
help='The number of weekly snapshots to keep. 0 is infinite.')
parser.add_argument('--keep-monthly', required=True, type=int,
help='The number of monthly snapshots to keep. 0 is infinite.')
parser.add_argument('--aws-creds-profile', required=False,
help='The AWS credentials profile to use.')
parser.add_argument('--dry-run', action='store_true', default=False,
help='Say what would have been done, but don\'t actually do it.')
self.args = parser.parse_args()
def main(self):
""" main function """
total_expired_snapshots = 0
total_deleted_snapshots = 0
total_deletion_errors = 0
if self.args.aws_creds_profile:
os.environ['AWS_PROFILE'] = self.args.aws_creds_profile
regions = ebs_snapshotter.EbsSnapshotter.get_supported_regions()
for region in regions:
print "Region: %s:" % region
ss = ebs_snapshotter.EbsSnapshotter(region.name, verbose=True)
expired_snapshots, deleted_snapshots, snapshot_deletion_errors = \
ss.trim_snapshots(hourly_backups=self.args.keep_hourly, \
daily_backups=self.args.keep_daily, \
weekly_backups=self.args.keep_weekly, \
monthly_backups=self.args.keep_monthly, \
dry_run=self.args.dry_run)
num_deletion_errors = len(snapshot_deletion_errors)
total_expired_snapshots += len(expired_snapshots)
total_deleted_snapshots += len(deleted_snapshots)
total_deletion_errors += num_deletion_errors
if num_deletion_errors > 0:
print " Snapshot Deletion errors (%d):" % num_deletion_errors
for cur_err in snapshot_deletion_errors:
print " %s" % cur_err
print
print " Total number of expired snapshots: %d" % total_expired_snapshots
print " Total number of deleted snapshots: %d" % total_deleted_snapshots
print "Total number of snapshot deletion errors: %d" % total_deletion_errors
print
print "Sending results to Zabbix:"
if self.args.dry_run:
print " *** DRY RUN, NO ACTION TAKEN ***"
else:
TrimmerCli.report_to_zabbix(total_expired_snapshots, total_deleted_snapshots, total_deletion_errors)
@staticmethod
def report_to_zabbix(total_expired_snapshots, total_deleted_snapshots, total_deletion_errors):
""" Sends the commands exit code to zabbix. """
mts = MetricSender(verbose=True)
mts.add_metric({
EXPIRED_SNAPSHOTS_KEY: total_expired_snapshots,
DELETED_SNAPSHOTS_KEY: total_deleted_snapshots,
DELETION_ERRORS_KEY: total_deletion_errors
})
mts.send_metrics()
if __name__ == "__main__":
TrimmerCli().main()
| apache-2.0 | -4,464,730,399,330,036,700 | 37.782258 | 112 | 0.62487 | false | 4.082343 | false | false | false |
plecto/django-smart-lists | testproject/migrations/0002_auto_20190728_1359.py | 1 | 1712 | # Generated by Django 2.2.3 on 2019-07-28 13:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [('testproject', '0001_initial')]
operations = [
migrations.CreateModel(
name='ForeignModelWithoutUrl',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=128)),
],
),
migrations.CreateModel(
name='ForeignModelWithUrl',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=128)),
],
),
migrations.AlterField(
model_name='samplemodel',
name='category',
field=models.CharField(
choices=[('blog_post', 'Blog Post'), ('foo', 'Foo'), ('bar', 'Bar')], max_length=128
),
),
migrations.AddField(
model_name='samplemodel',
name='foreign_1',
field=models.ForeignKey(
blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='testproject.ForeignModelWithUrl'
),
),
migrations.AddField(
model_name='samplemodel',
name='foreign_2',
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to='testproject.ForeignModelWithoutUrl',
),
),
]
| mit | -6,303,085,913,088,266,000 | 33.24 | 120 | 0.538551 | false | 4.493438 | false | false | false |
pgaines937/news_articles | sentiment.py | 1 | 4501 | from nltk.corpus import stopwords
from itertools import chain
from nltk.corpus import wordnet
from collections import defaultdict
import pip
import csv
# combined_dataset_verbose has all the news articles.
f = open('combined_dataset_verbose.csv')
csv_f = csv.reader(f)
next(csv_f)
# Set of positive, negative and neutral words.
negativeword = "downfall,down,decrease,negative,fall,JEOPARDIZED,RECALCULATE,TESTIFY,QUESTIONABLE,IMPEDED,EXACERBATE,OVERSTATEMENT,SLANDER,NONPERFORMING,UNFOUNDED,WORST,ILLICIT,RENEGOTIATE, MANIPULATE, DISTURBING, CIRCUMVENT, PREJUDICED, APPARENTLY, FRIVOLOUS, REJECT, PROTESTED, REJECTS, DOWNSIZED, GRIEVANCE, REFILE, DISSENTING, FORECLOSED, GRATUITOUS, UNPREDICTED, MISAPPLICATION, CLOSEOUT, COLLABORATES, OBLIGEE, DISSENTERS, FOREGO, WRITS, PLEDGORS, PRECIPITATED, IDLED, SUGGESTS, BAILEE, FRIENDLY, ARBITRAL, BREAKTHROUGHS, FAVORING, CERTIORARI, PERSISTS, ADJOURNMENTS, IGNORING, RECALCULATE"
negativeword = negativeword.split(',')
positiveword = "increase,growth,rise,raise,up,UNMATCHED, OUTPERFORM, VOIDED, CONFIDENT, REWARDED, PROSPERITY, DISCREPANCY, RECTIFICATION, CRITICALLY, FORFEITABLE, ARBITRARY, TURMOIL, IMBALANCE, PROGRESSES, ANTECEDENT, OVERCHARGED, DURESS, MANIPULATION, DISTRESSED, DISSOLUTIONS, HAZARD,EXPROPRIATION, UNDERSTATE, UNFIT, PLEADINGS, INVESTIGATED, SOMETIME, ENCROACHMENT, MISSTATE,MUTANDIS, DEFRAUD, UNDEFINED, DELISTING, FORFEITS, UNCOVERS, MALPRACTICE, PRESUMES, GRANTORS, COLLAPSING, FALSELY, UNSOUND, REJECTIONS, WHEREABOUTS, DAMAGING, REASSIGNMENT, DISTRACTING, DISAPPROVED, STAGNANT, PREDECEASES, SAFE"
positiveword = positiveword.split(',')
neutral = "FAVORABLE, VULNERABILITY, CLAIMS, ALTERATION, DISCONTINUING, BANKRUPTCY, DEPENDING, DEPENDING, ATTAINING, ISSIONS, CORRECTING, IMPROVES, GAIN, FLUCTUATION, DISCONTINUE, STATUTES, THEREUNTO, RISKY, RISKY, FLUCTUATES, SUBROGATION, NEGATIVELY, LOSE, ATTORNEY, REVISED, COULD, EXPOSURE, DEPENDENT, WILL, CONTRACTS, FAILURE, RISK, EASILY, PROFICIENCY, SUPERSEDES, ACCESSION, DULY, MAY, REMEDIED, VARIABLE, UNENFORCEABLE, RISKS, UNRESOLVED, VARIATIONS, COURTS, PROBLEM, VARIED, HEREBY, PREDICT"
neutral = neutral.split(',')
finalwordlist = {}
# Generating synonyms for all negative words.
negtivelist = []
for w in negativeword:
w = w.strip()
negtivelist.append(w)
synonyms = wordnet.synsets(w)
lemmas = set(chain.from_iterable([word.lemma_names() for word in synonyms]))
lists = [x.encode('UTF8') for x in lemmas]
negtivelist.extend(lists)
finalwordlist[-1] = negtivelist
# Generating synonyms for all positive words.
positivelist = []
for w in positiveword:
w = w.strip()
positivelist.append(w.lower())
synonyms = wordnet.synsets(w.lower())
lemmas = set(chain.from_iterable([word.lemma_names() for word in synonyms]))
lists = [x.encode('UTF8') for x in lemmas]
positivelist.extend(lists)
finalwordlist[1] = positivelist
# Generating synonyms for all neutral words.
neutrallist = []
for w in neutral:
w = w.strip()
neutrallist.append(w.lower())
synonyms = wordnet.synsets(w.lower())
lemmas = set(chain.from_iterable([word.lemma_names() for word in synonyms]))
lists = [x.encode('UTF8') for x in lemmas]
neutrallist.extend(lists)
finalwordlist[0] = neutrallist
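# --- illustrative sketch (not part of the original script) -------------------
# What the three loops above do for a single word: collect the lemma names of
# every WordNet synset of the word and use them as additional sentiment cues.
# The word "increase" is only an example input.
def _example_expand_word(word="increase"):
    synonyms = wordnet.synsets(word.lower())
    return sorted(set(chain.from_iterable(s.lemma_names() for s in synonyms)))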
stop = set(stopwords.words('english'))
# .csv output file.
fieldnames = ['publish_date', 'Date', 'sentiment_polarity', 'sentiment_subjectivity', 'Positive', 'Negative', 'Neutral',
'Class']
fp = open('final.csv', 'w')
fp1 = open('date.csv', 'w')
fieldnames1 = ['Date']
writer = csv.DictWriter(fp, fieldnames=fieldnames)
writer1 = csv.DictWriter(fp1, fieldnames=fieldnames1)
writer.writeheader()
writer1.writeheader()
# loop through all articles and get the sentiment score.
for row in csv_f:
broken = [i for i in row[4].lower().split() if i not in stop]
# get count for each word
pos = len(set(finalwordlist[1]) & set(broken)) + 1
neg = len(set(finalwordlist[-1]) & set(broken)) + 1
neut = len(set(finalwordlist[0]) & set(broken)) + 1
totol = pos + neg + neut
    # Convert the (add-one smoothed) counts into per-class probabilities
pos = 1.0 * pos / totol
neg = 1.0 * neg / totol
neut = 1.0 * neut / totol
if row[5] > row[8]:
label = 0
else:
label = 1
writer.writerow(
{'publish_date': row[1], 'Date': row[2], 'sentiment_polarity': row[0], 'sentiment_subjectivity': row[3],
'Positive': pos, 'Negative': neg, 'Neutral': neut, 'Class': label})
writer1.writerow({'Date': row[2]})
| mit | -3,985,413,873,002,839,600 | 44.01 | 605 | 0.725839 | false | 2.80611 | false | false | false |
dvspirito/pymeasure | pymeasure/instruments/hp/hp33120A.py | 1 | 4833 | #
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2017 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import logging
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
from pymeasure.instruments import Instrument
from pymeasure.instruments.validators import strict_discrete_set
class HP33120A(Instrument):
""" Represents the Hewlett Packard 33120A Arbitrary Waveform
Generator and provides a high-level interface for interacting
with the instrument.
"""
SHAPES = {
'sinusoid':'SIN', 'square':'SQU', 'triangle':'TRI',
'ramp':'RAMP', 'noise':'NOIS', 'dc':'DC', 'user':'USER'
}
shape = Instrument.control(
"SOUR:FUNC:SHAP?", "SOUR:FUNC:SHAP %s",
""" A string property that controls the shape of the wave,
which can take the values: sinusoid, square, triangle, ramp,
noise, dc, and user. """,
validator=strict_discrete_set,
values=SHAPES,
map_values=True
)
frequency = Instrument.control(
"SOUR:FREQ?", "SOUR:FREQ %g",
""" A floating point property that controls the frequency of the
output in Hz. The allowed range depends on the waveform shape
and can be queried with :attr:`~.max_frequency` and
:attr:`~.min_frequency`. """
)
max_frequency = Instrument.measurement(
"SOUR:FREQ? MAX",
""" Reads the maximum :attr:`~.HP33120A.frequency` in Hz for the given shape """
)
min_frequency = Instrument.measurement(
"SOUR:FREQ? MIN",
""" Reads the minimum :attr:`~.HP33120A.frequency` in Hz for the given shape """
)
amplitude = Instrument.control(
"SOUR:VOLT?", "SOUR:VOLT %g",
""" A floating point property that controls the voltage amplitude of the
output signal. The default units are in peak-to-peak Volts, but can be
controlled by :attr:`~.amplitude_units`. The allowed range depends
on the waveform shape and can be queried with :attr:`~.max_amplitude`
and :attr:`~.min_amplitude`. """
)
max_amplitude = Instrument.measurement(
"SOUR:VOLT? MAX",
""" Reads the maximum :attr:`~.amplitude` in Volts for the given shape """
)
min_amplitude = Instrument.measurement(
"SOUR:VOLT? MIN",
""" Reads the minimum :attr:`~.amplitude` in Volts for the given shape """
)
offset = Instrument.control(
"SOUR:VOLT:OFFS?", "SOUR:VOLT:OFFS %g",
""" A floating point property that controls the amplitude voltage offset
in Volts. The allowed range depends on the waveform shape and can be
queried with :attr:`~.max_offset` and :attr:`~.min_offset`. """
)
max_offset = Instrument.measurement(
"SOUR:VOLT:OFFS? MAX",
""" Reads the maximum :attr:`~.offset` in Volts for the given shape """
)
min_offset = Instrument.measurement(
"SOUR:VOLT:OFFS? MIN",
""" Reads the minimum :attr:`~.offset` in Volts for the given shape """
)
AMPLITUDE_UNITS = {'Vpp':'VPP', 'Vrms':'VRMS', 'dBm':'DBM', 'default':'DEF'}
amplitude_units = Instrument.control(
"SOUR:VOLT:UNIT?", "SOUR:VOLT:UNIT %s",
""" A string property that controls the units of the amplitude,
which can take the values Vpp, Vrms, dBm, and default.
""",
validator=strict_discrete_set,
values=AMPLITUDE_UNITS,
map_values=True
)
def __init__(self, resourceName, **kwargs):
super(HP33120A, self).__init__(
resourceName,
"Hewlett Packard 33120A Function Generator",
**kwargs
)
self.amplitude_units = 'Vpp'
def beep(self):
""" Causes a system beep. """
self.write("SYST:BEEP")
| mit | 8,442,955,602,036,176,000 | 39.957627 | 88 | 0.651769 | false | 4.047739 | false | false | false |
uwosh/Campus_Directory_web_service | getNextSemestersWithDescription.py | 1 | 3594 | # $Id$
# returns the current (if available) and next PeopleSoft semester codes with matching descriptions, as of today
import re
import xmlrpclib
Randy_Loch = '192.168.0.1'
Kim_Nguyen_G5 = '192.168.0.1'
Kim_Nguyen_Air = '192.168.0.1'
Kim_Nguyen_iMac = '192.168.0.1'
John_Hren_MBP = '192.168.0.1'
CMF2 = '192.168.0.1'
Plone1 = '192.168.0.1'
Plone3 = '192.168.0.1'
def getNextSemestersWithDescription(self):
request = self.REQUEST
RESPONSE = request.RESPONSE
remote_addr = request.REMOTE_ADDR
if remote_addr in [Plone1, CMF2, Randy_Loch, Kim_Nguyen_Air, Kim_Nguyen_G5, Kim_Nguyen_iMac, John_Hren_MBP, Plone3, '127.0.0.1', ]:
conn = getattr(self, 'Oracle_Database_Connection_NGUYEN_PRD')
connstr = conn.connection_string
try:
if not conn.connected():
conn.connect(connstr)
except:
conn.connect(connstr)
dbc = conn()
querystr = "select strm, descr from ps_term_tbl where institution = 'UWOSH' and acad_career = 'UGRD' and term_begin_dt <= sysdate and term_end_dt >= sysdate"
try:
retlist = dbc.query (querystr)
except:
# try the query a second time since it can fail to connect the first time
conn.connect(connstr)
dbc = conn()
retlist = dbc.query (querystr)
if len(retlist) == 2:
if len(retlist[1]) == 1:
if len(retlist[1][0]) == 2:
current_semester = retlist[1][0][0]
current_semester_descr = retlist[1][0][1]
if current_semester:
# now grab the next semester's code, knowing the current semester code
querystr2 = "select t1.strm, t1.descr from ps_term_tbl t1 where t1.institution = 'UWOSH' and t1.acad_career = 'UGRD' and t1.strm = (select min(strm) from ps_term_tbl t2 where t2.institution = t1.institution and t2.acad_career = t1.acad_career and t2.strm > '%s')" % current_semester
else:
# grab the next semester code, a bit differently from above because we are not currently in a semester
querystr2 = "select t1.strm, t1.descr from ps_term_tbl t1 where t1.institution = 'UWOSH' and t1.acad_career = 'UGRD' and t1.term_begin_dt = (select min(term_begin_dt) from ps_term_tbl t2 where t2.institution = t1.institution and t2.acad_career = t1.acad_career and term_begin_dt > sysdate)"
try:
retlist = dbc.query (querystr2)
except:
# try the query a second time since it can fail to connect the first time
conn.connect(connstr)
dbc = conn()
retlist = dbc.query (querystr2)
if len(retlist) == 2:
if len(retlist[1]) == 1:
if len(retlist[1][0]) == 2:
next_semester = retlist[1][0][0]
next_semester_descr = retlist[1][0][1]
myMarshaller = xmlrpclib.Marshaller()
if current_semester:
# return array of both semester data
return myMarshaller.dumps([(current_semester, current_semester_descr), (next_semester, next_semester_descr),])
#return([(current_semester, current_semester_descr), (next_semester, next_semester_descr),])
else:
if next_semester:
# return array of just next semester data
return myMarshaller.dumps([(next_semester, next_semester_descr),])
#return([(next_semester, next_semester_descr),])
else:
return "error: unable to determine the next semester code"
| gpl-2.0 | 1,523,897,577,284,683,300 | 45.076923 | 302 | 0.605175 | false | 3.108997 | false | false | false |
rgreinho/docker-django-cookiecutter | {{ cookiecutter.project_name }}/{{ cookiecutter.project_name }}/settings/base.py | 1 | 4751 | import os
import sys
from urllib.parse import urlparse
import dj_database_url
# PATH vars
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def root(x):
return os.path.join(BASE_DIR, x)
# Insert the apps dir at the top of your path.
sys.path.insert(0, root('apps'))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'CHANGE THIS!!!'
# Allow all host headers
# SECURITY WARNING: don't run with this setting in production!
ALLOWED_HOSTS = ['*']
# Django applications.
DJANGO_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.staticfiles',
]
# 3rd party apps.
THIRD_PARTY_APPS = []
# Project applications.
PROJECT_APPS = [
'{{ cookiecutter.django_app }}',
]
# Installed apps is a combination of all the apps.
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + PROJECT_APPS
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = '{{ cookiecutter.project_name }}.urls'
# Define the site admins.
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (('{{ cookiecutter.author }}', '{{ cookiecutter.author_email }}'), )
# Define site managers.
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# Python dotted path to the WSGI application used by Django's runserver.
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = '{{ cookiecutter.project_name }}.wsgi.application'
# Internationalization.
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Chicago'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = root('static')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# Additional locations of static files.
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (root('assets'), )
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
# Templates configuration.
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'DIRS': [
root('templates'),
],
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
}]
# Password validation.
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Database configuration.
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Update database configuration with ${DATABASE_URL}.
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Cache configuration.
if os.environ.get('REDIS_URL'):
redis_url = urlparse(os.environ.get('REDIS_URL'))
CACHES = {
"default": {
"BACKEND": "redis_cache.RedisCache",
"LOCATION": "{0}:{1}".format(redis_url.hostname, redis_url.port),
"OPTIONS": {
"PASSWORD": redis_url.password,
"DB": 0,
}
}
}
| mit | 7,790,231,306,529,477,000 | 29.455128 | 98 | 0.687434 | false | 3.532342 | false | false | false |
LedgerHQ/blue-loader-python | ledgerblue/loadMCU.py | 1 | 2294 | """
*******************************************************************************
* Ledger Blue
* (c) 2016 Ledger
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
********************************************************************************
"""
import argparse
def auto_int(x):
return int(x, 0)
def get_argparser():
parser = argparse.ArgumentParser(description="""Load the firmware onto the MCU. The MCU must already be in
bootloader mode.""")
parser.add_argument("--targetId", help="The device's target ID", type=auto_int)
parser.add_argument("--fileName", help="The name of the firmware file to load")
parser.add_argument("--bootAddr", help="The firmware's boot address", type=auto_int)
parser.add_argument("--apdu", help="Display APDU log", action='store_true')
parser.add_argument("--reverse", help="Load HEX file in reverse from the highest address to the lowest", action='store_true')
parser.add_argument("--nocrc", help="Load HEX file without checking CRC of loaded sections", action='store_true')
return parser
if __name__ == '__main__':
from .hexParser import IntelHexParser
from .hexLoader import HexLoader
from .comm import getDongle
args = get_argparser().parse_args()
if args.targetId == None:
raise Exception("Missing targetId")
if args.fileName == None:
raise Exception("Missing fileName")
parser = IntelHexParser(args.fileName)
if args.bootAddr == None:
args.bootAddr = parser.getBootAddr()
dongle = getDongle(args.apdu)
#relative load
loader = HexLoader(dongle, 0xe0, False, None, False)
loader.validateTargetId(args.targetId)
hash = loader.load(0xFF, 0xF0, parser, reverse=args.reverse, doCRC=(not args.nocrc))
loader.run(args.bootAddr)
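# --- illustrative note (not part of the original module) ---------------------
# Example invocation; the target ID, hex file name and boot address below are
# placeholders and depend on the device and firmware being flashed:
#
#   python -m ledgerblue.loadMCU --targetId 0x01000001 --fileName mcu.hex \
#       --bootAddr 0x20000000 --apdu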
| apache-2.0 | -4,812,842,145,532,295,000 | 37.25 | 129 | 0.653008 | false | 3.941581 | false | false | false |
sbmlteam/deviser | generator/code_files/CppCodeFile.py | 1 | 37362 | #!/usr/bin/env python
#
# @file CppCodeFile.py
# @brief class for generating code file for the given class
# @author Frank Bergmann
# @author Sarah Keating
#
# <!--------------------------------------------------------------------------
#
# Copyright (c) 2013-2018 by the California Institute of Technology
# (California, USA), the European Bioinformatics Institute (EMBL-EBI, UK)
# and the University of Heidelberg (Germany), with support from the National
# Institutes of Health (USA) under grant R01GM070923. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# Neither the name of the California Institute of Technology (Caltech), nor
# of the European Bioinformatics Institute (EMBL-EBI), nor of the University
# of Heidelberg, nor the names of any contributors, may be used to endorse
# or promote products derived from this software without specific prior
# written permission.
# ------------------------------------------------------------------------ -->
from ..base_files import BaseCppFile
from . cpp_functions import *
from ..util import query, strFunctions, global_variables
class CppCodeFile(BaseCppFile.BaseCppFile):
"""Class for all Cpp Code files"""
def __init__(self, class_object, represents_class=True):
self.brief_description = \
'Implementation of the {0} class.'.format(class_object['name'])
BaseCppFile.BaseCppFile.__init__(self, class_object['name'], 'cpp',
class_object['attribs'])
# members from object
if represents_class:
self.expand_class(class_object)
self.lv_info = []
if 'root' in class_object and 'lv_info' in class_object['root']:
self.lv_info = class_object['root']['lv_info']
########################################################################
# Functions for writing the class
def write_class(self):
# self.write_forward_class()
self.write_constructors()
self.write_attribute_functions()
self.write_child_element_functions()
self.write_listof_functions()
self.write_child_lo_element_functions()
self.write_concrete_functions()
self.write_general_functions()
self.write_generic_attribute_functions()
self.write_functions_to_retrieve()
if self.document:
self.write_document_error_log_functions()
self.write_protected_functions()
if self.add_impl is not None and not self.is_list_of:
self.copy_additional_file(self.add_impl)
def write_c_code(self):
self.is_cpp_api = False
if self.is_plugin:
self.write_child_lo_element_functions()
self.write_attribute_functions()
self.write_child_element_functions()
elif not self.is_list_of:
self.write_constructors()
self.write_attribute_functions()
self.write_child_element_functions()
self.write_child_lo_element_functions()
self.write_concrete_functions()
self.write_general_functions()
else:
self.write_attribute_functions()
self.write_listof_functions()
########################################################################
# Functions for writing specific includes and forward declarations
def write_forward_class(self):
if len(self.concretes) == 0:
return
for element in self.concretes:
self.write_line('class {0};'.format(element['element']))
self.skip_line()
def write_general_includes(self):
lo_name = ''
if self.has_parent_list_of:
if 'lo_class_name' in self.class_object:
lo_name = self.class_object['lo_class_name']
if len(lo_name) == 0:
lo_name = strFunctions.list_of_name(self.class_name)
if global_variables.is_package:
folder = self.language if not self.is_plugin else 'extension'
self.write_line_verbatim('#include <{0}/packages/{1}/{2}/{3}'
'.h>'.format(self.language,
self.package.lower(),
folder, self.class_name))
if self.has_parent_list_of and not self.is_list_of:
self.write_line_verbatim('#include <{0}/packages/{1}/{0}/'
'{2}'
'.h>'.format(self.language,
self.package.lower(),
lo_name))
self.write_line_verbatim('#include <{0}/packages/{1}/validator/'
'{2}{3}Error'
'.h>'.format(self.language,
self.package.lower(),
self.package,
self.cap_language))
else:
self.write_line_verbatim('#include <{0}/{1}'
'.h>'.format(self.language,
self.class_name))
if self.has_parent_list_of and not self.is_list_of:
self.write_line_verbatim('#include <{0}/{1}'
'.h>'.format(self.language,
lo_name))
self.write_line_verbatim('#include <sbml/xml/XMLInputStream.h>')
# determine whether we need to write other headers
write_element_filter = False
concrete_classes = []
write_model = False
write_validators = False
write_math = False
if len(self.child_lo_elements) > 0 and global_variables.is_package:
write_element_filter = True
elif global_variables.is_package:
# for element in self.child_elements:
# if 'concrete' in element:
# write_element_filter = True
if self.num_children > 0 and self.num_children != self.num_non_std_children:
write_element_filter = True
if self.is_plugin and not self.is_doc_plugin \
and self.language == 'sbml':
write_model = True
if self.is_doc_plugin:
write_validators = True
if self.has_math:
write_math = True
for lo in self.child_lo_elements:
if 'concrete' in lo:
child_concretes = query.get_concretes(lo['root'],
lo['concrete'])
for j in range(0, len(child_concretes)):
element = child_concretes[j]['element']
if element not in concrete_classes:
concrete_classes.append(element)
for i in range(0, len(self.concretes)):
element = self.concretes[i]['element']
if element not in concrete_classes:
concrete_classes.append(element)
for child in self.child_elements:
if 'concrete' in child:
child_concretes = query.get_concretes(child['root'],
child['concrete'])
for j in range(0, len(child_concretes)):
element = child_concretes[j]['element']
if element not in concrete_classes:
concrete_classes.append(element)
if write_element_filter:
self.write_line_verbatim('#include <{0}/util/ElementFilter.'
'h>'.format(self.language))
if write_model:
self.write_line_verbatim('#include <{0}/Model'
'.h>'.format(self.language))
if write_validators:
self.write_line_verbatim('#include <{0}/packages/{1}/validator/{2}'
'ConsistencyValidator'
'.h>'.format(self.language,
self.package.lower(),
self.package))
self.write_line_verbatim('#include <{0}/packages/{1}/validator/{2}'
'IdentifierConsistencyValidator.'
'h>'.format(self.language,
self.package.lower(),
self.package))
if write_math:
self.write_line_verbatim('#include <sbml/math/MathML.h>')
if len(concrete_classes) > 0:
self.skip_line()
for element in concrete_classes:
if global_variables.is_package:
self.write_line_verbatim('#include <{0}/packages/{1}/{0}/{2}'
'.h>'.format(self.language,
self.package.lower(),
element))
else:
self.write_line_verbatim('#include <{0}/{1}.h>'
''.format(self.language, element))
self.skip_line(2)
self.write_line('using namespace std;')
self.skip_line()
########################################################################
# function to write the data members
def write_data_members(self, attributes):
for i in range(0, len(attributes)):
if attributes[i]['attType'] != 'string':
self.write_line('{0} {1};'.format(attributes[i]['attTypeCode'],
attributes[i]['memberName']))
else:
self.write_line('std::string {0};'
.format(attributes[i]['memberName']))
if attributes[i]['isNumber'] is True \
or attributes[i]['attType'] == 'boolean':
self.write_line('bool mIsSet{0};'
.format(attributes[i]['capAttName']))
if self.overwrites_children:
self.write_line('std::string mElementName;')
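    # --- illustrative note (not part of the original generator) --------------
    # For an attribute description such as
    #     {'attType': 'double', 'attTypeCode': 'double',
    #      'capAttName': 'Value', 'memberName': 'mValue', 'isNumber': True}
    # the loop above emits "double mValue;" followed by "bool mIsSetValue;",
    # while string attributes become "std::string <memberName>;" with no flag.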
########################################################################
# function to write the constructors
def write_constructors(self):
constructor = Constructors.Constructors(self.language,
self.is_cpp_api,
self.class_object)
if self.is_cpp_api and not self.is_plugin:
code = constructor.write_level_version_constructor()
self.write_function_implementation(code)
code = constructor.write_namespace_constructor()
self.write_function_implementation(code)
elif self.is_plugin:
code = constructor.write_uri_constructor()
self.write_function_implementation(code)
elif self.has_std_base:
for i in range(0, len(self.concretes)+1):
code = constructor.write_level_version_constructor(i)
self.write_function_implementation(code)
else:
code = constructor.write_level_version_constructor(-1)
self.write_function_implementation(code)
code = constructor.write_copy_constructor()
self.write_function_implementation(code)
code = constructor.write_assignment_operator()
self.write_function_implementation(code)
code = constructor.write_clone()
self.write_function_implementation(code)
code = constructor.write_destructor()
self.write_function_implementation(code)
########################################################################
# Functions for writing the attribute manipulation functions
# these are for attributes and elements that occur as a single child
# function to write the get/set/isSet/unset functions for attributes
def write_attribute_functions(self):
attrib_functions = SetGetFunctions.SetGetFunctions(self.language,
self.is_cpp_api,
self.is_list_of,
self.class_object,
self.lv_info)
num_attributes = len(self.class_attributes)
for i in range(0, num_attributes):
code = attrib_functions.write_get(True, i)
self.write_function_implementation(code)
code = attrib_functions.write_get_string_for_enum(True, i)
self.write_function_implementation(code)
for i in range(0, num_attributes):
code = attrib_functions.write_is_set(True, i)
self.write_function_implementation(code)
code = attrib_functions.write_get_num_for_vector(True, i)
self.write_function_implementation(code)
for i in range(0, num_attributes):
code = attrib_functions.write_set(True, i)
self.write_function_implementation(code)
code = attrib_functions.write_set_string_for_enum(True, i)
self.write_function_implementation(code)
code = attrib_functions.write_add_element_for_vector(True, i)
self.write_function_implementation(code)
for i in range(0, num_attributes):
code = attrib_functions.write_unset(True, i)
self.write_function_implementation(code)
# function to write the get/set/isSet/unset functions for single
# child elements
def write_child_element_functions(self, override=None):
if override is None:
if not self.has_children:
return
attrib_functions = SetGetFunctions.\
SetGetFunctions(self.language, self.is_cpp_api,
self.is_list_of, self.class_object, self.lv_info, False, [], True)
num_elements = len(self.child_elements)
else:
attrib_functions = SetGetFunctions.SetGetFunctions(self.language,
self.is_cpp_api,
self.is_list_of,
override)
num_elements = 1
for i in range(0, num_elements):
code = attrib_functions.write_get(False, i)
self.write_function_implementation(code)
code = attrib_functions.write_get(False, i, const=False)
self.write_function_implementation(code)
for i in range(0, num_elements):
code = attrib_functions.write_is_set(False, i)
self.write_function_implementation(code)
for i in range(0, num_elements):
code = attrib_functions.write_set(False, i)
self.write_function_implementation(code)
for i in range(0, num_elements):
code = attrib_functions.write_create(False, i)
if override is None and code is None \
and 'concrete' in self.child_elements[i]:
# need to write creates for the concrete
member = self.child_elements[i]['memberName']
concrete = self.child_elements[i]['concrete']
concretes = query.get_concretes(self.class_object['root'],
concrete)
for j in range(0, len(concretes)):
code = attrib_functions\
.write_create_concrete_child(concretes[j], member)
self.write_function_implementation(code)
else:
self.write_function_implementation(code)
for i in range(0, num_elements):
code = attrib_functions.write_unset(False, i)
self.write_function_implementation(code)
########################################################################
# Functions for writing the generic attribute manipulation functions
# these are for attributes and elements that occur as a single child
# function to write the get/set/isSet/unset functions for attributes
def write_generic_attribute_functions(self):
attrib_functions = GenericAttributeFunctions.GenericAttributeFunctions(self.language,
self.is_cpp_api,
self.is_list_of,
self.class_object)
attributes = query.sort_attributes(self.class_attributes)
bool_atts = attributes['bool_atts']
int_atts = attributes['int_atts']
double_atts = attributes['double_atts']
uint_atts = attributes['uint_atts']
string_atts = attributes['string_atts']
code = attrib_functions.write_get(bool_atts, 'bool')
self.write_function_implementation(code, exclude=True)
code = attrib_functions.write_get(int_atts, 'int')
self.write_function_implementation(code, exclude=True)
code = attrib_functions.write_get(double_atts, 'double')
self.write_function_implementation(code, exclude=True)
code = attrib_functions.write_get(uint_atts, 'unsigned int')
self.write_function_implementation(code, exclude=True)
code = attrib_functions.write_get(string_atts, 'std::string')
self.write_function_implementation(code, exclude=True)
# code = attrib_functions.write_get(string_atts, 'const char*')
# self.write_function_implementation(code, exclude=True)
code = attrib_functions.write_is_set(query.get_unique_attributes(self.class_attributes))
self.write_function_implementation(code, exclude=True)
code = attrib_functions.write_set(bool_atts, 'bool')
self.write_function_implementation(code, exclude=True)
code = attrib_functions.write_set(int_atts, 'int')
self.write_function_implementation(code, exclude=True)
code = attrib_functions.write_set(double_atts, 'double')
self.write_function_implementation(code, exclude=True)
code = attrib_functions.write_set(uint_atts, 'unsigned int')
self.write_function_implementation(code, exclude=True)
code = attrib_functions.write_set(string_atts, 'const std::string&')
self.write_function_implementation(code, exclude=True)
# code = attrib_functions.write_set(string_atts, 'const char*')
# self.write_function_implementation(code, exclude=True)
code = attrib_functions.write_unset(query.get_unique_attributes(self.class_attributes))
self.write_function_implementation(code, exclude=True)
code = attrib_functions.write_create_object()
self.write_function_implementation(code, exclude=True)
code = attrib_functions.write_add_object()
self.write_function_implementation(code, exclude=True)
code = attrib_functions.write_remove_object()
self.write_function_implementation(code, exclude=True)
code = attrib_functions.write_get_num_objects()
self.write_function_implementation(code, exclude=True)
code = attrib_functions.write_get_object()
self.write_function_implementation(code, exclude=True)
########################################################################
# Functions for writing general functions
def write_general_functions(self):
gen_functions = GeneralFunctions.GeneralFunctions(self.language,
self.is_cpp_api,
self.is_list_of,
self.class_object,
self.lv_info)
code = gen_functions.write_rename_sidrefs()
self.write_function_implementation(code)
if not self.is_plugin:
code = gen_functions.write_get_element_name()
self.write_function_implementation(code)
code = gen_functions.write_set_element_name()
self.write_function_implementation(code, exclude=True)
if not self.is_plugin:
code = gen_functions.write_get_typecode()
self.write_function_implementation(code)
code = gen_functions.write_get_item_typecode()
self.write_function_implementation(code)
code = gen_functions.write_has_required_attributes()
self.write_function_implementation(code)
code = gen_functions.write_has_required_elements()
self.write_function_implementation(code)
code = gen_functions.write_write_elements()
self.write_function_implementation(code, exclude=True)
code = gen_functions.write_accept()
self.write_function_implementation(code, exclude=True)
code = gen_functions.write_set_document()
self.write_function_implementation(code, exclude=True)
code = gen_functions.write_write()
self.write_function_implementation(code, exclude=True)
code = gen_functions.write_connect_to_child()
self.write_function_implementation(code, exclude=True)
if self.is_plugin:
code = gen_functions.write_connect_to_parent()
self.write_function_implementation(code, exclude=True)
if global_variables.is_package:
code = gen_functions.write_enable_package()
self.write_function_implementation(code, exclude=True)
code = gen_functions.write_update_ns()
self.write_function_implementation(code, exclude=True)
if self.is_doc_plugin:
code = gen_functions.write_is_comp_flat()
self.write_function_implementation(code, exclude=True)
code = gen_functions.write_check_consistency()
self.write_function_implementation(code, exclude=True)
code = gen_functions.write_read_attributes()
self.write_function_implementation(code, exclude=True)
########################################################################
# Retrieve element functions
def write_functions_to_retrieve(self):
if not query.has_child_elements(self.attributes):
return
gen_functions = \
GlobalQueryFunctions.GlobalQueryFunctions(self.language,
self.is_cpp_api,
self.is_list_of,
self.class_object)
code = gen_functions.write_get_by_sid()
self.write_function_implementation(code)
code = gen_functions.write_get_by_metaid()
self.write_function_implementation(code)
code = gen_functions.write_get_all_elements()
self.write_function_implementation(code)
if self.is_plugin:
code = gen_functions.write_append_from()
self.write_function_implementation(code, True)
########################################################################
# Functions for writing the attribute manipulation functions
# these are for attributes and elements that occur as a single child
# function to write additional functions on a document for another library
def write_document_error_log_functions(self):
attrib_functions = SetGetFunctions.\
SetGetFunctions(self.language, self.is_cpp_api,
self.is_list_of, self.class_object)
num_elements = len(self.child_elements)
# add error log and ns to child elements
att_tc = 'XMLNamespaces*'
if not global_variables.is_package:
att_tc = 'LIBSBML_CPP_NAMESPACE_QUALIFIER XMLNamespaces*'
element = dict({'name': 'Namespaces',
'isArray': False,
'attTypeCode': att_tc,
'capAttName': 'Namespaces',
'attType': 'element',
'memberName': 'm{0}Namespaces'.format(global_variables.prefix)})
errelement = dict({'name': '{0}ErrorLog'.format(global_variables.prefix),
'isArray': False,
'attTypeCode': '{0}ErrorLog*'.format(global_variables.prefix),
'capAttName': 'ErrorLog',
'attType': 'element',
'memberName': 'mErrorLog'})
self.child_elements.append(element)
self.child_elements.append(errelement)
code = attrib_functions.write_get(False, num_elements, True, True)
self.write_function_implementation(code)
code = attrib_functions.write_get(False, num_elements, False, True)
self.write_function_implementation(code)
code = attrib_functions.write_get(False, num_elements+1, True)
self.write_function_implementation(code)
code = attrib_functions.write_get(False, num_elements+1, False)
self.write_function_implementation(code)
self.child_elements.remove(errelement)
self.child_elements.remove(element)
# preserve existing values
existing = dict()
self.class_object['element'] = '{0}Error'.format(global_variables.prefix)
self.class_object['parent'] = dict({'name': '{0}'.format(global_variables.document_class)})
self.class_object['memberName'] = 'mErrorLog'
lo_functions = ListOfQueryFunctions\
.ListOfQueryFunctions(self.language, self.is_cpp_api,
self.is_list_of,
self.class_object)
code = lo_functions.write_get_element_by_index(is_const=False)
self.write_function_implementation(code)
code = lo_functions.write_get_element_by_index(is_const=True)
self.write_function_implementation(code)
code = lo_functions.write_get_num_element_function()
self.write_function_implementation(code)
parameter = dict({'name': 'severity',
'type': 'unsigned int'})
code = lo_functions.write_get_num_element_function(parameter)
self.write_function_implementation(code)
########################################################################
# concrete class functions
def write_concrete_functions(self):
conc_functions = \
ConcreteClassFunctions.ConcreteClassFunctions(self.language,
self.is_cpp_api,
self.is_list_of,
self.class_object)
for i in range(0, len(self.concretes)):
code = conc_functions.write_is_foo(i)
self.write_function_implementation(code)
########################################################################
# Protected functions
def write_protected_functions(self):
protect_functions = \
ProtectedFunctions.ProtectedFunctions(self.language,
self.is_cpp_api,
self.is_list_of,
self.class_object,
self.lv_info)
exclude = True
code = protect_functions.write_create_object()
self.write_function_implementation(code, exclude)
code = protect_functions.write_add_expected_attributes()
self.write_function_implementation(code, exclude)
code = protect_functions.write_read_attributes()
self.write_function_implementation(code, exclude)
if 'num_versions' in self.class_object \
and self.class_object['num_versions'] > 1:
for i in range(0, self.class_object['num_versions']):
code = protect_functions.write_read_version_attributes(i)
self.write_function_implementation(code, exclude)
code = protect_functions.write_read_other_xml()
self.write_function_implementation(code, exclude)
code = protect_functions.write_write_attributes()
self.write_function_implementation(code, exclude)
if 'num_versions' in self.class_object \
and self.class_object['num_versions'] > 1:
for i in range(0, self.class_object['num_versions']):
code = protect_functions.write_write_version_attributes(i)
self.write_function_implementation(code, exclude)
code = protect_functions.write_write_xmlns()
self.write_function_implementation(code, exclude)
code = protect_functions.write_is_valid_type_for_list()
self.write_function_implementation(code, exclude)
code = protect_functions.write_set_element_text()
self.write_function_implementation(code, exclude)
########################################################################
# Functions for writing functions for the main ListOf class
def write_listof_functions(self):
if not self.is_list_of:
return
lo_functions = ListOfQueryFunctions\
.ListOfQueryFunctions(self.language, self.is_cpp_api,
self.is_list_of,
self.class_object)
code = lo_functions.write_get_element_by_index(is_const=False)
self.write_function_implementation(code)
code = lo_functions.write_get_element_by_index(is_const=True)
self.write_function_implementation(code)
code = lo_functions.write_get_element_by_id(is_const=False)
self.write_function_implementation(code)
code = lo_functions.write_get_element_by_id(is_const=True)
self.write_function_implementation(code)
code = lo_functions.write_remove_element_by_index()
self.write_function_implementation(code)
code = lo_functions.write_remove_element_by_id()
self.write_function_implementation(code)
if self.is_cpp_api:
code = lo_functions.write_add_element_function()
self.write_function_implementation(code)
code = lo_functions.write_get_num_element_function()
self.write_function_implementation(code)
for i in range(0, len(self.concretes)+1):
code = lo_functions.write_create_element_function(i)
self.write_function_implementation(code)
for i in range(0, len(self.sid_refs)):
code = lo_functions.write_lookup(self.sid_refs[i])
self.write_function_verbatim(code)
code = \
lo_functions.write_get_element_by_sidref(self.sid_refs[i],
const=True)
self.write_function_implementation(code)
code = \
lo_functions.write_get_element_by_sidref(self.sid_refs[i],
const=False)
self.write_function_implementation(code)
# main function to write the functions dealing with a child listOf element
def write_child_lo_element_functions(self):
num_elements = len(self.child_lo_elements)
for i in range(0, num_elements):
element = self.child_lo_elements[i]
element['std_base'] = self.std_base
element['package'] = self.package
element['is_header'] = self.is_header
element['is_plugin'] = self.is_plugin
if self.is_plugin:
element['plugin'] = self.class_name
if 'concrete' in element:
element['concretes'] = query.get_concretes(
self.class_object['root'], element['concrete'])
lo_functions = ListOfQueryFunctions\
.ListOfQueryFunctions(self.language, self.is_cpp_api,
self.is_list_of,
element)
code = lo_functions.write_get_list_of_function(is_const=True)
self.write_function_implementation(code)
code = lo_functions.write_get_list_of_function(is_const=False)
self.write_function_implementation(code)
code = lo_functions.write_get_element_by_index(is_const=False)
self.write_function_implementation(code)
code = lo_functions.write_get_element_by_index(is_const=True)
self.write_function_implementation(code)
code = lo_functions.write_get_element_by_id(is_const=False)
self.write_function_implementation(code)
code = lo_functions.write_get_element_by_id(is_const=True)
self.write_function_implementation(code)
sid_ref = query.get_sid_refs_for_class(element)
for j in range(0, len(sid_ref)):
if self.is_list_of:
code = lo_functions.write_lookup(sid_ref[j])
self.write_function_verbatim(code)
code = \
lo_functions.write_get_element_by_sidref(sid_ref[j],
const=True)
self.write_function_implementation(code)
code = \
lo_functions.write_get_element_by_sidref(sid_ref[j],
const=False)
self.write_function_implementation(code)
code = lo_functions.write_add_element_function()
self.write_function_implementation(code)
code = lo_functions.write_get_num_element_function()
self.write_function_implementation(code)
if 'concretes' in element:
for n in range(0, len(element['concretes'])):
code = lo_functions.write_create_element_function(n+1)
self.write_function_implementation(code)
else:
code = lo_functions.write_create_element_function()
self.write_function_implementation(code)
code = lo_functions.write_remove_element_by_index()
self.write_function_implementation(code)
code = lo_functions.write_remove_element_by_id()
self.write_function_implementation(code)
# this tackles the situation where a listOfFoo class also
# contains an element of another type
# eg qual:ListOfFunctionTerms contains a DefaultTerm
if not self.is_plugin:
element_children = \
query.get_other_element_children(self.class_object, element)
for j in range(0, len(element_children)):
child_class = self.create_lo_other_child_element_class(
element_children[0], self.class_name)
self.write_child_element_functions(child_class)
########################################################################
# Functions for writing definition declaration
def write_defn_begin(self):
self.skip_line(2)
self.write_line('#ifndef {0}_H__'.format(self.name))
self.write_line('#define {0}_H__'.format(self.name))
self.skip_line(2)
def write_defn_end(self):
self.skip_line(2)
self.write_line('#endif /* !{0}_H__ */'.format(self.name))
self.skip_line(2)
########################################################################
# Write file
def write_file(self):
BaseCppFile.BaseCppFile.write_file(self)
self.write_general_includes()
self.write_cppns_begin()
self.write_cpp_begin()
self.write_class()
self.write_cpp_end()
self.write_c_code()
self.write_cppns_end()
| lgpl-2.1 | -6,839,600,027,321,510,000 | 43.163121 | 99 | 0.549435 | false | 4.543045 | false | false | false |
howknows/Ropper | ropperapp/disasm/chain/ropchain.py | 1 | 1655 | # coding=utf-8
#
# Copyright 2014 Sascha Schirra
#
# This file is part of Ropper.
#
# Ropper is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ropper is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from ropperapp.common.abstract import *
from ropperapp.common.error import *
class RopChain(Abstract):
def __init__(self, binaries, printer):
self._binaries = binaries
self._usedBinaries = []
self._printer = printer
@abstractmethod
def create(self):
pass
@classmethod
def name(cls):
return None
@classmethod
def availableGenerators(cls):
return []
@classmethod
def archs(self):
return []
@classmethod
def get(cls, binaries, name, printer):
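        # Walk the RopChain subclasses registered for the binary's architecture
        # and return an instance of the generator whose name() matches.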
for subclass in cls.__subclasses__():
if binaries[0].arch in subclass.archs():
gens = subclass.availableGenerators()
for gen in gens:
if gen.name() == name:
return gen(binaries, printer)
        raise RopChainError('generator %s is not available for arch %s' % (name, binaries[0].arch.__class__.__name__))
| gpl-2.0 | 5,436,525,872,334,288,000 | 30.226415 | 118 | 0.653776 | false | 4.1375 | false | false | false |
apaleyes/mxnet | tools/ipynb2md.py | 3 | 1485 | #!/usr/bin/env python
"""
Convert jupyter notebook into the markdown format. The notebook outputs will be
removed.
It is heavily adapted from https://gist.github.com/decabyte/0ed87372774cf5d34d7e
"""
import sys
import io
import os
import argparse
import nbformat
def remove_outputs(nb):
"""Removes the outputs cells for a jupyter notebook."""
for cell in nb.cells:
if cell.cell_type == 'code':
cell.outputs = []
def clear_notebook(old_ipynb, new_ipynb):
with io.open(old_ipynb, 'r') as f:
nb = nbformat.read(f, nbformat.NO_CONVERT)
remove_outputs(nb)
with io.open(new_ipynb, 'w', encoding='utf8') as f:
nbformat.write(nb, f, nbformat.NO_CONVERT)
def main():
parser = argparse.ArgumentParser(
description="Jupyter Notebooks to markdown"
)
parser.add_argument("notebook", nargs=1, help="The notebook to be converted.")
parser.add_argument("-o", "--output", help="output markdown file")
args = parser.parse_args()
old_ipynb = args.notebook[0]
new_ipynb = 'tmp.ipynb'
md_file = args.output
print md_file
if not md_file:
md_file = os.path.splitext(old_ipynb)[0] + '.md'
clear_notebook(old_ipynb, new_ipynb)
os.system('jupyter nbconvert ' + new_ipynb + ' --to markdown --output ' + md_file)
with open(md_file, 'a') as f:
f.write('<!-- INSERT SOURCE DOWNLOAD BUTTONS -->')
os.system('rm ' + new_ipynb)
if __name__ == '__main__':
main()
| apache-2.0 | 7,133,700,526,051,015,000 | 24.603448 | 86 | 0.639731 | false | 3.09375 | false | false | false |
bikash/h2o-dev | py2/testdir_single_jvm/test_w2v_basic.py | 1 | 8524 | import unittest, time, sys, random, re
sys.path.extend(['.','..','../..','py'])
import h2o2 as h2o
import h2o_cmd, h2o_import as h2i, h2o_jobs
from h2o_test import verboseprint, dump_json, OutputObj
import h2o_test
DO_SUMMARY=False
targetList = ['red', 'mail', 'black flag', 5, 1981, 'central park',
'good', 'liquor store rooftoop', 'facebook']
lol = [
['red','orange','yellow','green','blue','indigo','violet'],
['male','female',''],
['bad brains','social distortion','the misfits','black flag',
'iggy and the stooges','the dead kennedys',
'the sex pistols','the ramones','the clash','green day'],
range(1,10),
range(1980,2013),
['central park','marin civic center','madison square garden',
'wembley arena','greenwich village',
'liquor store rooftop','"woodstock, n.y."','shea stadium'],
['good','bad'],
['expensive','cheap','free'],
['yes','no'],
['facebook','twitter','blog',''],
range(8,100),
[random.random() for i in range(20)]
]
whitespaceRegex = re.compile(r"""
^\s*$ # begin, white space or empty space, end
""", re.VERBOSE)
DO_TEN_INTEGERS = False
def random_enum(n):
# pick randomly from a list pointed at by N
if DO_TEN_INTEGERS:
# ten choices
return str(random.randint(0,9))
else:
choiceList = lol[n]
r = str(random.choice(choiceList))
if r in targetList:
t = 1
else:
t = 0
# need more randomness to get enums to be strings
r2 = random.randint(0, 10000)
return (t,"%s_%s" % (r, r2))
def write_syn_dataset(csvPathname, rowCount, colCount=1, SEED='12345678',
colSepChar=",", rowSepChar="\n"):
r1 = random.Random(SEED)
dsf = open(csvPathname, "w+")
for row in range(rowCount):
# doesn't guarantee that 10000 rows have 10000 unique enums in a column
# essentially sampling with replacement
rowData = []
lenLol = len(lol)
targetSum = 0
for col in range(colCount):
(t,ri) = random_enum(col % lenLol)
targetSum += t # sum up contributions to output choice
# print ri
# first two rows can't tolerate single/double quote randomly
# keep trying until you get one with no single or double quote in the line
if row < 2:
while True:
# can't have solely white space cols either in the first two rows
if "'" in ri or '"' in ri or whitespaceRegex.match(ri):
(t,ri) = random_enum(col % lenLol)
else:
break
rowData.append(ri)
# output column
avg = (targetSum+0.0)/colCount
# ri = r1.randint(0,1)
rowData.append(targetSum)
# use the new Hive separator
rowDataCsv = colSepChar.join(map(str,rowData)) + rowSepChar
### sys.stdout.write(rowDataCsv)
dsf.write(rowDataCsv)
dsf.close()
#*******************************
def create_file_with_seps(rowCount, colCount):
# can randomly pick the row and col cases.
### colSepCase = random.randint(0,1)
colSepCase = 1
# using the comma is nice to ensure no craziness
if (colSepCase==0):
colSepHexString = '01'
else:
colSepHexString = '2c' # comma
colSepChar = colSepHexString.decode('hex')
colSepInt = int(colSepHexString, base=16)
print "colSepChar:", colSepChar
print "colSepInt", colSepInt
rowSepCase = random.randint(0,1)
# using this instead, makes the file, 'row-readable' in an editor
if (rowSepCase==0):
rowSepHexString = '0a' # newline
else:
rowSepHexString = '0d0a' # cr + newline (windows) \r\n
rowSepChar = rowSepHexString.decode('hex')
print "rowSepChar:", rowSepChar
SEEDPERFILE = random.randint(0, sys.maxint)
if DO_TEN_INTEGERS:
csvFilename = 'syn_int10_' + str(rowCount) + 'x' + str(colCount) + '.csv'
else:
csvFilename = 'syn_enums_' + str(rowCount) + 'x' + str(colCount) + '.csv'
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
print "Creating random", csvPathname
write_syn_dataset(csvPathname, rowCount, colCount, SEEDPERFILE,
colSepChar=colSepChar, rowSepChar=rowSepChar)
return csvPathname
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init(1, java_heap_GB=12)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_w2v_basic(self):
global SYNDATASETS_DIR
SYNDATASETS_DIR = h2o.make_syn_dir()
n = 500000
tryList = [
(n, 1, 'cD', 300),
(n, 2, 'cE', 300),
(n, 3, 'cF', 300),
(n, 4, 'cG', 300),
(n, 5, 'cH', 300),
(n, 6, 'cI', 300),
(n, 7, 'cJ', 300),
(n, 9, 'cK', 300),
]
### h2b.browseTheCloud()
for (rowCount, colCount, hex_key, timeoutSecs) in tryList:
csvPathname = create_file_with_seps(rowCount, colCount)
# just parse to make sure it's good
parseResult = h2i.import_parse(path=csvPathname,
check_header=1, delete_on_done = 0, timeoutSecs=180, doSummary=False)
pA = h2o_cmd.ParseObj(parseResult)
iA = h2o_cmd.InspectObj(pA.parse_key)
cA = h2o_test.OutputObj(iA.columns[0], "inspect_column")
parse_key = pA.parse_key
numRows = iA.numRows
numCols = iA.numCols
labelList = iA.labelList
for i in range(colCount):
print cA.type, cA.missing_count
self.assertEqual(0, cA.missing_count, "Column %s Expected %s. missing: %s is incorrect" % (i, 0, cA.missing_count))
self.assertEqual('string', cA.type, "Column %s Expected %s. type: %s is incorrect" % (i, 0, cA.type))
if DO_SUMMARY:
for i in range(colCount):
co = h2o_cmd.runSummary(key=parse_key, column=i)
print co.label, co.type, co.missing, co.domain, sum(co.bins)
self.assertEqual(0, co.missing_count, "Column %s Expected %s. missing: %s is incorrect" % (i, 0, co.missing_count))
self.assertEqual('String', co.type, "Column %s Expected %s. type: %s is incorrect" % (i, 0, co.type))
# no cols ignored
labelListUsed = list(labelList)
numColsUsed = numCols
for trial in range(1):
parameters = {
'validation_frame': parse_key, # KeyIndexed False []
'ignored_columns': None, # string[] None []
'minWordFreq': 5, # int 5 []
'wordModel': 'SkipGram', # enum [u'CBOW', u'SkipGram']
'normModel': 'HSM', # enum # [u'HSM', u'NegSampling']
'negSampleCnt': 5,# int 5 []
'vecSize': 100, # int 100
'windowSize': 5, # int 5
'sentSampleRate': 0.001, # float 0.001
'initLearningRate': 0.05, # float 0.05
'epochs': 1, # int 5
}
model_key = 'benign_w2v.hex'
bmResult = h2o.n0.build_model(
algo='word2vec',
destination_key=model_key,
training_frame=parse_key,
parameters=parameters,
timeoutSecs=60)
bm = OutputObj(bmResult, 'bm')
modelResult = h2o.n0.models(key=model_key)
model = OutputObj(modelResult['models'][0]['output'], 'model')
cmmResult = h2o.n0.compute_model_metrics( model=model_key, frame=parse_key, timeoutSecs=60)
cmm = OutputObj(cmmResult, 'cmm')
mmResult = h2o.n0.model_metrics(model=model_key, frame=parse_key, timeoutSecs=60)
mm = OutputObj(mmResult['model_metrics'][0], 'mm')
# not implemented?
# prResult = h2o.n0.predict(model=model_key, frame=parse_key, timeoutSecs=60)
# pr = OutputObj(prResult['model_metrics'][0]['predictions'], 'pr')
h2o_cmd.runStoreView()
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 | -6,097,692,025,574,265,000 | 34.966245 | 135 | 0.547396 | false | 3.45241 | true | false | false |
semkiv/heppy_fcc | fastsim/detectors/perfect.py | 1 | 2316 | from detector import Detector, DetectorElement
import material
from geometry import VolumeCylinder
import math
class ECAL(DetectorElement):
def __init__(self):
volume = VolumeCylinder('ecal', 1.55, 2.25, 1.30, 2. )
mat = material.Material('ECAL', 8.9e-3, 0.) # lambda_I = 0
self.eta_crack = 1.5
self.emin = 2.
super(ECAL, self).__init__('ecal', volume, mat)
def energy_resolution(self, energy, theta=0.):
return 0.
def cluster_size(self, ptc):
pdgid = abs(ptc.pdgid)
if pdgid==22 or pdgid==11:
return 0.04
else:
return 0.07
def acceptance(self, cluster):
return True
def space_resolution(self, ptc):
pass
class HCAL(DetectorElement):
def __init__(self):
volume = VolumeCylinder('hcal', 2.9, 3.6, 1.9, 2.6 )
mat = material.Material('HCAL', None, 0.17)
        super(HCAL, self).__init__('hcal', volume, mat)
def energy_resolution(self, energy, theta=0.):
return 0.
def cluster_size(self, ptc):
return 0.2
def acceptance(self, cluster):
return True
def space_resolution(self, ptc):
pass
class Tracker(DetectorElement):
#TODO acceptance and resolution depend on the particle type
def __init__(self):
volume = VolumeCylinder('tracker', 1.29, 1.99)
mat = material.void
super(Tracker, self).__init__('tracker', volume, mat)
def acceptance(self, track):
return True
def pt_resolution(self, track):
return 0.
class Field(DetectorElement):
def __init__(self, magnitude):
self.magnitude = magnitude
volume = VolumeCylinder('field', 2.9, 3.6)
mat = material.void
        super(Field, self).__init__('field', volume, mat)
class Perfect(Detector):
'''A detector with the geometry of CMS and the same cluster size,
but without smearing, and with full acceptance (no thresholds).
Used for testing purposes.
'''
def __init__(self):
super(Perfect, self).__init__()
self.elements['tracker'] = Tracker()
self.elements['ecal'] = ECAL()
self.elements['hcal'] = HCAL()
self.elements['field'] = Field(3.8)
perfect = Perfect()
| gpl-3.0 | 3,510,170,150,373,318,000 | 24.733333 | 70 | 0.585492 | false | 3.664557 | false | false | false |
mjalas/pyprojectgen | src/license_generator.py | 2 | 1133 | """License file generator module."""
from pathlib import Path
from src.license_templates import MIT_LICENSE_TEMPLATE
from src.license_templates import GPL3_LICENSE_TEMPLATE
from src.license_templates import APACHE_LICENSE_TEMPLATE
class LicenseGenerator(object):
"""License file generator class."""
@staticmethod
def generate(file_path, license_metadata):
"""Generates the LICENSE file for the project."""
content = None
if license_metadata['file'] == 'MIT':
content = MIT_LICENSE_TEMPLATE
if 'year' in license_metadata:
content = content.replace('<year>', license_metadata['year'])
if 'owner' in license_metadata:
content = content.replace('<owner>', license_metadata['owner'])
elif license_metadata['file'] == 'GPL3':
content = GPL3_LICENSE_TEMPLATE
elif license_metadata['file'] == 'Apache':
content = APACHE_LICENSE_TEMPLATE
if content:
with open(file_path, 'w') as stream:
stream.write(content)
else:
Path(file_path).touch()
| gpl-3.0 | -709,988,112,307,069,600 | 38.068966 | 79 | 0.623124 | false | 4.550201 | false | false | false |
lucky/autumn | autumn/tests/run.py | 2 | 4507 | #!/usr/bin/env python
import unittest
import datetime
from autumn.model import Model
from autumn.tests.models import Book, Author
from autumn.db.query import Query
from autumn.db import escape
from autumn import validators
class TestModels(unittest.TestCase):
def testmodel(self):
# Create tables
### MYSQL ###
#
# DROP TABLE IF EXISTS author;
# CREATE TABLE author (
# id INT(11) NOT NULL auto_increment,
# first_name VARCHAR(40) NOT NULL,
# last_name VARCHAR(40) NOT NULL,
# bio TEXT,
# PRIMARY KEY (id)
# );
# DROP TABLE IF EXISTS books;
# CREATE TABLE books (
# id INT(11) NOT NULL auto_increment,
# title VARCHAR(255),
# author_id INT(11),
# FOREIGN KEY (author_id) REFERENCES author(id),
# PRIMARY KEY (id)
# );
### SQLITE ###
#
# DROP TABLE IF EXISTS author;
# DROP TABLE IF EXISTS books;
# CREATE TABLE author (
# id INTEGER PRIMARY KEY AUTOINCREMENT,
# first_name VARCHAR(40) NOT NULL,
# last_name VARCHAR(40) NOT NULL,
# bio TEXT
# );
# CREATE TABLE books (
# id INTEGER PRIMARY KEY AUTOINCREMENT,
# title VARCHAR(255),
# author_id INT(11),
# FOREIGN KEY (author_id) REFERENCES author(id)
# );
for table in ('author', 'books'):
Query.raw_sql('DELETE FROM %s' % escape(table))
# Test Creation
james = Author(first_name='James', last_name='Joyce')
james.save()
kurt = Author(first_name='Kurt', last_name='Vonnegut')
kurt.save()
tom = Author(first_name='Tom', last_name='Robbins')
tom.save()
Book(title='Ulysses', author_id=james.id).save()
Book(title='Slaughter-House Five', author_id=kurt.id).save()
Book(title='Jitterbug Perfume', author_id=tom.id).save()
slww = Book(title='Still Life with Woodpecker', author_id=tom.id)
slww.save()
# Test ForeignKey
self.assertEqual(slww.author.first_name, 'Tom')
# Test OneToMany
self.assertEqual(len(list(tom.books)), 2)
kid = kurt.id
del(james, kurt, tom, slww)
# Test retrieval
b = Book.get(title='Ulysses')[0]
a = Author.get(id=b.author_id)[0]
self.assertEqual(a.id, b.author_id)
a = Author.get(id=b.id)[:]
self.assert_(isinstance(a, list))
# Test update
new_last_name = 'Vonnegut, Jr.'
a = Author.get(id=kid)[0]
a.last_name = new_last_name
a.save()
a = Author.get(kid)
self.assertEqual(a.last_name, new_last_name)
# Test count
self.assertEqual(Author.get().count(), 3)
self.assertEqual(len(Book.get()[1:4]), 3)
# Test delete
a.delete()
self.assertEqual(Author.get().count(), 2)
# Test validation
a = Author(first_name='', last_name='Ted')
try:
a.save()
raise Exception('Validation not caught')
except Model.ValidationError:
pass
# Test defaults
a.first_name = 'Bill and'
a.save()
self.assertEqual(a.bio, 'No bio available')
try:
Author(first_name='I am a', last_name='BadGuy!').save()
raise Exception('Validation not caught')
except Model.ValidationError:
pass
def testvalidators(self):
ev = validators.Email()
assert ev('[email protected]')
assert not ev('[email protected]')
assert validators.Length()('a')
assert not validators.Length(2)('a')
assert validators.Length(max_length=10)('abcdegf')
assert not validators.Length(max_length=3)('abcdegf')
n = validators.Number(1, 5)
assert n(2)
assert not n(6)
assert validators.Number(1)(100.0)
assert not validators.Number()('rawr!')
vc = validators.ValidatorChain(validators.Length(8), validators.Email())
assert vc('[email protected]')
assert not vc('[email protected]')
assert not vc('asdfasdfasdfasdfasdf')
if __name__ == '__main__':
unittest.main()
| mit | -1,967,974,872,636,074,800 | 29.869863 | 80 | 0.531174 | false | 3.829227 | true | false | false |
kpod13/python-mailgunlog | mailgunlog/mailgunlog.py | 1 | 9029 | # -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
import json
import argparse
import time
from datetime import datetime, date as _date
from dateutil.relativedelta import relativedelta
try:
from email.Utils import formatdate
except ImportError: # py3
from email.utils import formatdate
import requests
import os.path
def logs(domain, api_key, begin=None, end=None, max_retries=None, verbose=False):
url = 'https://api.mailgun.net/v2/%s/events' % domain
params = {
'begin': begin or formatdate(),
}
if end:
params['end'] = end
init = True
while True:
count_get_page_retry = 0
if params:
response = requests.get(url=url, auth=('api', api_key), params=params)
if not response.ok:
while count_get_page_retry < max_retries:
time.sleep(2)
response = requests.get(url=url, auth=('api', api_key), params=params)
count_get_page_retry += 1
if response.ok:
break
else:
response = requests.get(url=url, auth=('api', api_key))
if not response.ok:
while count_get_page_retry < max_retries:
time.sleep(2)
response = requests.get(url=url, auth=('api', api_key), params=params)
count_get_page_retry += 1
if response.ok:
break
if not response.ok:
raise ValueError('Invalid status_code: {0}'.format(response.status_code))
if verbose:
print('# {0} {1}'.format(response.request.method, response.request.url), file=sys.stderr)
print('# STATUS CODE: {0}'.format(response.status_code), file=sys.stderr)
data = response.json()
items = data['items']
for record in items:
yield record
if not len(items):
if init:
# first iteraction, fetch first page
url = data['paging']['first']
params=None
else:
# EOF
return
else:
url = data['paging']['next']
init = False
def strdate_to_rfc2822(value=None, midnight=False, now=False):
"""Convert date in format YYYY/MM/DD to RFC2822 format.
If value is None, return current utc time.
"""
if midnight and now:
raise ValueError('Choose one of "midnight" or "now"')
strdate = value if value else datetime.utcnow().strftime('%Y/%m/%d')
datetimetuple = list(map(int, strdate.split('/')))
if midnight:
datetimetuple += (0, 0, 0)
elif now:
now = datetime.utcnow()
datetimetuple += (now.hour, now.minute, now.second)
else:
datetimetuple += (23, 59, 59)
date = datetime(*datetimetuple)
timestamp = time.mktime(date.utctimetuple())
return date.strftime('%a, %d %b %Y %H:%M:%S -0000')
def main():
parser = argparse.ArgumentParser(description='Retrieve Mailgun event logs.')
parser.add_argument('-d', '--days', help='Days ago (N)',
dest='days',
type=int,
default=None)
parser.add_argument('-b', '--begin', help='Begin date (YYYY/MM/DD)',
dest='begin',
default=None)
parser.add_argument('-e', '--end', help='End date (YYYY/MM/DD)',
dest='end',
default=None)
parser.add_argument('-j', '--json', help='Print json (original) log records',
dest='json',
action='store_true',
default=False)
parser.add_argument('-V', '--version', help='Print version to stdout and exit',
dest='version',
action='store_true',
default=False)
parser.add_argument('domain', help='Domain registered on Mailgun (or set env var MAILGUN_DOMAIN)',
metavar='domain',
type=str,
nargs='?',
default=None)
parser.add_argument('api_key', help='Mailgun API KEY (or set env var MAILGUN_API_KEY)',
metavar='api_key',
type=str,
nargs='?',
default=None)
    parser.add_argument('-r', '--retries', help="Number of retries when a request to the Mailgun API fails",
dest='max_retries',
type=int,
default=100)
parser.add_argument('--file', help="Output file",
dest='filename',
type=str,
default=None)
parser.add_argument('--logstash', help="Output logstash url",
dest='logstashurl',
type=str,
default=None)
parser.add_argument('-v', '--verbose', help='Print debug messages on stderr',
dest='verbose',
action='store_true',
default=False)
args = parser.parse_args()
if args.version:
from . import __title__, __version__
print('{0}-{1}'.format(__title__, __version__))
sys.exit(0)
jsondata = {'logs': []}
# parse date interval
if args.days:
begin_day = _date.today() - relativedelta(days=args.days)
begin = strdate_to_rfc2822(begin_day.strftime('%Y/%m/%d'), midnight=True)
end_day = _date.today() - relativedelta(days=1)
end = strdate_to_rfc2822(end_day.strftime('%Y/%m/%d'))
else:
begin = strdate_to_rfc2822(args.begin, midnight=True)
end = strdate_to_rfc2822(args.end, now=False) if args.end else None
if begin:
jsondata['begin'] = begin
if end:
jsondata['end'] = end
# parse api domain and credentials
try:
if args.domain:
domain = args.domain
else:
domain = os.environ['MAILGUN_DOMAIN']
jsondata['domain'] = domain
except KeyError:
        print('Missing mailgun domain')
sys.exit(1)
try:
if args.api_key:
api_key = args.api_key
else:
api_key = os.environ['MAILGUN_API_KEY']
except KeyError:
print('Missing mailgun API key')
sys.exit(1)
if args.verbose:
print('# BEGIN DATE: {}'.format(begin), file=sys.stderr)
print('# END DATE: {}'.format(end), file=sys.stderr)
sys.stderr.flush()
if args.filename:
# Check for existing file
if os.path.exists(args.filename):
while True:
                answer = raw_input("File %s exists! Proceed? You will lose data in the file!: " % args.filename)
if answer.lower() == 'yes':
break
elif answer.lower() == 'no':
sys.exit('Interrupted by user')
# Open file for write
try:
logfile = open(args.filename, 'w')
except IOError:
print('Can not open file to write')
# Main loop
time_start = datetime.now().replace(microsecond=0)
for log in logs(domain=domain, api_key=api_key, begin=begin, end=end, max_retries=args.max_retries, verbose=args.verbose):
if args.json:
line_json = json.dumps(log, indent=3)
if args.filename:
try:
logfile.write(line_json + '\n')
except IOError:
print('Can not write log to file ' + args.filename)
logfile.close()
if args.logstashurl:
try:
requests.put(args.logstashurl, data=line_json, headers={'Content-Type': 'application/json'})
except IOError:
                    print('Can not put log to ' + args.logstashurl)
else:
status = log.get('event', '').upper()
ok = status in ['ACCEPTED', 'DELIVERED']
line = '[%s] %s <%s>' % (datetime.utcfromtimestamp(log.get('timestamp', '')), status , log.get('recipient', ''))
if not ok:
line += ' (%s)' % (log.get('delivery-status', {}).get('description', '') or log.get('delivery-status', {}).get('message', ''))
line += ': %s' % log.get('message', {}).get('headers', {}).get('subject', '')
            if args.filename:
                logfile.write(line + '\n')
            else:
                print(line)
if args.filename:
logfile.close()
if args.verbose:
# Getting stop time and execution time
time_stop = datetime.now().replace(microsecond=0)
time_exec = time_stop - time_start
# Summary output
summary = """
Start time: %s
End time: %s
Total execution time: %s
""" % (time_start, time_stop, time_exec)
print('\n' + '!' * 79 + '\n' + summary + '!' * 79 + '\n')
if __name__ == '__main__':
main()
| apache-2.0 | -323,765,785,715,018,600 | 33.200758 | 142 | 0.512128 | false | 4.193683 | false | false | false |
sanjioh/django-header-filter | tests/test_matcher_or.py | 1 | 1830 | from header_filter.matchers import Header
def test_or_matcher_supports_bitwise_not(rf):
h_name_1, h_value_1 = 'HTTP_X_A', 'val_x'
h_name_2, h_value_2 = 'HTTP_X_B', 'val_y'
matcher = ~(Header(h_name_1, h_value_1) | Header(h_name_2, h_value_2))
request = rf.get('/', **{h_name_1: h_value_1, h_name_2: h_value_2})
assert matcher.match(request) is False
def test_or_matcher_supports_bitwise_and(rf):
h_name_1, h_value_1 = 'HTTP_X_A', 'val_x'
h_name_2, h_value_2 = 'HTTP_X_B', 'val_y'
h_name_3, h_value_3 = 'HTTP_X_C', 'val_z'
matcher = (Header(h_name_1, h_value_1) | Header(h_name_2, h_value_2)) & Header(h_name_3, h_value_3)
request = rf.get('/', **{h_name_1: h_value_1, h_name_2: h_value_2, h_name_3: h_value_3})
assert matcher.match(request) is True
def test_or_matcher_supports_bitwise_or(rf):
h_name_1, h_value_1 = 'HTTP_X_A', 'val_x'
h_name_2, h_value_2 = 'HTTP_X_B', 'val_y'
h_name_3, h_value_3 = 'HTTP_X_C', 'val_z'
matcher = (Header(h_name_1, h_value_1) | Header(h_name_2, h_value_2)) | Header(h_name_3, h_value_3)
request = rf.get('/', **{h_name_1: h_value_1, h_name_2: h_value_2, h_name_3: h_value_3})
assert matcher.match(request) is True
def test_or_matcher_supports_bitwise_xor(rf):
h_name_1, h_value_1 = 'HTTP_X_A', 'val_x'
h_name_2, h_value_2 = 'HTTP_X_B', 'val_y'
h_name_3, h_value_3 = 'HTTP_X_C', 'val_z'
matcher = (Header(h_name_1, h_value_1) | Header(h_name_2, h_value_2)) ^ Header(h_name_3, h_value_3)
request = rf.get('/', **{h_name_1: h_value_1, h_name_2: h_value_2, h_name_3: h_value_3})
assert matcher.match(request) is False
def test_repr():
assert (
repr(Header('HTTP_X_A', 'val_x') | Header('HTTP_X_B', 'val_y'))
== "(Header('HTTP_X_A', 'val_x') | Header('HTTP_X_B', 'val_y'))"
)
| mit | -366,165,832,665,979,300 | 41.55814 | 103 | 0.585246 | false | 2.264851 | false | false | false |
mja054/swift_plugin | swift/common/ring/ring.py | 5 | 6075 | # Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cPickle as pickle
from gzip import GzipFile
from os.path import getmtime
from struct import unpack_from
from time import time
from swift.common.utils import hash_path, validate_configuration
class RingData(object):
"""Partitioned consistent hashing ring data (used for serialization)."""
def __init__(self, replica2part2dev_id, devs, part_shift):
self.devs = devs
self._replica2part2dev_id = replica2part2dev_id
self._part_shift = part_shift
def to_dict(self):
return {'devs': self.devs,
'replica2part2dev_id': self._replica2part2dev_id,
'part_shift': self._part_shift}
class Ring(object):
"""
Partitioned consistent hashing ring.
:param pickle_gz_path: path to ring file
:param reload_time: time interval in seconds to check for a ring change
"""
def __init__(self, pickle_gz_path, reload_time=15):
# can't use the ring unless HASH_PATH_SUFFIX is set
validate_configuration()
self.pickle_gz_path = pickle_gz_path
self.reload_time = reload_time
self._reload(force=True)
def _reload(self, force=False):
self._rtime = time() + self.reload_time
if force or self.has_changed():
ring_data = pickle.load(GzipFile(self.pickle_gz_path, 'rb'))
if not hasattr(ring_data, 'devs'):
ring_data = RingData(ring_data['replica2part2dev_id'],
ring_data['devs'], ring_data['part_shift'])
self._mtime = getmtime(self.pickle_gz_path)
self.devs = ring_data.devs
self.zone2devs = {}
for dev in self.devs:
if not dev:
continue
if dev['zone'] in self.zone2devs:
self.zone2devs[dev['zone']].append(dev)
else:
self.zone2devs[dev['zone']] = [dev]
self._replica2part2dev_id = ring_data._replica2part2dev_id
self._part_shift = ring_data._part_shift
@property
def replica_count(self):
"""Number of replicas used in the ring."""
return len(self._replica2part2dev_id)
@property
def partition_count(self):
"""Number of partitions in the ring."""
return len(self._replica2part2dev_id[0])
def has_changed(self):
"""
Check to see if the ring on disk is different than the current one in
memory.
:returns: True if the ring on disk has changed, False otherwise
"""
return getmtime(self.pickle_gz_path) != self._mtime
def get_part_nodes(self, part):
"""
Get the nodes that are responsible for the partition.
:param part: partition to get nodes for
:returns: list of node dicts
See :func:`get_nodes` for a description of the node dicts.
"""
if time() > self._rtime:
self._reload()
return [self.devs[r[part]] for r in self._replica2part2dev_id]
def get_nodes(self, account, container=None, obj=None):
"""
Get the partition and nodes for an account/container/object.
:param account: account name
:param container: container name
:param obj: object name
:returns: a tuple of (partition, list of node dicts)
Each node dict will have at least the following keys:
====== ===============================================================
id unique integer identifier amongst devices
weight a float of the relative weight of this device as compared to
others; this indicates how many partitions the builder will try
to assign to this device
zone integer indicating which zone the device is in; a given
partition will not be assigned to multiple devices within the
               same zone
        ip     the ip address of the device
port the tcp port of the device
device the device's name on disk (sdb1, for example)
meta general use 'extra' field; for example: the online date, the
hardware description
====== ===============================================================
"""
key = hash_path(account, container, obj, raw_digest=True)
if time() > self._rtime:
self._reload()
part = unpack_from('>I', key)[0] >> self._part_shift
return part, [self.devs[r[part]] for r in self._replica2part2dev_id]
def get_more_nodes(self, part):
"""
Generator to get extra nodes for a partition for hinted handoff.
:param part: partition to get handoff nodes for
:returns: generator of node dicts
See :func:`get_nodes` for a description of the node dicts.
"""
if time() > self._rtime:
self._reload()
zones = sorted(self.zone2devs.keys())
for part2dev_id in self._replica2part2dev_id:
zones.remove(self.devs[part2dev_id[part]]['zone'])
while zones:
zone = zones.pop(part % len(zones))
weighted_node = None
for i in xrange(len(self.zone2devs[zone])):
node = self.zone2devs[zone][(part + i) %
len(self.zone2devs[zone])]
if node.get('weight'):
weighted_node = node
break
if weighted_node:
yield weighted_node
| apache-2.0 | 4,469,315,506,916,429,300 | 36.96875 | 79 | 0.588971 | false | 4.212899 | false | false | false |
log2timeline/plaso | plaso/parsers/winreg_plugins/bam.py | 2 | 3427 | # -*- coding: utf-8 -*-
"""Windows Registry plugin to parse the Background Activity Moderator keys."""
import os
from dfdatetime import filetime as dfdatetime_filetime
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.lib import dtfabric_helper
from plaso.lib import errors
from plaso.parsers import winreg_parser
from plaso.parsers.winreg_plugins import interface
class BackgroundActivityModeratorEventData(events.EventData):
"""Background Activity Moderator event data.
Attributes:
binary_path (str): binary executed.
user_sid (str): user SID associated with entry.
"""
DATA_TYPE = 'windows:registry:bam'
def __init__(self):
"""Initializes event data."""
super(
BackgroundActivityModeratorEventData,
self).__init__(data_type=self.DATA_TYPE)
self.binary_path = None
self.user_sid = None
class BackgroundActivityModeratorWindowsRegistryPlugin(
interface.WindowsRegistryPlugin, dtfabric_helper.DtFabricHelper):
"""Background Activity Moderator data Windows Registry plugin."""
NAME = 'bam'
DATA_FORMAT = 'Background Activity Moderator (BAM) Registry data'
FILTERS = frozenset([
interface.WindowsRegistryKeyPathFilter(
'HKEY_LOCAL_MACHINE\\System\\CurrentControlSet\\Services\\bam'
'\\UserSettings'),
interface.WindowsRegistryKeyPathFilter(
'HKEY_LOCAL_MACHINE\\System\\CurrentControlSet\\Services\\bam'
'\\State\\UserSettings')])
_DEFINITION_FILE = os.path.join(
os.path.dirname(__file__), 'filetime.yaml')
def _ParseValue(self, registry_value):
"""Parses the registry value.
Args:
registry_value (bytes): value data.
Returns:
int: timestamp.
Raises:
ParseError: if the value data could not be parsed.
"""
try:
timestamp = self._ReadStructureFromByteStream(
registry_value, 0, self._GetDataTypeMap('filetime'))
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError(
'Unable to parse timestamp with error: {0!s}'.format(
exception))
return timestamp
def ExtractEvents(self, parser_mediator, registry_key, **kwargs):
"""Extracts events from a Windows Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
Raises:
ParseError: if the value data could not be parsed.
"""
sid_keys = registry_key.GetSubkeys()
if not sid_keys:
return
for sid_key in sid_keys:
for value in sid_key.GetValues():
if not value.name == 'Version' and not value.name == 'SequenceNumber':
timestamp = self._ParseValue(value.data)
if timestamp:
event_data = BackgroundActivityModeratorEventData()
event_data.binary_path = value.name
event_data.user_sid = sid_key.name
date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_RUN)
parser_mediator.ProduceEventWithEventData(event, event_data)
winreg_parser.WinRegistryParser.RegisterPlugin(
BackgroundActivityModeratorWindowsRegistryPlugin)
| apache-2.0 | -76,917,701,552,588,500 | 30.440367 | 78 | 0.693026 | false | 4.128916 | false | false | false |
eliksir/mailmojo-python-sdk | mailmojo_sdk/api/page_api.py | 1 | 15435 | # coding: utf-8
"""
MailMojo API
v1 of the MailMojo API # noqa: E501
OpenAPI spec version: 1.1.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from mailmojo_sdk.api_client import ApiClient
class PageApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_page_by_id(self, id, **kwargs): # noqa: E501
"""Retrieve a landing page. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_page_by_id(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: ID of the landing page to retrieve. (required)
:return: Page
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_page_by_id_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_page_by_id_with_http_info(id, **kwargs) # noqa: E501
return data
def get_page_by_id_with_http_info(self, id, **kwargs): # noqa: E501
"""Retrieve a landing page. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_page_by_id_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: ID of the landing page to retrieve. (required)
:return: Page
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_page_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_page_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['mailmojo_auth'] # noqa: E501
return self.api_client.call_api(
'/v1/pages/{id}/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Page', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_pages(self, **kwargs): # noqa: E501
"""Retrieve all landing pages. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_pages(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[Page]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_pages_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_pages_with_http_info(**kwargs) # noqa: E501
return data
def get_pages_with_http_info(self, **kwargs): # noqa: E501
"""Retrieve all landing pages. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_pages_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[Page]
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_pages" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['mailmojo_auth'] # noqa: E501
return self.api_client.call_api(
'/v1/pages/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Page]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def track_page_view(self, id, view, **kwargs): # noqa: E501
"""Track a view of a landing page. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.track_page_view(id, view, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: ID of the page to track view of. (required)
:param TrackPageView view: (required)
:return: TrackPageView
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.track_page_view_with_http_info(id, view, **kwargs) # noqa: E501
else:
(data) = self.track_page_view_with_http_info(id, view, **kwargs) # noqa: E501
return data
def track_page_view_with_http_info(self, id, view, **kwargs): # noqa: E501
"""Track a view of a landing page. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.track_page_view_with_http_info(id, view, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: ID of the page to track view of. (required)
:param TrackPageView view: (required)
:return: TrackPageView
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'view'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method track_page_view" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `track_page_view`") # noqa: E501
# verify the required parameter 'view' is set
if ('view' not in params or
params['view'] is None):
raise ValueError("Missing the required parameter `view` when calling `track_page_view`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'view' in params:
body_params = params['view']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['mailmojo_auth'] # noqa: E501
return self.api_client.call_api(
'/v1/pages/{id}/track/view/', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TrackPageView', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_page(self, id, **kwargs): # noqa: E501
"""Update a landing page partially. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_page(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: ID of the landing page to update. (required)
:param Page page:
:return: Page
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_page_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.update_page_with_http_info(id, **kwargs) # noqa: E501
return data
def update_page_with_http_info(self, id, **kwargs): # noqa: E501
"""Update a landing page partially. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_page_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: ID of the landing page to update. (required)
:param Page page:
:return: Page
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'page'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_page" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `update_page`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'page' in params:
body_params = params['page']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['mailmojo_auth'] # noqa: E501
return self.api_client.call_api(
'/v1/pages/{id}/', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Page', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| apache-2.0 | 683,332,016,425,368,700 | 35.232394 | 114 | 0.56346 | false | 4.046932 | false | false | false |
gsnbng/erpnext | erpnext/regional/report/datev/datev.py | 1 | 12719 | # coding: utf-8
"""
Provide a report and downloadable CSV according to the German DATEV format.
- Query report showing only the columns that contain data, formatted nicely for
  display to the user.
- CSV download functionality `download_datev_csv` that provides a CSV file with
all required columns. Used to import the data into the DATEV Software.
"""
from __future__ import unicode_literals
import datetime
import json
import zlib
import zipfile
import six
from csv import QUOTE_NONNUMERIC
from six import BytesIO
from six import string_types
import frappe
from frappe import _
import pandas as pd
from .datev_constants import DataCategory
from .datev_constants import Transactions
from .datev_constants import DebtorsCreditors
from .datev_constants import AccountNames
from .datev_constants import QUERY_REPORT_COLUMNS
def execute(filters=None):
"""Entry point for frappe."""
validate(filters)
result = get_transactions(filters, as_dict=0)
columns = QUERY_REPORT_COLUMNS
return columns, result
def validate(filters):
"""Make sure all mandatory filters and settings are present."""
if not filters.get('company'):
frappe.throw(_('<b>Company</b> is a mandatory filter.'))
if not filters.get('from_date'):
frappe.throw(_('<b>From Date</b> is a mandatory filter.'))
if not filters.get('to_date'):
frappe.throw(_('<b>To Date</b> is a mandatory filter.'))
try:
frappe.get_doc('DATEV Settings', filters.get('company'))
except frappe.DoesNotExistError:
frappe.throw(_('Please create <b>DATEV Settings</b> for Company <b>{}</b>.').format(filters.get('company')))
def get_transactions(filters, as_dict=1):
"""
Get a list of accounting entries.
Select GL Entries joined with Account and Party Account in order to get the
account numbers. Returns a list of accounting entries.
Arguments:
filters -- dict of filters to be passed to the sql query
as_dict -- return as list of dicts [0,1]
"""
filter_by_voucher = 'AND gl.voucher_type = %(voucher_type)s' if filters.get('voucher_type') else ''
gl_entries = frappe.db.sql("""
SELECT
/* either debit or credit amount; always positive */
case gl.debit when 0 then gl.credit else gl.debit end as 'Umsatz (ohne Soll/Haben-Kz)',
/* 'H' when credit, 'S' when debit */
case gl.debit when 0 then 'H' else 'S' end as 'Soll/Haben-Kennzeichen',
/* account number or, if empty, party account number */
coalesce(acc.account_number, acc_pa.account_number) as 'Konto',
/* against number or, if empty, party against number */
coalesce(acc_against.account_number, acc_against_pa.account_number) as 'Gegenkonto (ohne BU-Schlüssel)',
gl.posting_date as 'Belegdatum',
gl.voucher_no as 'Belegfeld 1',
LEFT(gl.remarks, 60) as 'Buchungstext',
gl.voucher_type as 'Beleginfo - Art 1',
gl.voucher_no as 'Beleginfo - Inhalt 1',
gl.against_voucher_type as 'Beleginfo - Art 2',
gl.against_voucher as 'Beleginfo - Inhalt 2'
FROM `tabGL Entry` gl
/* Statistisches Konto (Debitoren/Kreditoren) */
left join `tabParty Account` pa
on gl.against = pa.parent
and gl.company = pa.company
/* Kontonummer */
left join `tabAccount` acc
on gl.account = acc.name
/* Gegenkonto-Nummer */
left join `tabAccount` acc_against
on gl.against = acc_against.name
/* Statistische Kontonummer */
left join `tabAccount` acc_pa
on pa.account = acc_pa.name
/* Statistische Gegenkonto-Nummer */
left join `tabAccount` acc_against_pa
on pa.account = acc_against_pa.name
WHERE gl.company = %(company)s
AND DATE(gl.posting_date) >= %(from_date)s
AND DATE(gl.posting_date) <= %(to_date)s
{}
ORDER BY 'Belegdatum', gl.voucher_no""".format(filter_by_voucher), filters, as_dict=as_dict)
return gl_entries
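# Illustrative (hypothetical) call to get_transactions -- the filter keys mirror
# the ones checked in validate() above; the values below are made-up examples:
#   get_transactions({'company': 'Example GmbH',
#                     'from_date': '2019-01-01',
#                     'to_date': '2019-12-31'})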
def get_customers(filters):
"""
Get a list of Customers.
Arguments:
filters -- dict of filters to be passed to the sql query
"""
return frappe.db.sql("""
SELECT
acc.account_number as 'Konto',
cus.customer_name as 'Name (Adressatentyp Unternehmen)',
case cus.customer_type when 'Individual' then 1 when 'Company' then 2 else 0 end as 'Adressatentyp',
adr.address_line1 as 'Straße',
adr.pincode as 'Postleitzahl',
adr.city as 'Ort',
UPPER(country.code) as 'Land',
adr.address_line2 as 'Adresszusatz',
con.email_id as 'E-Mail',
coalesce(con.mobile_no, con.phone) as 'Telefon',
cus.website as 'Internet',
cus.tax_id as 'Steuernummer',
ccl.credit_limit as 'Kreditlimit (Debitor)'
FROM `tabParty Account` par
left join `tabAccount` acc
on acc.name = par.account
left join `tabCustomer` cus
on cus.name = par.parent
left join `tabAddress` adr
on adr.name = cus.customer_primary_address
left join `tabCountry` country
on country.name = adr.country
left join `tabContact` con
on con.name = cus.customer_primary_contact
left join `tabCustomer Credit Limit` ccl
on ccl.parent = cus.name
and ccl.company = par.company
WHERE par.company = %(company)s
AND par.parenttype = 'Customer'""", filters, as_dict=1)
def get_suppliers(filters):
"""
Get a list of Suppliers.
Arguments:
filters -- dict of filters to be passed to the sql query
"""
return frappe.db.sql("""
SELECT
acc.account_number as 'Konto',
sup.supplier_name as 'Name (Adressatentyp Unternehmen)',
case sup.supplier_type when 'Individual' then '1' when 'Company' then '2' else '0' end as 'Adressatentyp',
adr.address_line1 as 'Straße',
adr.pincode as 'Postleitzahl',
adr.city as 'Ort',
UPPER(country.code) as 'Land',
adr.address_line2 as 'Adresszusatz',
con.email_id as 'E-Mail',
coalesce(con.mobile_no, con.phone) as 'Telefon',
sup.website as 'Internet',
sup.tax_id as 'Steuernummer',
case sup.on_hold when 1 then sup.release_date else null end as 'Zahlungssperre bis'
FROM `tabParty Account` par
left join `tabAccount` acc
on acc.name = par.account
left join `tabSupplier` sup
on sup.name = par.parent
left join `tabDynamic Link` dyn_adr
on dyn_adr.link_name = sup.name
and dyn_adr.link_doctype = 'Supplier'
and dyn_adr.parenttype = 'Address'
left join `tabAddress` adr
on adr.name = dyn_adr.parent
and adr.is_primary_address = '1'
left join `tabCountry` country
on country.name = adr.country
left join `tabDynamic Link` dyn_con
on dyn_con.link_name = sup.name
and dyn_con.link_doctype = 'Supplier'
and dyn_con.parenttype = 'Contact'
left join `tabContact` con
on con.name = dyn_con.parent
and con.is_primary_contact = '1'
WHERE par.company = %(company)s
AND par.parenttype = 'Supplier'""", filters, as_dict=1)
def get_account_names(filters):
return frappe.get_list("Account",
fields=["account_number as Konto", "name as Kontenbeschriftung"],
filters={"company": filters.get("company"), "is_group": "0"})
def get_datev_csv(data, filters, csv_class):
"""
Fill in missing columns and return a CSV in DATEV Format.
For automatic processing, DATEV requires the first line of the CSV file to
	hold metadata such as the length of account numbers or the category of
the data.
Arguments:
data -- array of dictionaries
filters -- dict
csv_class -- defines DATA_CATEGORY, FORMAT_NAME and COLUMNS
"""
empty_df = pd.DataFrame(columns=csv_class.COLUMNS)
data_df = pd.DataFrame.from_records(data)
result = empty_df.append(data_df, sort=True)
if csv_class.DATA_CATEGORY == DataCategory.TRANSACTIONS:
result['Belegdatum'] = pd.to_datetime(result['Belegdatum'])
if csv_class.DATA_CATEGORY == DataCategory.ACCOUNT_NAMES:
result['Sprach-ID'] = 'de-DE'
data = result.to_csv(
# Reason for str(';'): https://github.com/pandas-dev/pandas/issues/6035
sep=str(';'),
		# European decimal separator
decimal=',',
# Windows "ANSI" encoding
encoding='latin_1',
# format date as DDMM
date_format='%d%m',
# Windows line terminator
line_terminator='\r\n',
# Do not number rows
index=False,
# Use all columns defined above
columns=csv_class.COLUMNS,
# Quote most fields, even currency values with "," separator
quoting=QUOTE_NONNUMERIC
)
if not six.PY2:
data = data.encode('latin_1')
header = get_header(filters, csv_class)
header = ';'.join(header).encode('latin_1')
# 1st Row: Header with meta data
# 2nd Row: Data heading (Überschrift der Nutzdaten), included in `data` here.
# 3rd - nth Row: Data (Nutzdaten)
return header + b'\r\n' + data
def get_header(filters, csv_class):
coa = frappe.get_value("Company", filters.get("company"), "chart_of_accounts")
description = filters.get("voucher_type", csv_class.FORMAT_NAME)
coa_used = "04" if "SKR04" in coa else ("03" if "SKR03" in coa else "")
header = [
# DATEV format
# "DTVF" = created by DATEV software,
# "EXTF" = created by other software
'"EXTF"',
# version of the DATEV format
# 141 = 1.41,
# 510 = 5.10,
# 720 = 7.20
'700',
csv_class.DATA_CATEGORY,
'"%s"' % csv_class.FORMAT_NAME,
# Format version (regarding format name)
csv_class.FORMAT_VERSION,
# Generated on
datetime.datetime.now().strftime("%Y%m%d%H%M%S") + '000',
# Imported on -- stays empty
'',
# Origin. Any two symbols, will be replaced by "SV" on import.
'"EN"',
# I = Exported by
'"%s"' % frappe.session.user,
# J = Imported by -- stays empty
'',
# K = Tax consultant number (Beraternummer)
frappe.get_value("DATEV Settings", filters.get("company"), "consultant_number"),
# L = Tax client number (Mandantennummer)
frappe.get_value("DATEV Settings", filters.get("company"), "client_number"),
# M = Start of the fiscal year (Wirtschaftsjahresbeginn)
frappe.utils.formatdate(frappe.defaults.get_user_default("year_start_date"), "yyyyMMdd"),
# N = Length of account numbers (Sachkontenlänge)
'4',
# O = Transaction batch start date (YYYYMMDD)
frappe.utils.formatdate(filters.get('from_date'), "yyyyMMdd"),
# P = Transaction batch end date (YYYYMMDD)
frappe.utils.formatdate(filters.get('to_date'), "yyyyMMdd"),
# Q = Description (for example, "Sales Invoice") Max. 30 chars
'"{}"'.format(_(description)),
# R = Diktatkürzel
'',
# S = Buchungstyp
# 1 = Transaction batch (Finanzbuchführung),
# 2 = Annual financial statement (Jahresabschluss)
'1' if csv_class.DATA_CATEGORY == DataCategory.TRANSACTIONS else '',
# T = Rechnungslegungszweck
# 0 oder leer = vom Rechnungslegungszweck unabhängig
# 50 = Handelsrecht
# 30 = Steuerrecht
# 64 = IFRS
# 40 = Kalkulatorik
# 11 = Reserviert
# 12 = Reserviert
'0',
# U = Festschreibung
# TODO: Filter by Accounting Period. In export for closed Accounting Period, this will be "1"
'0',
# V = Default currency, for example, "EUR"
'"%s"' % frappe.get_value("Company", filters.get("company"), "default_currency"),
# reserviert
'',
# Derivatskennzeichen
'',
# reserviert
'',
# reserviert
'',
# SKR
'"%s"' % coa_used,
# Branchen-Lösungs-ID
'',
# reserviert
'',
# reserviert
'',
# Anwendungsinformation (Verarbeitungskennzeichen der abgebenden Anwendung)
''
]
return header
@frappe.whitelist()
def download_datev_csv(filters=None):
"""
Provide accounting entries for download in DATEV format.
Validate the filters, get the data, produce the CSV file and provide it for
download. Can be called like this:
GET /api/method/erpnext.regional.report.datev.datev.download_datev_csv
Arguments / Params:
filters -- dict of filters to be passed to the sql query
"""
if isinstance(filters, string_types):
filters = json.loads(filters)
validate(filters)
# This is where my zip will be written
zip_buffer = BytesIO()
# This is my zip file
datev_zip = zipfile.ZipFile(zip_buffer, mode='w', compression=zipfile.ZIP_DEFLATED)
transactions = get_transactions(filters)
transactions_csv = get_datev_csv(transactions, filters, csv_class=Transactions)
datev_zip.writestr('EXTF_Buchungsstapel.csv', transactions_csv)
account_names = get_account_names(filters)
account_names_csv = get_datev_csv(account_names, filters, csv_class=AccountNames)
datev_zip.writestr('EXTF_Kontenbeschriftungen.csv', account_names_csv)
customers = get_customers(filters)
customers_csv = get_datev_csv(customers, filters, csv_class=DebtorsCreditors)
datev_zip.writestr('EXTF_Kunden.csv', customers_csv)
suppliers = get_suppliers(filters)
suppliers_csv = get_datev_csv(suppliers, filters, csv_class=DebtorsCreditors)
datev_zip.writestr('EXTF_Lieferanten.csv', suppliers_csv)
# You must call close() before exiting your program or essential records will not be written.
datev_zip.close()
frappe.response['filecontent'] = zip_buffer.getvalue()
frappe.response['filename'] = 'DATEV.zip'
frappe.response['type'] = 'binary'
| agpl-3.0 | 2,071,427,166,371,283,500 | 29.406699 | 110 | 0.699213 | false | 2.940088 | false | false | false |
benjamincongdon/adept | mapGenerator.py | 1 | 5638 | import sys
import random
from noise import snoise3
from PIL import Image
from biome import Biome
"""
Class that generates tile data for the world map with a given seed.
The generation algorithm uses Perlin noise to generate maps for both altitude and moisture.
(This requires the 'noise' library)
Based on the generated noise, biomes are determined.
The algorithm is (and must be) deterministic for any discrete seed, as the whole
game world will not be generated in a single call of the GenerateMap() function.
Rather, chunks may be requested from the generator using the map's seed, and the
location / size of the requested chunks.
"""
class MapGenerator:
chunkSizeX = 32
chunkSizeY = 32
@staticmethod
def GenerateMap(seed, startx, starty, sizex, sizey):
#Constants needed for the perlin noise algorithm
octaves = 8;
freq = 64.0 * octaves
"""
NOTE: Changing the value of freq essentially changes the scale / level
of detail produced in the noise maps.
"""
#Generate 2d Lists for height and moisture data
heightMap = [[None]*sizex for g in range(sizey)]
moistureMap = [[None]*sizex for h in range(sizey)]
for outputy, y in enumerate(range(starty, sizey + starty)):
for outputx, x in enumerate(range(startx, sizex + startx)):
#Generate Perlin noise for the given x,y using the map seed as the z value
#Map the noise to between 0 and 255
heightMap[outputx][outputy] = int(snoise3(x / freq, y / freq, seed, octaves) * 127.0 + 128.0)
#Change the z value so that moisture is determined by a different (but predictable) seed
moistureMap[outputx][outputy] = int(snoise3(x / freq, y / freq, seed*10, octaves) * 127.0 + 128.0)
biomeMap = MapGenerator.AssignBiomes(heightMap,moistureMap,sizex,sizey)
return biomeMap
@staticmethod
def AssignBiomes(altitude,moisture,sizex,sizey):
biomeMap = [[None]*sizex for g in range(sizey)]
for y in range(0,sizex):
for x in range(0,sizey):
#ocean
if(altitude[x][y] <= Biome.ocean_height):
biomeMap[y][x] = Biome.ocean
#shore
elif(altitude[x][y] <= Biome.shore_height):
biomeMap[y][x] = Biome.shore
#Mountain Peak
elif(altitude[x][y] >= Biome.peak_height):
biomeMap[y][x] = Biome.peak
#Mountain
elif(altitude[x][y] >= Biome.mountain_height):
biomeMap[y][x] = Biome.mountain
#tundra
elif(moisture[x][y] >= Biome.tundra_moisture):
biomeMap[y][x] = Biome.tundra
#tropical
elif(moisture[x][y] >= Biome.tropical_moisture):
biomeMap[y][x] = Biome.tropical
#Forest
elif(moisture[x][y] >= Biome.forest_moisture):
biomeMap[y][x] = Biome.forest
#Grassland
elif(moisture[x][y] >= Biome.grassland_moisture):
biomeMap[y][x] = Biome.grassland
#desert
elif(moisture[x][y] >= Biome.desert_moisture):
biomeMap[y][x] = Biome.desert
return biomeMap
@staticmethod
def SmoothMoistureMap(moisture):
"""
TODO
"""
pass
@staticmethod
def GenerateChunk(seed,chunkx, chunky):
worldx = chunkx * MapGenerator.chunkSizeX
worldy = chunky * MapGenerator.chunkSizeY
        return MapGenerator.GenerateMap(seed, worldx, worldy, MapGenerator.chunkSizeX, MapGenerator.chunkSizeY)
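    # Illustrative note (not part of the original API): generation is meant to be
    # deterministic for a given seed, so requesting the same chunk twice should
    # return identical biome data, e.g.
    #   MapGenerator.GenerateChunk(0.42, 3, -1) == MapGenerator.GenerateChunk(0.42, 3, -1)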
@staticmethod
def DrawMap(biomeMap):
#initializes new image
img = Image.new("RGB", (len(biomeMap),len(biomeMap[0])), "blue")
pixels = img.load()
#Iterate through all pixels
for y in range(len(biomeMap)):
for x in range(len(biomeMap[0])):
#Mountain Peak
if(biomeMap[x][y] == Biome.peak):
pixels[x,y] = Biome.peak_color
#Mountain
elif(biomeMap[x][y] == Biome.mountain):
pixels[x,y] = Biome.mountain_color
#Forest
elif(biomeMap[x][y] == Biome.forest):
pixels[x,y] = Biome.forest_color
#Grassland
elif(biomeMap[x][y] == Biome.grassland):
pixels[x,y] = Biome.grassland_color
#desert
elif(biomeMap[x][y] == Biome.desert):
pixels[x,y] = Biome.desert_color
#ocean
elif(biomeMap[x][y] == Biome.ocean):
pixels[x,y] = Biome.ocean_color
#shore
elif(biomeMap[x][y] == Biome.shore):
pixels[x,y] = Biome.shore_color
#tropical
elif(biomeMap[x][y] == Biome.tropical):
pixels[x,y] = Biome.tropical_color
#tundra
elif(biomeMap[x][y] == Biome.tundra):
pixels[x,y] = Biome.tundra_color
else:
pixels[x,y] = 0x000000
#Biome not assigned
if x % 32 == 0 or y % 32 == 0:
pixels[x,y] = 0xeeeeee
if x == 0 and y == 0:
pixels[x,y] = 0xff0000
img.show()
#MapGenerator.DrawMap(MapGenerator.GenerateMap(random.random(),0,0,512,512))
| mit | -4,249,066,386,086,056,000 | 39.271429 | 114 | 0.545406 | false | 3.646831 | false | false | false |
tcuthbert/tcuthbert | tombomation/blog/migrations/0003_auto_20150212_0746.py | 1 | 1304 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('blog', '0002_category'),
]
operations = [
migrations.AlterModelOptions(
name='category',
options={'verbose_name_plural': 'Categories', 'ordering': ['title'], 'verbose_name': 'Category'},
),
migrations.AddField(
model_name='category',
name='slug',
field=models.SlugField(verbose_name='Slug', default=datetime.datetime(2015, 2, 12, 7, 46, 25, 982321, tzinfo=utc), help_text='Uri identifier.', max_length=255, unique=True),
preserve_default=False,
),
migrations.AddField(
model_name='post',
name='categories',
field=models.ManyToManyField(null=True, verbose_name='Categories', help_text=' ', to='blog.Category', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='post',
name='date_publish',
field=models.DateTimeField(auto_now=True, help_text=' ', verbose_name='Publish Date'),
preserve_default=True,
),
]
| mit | -4,135,064,516,705,127,400 | 33.315789 | 185 | 0.590491 | false | 4.30363 | false | false | false |
leandrotoledo/python-telegram-bot | setup.py | 2 | 4496 | #!/usr/bin/env python
"""The setup and build script for the python-telegram-bot library."""
import os
import subprocess
import sys
from setuptools import setup, find_packages
UPSTREAM_URLLIB3_FLAG = '--with-upstream-urllib3'
def get_requirements(raw=False):
"""Build the requirements list for this project"""
requirements_list = []
with open('requirements.txt') as reqs:
for install in reqs:
if install.startswith('# only telegram.ext:'):
if raw:
break
continue
requirements_list.append(install.strip())
return requirements_list
def get_packages_requirements(raw=False):
"""Build the package & requirements list for this project"""
reqs = get_requirements(raw=raw)
exclude = ['tests*']
if raw:
exclude.append('telegram.ext*')
packs = find_packages(exclude=exclude)
# Allow for a package install to not use the vendored urllib3
if UPSTREAM_URLLIB3_FLAG in sys.argv:
sys.argv.remove(UPSTREAM_URLLIB3_FLAG)
reqs.append('urllib3 >= 1.19.1')
packs = [x for x in packs if not x.startswith('telegram.vendor.ptb_urllib3')]
return packs, reqs
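# Illustrative usage note (not from the original file): passing the flag on the
# command line, e.g. `python setup.py bdist_wheel --with-upstream-urllib3`, makes
# the built package depend on upstream urllib3 instead of the vendored copy.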
def get_setup_kwargs(raw=False):
"""Builds a dictionary of kwargs for the setup function"""
packages, requirements = get_packages_requirements(raw=raw)
raw_ext = "-raw" if raw else ""
readme = f'README{"_RAW" if raw else ""}.rst'
fn = os.path.join('telegram', 'version.py')
with open(fn) as fh:
for line in fh.readlines():
if line.startswith('__version__'):
exec(line)
with open(readme, 'r', encoding='utf-8') as fd:
kwargs = dict(
script_name=f'setup{raw_ext}.py',
name=f'python-telegram-bot{raw_ext}',
version=locals()['__version__'],
author='Leandro Toledo',
author_email='[email protected]',
license='LGPLv3',
url='https://python-telegram-bot.org/',
# Keywords supported by PyPI can be found at https://git.io/JtLIZ
project_urls={
"Documentation": "https://python-telegram-bot.readthedocs.io",
"Bug Tracker": "https://github.com/python-telegram-bot/python-telegram-bot/issues",
"Source Code": "https://github.com/python-telegram-bot/python-telegram-bot",
"News": "https://t.me/pythontelegrambotchannel",
"Changelog": "https://python-telegram-bot.readthedocs.io/en/stable/changelog.html",
},
download_url=f'https://pypi.org/project/python-telegram-bot{raw_ext}/',
keywords='python telegram bot api wrapper',
description="We have made you a wrapper you can't refuse",
long_description=fd.read(),
long_description_content_type='text/x-rst',
packages=packages,
install_requires=requirements,
extras_require={
'json': 'ujson',
'socks': 'PySocks',
# 3.4-3.4.3 contained some cyclical import bugs
'passport': 'cryptography!=3.4,!=3.4.1,!=3.4.2,!=3.4.3',
},
include_package_data=True,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Communications :: Chat',
'Topic :: Internet',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
python_requires='>=3.6'
)
return kwargs
def main():
# If we're building, build ptb-raw as well
if set(sys.argv[1:]) in [{'bdist_wheel'}, {'sdist'}, {'sdist', 'bdist_wheel'}]:
args = ['python', 'setup-raw.py']
args.extend(sys.argv[1:])
subprocess.run(args, check=True, capture_output=True)
setup(**get_setup_kwargs(raw=False))
if __name__ == '__main__':
main()
| lgpl-3.0 | 58,550,580,717,179,704 | 35.552846 | 99 | 0.571397 | false | 4.010705 | false | false | false |
realms-team/solmanager | libs/smartmeshsdk-REL-1.3.0.1/app/SyncTemp/logAnalysis.py | 2 | 9586 | import re
import time
import traceback
EVENT_TEMPERATURE = 'temperature'
EVENT_STARTAPP = 'start_app'
EVENT_STOPAPP = 'stop_app'
EVENT_CONNECTED = 'connected'
EVENT_DISCONNECTED = 'disconnected'
EVENT_ALL = [
EVENT_TEMPERATURE,
EVENT_STARTAPP,
EVENT_STOPAPP,
EVENT_CONNECTED,
EVENT_DISCONNECTED
]
class Printer(object):
_instance = None
_init = False
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(Printer, cls).__new__(cls, *args, **kwargs)
return cls._instance
def __init__(self):
# don't re-initialize an instance (needed because singleton)
if self._init:
return
self._init = True
self.f = open('statistics.txt','w')
def printline(self,line):
print line
self.f.write(line)
    def __del__(self):
self.f.close()
#============================ statistics ======================================
class Stats(object):
def feedstat(self,stat):
raise NotImplementedError()
def formatheader(self):
output = []
output += ['']
output += ['']
output += ['='*79]
output += [self.description]
output += ['='*79]
output += ['']
output = '\n'.join(output)
return output
class NumDataPoints(Stats):
description = 'Number of Data Points Received'
def __init__(self):
self.numDataPoints = {}
def feedstat(self,statline):
if statline['eventType']!=EVENT_TEMPERATURE:
return
# count number of packets
if statline['mac'] not in self.numDataPoints:
self.numDataPoints[statline['mac']] = 0
self.numDataPoints[statline['mac']] += 1
def formatstat(self):
# identify missing
maxMumDataPoints = max([v for (k,v) in self.numDataPoints.items()])
# format output
output = []
output += [self.formatheader()]
output += ['({0} motes)'.format(len(self.numDataPoints))]
for (mac,num) in self.numDataPoints.items():
if num<maxMumDataPoints:
remark = ' ({0} missing)'.format(maxMumDataPoints-num)
else:
remark = ''
output += [' - {0} : {1}{2}'.format(mac,num,remark)]
output = '\n'.join(output)
return output
class MaxTimeGap(Stats):
description = 'Maximum Time Gap Between Consecutive Data Points'
def __init__(self):
self.maxgap = {}
def feedstat(self,statline):
if statline['eventType']!=EVENT_TEMPERATURE:
return
mac = statline['mac']
timestamp = statline['timestamp']
# count number of packets
if mac not in self.maxgap:
self.maxgap[mac] = {
'lasttimestamp' : None,
'maxgap': None,
}
if self.maxgap[mac]['lasttimestamp']!=None:
thisgap = timestamp-self.maxgap[mac]['lasttimestamp']
if self.maxgap[mac]['maxgap']==None or thisgap>self.maxgap[mac]['maxgap']:
self.maxgap[mac]['maxgap'] = thisgap
self.maxgap[mac]['lasttimestamp'] = timestamp
def formatstat(self):
output = []
output += [self.formatheader()]
output += ['({0} motes)'.format(len(self.maxgap))]
for (mac,v) in self.maxgap.items():
if v['maxgap']:
output += [' - {0} : {1}s'.format(mac,int(v['maxgap']))]
else:
output += [' - {0} : {1}'.format(mac, v['maxgap'] )]
output = '\n'.join(output)
return output
class BurstSpread(Stats):
description = 'Maximum time spread among measurements (for each burst)'
MAX_BURST_SPREAD = 5.0
def __init__(self):
self.bursts = {}
def calculateBurstId(self,timestamp):
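        # Round the timestamp to the nearest multiple of MAX_BURST_SPREAD so that
        # measurements arriving close together share one burst id (with the 5.0s
        # default, timestamps 14.0 and 16.0 both map to burst id 15).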
return int(self.MAX_BURST_SPREAD * round(float(timestamp)/self.MAX_BURST_SPREAD))
def feedstat(self,statline):
if statline['eventType']!=EVENT_TEMPERATURE:
return
timestamp = statline['timestamp']
burstId = self.calculateBurstId(timestamp)
# count number of packets
if burstId not in self.bursts:
self.bursts[burstId] = {
'numpoints': 0,
'mintimestamp' : None,
'maxtimestamp': None,
}
self.bursts[burstId]['numpoints'] += 1
if self.bursts[burstId]['mintimestamp']==None or timestamp<self.bursts[burstId]['mintimestamp']:
self.bursts[burstId]['mintimestamp'] = timestamp
if self.bursts[burstId]['maxtimestamp']==None or timestamp>self.bursts[burstId]['maxtimestamp']:
self.bursts[burstId]['maxtimestamp'] = timestamp
def formatstat(self):
# calculate spread
for (k,v) in self.bursts.items():
if v['mintimestamp']!=None and v['maxtimestamp']!=None:
v['spread'] = v['maxtimestamp']-v['mintimestamp']
else:
v['spread'] = None
output = []
output += [self.formatheader()]
output += [
'({0} bursts separated by {1:.03f}s or more)'.format(
len(self.bursts),
self.MAX_BURST_SPREAD
)
]
allts = sorted(self.bursts.keys())
for ts in allts:
b = self.bursts[ts]
tsString = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(ts))
if b['spread']!=None:
spreadString = '{0:>5}ms'.format(int(1000*b['spread']))
else:
spreadString = 'not enough data'
output += [' - around {0} : {1} ({2} points)'.format(tsString,spreadString,b['numpoints'])]
output = '\n'.join(output)
return output
#============================ main ============================================
def main():
try:
print 'logAnalysis - Dust Networks (c) 2014'
# initialize stats
stats = [s() for s in Stats.__subclasses__()]
# parse file and fill in statistics
with open('temperaturelog.csv','r') as f:
for line in f:
                m = re.search('([0-9\- :]*)\.([0-9]{3}),([a-zA-Z_ ]*),([0-9a-f\-]*),([0-9.]*)',line)
if not m:
print 'WARNING: following line not parsed: {0}'.format(line)
assert(0)
continue
# rawline
rawline = {}
rawline['timestamp'] = m.group(1)
rawline['timestampMs'] = m.group(2)
rawline['eventType'] = m.group(3)
rawline['mac'] = m.group(4)
rawline['temperature'] = m.group(5)
# print
output = []
output += ['']
output += ['====================']
output += ['']
output += ['{0:>20} : "{1}"'.format("line",line.strip())]
output += ['']
for (k,v) in rawline.items():
output += ['{0:>20} : {1}'.format(k,v)]
output = '\n'.join(output)
#Printer().printline(output)
# statline
statline = {}
statline['timestamp'] = time.mktime(time.strptime(rawline['timestamp'],"%Y-%m-%d %H:%M:%S"))
statline['timestamp'] += int(rawline['timestampMs'])/1000.0
statline['eventType'] = rawline['eventType']
assert rawline['eventType'] in EVENT_ALL
if rawline['mac']:
statline['mac'] = rawline['mac']
else:
statline['mac'] = None
if rawline['temperature']:
statline['temperature'] = float(rawline['temperature'])
else:
statline['temperature'] = None
# print
output = []
output += ['']
output += ['{0:>20} : {1:.03f}'.format("timestamp",statline['timestamp'])]
output += ['{0:>20} : {1}'.format("eventType",statline['eventType'])]
output += ['{0:>20} : {1}'.format("mac",rawline['mac'])]
if statline['temperature']:
output += ['{0:>20} : {1:.02f}'.format("temperature",statline['temperature'])]
else:
output += ['{0:>20} : {1}'.format("temperature",statline['temperature'])]
output = '\n'.join(output)
#Printer().printline(output)
# feed stat
for stat in stats:
stat.feedstat(statline)
# print statistics
for stat in stats:
Printer().printline(stat.formatstat())
except Exception as err:
print "FATAL: ({0}) {1}".format(type(err),err)
print traceback.print_exc()
else:
print "\n\nScript ended normally"
raw_input("\nPress enter to close.")
if __name__=="__main__":
main()
| bsd-3-clause | 6,065,518,607,416,828,000 | 33.235714 | 116 | 0.471938 | false | 4.112398 | false | false | false |
RyanWilsonDev/DomoPy | setup.py | 1 | 1771 | import os
from setuptools import setup
# Manage version in __init__.py
def get_version(version_tuple):
"""version from tuple accounting for possible a,b,rc tags."""
# in case an a, b, or rc tag is added
if not isinstance(version_tuple[-1], int):
return '.'.join(
map(str, version_tuple[:-1])
) + version_tuple[-1]
return '.'.join(map(str, version_tuple))
# path to __init__ for package
INIT = os.path.join(
os.path.dirname(__file__), 'domopy',
'__init__.py'
)
VERSION_LINE = list(
filter(lambda line: line.startswith('VERSION'), open(INIT))
)[0]
# A lot of effort, but the package may not be importable before the install
# has finished, so we cannot simply import VERSION here.
VERSION = get_version(eval(VERSION_LINE.split('=')[-1]))
setup(
name='domopy',
version=VERSION,
author='Ryan Wilson',
license='MIT',
url='https://github.com/RyanWilsonDev/DomoPy',
description="methods for interacting with Domo APIs",
long_description="""
Set of classes and methods for interacting with
the Domo Data APIs and Domo User APIs. Handles
Authentication, pulling data from domo, creating
new domo datasets, replace/appending existing
datasets, etc.
""",
packages=[
'domopy'
],
package_data={'': ['LICENSE'], 'LICENSES': ['NOTICE', 'PANDAS_LICENSE', 'REQUESTS_LICENSE']},
include_package_data=True,
install_requires=[
'pandas',
'requests',
'requests_oauthlib'
],
classifiers=(
'Intended Audience :: Developers',
'Natural Language :: English',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython'
),
tests_require=[]
)
| mit | 5,546,842,748,305,411,000 | 26.671875 | 97 | 0.621683 | false | 3.833333 | false | false | false |
senarvi/theanolm | theanolm/training/nesterovoptimizer.py | 1 | 2905 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""A module that implements the Nesterov momentum optimizer.
"""
import numpy
from theanolm.backend import Parameters
from theanolm.training.basicoptimizer import BasicOptimizer
class NesterovOptimizer(BasicOptimizer):
"""Nesterov Momentum Optimization Method
Normally Nesterov momentum is implemented by first taking a step towards
the previous update direction, calculating gradient at that position,
using the gradient to obtain the new update direction, and finally
updating the parameters. We use an alternative formulation that requires
the gradient to be computed only at the current parameter values,
described here:
https://github.com/lisa-lab/pylearn2/pull/136#issuecomment-10381617
v_{t} = mu * v_{t-1} - lr * gradient(params_{t-1})
params_{t} = params_{t-1} + mu * v_{t} - lr * gradient(params_{t-1})
"""
def __init__(self, optimization_options, network, *args, **kwargs):
"""Creates a Nesterov momentum optimizer. Nesterov momentum optimizer
does not use additional parameters.
:type optimization_options: dict
:param optimization_options: a dictionary of optimization options
:type network: Network
:param network: the neural network object
"""
self._params = Parameters()
for path, param in network.get_variables().items():
self._params.add(path + '_velocity',
numpy.zeros_like(param.get_value()))
# momentum
if 'momentum' not in optimization_options:
raise ValueError("Momentum is not given in optimization options.")
self._momentum = optimization_options['momentum']
super().__init__(optimization_options, network, *args, **kwargs)
def _get_param_updates(self, alpha):
"""Returns Theano expressions for updating the model parameters and any
additional parameters required by the optimizer.
:type alpha: Variable
:param alpha: a scale to be applied to the model parameter updates
:rtype: iterable over pairs (shared variable, new expression)
:returns: expressions how to update the optimizer parameters
"""
result = []
deltas = dict()
for path, gradient in zip(self.network.get_variables(),
self._gradients):
deltas[path] = -gradient
self._normalize(deltas)
for path, param_old in self.network.get_variables().items():
delta = deltas[path]
velocity_old = self._params[path + '_velocity']
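            # The next two lines implement the update rule from the class
            # docstring: delta holds -gradient and alpha is the learning-rate
            # scale, so alpha * delta corresponds to -lr * gradient above.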
velocity = self._momentum * velocity_old + alpha * delta
param = param_old + self._momentum * velocity + alpha * delta
result.append((velocity_old, velocity))
result.append((param_old, param))
return result
| apache-2.0 | 8,641,707,506,987,998,000 | 38.256757 | 79 | 0.64716 | false | 4.483025 | false | false | false |
mushkevych/scheduler | synergy/db/model/unit_of_work.py | 1 | 4120 | __author__ = 'Bohdan Mushkevych'
from odm.document import BaseDocument
from odm.fields import StringField, ObjectIdField, IntegerField, DictField, DateTimeField
TYPE_MANAGED = 'type_managed' # identifies UOW created by Abstract State Machine child for Managed Process
TYPE_FREERUN = 'type_freerun'  # identifies UOW created by FreerunStateMachine for ad-hoc processing
# UOW was successfully processed by the worker
STATE_PROCESSED = 'state_processed'
# UOW was received by the worker and it started the processing
STATE_IN_PROGRESS = 'state_in_progress'
# UOW was instantiated and send to the worker
STATE_REQUESTED = 'state_requested'
# Job has been manually marked as SKIPPED via MX
# and so the associated UOW got cancelled
# or the life-support threshold has been crossed for failing UOW
STATE_CANCELED = 'state_canceled'
# UOW can get into STATE_INVALID if:
# a. related Job was marked for reprocessing via MX
# b. have failed with an exception at the worker level
# NOTICE: GarbageCollector changes STATE_INVALID -> STATE_REQUESTED during re-posting
STATE_INVALID = 'state_invalid'
# UOW was received by a worker,
# but no data was found to process
STATE_NOOP = 'state_noop'
class UnitOfWork(BaseDocument):
""" Module represents persistent Model for atomic unit of work performed by the system.
UnitOfWork Instances are stored in the <unit_of_work> collection """
db_id = ObjectIdField(name='_id', null=True)
process_name = StringField()
timeperiod = StringField(null=True)
start_timeperiod = StringField(null=True) # [synergy date] lower boundary of the period that needs to be processed
end_timeperiod = StringField(null=True) # [synergy date] upper boundary of the period that needs to be processed
start_id = ObjectIdField(name='start_obj_id') # [DB _id] lower boundary of the period that needs to be processed
end_id = ObjectIdField(name='end_obj_id') # [DB _id] upper boundary of the period that needs to be processed
source = StringField(null=True) # defines source of data for the computation
sink = StringField(null=True) # defines sink where the aggregated data will be saved
arguments = DictField() # task-level arguments that could supplement or override process-level ones
state = StringField(choices=[STATE_INVALID, STATE_REQUESTED, STATE_IN_PROGRESS,
STATE_PROCESSED, STATE_CANCELED, STATE_NOOP])
created_at = DateTimeField()
submitted_at = DateTimeField()
started_at = DateTimeField()
finished_at = DateTimeField()
number_of_aggregated_documents = IntegerField()
number_of_processed_documents = IntegerField()
number_of_retries = IntegerField(default=0)
unit_of_work_type = StringField(choices=[TYPE_MANAGED, TYPE_FREERUN])
@classmethod
def key_fields(cls):
return (cls.process_name.name,
cls.timeperiod.name,
cls.start_id.name,
cls.end_id.name)
@property
def is_active(self):
return self.state in [STATE_REQUESTED, STATE_IN_PROGRESS, STATE_INVALID]
@property
def is_finished(self):
return self.state in [STATE_PROCESSED, STATE_CANCELED, STATE_NOOP]
@property
def is_processed(self):
return self.state == STATE_PROCESSED
@property
def is_noop(self):
return self.state == STATE_NOOP
@property
def is_canceled(self):
return self.state == STATE_CANCELED
@property
def is_invalid(self):
return self.state == STATE_INVALID
@property
def is_requested(self):
return self.state == STATE_REQUESTED
@property
def is_in_progress(self):
return self.state == STATE_IN_PROGRESS
PROCESS_NAME = UnitOfWork.process_name.name
TIMEPERIOD = UnitOfWork.timeperiod.name
START_TIMEPERIOD = UnitOfWork.start_timeperiod.name
END_TIMEPERIOD = UnitOfWork.end_timeperiod.name
START_ID = UnitOfWork.start_id.name
END_ID = UnitOfWork.end_id.name
STATE = UnitOfWork.state.name
UNIT_OF_WORK_TYPE = UnitOfWork.unit_of_work_type.name
| bsd-3-clause | -3,814,641,550,784,682,000 | 37.148148 | 120 | 0.706553 | false | 3.769442 | false | false | false |
smart-solution/icy | icy_installation_request/icy_installation_request.py | 2 | 10755 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
##############################################################################
#
##############################################################################
from openerp.osv import osv, fields
import datetime
from datetime import timedelta
from datetime import date
class res_partner(osv.osv):
_inherit = 'res.partner'
_columns = {
'installer': fields.boolean('Installateur'),
}
res_partner()
class crm_software_version(osv.osv):
_name = 'crm.software.version'
_columns = {
'case_section_id': fields.many2one('crm.case.section', 'Type thermostaat', required=True, select=True),
'name': fields.char('Software versie', required=True),
}
crm_software_version()
class crm_case_section(osv.osv):
_inherit = 'crm.case.section'
_columns = {
'software_ids': fields.one2many('crm.software.version', 'case_section_id', 'Software versies'),
}
crm_case_section()
class crm_installation_request(osv.osv):
_name = 'crm.installation.request'
_inherit = ['mail.thread']
_columns = {
'partner_id': fields.many2one('res.partner', 'Relatie', required=True, select=True),
'cust_zip': fields.char('Postcode'),
'zip_id': fields.many2one('res.country.city', 'Postcodetabel'),
'street_id': fields.many2one('res.country.city.street', 'Straattabel'),
'street_nbr': fields.char('Huisnummer', size=16),
'phone': fields.char('Telefoon'),
'mobile': fields.char('Mobiel'),
'email': fields.char('E-mail'),
'name': fields.char('ID'),
'user_id': fields.many2one('res.users', 'Gebruiker', required=True, select=True),
'state': fields.selection([
('new', 'Nieuw'),
('in_progress', 'In Behandeling'),
('problem', 'Probleem'),
('done', 'Ingepland'),
('cancel', 'Geannuleerd'),
], 'Status', readonly=True, track_visibility='onchange', select=True),
'case_section_id': fields.many2one('crm.case.section', 'Type thermostaat', required=True, select=True),
'software_version_id': fields.many2one('crm.software.version', 'Software versie', select=True),
'connected_to': fields.text('Aangesloten op'),
'problem': fields.text('Probleem'),
'installer_id': fields.many2one('res.partner', 'Installateur', select=True),
'request_date': fields.date('Aanvraagdatum'),
'installation_date': fields.date('Geplande installatiedatum'),
        'first_name': fields.char('Voornaam', size=24),
        'middle_name': fields.char('Tussenvoegsel(s)', size=24),
        'last_name': fields.char('Achternaam', size=24),
'one': fields.integer('Een'),
'color': fields.integer('Color Index'),
'create_partner': fields.boolean('Relatie aanmaken'),
'address': fields.char('Adres'),
}
_defaults = {
'request_date': fields.date.context_today,
'user_id': lambda obj, cr, uid, context: uid,
# 'name': lambda x, y, z, c: x.pool.get('ir.sequence').get(y, z, 'crm.installation.request'),
'state': 'new',
'one': 1,
'color': 0,
}
_order = 'id desc'
def onchange_street_id(self, cr, uid, ids, cust_zip, zip_id, street_id, street_nbr, context=None):
res = {}
zip = zip_id
street = street_id
nbr = street_nbr
partner_id = None
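        # Outline of the lookup below (descriptive comment only): resolve the
        # street and city records from the normalised postcode, build a display
        # address, then try to find an existing partner at that zip + house number.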
if cust_zip:
sql_stat = "select res_country_city_street.id as street_id, city_id, res_country_city_street.zip, res_country_city_street.name as street_name, res_country_city.name as city_name from res_country_city_street, res_country_city where replace(res_country_city_street.zip, ' ', '') = upper(replace('%s', ' ', '')) and city_id = res_country_city.id" % (cust_zip, )
cr.execute(sql_stat)
sql_res = cr.dictfetchone()
if sql_res and sql_res['street_id']:
res['street_id'] = sql_res['street_id']
res['zip_id'] = sql_res['city_id']
res['cust_zip'] = sql_res['zip']
address = sql_res['street_name']
if street_nbr:
address = address + ' ' + street_nbr
address = address + ', ' + sql_res['zip'] + ' ' + sql_res['city_name']
res['address'] = address
if zip_id and street_nbr:
sql_stat = "select id as partner_id from res_partner where zip_id = %d and trim(street_nbr) = trim('%s')" % (zip_id, street_nbr, )
cr.execute(sql_stat)
sql_res = cr.dictfetchone()
if sql_res:
if sql_res['partner_id']:
res['partner_id'] = sql_res['partner_id']
else:
res['partner_id'] = None
else:
res['partner_id'] = None
return {'value':res}
def icy_onchange_partner_id(self, cr, uid, ids, partner_id, context=None):
res = {}
if partner_id:
partner_obj = self.pool.get('res.partner')
partner = partner_obj.browse(cr, uid, partner_id, context=context)
res['phone'] = partner.phone
res['email'] = partner.email
res['mobile'] = partner.mobile
res['zip_id'] = partner.zip_id.id
res['street_id'] = partner.street_id.id
res['street_nbr'] = partner.street_nbr
res['cust_zip'] = partner.zip
else:
            res['phone'] = None
res['email'] = None
res['mobile'] = None
return {'value':res}
def button_in_progress(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'in_progress'}, context=context)
return True
def button_problem(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'problem'}, context=context)
return True
def button_done(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'done'}, context=context)
return True
def button_cancel(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'cancel'}, context=context)
return True
def button_reset(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'new'}, context=context)
return True
def onchange_create_partner(self, cr, uid, ids, partner_id, phone, email, mobile, zip_id, street_id, street_nbr, first_name, middle_name, last_name, context=None):
res = {}
if not partner_id:
obj_partner = self.pool.get('res.partner')
vals_partner = {}
cust_name = ''
if first_name:
cust_name = first_name
if middle_name:
if cust_name == '':
cust_name = middle_name
else:
cust_name = cust_name + ' ' + middle_name
if last_name:
if cust_name == '':
cust_name = last_name
else:
cust_name = cust_name + ' ' + last_name
vals_partner['name'] = cust_name
vals_partner['lang'] = "nl_NL"
vals_partner['company_id'] = 1
vals_partner['use_parent_address'] = False
vals_partner['active'] = True
sql_stat = "select res_country_city_street.name as cust_street, res_country_city.name as cust_city, res_country_city.zip as cust_zip from res_country_city_street, res_country_city where res_country_city_street.id = %d and city_id = res_country_city.id" % (street_id, )
cr.execute(sql_stat)
sql_res = cr.dictfetchone()
if sql_res and sql_res['cust_street']:
cust_street = sql_res['cust_street']
cust_zip = sql_res['cust_zip']
cust_city = sql_res['cust_city']
if street_nbr:
vals_partner['street'] = cust_street + ' ' + street_nbr
else:
vals_partner['street'] = cust_street
vals_partner['supplier'] = False
vals_partner['city'] = cust_city
vals_partner['zip'] = cust_zip
vals_partner['employee'] = False
vals_partner['installer'] = False
vals_partner['type'] = "contact"
vals_partner['email'] = email
vals_partner['phone'] = phone
vals_partner['mobile'] = mobile
vals_partner['customer'] = False
vals_partner['is_company'] = False
vals_partner['notification_email_send'] = "comment"
vals_partner['opt_out'] = False
vals_partner['display_name'] = cust_name
vals_partner['purchase_warn'] = "no-message"
vals_partner['sale_warn'] = "no-message"
vals_partner['invoice_warn'] = "no-message"
            vals_partner['picking_warn'] = "no-message"
vals_partner['received_via'] = False
vals_partner['consumer'] = True
vals_partner['subcontractor'] = False
vals_partner['zip_id'] = zip_id
vals_partner['street_id'] = street_id
vals_partner['street_nbr'] = street_nbr
vals_partner['first_name'] = first_name
vals_partner['middle_name'] = middle_name
vals_partner['last_name'] = last_name
partner_id = obj_partner.create(cr, uid, vals=vals_partner, context=context)
res['partner_id'] = partner_id
return {'value':res}
def create(self, cr, uid, vals, context=None):
vals['name'] = self.pool.get('ir.sequence').get(cr, uid, 'crm.installation.request')
if 'cust_zip' in vals and vals['cust_zip']:
sql_stat = "select res_country_city_street.id as street_id, city_id, res_country_city_street.zip, res_country_city_street.name as street_name, res_country_city.name as city_name from res_country_city_street, res_country_city where replace(res_country_city_street.zip, ' ', '') = upper(replace('%s', ' ', '')) and city_id = res_country_city.id" % (vals['cust_zip'], )
cr.execute(sql_stat)
sql_res = cr.dictfetchone()
if sql_res and sql_res['street_id']:
address = sql_res['street_name']
if 'street_nbr' in vals and vals['street_nbr']:
address = address + ' ' + vals['street_nbr']
address = address + ', ' + sql_res['zip'] + ' ' + sql_res['city_name']
vals['address'] = address
return super(crm_installation_request, self).create(cr, uid, vals, context=context)
crm_installation_request()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| lgpl-3.0 | -4,379,551,844,585,959,000 | 42.02 | 378 | 0.553045 | false | 3.595787 | false | false | false |
tomjelinek/pcs | pcs_test/tier1/cib_resource/test_stonith_create.py | 3 | 15836 | import re
from pcs_test.tier1.cib_resource.common import ResourceTest
from pcs_test.tier1.cib_resource.stonith_common import need_load_xvm_fence_agent
from pcs_test.tools.misc import is_minimum_pacemaker_version
PCMK_2_0_3_PLUS = is_minimum_pacemaker_version(2, 0, 3)
PCMK_2_0_5_PLUS = is_minimum_pacemaker_version(2, 0, 5)
ERRORS_HAVE_OCURRED = (
"Error: Errors have occurred, therefore pcs is unable to continue\n"
)
class PlainStonith(ResourceTest):
@need_load_xvm_fence_agent
def test_simplest(self):
self.assert_effect(
"stonith create S fence_xvm".split(),
"""<resources>
<primitive class="stonith" id="S" type="fence_xvm">
<operations>
<op id="S-monitor-interval-60s" interval="60s"
name="monitor"
/>
</operations>
</primitive>
</resources>""",
)
def test_base_with_agent_that_provides_unfencing(self):
self.assert_effect(
"stonith create S fence_scsi".split(),
"""<resources>
<primitive class="stonith" id="S" type="fence_scsi">
<meta_attributes id="S-meta_attributes">
<nvpair id="S-meta_attributes-provides" name="provides"
value="unfencing"
/>
</meta_attributes>
<operations>
<op id="S-monitor-interval-60s" interval="60s"
name="monitor"
/>
</operations>
</primitive>
</resources>""",
)
def test_error_when_not_valid_name(self):
self.assert_pcs_fail_regardless_of_force(
"stonith create S fence_xvm:invalid".split(),
"Error: Invalid stonith agent name 'fence_xvm:invalid'. List of"
" agents can be obtained by using command 'pcs stonith list'."
" Do not use the 'stonith:' prefix. Agent name cannot contain"
" the ':' character.\n",
)
def test_error_when_not_valid_agent(self):
error = error_re = None
if PCMK_2_0_3_PLUS:
# pacemaker 2.0.5 adds 'crm_resource:'
# The exact message returned form pacemaker differs from version to
# version (sometimes from commit to commit), so we don't check for
# the whole of it.
error_re = re.compile(
"^"
"Error: Agent 'absent' is not installed or does not provide "
"valid metadata:( crm_resource:)? Metadata query for "
"stonith:absent failed:.+"
"use --force to override\n$",
re.MULTILINE,
)
else:
error = (
"Error: Agent 'absent' is not installed or does not provide "
"valid metadata: Agent absent not found or does not support "
"meta-data: Invalid argument (22), "
"Metadata query for stonith:absent failed: Input/output error, "
"use --force to override\n"
)
self.assert_pcs_fail(
"stonith create S absent".split(),
stdout_full=error,
stdout_regexp=error_re,
)
def test_warning_when_not_valid_agent(self):
error = error_re = None
if PCMK_2_0_3_PLUS:
# pacemaker 2.0.5 adds 'crm_resource:'
# The exact message returned form pacemaker differs from version to
# version (sometimes from commit to commit), so we don't check for
# the whole of it.
error_re = re.compile(
"^"
"Warning: Agent 'absent' is not installed or does not provide "
"valid metadata:( crm_resource:)? Metadata query for "
"stonith:absent failed:.+",
re.MULTILINE,
)
else:
error = (
"Warning: Agent 'absent' is not installed or does not provide "
"valid metadata: Agent absent not found or does not support "
"meta-data: Invalid argument (22), "
"Metadata query for stonith:absent failed: Input/output error\n"
)
self.assert_effect(
"stonith create S absent --force".split(),
"""<resources>
<primitive class="stonith" id="S" type="absent">
<operations>
<op id="S-monitor-interval-60s" interval="60s"
name="monitor"
/>
</operations>
</primitive>
</resources>""",
output=error,
output_regexp=error_re,
)
@need_load_xvm_fence_agent
def test_disabled_puts_target_role_stopped(self):
self.assert_effect(
"stonith create S fence_xvm --disabled".split(),
"""<resources>
<primitive class="stonith" id="S" type="fence_xvm">
<meta_attributes id="S-meta_attributes">
<nvpair id="S-meta_attributes-target-role"
name="target-role" value="Stopped"
/>
</meta_attributes>
<operations>
<op id="S-monitor-interval-60s" interval="60s"
name="monitor"
/>
</operations>
</primitive>
</resources>""",
)
def test_debug_and_verbose_allowed(self):
self.assert_effect(
"stonith create S fence_apc ip=i username=u verbose=v debug=d".split(),
"""<resources>
<primitive class="stonith" id="S" type="fence_apc">
<instance_attributes id="S-instance_attributes">
<nvpair id="S-instance_attributes-debug"
name="debug" value="d"
/>
<nvpair id="S-instance_attributes-ip"
name="ip" value="i"
/>
<nvpair id="S-instance_attributes-username"
name="username" value="u"
/>
<nvpair id="S-instance_attributes-verbose"
name="verbose" value="v"
/>
</instance_attributes>
<operations>
<op id="S-monitor-interval-60s" interval="60s"
name="monitor"
/>
</operations>
</primitive>
</resources>""",
)
@need_load_xvm_fence_agent
def test_error_when_action_specified(self):
self.assert_pcs_fail(
"stonith create S fence_xvm action=reboot".split(),
"Error: stonith option 'action' is deprecated and should not be"
" used, use 'pcmk_off_action', 'pcmk_reboot_action' instead, "
"use --force to override\n" + ERRORS_HAVE_OCURRED,
)
@need_load_xvm_fence_agent
def test_warn_when_action_specified_forced(self):
self.assert_effect(
"stonith create S fence_xvm action=reboot --force".split(),
"""<resources>
<primitive class="stonith" id="S" type="fence_xvm">
<instance_attributes id="S-instance_attributes">
<nvpair id="S-instance_attributes-action"
name="action" value="reboot"
/>
</instance_attributes>
<operations>
<op id="S-monitor-interval-60s" interval="60s"
name="monitor"
/>
</operations>
</primitive>
</resources>""",
"Warning: stonith option 'action' is deprecated and should not be"
" used, use 'pcmk_off_action', 'pcmk_reboot_action' instead\n",
)
class WithMeta(ResourceTest):
@need_load_xvm_fence_agent
def test_simplest_with_meta_provides(self):
self.assert_effect(
"stonith create S fence_xvm meta provides=something".split(),
"""<resources>
<primitive class="stonith" id="S" type="fence_xvm">
<meta_attributes id="S-meta_attributes">
<nvpair id="S-meta_attributes-provides" name="provides"
value="something"
/>
</meta_attributes>
<operations>
<op id="S-monitor-interval-60s" interval="60s"
name="monitor"
/>
</operations>
</primitive>
</resources>""",
)
def test_base_with_agent_that_provides_unfencing_with_meta_provides(self):
self.assert_effect(
"stonith create S fence_scsi meta provides=something".split(),
"""<resources>
<primitive class="stonith" id="S" type="fence_scsi">
<meta_attributes id="S-meta_attributes">
<nvpair id="S-meta_attributes-provides" name="provides"
value="unfencing"
/>
</meta_attributes>
<operations>
<op id="S-monitor-interval-60s" interval="60s"
name="monitor"
/>
</operations>
</primitive>
</resources>""",
)
class InGroup(ResourceTest):
@need_load_xvm_fence_agent
def test_command_simply_puts_stonith_into_group(self):
self.assert_effect(
"stonith create S fence_xvm --group G".split(),
"""<resources>
<group id="G">
<primitive class="stonith" id="S" type="fence_xvm">
<operations>
<op id="S-monitor-interval-60s" interval="60s"
name="monitor"
/>
</operations>
</primitive>
</group>
</resources>""",
)
@need_load_xvm_fence_agent
def test_command_simply_puts_stonith_into_group_at_the_end(self):
self.assert_pcs_success("stonith create S1 fence_xvm --group G".split())
self.assert_effect(
"stonith create S2 fence_xvm --group G".split(),
"""<resources>
<group id="G">
<primitive class="stonith" id="S1" type="fence_xvm">
<operations>
<op id="S1-monitor-interval-60s" interval="60s"
name="monitor"
/>
</operations>
</primitive>
<primitive class="stonith" id="S2" type="fence_xvm">
<operations>
<op id="S2-monitor-interval-60s" interval="60s"
name="monitor"
/>
</operations>
</primitive>
</group>
</resources>""",
)
@need_load_xvm_fence_agent
def test_command_simply_puts_stonith_into_group_before_another(self):
self.assert_pcs_success("stonith create S1 fence_xvm --group G".split())
self.assert_effect(
"stonith create S2 fence_xvm --group G --before S1".split(),
"""<resources>
<group id="G">
<primitive class="stonith" id="S2" type="fence_xvm">
<operations>
<op id="S2-monitor-interval-60s" interval="60s"
name="monitor"
/>
</operations>
</primitive>
<primitive class="stonith" id="S1" type="fence_xvm">
<operations>
<op id="S1-monitor-interval-60s" interval="60s"
name="monitor"
/>
</operations>
</primitive>
</group>
</resources>""",
)
@need_load_xvm_fence_agent
def test_command_simply_puts_stonith_into_group_after_another(self):
self.assert_pcs_success_all(
[
"stonith create S1 fence_xvm --group G".split(),
"stonith create S2 fence_xvm --group G".split(),
]
)
self.assert_effect(
"stonith create S3 fence_xvm --group G --after S1".split(),
"""<resources>
<group id="G">
<primitive class="stonith" id="S1" type="fence_xvm">
<operations>
<op id="S1-monitor-interval-60s" interval="60s"
name="monitor"
/>
</operations>
</primitive>
<primitive class="stonith" id="S3" type="fence_xvm">
<operations>
<op id="S3-monitor-interval-60s" interval="60s"
name="monitor"
/>
</operations>
</primitive>
<primitive class="stonith" id="S2" type="fence_xvm">
<operations>
<op id="S2-monitor-interval-60s" interval="60s"
name="monitor"
/>
</operations>
</primitive>
</group>
</resources>""",
)
@need_load_xvm_fence_agent
def test_fail_when_inteded_before_item_does_not_exist(self):
self.assert_pcs_fail(
"stonith create S2 fence_xvm --group G --before S1".split(),
"Error: there is no resource 'S1' in the group 'G'\n",
)
@need_load_xvm_fence_agent
def test_fail_when_inteded_after_item_does_not_exist(self):
self.assert_pcs_fail(
"stonith create S2 fence_xvm --group G --after S1".split(),
"Error: there is no resource 'S1' in the group 'G'\n",
)
def test_fail_when_entered_both_after_and_before(self):
self.assert_pcs_fail(
"stonith create S fence_xvm --group G --after S1 --before S2".split(),
"Error: you cannot specify both --before and --after\n",
)
def test_fail_when_after_is_used_without_group(self):
self.assert_pcs_fail(
"stonith create S fence_xvm --after S1".split(),
"Error: you cannot use --after without --group\n",
)
def test_fail_when_before_is_used_without_group(self):
self.assert_pcs_fail(
"stonith create S fence_xvm --before S1".split(),
"Error: you cannot use --before without --group\n",
)
def test_fail_when_before_after_conflicts_and_moreover_without_group(self):
self.assert_pcs_fail(
"stonith create S fence_xvm --after S1 --before S2".split(),
"Error: you cannot specify both --before and --after"
" and you have to specify --group\n",
)
| gpl-2.0 | -8,530,056,943,300,553,000 | 40.025907 | 83 | 0.46849 | false | 4.39157 | true | false | false |
skirpichev/omg | diofant/tests/printing/test_mpmath.py | 2 | 1202 | from diofant import QQ, GoldenRatio, Rational, RootOf, Sum, oo, pi, sqrt
from diofant.abc import n, x
from diofant.printing.lambdarepr import MpmathPrinter
__all__ = ()
def test_basic():
p = MpmathPrinter()
assert p.doprint(GoldenRatio) == 'phi'
assert p.doprint(Rational(2)) == '2'
assert p.doprint(Rational(2, 3)) == '2*power(3, -1)'
def test_Pow():
p = MpmathPrinter()
assert p.doprint(sqrt(pi)) == 'root(pi, 2)'
assert p.doprint(pi**Rational(2, 3)) == 'root(pi, 3)**2'
assert p.doprint(pi**Rational(-2, 3)) == 'power(root(pi, 3), -2)'
assert p.doprint(pi**pi) == 'pi**pi'
def test_RootOf():
p = MpmathPrinter()
e = RootOf(x**3 + x - 1, x, 0)
r = ('findroot(lambda x: x**3 + x - 1, (%s, %s), '
"method='bisection')" % (p.doprint(QQ(0)), p.doprint(QQ(1))))
assert p.doprint(e) == r
e = RootOf(x**3 + x - 1, x, 1)
r = ('findroot(lambda x: x**3 + x - 1, mpc(%s, %s), '
"method='secant')" % (p.doprint(QQ(-3, 8)), p.doprint(QQ(-9, 8))))
assert p.doprint(e) == r
def test_Sum():
p = MpmathPrinter()
s = Sum(n**(-2), (n, 1, oo))
assert p.doprint(s) == 'nsum(lambda n: power(n, -2), (1, inf))'
| bsd-3-clause | 7,876,439,909,670,022,000 | 24.574468 | 75 | 0.549085 | false | 2.458078 | false | false | false |
CovingtonResearchGroup/olm | olm/loggers/TruBluToolkit.py | 1 | 1515 | #Tools for reading and analysis of data from TruBlu data loggers
from pandas import read_csv
from pandas import concat
from pandas import DataFrame
import os
"""
Functions to read TruBlu logger files.
"""
#read in the CSV file from a TruBlu logger and return a pandas DataFrame
def readTruBlu(csvfile):
"""
Reads data from a CSV file exported from a TruBlu logger.
Parameters
----------
csv_file : string
A string containing the file name of the CSV or MON file to be read.
Returns
-------
df : pandas.DataFrame
DataFrame containing data from a TruBlu csv file.
"""
sep = ','
header = 0
skiprows = 16 #this is somewhat weak, number of lines could change over time??
# Definitely weak. Probably an automated read to csv header would be better
index_col = 3
#names = ['ID','Name','Address','Time of Acquisition','Elapsed(Sec)','Level(PSI)','Temperature (\'C)','Battery Voltage(Volt)','Supply Voltage(Volt)','Scan No','blank']
parse_dates = True
#skip_footer = 1
#print(csvfile)
#df = read_csv(csvfile, sep=sep, names=names, skiprows=skiprows, index_col=index_col, parse_dates=parse_dates)
try:
if os.stat(csvfile).st_size > 0:
df = read_csv(csvfile, sep=sep, skiprows=skiprows, header=header, index_col=index_col, parse_dates=parse_dates)
return df
else:
print((csvfile + " is empty"))
except OSError:
print((csvfile + " does not exist"))
| mit | -6,940,888,148,651,302,000 | 31.234043 | 171 | 0.646205 | false | 3.564706 | false | false | false |
hlx98007/deployment-scripts | chef_scripts/merge.py | 1 | 5112 | #!/usr/bin/python
# Author: Luxing Huang
# Purpose: Merging a specific Chef environment into another, or into a
# template.
# Note: This can also be applied to Role files and Data Bag files, as long
# as they are valid JSON files.
#
# This script always creates a 3rd json file because we wouldn't want
# to overwrite the 2nd file, for backup purposes.
from __future__ import print_function
import json
import os
import sys
import argparse
DEFAULT_KEYWORD = "template_default"
arg = argparse.ArgumentParser(description="I merge environments/roles into each other, or to the template")
arg.add_argument("-y", "--yes", help="Assume yes to overwrite 3rd argument if that file already exists.", dest="ifyes", action="store_true", required=False)
arg.add_argument("env1", type=str, help="first environment file, e.g. env1.json")
arg.add_argument("env2", type=str, help="second environment or template, e.g. env2.json")
arg.add_argument("merged", type=str, help="The target template file name, e.g. template.json")
arg.set_defaults(ifyes=False)
args = arg.parse_args()
env1_name = ""
env2_name = ""
def merge(j_env1, j_env2):
""" Merge 2 jsons into 1 and return the combined json """
# Creating a locally empty json template for argv[3]
j_template = {}
for key in j_env1.keys():
# if env2 has no such key:
if not j_env2.has_key(key):
# add the new entry to template
j_template[key] = j_env1[key]
# On to the next key
continue
if j_env1[key] == j_env2[key]:
# Then we update the key in our template json.
j_template.update({key: j_env2[key]})
# on to the next key
continue
else:
# env1 = template, env2 = string
if isinstance(j_env1[key], dict) and isinstance(j_env2[key], unicode):
print("Please do manual integration at key %s because env1 is a dict but env2 is a string" % key)
sys.exit(2)
# If env1 = str, env2 = str
if isinstance(j_env1[key], unicode) and isinstance(j_env2[key], unicode):
# obtain the name of env2
if env2_name == "":
# if the env2 name is missing, we assume env2 is actually a template.
# so we set it as the default value.
j_template[key] = {DEFAULT_KEYWORD : j_env2[key], env1_name : j_env1[key]}
else:
# Env2 is actually an environment
j_template[key] = {DEFAULT_KEYWORD : "", env1_name : j_env1[key],
env2_name : j_env2[key]}
# On to the next key
continue
# If env2 is a template and env1 is merging into it
if isinstance(j_env1[key], unicode) and isinstance(j_env2[key], dict):
# make sure it is a templated dict
if j_env2[key].has_key(DEFAULT_KEYWORD):
# copy env2 to the new template json
j_template[key] = j_env2[key]
# add or update env1 entry to it
j_template[key].update({env1_name : j_env1[key]})
# On to the next key
continue
else:
print("env2 file does not have a %s key on parent key %s, abort." %
(DEFAULT_KEYWORD, key), file=sys.stderr)
sys.exit(1)
if isinstance(j_env1[key], dict) and isinstance(j_env2[key], dict):
                # if either side is already a templated dict, stop
if j_env1[key].has_key(DEFAULT_KEYWORD) or j_env2[key].has_key(DEFAULT_KEYWORD):
print("either environments must not be dict templates on %s." % key, file=sys.stderr)
sys.exit(2)
# Recursive call to build json's sub tree.
j_template[key] = merge(j_env1[key], j_env2[key])
continue
return j_template
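# Illustrative sketch (added for clarity; not part of the original script):
# merging an environment into a template turns plain string attributes into
# per-environment dicts keyed by DEFAULT_KEYWORD and the environment names.
# For example, with env1_name == "prod" and env2 being a template:
#
#   j_env1 = {"name": "prod", "override_attributes": {"app": {"port": "443"}}}
#   j_env2 = {"override_attributes": {"app": {"port": {"template_default": "80"}}}}
#
#   merge(j_env1, j_env2)["override_attributes"]["app"]["port"]
#   == {"template_default": "80", "prod": "443"}
#
# The attribute names above are made up for illustration only.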
if __name__ == "__main__":
# read env1 and env2 values and dict-ize
env1_fp = open(args.env1, 'r')
try:
env1_json = json.load(env1_fp)
except:
print("Cannot parse json, check if it's valid?", file=sys.stderr)
sys.exit(2)
env1_fp.close()
env2_fp = open(args.env2, 'r')
try:
env2_json = json.load(env2_fp)
except:
print("Cannot parse json, check if it's valid?", file=sys.stderr)
sys.exit(2)
env2_fp.close()
# set global name for env1/2
try:
name = env1_json['name']
except KeyError:
print("Name key not found in 1st environment. Giving up")
sys.exit(1)
if not isinstance(name, unicode):
print("file 1 must be an environment, not a template!", file=sys.stderr)
sys.exit(1)
else:
env1_name = name
try:
name = env2_json['name']
except KeyError:
print("Required name key not found in 2nd environment/template. Giving up")
sys.exit(1)
if isinstance(name, unicode):
# It's an environment
env2_name = name
else:
# It's a template
pass
merge_json = merge(env1_json, env2_json)
if args.ifyes is False:
if os.path.exists(args.merged):
answer = raw_input("Do you really want to overwrite %s? type YES to proceed: " % args.merged).strip()
if answer != "YES":
print("Abort.", file=sys.stderr)
sys.exit(2)
merge_fp = open(args.merged, 'w')
json.dump(merge_json, merge_fp, sort_keys=True, indent=2)
merge_fp.close()
| mit | -351,743,658,188,424,260 | 33.540541 | 156 | 0.62813 | false | 3.32596 | false | false | false |
mkeilman/sirepo | sirepo/job_supervisor.py | 2 | 35598 | # -*- coding: utf-8 -*-
"""TODO(e-carlin): Doc
:copyright: Copyright (c) 2019 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern import pkcollections
from pykern import pkconfig
from pykern import pkinspect
from pykern import pkio
from pykern.pkcollections import PKDict
from pykern.pkdebug import pkdp, pkdc, pkdformat, pkdlog, pkdexc
from sirepo import job
import asyncio
import contextlib
import copy
import datetime
import os
import pykern.pkio
import sirepo.auth
import sirepo.auth_db
import sirepo.http_reply
import sirepo.sim_data
import sirepo.simulation_db
import sirepo.srdb
import sirepo.srtime
import sirepo.tornado
import sirepo.util
import time
import tornado.ioloop
import tornado.locks
#: where supervisor state is persisted to disk
_DB_DIR = None
_NEXT_REQUEST_SECONDS = None
_HISTORY_FIELDS = frozenset((
'alert',
'canceledAfterSecs',
'computeJobQueued',
'computeJobSerial',
'computeJobStart',
    'computeModel',
'driverDetails',
'error',
'internalError',
'isParallel',
'isPremiumUser',
'jobRunMode',
'jobStatusMessage',
'lastUpdateTime',
'status',
))
_PARALLEL_STATUS_FIELDS = frozenset((
'computeJobHash',
'computeJobStart',
'computeModel',
'elapsedTime',
'frameCount',
'lastUpdateTime',
'percentComplete',
))
cfg = None
#: how many times restart request when Awaited() raised
_MAX_RETRIES = 10
class Awaited(Exception):
"""An await occurred, restart operation"""
pass
class ServerReq(PKDict):
def copy_content(self):
return copy.deepcopy(self.content)
def pkdebug_str(self):
c = self.get('content')
if not c:
return 'ServerReq(<no content>)'
return pkdformat('ServerReq({}, {})', c.api, c.get('computeJid'))
async def receive(self):
s = self.content.pkdel('serverSecret')
# no longer contains secret so ok to log
assert s, \
'no secret in message content={}'.format(self.content)
assert s == sirepo.job.cfg.server_secret, \
'server_secret did not match content={}'.format(self.content)
self.handler.write(await _ComputeJob.receive(self))
class SlotProxy(PKDict):
def __init__(self, **kwargs):
super().__init__(_value=None, **kwargs)
async def alloc(self, situation):
if self._value is not None:
return
try:
self._value = self._q.get_nowait()
except tornado.queues.QueueEmpty:
pkdlog('{} situation={}', self._op, situation)
with self._op.set_job_situation(situation):
self._value = await self._q.get()
raise Awaited()
def free(self):
if self._value is None:
return
self._q.task_done()
self._q.put_nowait(self._value)
self._value = None
class SlotQueue(sirepo.tornado.Queue):
def __init__(self, maxsize=1):
super().__init__(maxsize=maxsize)
for i in range(1, maxsize + 1):
self.put_nowait(i)
def sr_slot_proxy(self, op):
return SlotProxy(_op=op, _q=self)
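# Illustrative sketch (added for clarity; not part of the original module):
# SlotProxy.alloc() either returns immediately or, if it had to wait for a
# slot, keeps the slot it obtained and raises Awaited so the caller restarts
# with fresh state. Callers therefore wrap it in the retry loop used
# throughout this file, e.g. (op.cpu_slot is one such proxy):
#
#   for _ in range(_MAX_RETRIES):
#       try:
#           await op.cpu_slot.alloc('allocating cpu for op')
#           ...  # guarded work
#           break
#       except Awaited:
#           pass  # an await happened; re-check state and retry
#   ...
#   op.cpu_slot.free()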
def init():
global cfg, _DB_DIR, _NEXT_REQUEST_SECONDS, job_driver
if cfg:
return
job.init()
from sirepo import job_driver
job_driver.init(pkinspect.this_module())
cfg = pkconfig.init(
job_cache_secs=(300, int, 'when to re-read job state from disk'),
max_secs=dict(
analysis=(144, pkconfig.parse_seconds, 'maximum run-time for analysis job',),
parallel=(3600, pkconfig.parse_seconds, 'maximum run-time for parallel job (except sbatch)'),
parallel_premium=(3600*2, pkconfig.parse_seconds, 'maximum run-time for parallel job for premium user (except sbatch)'),
sequential=(360, pkconfig.parse_seconds, 'maximum run-time for sequential job'),
),
purge_non_premium_after_secs=(0, pkconfig.parse_seconds, 'how long to wait before purging non-premium users simulations'),
purge_non_premium_task_secs=(None, pkconfig.parse_seconds, 'when to clean up simulation runs of non-premium users (%H:%M:%S)'),
sbatch_poll_secs=(15, int, 'how often to poll squeue and parallel status'),
)
_DB_DIR = sirepo.srdb.supervisor_dir()
_NEXT_REQUEST_SECONDS = PKDict({
job.PARALLEL: 2,
job.SBATCH: cfg.sbatch_poll_secs,
job.SEQUENTIAL: 1,
})
sirepo.auth_db.init()
tornado.ioloop.IOLoop.current().add_callback(
_ComputeJob.purge_free_simulations,
)
async def terminate():
from sirepo import job_driver
await job_driver.terminate()
class _ComputeJob(PKDict):
instances = PKDict()
_purged_jids_cache = set()
def __init__(self, req, **kwargs):
super().__init__(
ops=[],
run_op=None,
run_dir_slot_q=SlotQueue(),
**kwargs,
)
# At start we don't know anything about the run_dir so assume ready
self.pksetdefault(db=lambda: self.__db_init(req))
self.cache_timeout_set()
def cache_timeout(self):
if self.ops:
self.cache_timeout_set()
else:
del self.instances[self.db.computeJid]
def cache_timeout_set(self):
self.timer = tornado.ioloop.IOLoop.current().call_later(
cfg.job_cache_secs,
self.cache_timeout,
)
def destroy_op(self, op):
if op in self.ops:
self.ops.remove(op)
if self.run_op == op:
self.run_op = None
def elapsed_time(self):
if not self.db.computeJobStart:
return 0
return (
sirepo.srtime.utc_now_as_int() if self._is_running_pending()
else self.db.dbUpdateTime
) - self.db.computeJobStart
@classmethod
def get_instance_or_class(cls, req):
try:
j = req.content.computeJid
except AttributeError:
return cls
self = cls.instances.pksetdefault(j, lambda: cls.__create(req))[j]
# SECURITY: must only return instances for authorized user
assert req.content.uid == self.db.uid, \
'req.content.uid={} is not same as db.uid={} for jid={}'.format(
req.content.uid,
self.db.uid,
j,
)
return self
def pkdebug_str(self):
d = self.get('db')
if not d:
return '_ComputeJob()'
return pkdformat(
'_ComputeJob({} u={} {} {})',
d.get('computeJid'),
d.get('uid'),
d.get('status'),
self.ops,
)
@classmethod
async def purge_free_simulations(cls):
def _get_uids_and_files():
r = []
u = None
p = sirepo.auth_db.UserRole.uids_of_paid_users()
for f in pkio.sorted_glob(_DB_DIR.join('*{}'.format(
sirepo.simulation_db.JSON_SUFFIX,
))):
n = sirepo.sim_data.split_jid(jid=f.purebasename).uid
if n in p or f.mtime() > _too_old \
or f.purebasename in cls._purged_jids_cache:
continue
if u != n:
# POSIT: Uid is the first part of each db file. The files are
# sorted so this should yield all of a user's files
if r:
yield u, r
u = n
r = []
r.append(f)
if r:
yield u, r
def _purge_sim(jid):
d = cls.__db_load(jid)
# OPTIMIZATION: We assume the uids_of_paid_users doesn't change very
# frequently so we don't need to check again. A user could run a sim
# at anytime so we need to check that they haven't
if d.lastUpdateTime > _too_old:
return
cls._purged_jids_cache.add(jid)
if d.status == job.JOB_RUN_PURGED:
return
p = sirepo.simulation_db.simulation_run_dir(d)
pkio.unchecked_remove(p)
n = cls.__db_init_new(d, d)
n.status = job.JOB_RUN_PURGED
cls.__db_write_file(n)
if not cfg.purge_non_premium_task_secs:
return
s = sirepo.srtime.utc_now()
u = None
f = None
try:
_too_old = (
sirepo.srtime.utc_now_as_int()
- cfg.purge_non_premium_after_secs
)
with sirepo.auth_db.session():
for u, v in _get_uids_and_files():
with sirepo.auth.set_user_outside_of_http_request(u):
for f in v:
_purge_sim(jid=f.purebasename)
await tornado.gen.sleep(0)
except Exception as e:
pkdlog('u={} f={} error={} stack={}', u, f, e, pkdexc())
finally:
tornado.ioloop.IOLoop.current().call_later(
cfg.purge_non_premium_task_secs,
cls.purge_free_simulations,
)
@classmethod
async def receive(cls, req):
if req.content.get('api') != 'api_runStatus':
pkdlog('{}', req)
try:
o = cls.get_instance_or_class(req)
return await getattr(
o,
'_receive_' + req.content.api,
)(req)
except sirepo.util.ASYNC_CANCELED_ERROR:
return PKDict(state=job.CANCELED)
except Exception as e:
pkdlog('{} error={} stack={}', req, e, pkdexc())
return sirepo.http_reply.gen_tornado_exception(e)
def set_situation(self, op, situation, exception=None):
if op.opName != job.OP_RUN:
return
s = self.db.jobStatusMessage
p = 'Exception: '
if situation is not None:
# POSIT: no other situation begins with exception
assert not s or not s.startswith(p), \
f'Trying to overwrite existing jobStatusMessage="{s}" with situation="{situation}"'
if exception is not None:
if not str(exception):
exception = repr(exception)
situation = f'{p}{exception}, while {s}'
self.__db_update(jobStatusMessage=situation)
@classmethod
def __create(cls, req):
try:
d = cls.__db_load(req.content.computeJid)
self = cls(req, db=d)
if self._is_running_pending():
#TODO(robnagler) when we reconnect with running processes at startup,
# we'll need to change this
self.__db_update(status=job.CANCELED)
return self
except Exception as e:
if pykern.pkio.exception_is_not_found(e):
return cls(req).__db_write()
raise
@classmethod
def __db_file(cls, computeJid):
return _DB_DIR.join(
computeJid + sirepo.simulation_db.JSON_SUFFIX,
)
def __db_init(self, req, prev_db=None):
self.db = self.__db_init_new(req.content, prev_db)
return self.db
@classmethod
def __db_init_new(cls, data, prev_db=None):
db = PKDict(
alert=None,
canceledAfterSecs=None,
computeJid=data.computeJid,
computeJobHash=data.computeJobHash,
computeJobQueued=0,
computeJobSerial=0,
computeJobStart=0,
computeModel=data.computeModel,
dbUpdateTime=sirepo.srtime.utc_now_as_int(),
driverDetails=PKDict(),
error=None,
history=cls.__db_init_history(prev_db),
isParallel=data.isParallel,
isPremiumUser=data.get('isPremiumUser'),
jobStatusMessage=None,
lastUpdateTime=0,
simName=None,
simulationId=data.simulationId,
simulationType=data.simulationType,
status=job.MISSING,
uid=data.uid,
)
r = data.get('jobRunMode')
if not r:
assert data.api != 'api_runSimulation', \
'api_runSimulation must have a jobRunMode content={}'.format(data)
# __db_init() will be called when runDirNotFound.
# The api_* that initiated the request may not have
# a jobRunMode (ex api_downloadDataFile). In that
# case use the existing jobRunMode because the
# request doesn't care about the jobRunMode
r = prev_db.jobRunMode
db.pkupdate(
jobRunMode=r,
nextRequestSeconds=_NEXT_REQUEST_SECONDS[r],
)
if db.isParallel:
db.parallelStatus = PKDict(
((k, 0) for k in _PARALLEL_STATUS_FIELDS),
)
return db
@classmethod
def __db_init_history(cls, prev_db):
if prev_db is None:
return []
return prev_db.history + [
PKDict(((k, v) for k, v in prev_db.items() if k in _HISTORY_FIELDS)),
]
@classmethod
def __db_load(cls, compute_jid):
v = None
f = cls.__db_file(compute_jid)
d = pkcollections.json_load_any(f)
for k in [
'alert',
'canceledAfterSecs',
'isPremiumUser',
'jobStatusMessage',
'internalError',
]:
d.setdefault(k, v)
for h in d.history:
h.setdefault(k, v)
d.pksetdefault(
computeModel=lambda: sirepo.sim_data.split_jid(compute_jid).compute_model,
dbUpdateTime=lambda: f.mtime(),
)
if 'cancelledAfterSecs' in d:
d.canceledAfterSecs = d.pkdel('cancelledAfterSecs', default=v)
for h in d.history:
h.canceledAfterSecs = d.pkdel('cancelledAfterSecs', default=v)
return d
def __db_restore(self, db):
self.db = db
self.__db_write()
def __db_update(self, **kwargs):
self.db.pkupdate(**kwargs)
return self.__db_write()
def __db_write(self):
self.db.dbUpdateTime = sirepo.srtime.utc_now_as_int()
self.__db_write_file(self.db)
return self
@classmethod
def __db_write_file(cls, db):
sirepo.util.json_dump(db, path=cls.__db_file(db.computeJid))
@classmethod
def _get_running_pending_jobs(cls, uid=None):
def _filter_jobs(job):
if uid and job.db.uid != uid:
return False
return job._is_running_pending()
def _get_header():
h = [
['App', 'String'],
['Simulation id', 'String'],
['Start', 'DateTime'],
['Last update', 'DateTime'],
['Elapsed', 'Time'],
['Status', 'String'],
]
if uid:
h.insert(l, ['Name', 'String'])
else:
h.insert(l, ['User id', 'String'])
h.extend([
['Queued', 'Time'],
['Driver details', 'String'],
['Premium user', 'String'],
])
return h
def _get_rows():
def _get_queued_time(db):
m = i.db.computeJobStart if i.db.status == job.RUNNING \
else sirepo.srtime.utc_now_as_int()
return m - db.computeJobQueued
r = []
for i in filter(_filter_jobs, cls.instances.values()):
d = [
i.db.simulationType,
i.db.simulationId,
i.db.computeJobStart,
i.db.lastUpdateTime,
i.elapsed_time(),
i.db.get('jobStatusMessage', ''),
]
if uid:
d.insert(l, i.db.simName)
else:
d.insert(l, i.db.uid)
d.extend([
_get_queued_time(i.db),
' | '.join(sorted(i.db.driverDetails.values())),
i.db.isPremiumUser,
])
r.append(d)
r.sort(key=lambda x: x[l])
return r
l = 2
return PKDict(header=_get_header(), rows=_get_rows())
def _is_running_pending(self):
return self.db.status in (job.RUNNING, job.PENDING)
def _init_db_missing_response(self, req):
self.__db_init(req, prev_db=self.db)
self.__db_write()
assert self.db.status == job.MISSING, \
'expecting missing status={}'.format(self.db.status)
return PKDict(state=self.db.status)
def _raise_if_purged_or_missing(self, req):
if self.db.status in (job.MISSING, job.JOB_RUN_PURGED):
sirepo.util.raise_not_found('purged or missing {}', req)
@classmethod
async def _receive_api_admJobs(cls, req):
return cls._get_running_pending_jobs()
async def _receive_api_downloadDataFile(self, req):
self._raise_if_purged_or_missing(req)
return await self._send_with_single_reply(
job.OP_ANALYSIS,
req,
jobCmd='download_data_file',
dataFileKey=req.content.pop('dataFileKey')
)
@classmethod
async def _receive_api_ownJobs(cls, req):
return cls._get_running_pending_jobs(uid=req.content.uid)
async def _receive_api_runCancel(self, req, timed_out_op=None):
"""Cancel a run and related ops
Analysis ops that are for a parallel run (ex. sim frames) will not
be canceled.
Args:
req (ServerReq): The cancel request
timed_out_op (_Op, Optional): the op that was timed out, which
needs to be canceled
Returns:
PKDict: Message with state=canceled
"""
def _ops_to_cancel():
r = set(
o for o in self.ops
# Do not cancel sim frames. Allow them to come back for a canceled run
if not (self.db.isParallel and o.opName == job.OP_ANALYSIS)
)
if timed_out_op in self.ops:
r.add(timed_out_op)
return r
r = PKDict(state=job.CANCELED)
if (
# a running simulation may be canceled due to a
# downloadDataFile request timeout in another browser window (only the
# computeJids must match between the two requests). This might be
# a weird UX but it's important to do, because no op should take
# longer than its timeout.
#
# timed_out_op might not be a valid request, because a new compute
# may have been started so either we are canceling a compute by
# user directive (left) or timing out an op (and canceling all).
(not self._req_is_valid(req) and not timed_out_op)
or (not self._is_running_pending() and not self.ops)
):
# job is not relevant, but let the user know it isn't running
return r
candidates = _ops_to_cancel()
c = None
o = set()
# No matter what happens the job is canceled
self.__db_update(status=job.CANCELED)
self._canceled_serial = self.db.computeJobSerial
try:
for i in range(_MAX_RETRIES):
try:
o = _ops_to_cancel().intersection(candidates)
if o:
#TODO(robnagler) cancel run_op, not just by jid, which is insufficient (hash)
if not c:
c = self._create_op(job.OP_CANCEL, req)
await c.prepare_send()
elif c:
c.destroy()
c = None
pkdlog('{} cancel={}', self, o)
for x in o:
x.destroy(cancel=True)
if timed_out_op:
self.db.canceledAfterSecs = timed_out_op.max_run_secs
if c:
c.msg.opIdsToCancel = [x.opId for x in o]
c.send()
await c.reply_get()
return r
except Awaited:
pass
else:
raise AssertionError('too many retries {}'.format(req))
finally:
if c:
c.destroy(cancel=False)
async def _receive_api_runSimulation(self, req, recursion_depth=0):
def _set_error(compute_job_serial, internal_error):
if self.db.computeJobSerial != compute_job_serial:
# Another run has started
return
self.__db_update(
error='Server error',
internalError=internal_error,
status=job.ERROR,
)
f = req.content.data.get('forceRun')
if self._is_running_pending():
if f or not self._req_is_valid(req):
return PKDict(
state=job.ERROR,
error='another browser is running the simulation',
)
return self._status_reply(req)
if (
not f
and self._req_is_valid(req)
and self.db.status == job.COMPLETED
):
# Valid, completed, transient simulation
# Read this first https://github.com/radiasoft/sirepo/issues/2007
r = await self._receive_api_runStatus(req)
if r.state == job.MISSING:
# happens when the run dir is deleted (ex _purge_free_simulations)
assert recursion_depth == 0, \
'Infinite recursion detected. Already called from self. req={}'.format(
req,
)
return await self._receive_api_runSimulation(
req,
recursion_depth + 1,
)
return r
# Forced or canceled/errored/missing/invalid so run
o = self._create_op(
job.OP_RUN,
req,
jobCmd='compute',
nextRequestSeconds=self.db.nextRequestSeconds,
)
t = sirepo.srtime.utc_now_as_int()
s = self.db.status
d = self.db
self.__db_init(req, prev_db=d)
self.__db_update(
computeJobQueued=t,
computeJobSerial=t,
computeModel=req.content.computeModel,
driverDetails=o.driver.driver_details,
# run mode can change between runs so we must update the db
jobRunMode=req.content.jobRunMode,
simName=req.content.data.models.simulation.name,
status=job.PENDING,
)
self._purged_jids_cache.discard(self.__db_file(self.db.computeJid).purebasename)
c = self.db.computeJobSerial
try:
for i in range(_MAX_RETRIES):
try:
await o.prepare_send()
self.run_op = o
o.make_lib_dir_symlink()
o.send()
r = self._status_reply(req)
assert r
o.run_callback = tornado.ioloop.IOLoop.current().call_later(
0,
self._run,
o,
)
o = None
return r
except Awaited:
pass
else:
raise AssertionError('too many retries {}'.format(req))
except sirepo.util.ASYNC_CANCELED_ERROR:
if self.pkdel('_canceled_serial') == c:
# We were canceled due to api_runCancel.
# api_runCancel destroyed the op and updated the db
raise
# There was a timeout getting the run started. Set the
# error and let the user know. The timeout has destroyed
# the op so don't need to destroy here
_set_error(c, o.internal_error)
return self._status_reply(req)
except Exception as e:
# _run destroys in the happy path (never got to _run here)
o.destroy(cancel=False)
if isinstance(e, sirepo.util.SRException) and \
e.sr_args.params.get('isGeneral'):
self.__db_restore(d)
else:
_set_error(c, o.internal_error)
raise
async def _receive_api_runStatus(self, req):
r = self._status_reply(req)
if r:
return r
r = await self._send_with_single_reply(
job.OP_ANALYSIS,
req,
jobCmd='sequential_result',
)
if r.state == job.ERROR:
return self._init_db_missing_response(req)
return r
async def _receive_api_sbatchLogin(self, req):
return await self._send_with_single_reply(job.OP_SBATCH_LOGIN, req)
async def _receive_api_simulationFrame(self, req):
if not self._req_is_valid(req):
sirepo.util.raise_not_found('invalid req={}', req)
self._raise_if_purged_or_missing(req)
return await self._send_with_single_reply(
job.OP_ANALYSIS,
req,
jobCmd='get_simulation_frame'
)
async def _receive_api_statelessCompute(self, req):
return await self._send_with_single_reply(
job.OP_ANALYSIS,
req,
jobCmd='stateless_compute'
)
def _create_op(self, opName, req, **kwargs):
#TODO(robnagler) kind should be set earlier in the queuing process.
req.kind = job.PARALLEL if self.db.isParallel and opName != job.OP_ANALYSIS \
else job.SEQUENTIAL
req.simulationType = self.db.simulationType
# run mode can change between runs so use req.content.jobRunMode
# not self.db.jobRunMode
r = req.content.get('jobRunMode', self.db.jobRunMode)
if r not in sirepo.simulation_db.JOB_RUN_MODE_MAP:
# happens only when config changes, and only when sbatch is missing
sirepo.util.raise_not_found('invalid jobRunMode={} req={}', r, req)
o = _Op(
#TODO(robnagler) don't like the camelcase. It doesn't actually work right because
# these values are never sent directly, only msg which can be camelcase
computeJob=self,
kind=req.kind,
msg=PKDict(req.copy_content()).pksetdefault(jobRunMode=r),
opName=opName,
task=asyncio.current_task(),
)
if 'dataFileKey' in kwargs:
kwargs['dataFileUri'] = job.supervisor_file_uri(
o.driver.cfg.supervisor_uri,
job.DATA_FILE_URI,
kwargs.pop('dataFileKey'),
)
o.msg.pkupdate(**kwargs)
self.ops.append(o)
return o
def _req_is_valid(self, req):
return (
self.db.computeJobHash == req.content.computeJobHash
and (
not req.content.computeJobSerial or
self.db.computeJobSerial == req.content.computeJobSerial
)
)
async def _run(self, op):
op.task = asyncio.current_task()
op.pkdel('run_callback')
try:
with op.set_job_situation('Entered __create._run'):
while True:
try:
r = await op.reply_get()
#TODO(robnagler) is this ever true?
if op != self.run_op:
return
# run_dir is in a stable state so don't need to lock
op.run_dir_slot.free()
self.db.status = r.state
self.db.alert = r.get('alert')
if self.db.status == job.ERROR:
self.db.error = r.get('error', '<unknown error>')
if 'computeJobStart' in r:
self.db.computeJobStart = r.computeJobStart
if 'parallelStatus' in r:
self.db.parallelStatus.update(r.parallelStatus)
self.db.lastUpdateTime = r.parallelStatus.lastUpdateTime
else:
# sequential jobs don't send this
self.db.lastUpdateTime = sirepo.srtime.utc_now_as_int()
#TODO(robnagler) will need final frame count
self.__db_write()
if r.state in job.EXIT_STATUSES:
break
except sirepo.util.ASYNC_CANCELED_ERROR:
return
except Exception as e:
pkdlog('error={} stack={}', e, pkdexc())
if op == self.run_op:
self.__db_update(
status=job.ERROR,
error='server error',
)
finally:
op.destroy(cancel=False)
async def _send_with_single_reply(self, opName, req, **kwargs):
o = self._create_op(opName, req, **kwargs)
try:
for i in range(_MAX_RETRIES):
try:
await o.prepare_send()
o.send()
r = await o.reply_get()
# POSIT: any api_* that could run into runDirNotFound
# will call _send_with_single_reply() and this will
# properly format the reply
if r.get('runDirNotFound'):
return self._init_db_missing_response(req)
return r
except Awaited:
pass
else:
raise AssertionError('too many retries {}'.format(req))
finally:
o.destroy(cancel=False)
def _status_reply(self, req):
def res(**kwargs):
r = PKDict(**kwargs)
if self.db.canceledAfterSecs is not None:
r.canceledAfterSecs = self.db.canceledAfterSecs
if self.db.error:
r.error = self.db.error
if self.db.alert:
r.alert = self.db.alert
if self.db.isParallel:
r.update(self.db.parallelStatus)
r.computeJobHash = self.db.computeJobHash
r.computeJobSerial = self.db.computeJobSerial
r.elapsedTime = self.elapsed_time()
if self._is_running_pending():
c = req.content
r.update(
nextRequestSeconds=self.db.nextRequestSeconds,
nextRequest=PKDict(
computeJobHash=self.db.computeJobHash,
computeJobSerial=self.db.computeJobSerial,
computeJobStart=self.db.computeJobStart,
report=c.analysisModel,
simulationId=self.db.simulationId,
simulationType=self.db.simulationType,
),
)
return r
if self.db.computeJobHash != req.content.computeJobHash:
return PKDict(state=job.MISSING, reason='computeJobHash-mismatch')
if (
req.content.computeJobSerial and
self.db.computeJobSerial != req.content.computeJobSerial
):
return PKDict(state=job.MISSING, reason='computeJobSerial-mismatch')
if self.db.isParallel or self.db.status != job.COMPLETED:
return res(
state=self.db.status,
dbUpdateTime=self.db.dbUpdateTime,
)
return None
class _Op(PKDict):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.update(
do_not_send=False,
internal_error=None,
opId=job.unique_key(),
run_dir_slot=self.computeJob.run_dir_slot_q.sr_slot_proxy(self),
_reply_q=sirepo.tornado.Queue(),
)
self.msg.update(opId=self.opId, opName=self.opName)
self.driver = job_driver.assign_instance_op(self)
self.cpu_slot = self.driver.cpu_slot_q.sr_slot_proxy(self)
q = self.driver.op_slot_q.get(self.opName)
self.op_slot = q and q.sr_slot_proxy(self)
self.max_run_secs = self._get_max_run_secs()
pkdlog('{} runDir={}', self, self.msg.get('runDir'))
def destroy(self, cancel=True, internal_error=None):
self.run_dir_slot.free()
if cancel:
if self.task:
self.task.cancel()
self.task = None
# Ops can be destroyed multiple times
# The first error is "closest to the source" so don't overwrite it
if not self.internal_error:
self.internal_error = internal_error
for x in 'run_callback', 'timer':
if x in self:
tornado.ioloop.IOLoop.current().remove_timeout(self.pkdel(x))
self.computeJob.destroy_op(self)
self.driver.destroy_op(self)
def make_lib_dir_symlink(self):
self.driver.make_lib_dir_symlink(self)
def pkdebug_str(self):
return pkdformat('_Op({}, {:.4})', self.opName, self.opId)
async def prepare_send(self):
"""Ensures resources are available for sending to agent
To maintain consistency, do not modify global state before
calling this method.
"""
await self.driver.prepare_send(self)
async def reply_get(self):
# If we get an exception (canceled), task is not done.
# Had to look at the implementation of Queue to see that
# task_done should only be called if get actually removes
# the item from the queue.
pkdlog('{} await _reply_q.get()', self)
r = await self._reply_q.get()
self._reply_q.task_done()
return r
def reply_put(self, reply):
self._reply_q.put_nowait(reply)
async def run_timeout(self):
"""Can be any op that's timed"""
pkdlog('{} max_run_secs={}', self, self.max_run_secs)
await self.computeJob._receive_api_runCancel(
ServerReq(content=self.msg),
timed_out_op=self,
)
def send(self):
if self.max_run_secs:
self.timer = tornado.ioloop.IOLoop.current().call_later(
self.max_run_secs,
self.run_timeout,
)
self.driver.send(self)
@contextlib.contextmanager
def set_job_situation(self, situation):
self.computeJob.set_situation(self, situation)
try:
yield
self.computeJob.set_situation(self, None)
except Exception as e:
pkdlog('{} situation={} stack={}', self, situation, pkdexc())
self.computeJob.set_situation(self, None, exception=e)
raise
def _get_max_run_secs(self):
if self.driver.op_is_untimed(self):
return 0
if self.opName == sirepo.job.OP_ANALYSIS:
return cfg.max_secs.analysis
if self.kind == job.PARALLEL and self.msg.get('isPremiumUser'):
return cfg.max_secs['parallel_premium']
return cfg.max_secs[self.kind]
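    # Worked example (added for clarity): with the defaults declared in init()
    # above, an analysis op gets cfg.max_secs.analysis (144s), a parallel op
    # for a premium user 7200s, any other parallel op 3600s, and a sequential
    # op 360s; ops the driver reports as untimed get 0, i.e. no timeout.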
def __hash__(self):
return hash((self.opId,))
| apache-2.0 | 7,034,954,215,981,804,000 | 34.491525 | 135 | 0.531041 | false | 4.001124 | false | false | false |
alrusdi/video-store | ticketing/models.py | 1 | 1935 | # -*- coding: utf-8 -*-
from django.db import models
from django.utils.translation import ugettext_lazy as _
from videostore.models import Video
TICKET_STATUS_CHOICES = (
('pending', _('Pending view')),
('seen', _('Seen')),
('overdue', _('Overdue')),
('blocked', _('Blocked')),
)
class Server(models.Model):
title = models.CharField(
max_length=50,
verbose_name=_('Title')
)
ip_address = models.CharField(
max_length=50,
verbose_name=_('IP address')
)
is_enabled = models.BooleanField(
verbose_name=_('Enabled?'),
default=True,
)
def __unicode__(self):
return u'%s on %s' % (self.title, self.ip_address)
class Meta:
verbose_name = _(u'Server')
verbose_name_plural = _(u'Servers')
class Ticket(models.Model):
video = models.ForeignKey(
Video,
verbose_name=_('Video')
)
hash = models.CharField(
max_length=50,
editable = False,
db_index=True,
)
created_at = models.DateTimeField(
verbose_name=_('Created time'),
auto_now_add=True,
)
seen_at = models.DateTimeField(
        verbose_name=_('Seen time'),
null=True,
)
valid_to = models.DateTimeField(
verbose_name=_('Valid to'),
null=True,
)
status = models.CharField(
max_length=50,
choices=TICKET_STATUS_CHOICES,
verbose_name=_('Status'),
default='pending',
)
client_id = models.CharField(
max_length=50,
verbose_name=_('Client id'),
)
headers = models.TextField(
verbose_name=_('Dump of viewer HTTP headers'),
editable=False,
null=True,
)
def __unicode__(self):
return u'#%s %s %s' % (self.id, self.created_at, self.status)
class Meta:
verbose_name = _('Ticket')
verbose_name_plural = _('Tickets')
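# Illustrative sketch (not part of the original app): a Ticket is normally
# created for an existing Video with a generated hash; the values below are
# assumptions for demonstration only.
#
#   import uuid
#   ticket = Ticket.objects.create(
#       video=video,                 # an existing Video instance
#       hash=uuid.uuid4().hex,       # hash is not editable, so set it here
#       client_id='client-42',
#       status='pending',
#   )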
| mit | 5,529,225,616,823,311,000 | 20.988636 | 69 | 0.555039 | false | 3.824111 | false | false | false |
hzlf/openbroadcast | website/apps/alibrary/forms/mediaforms.py | 1 | 13193 | from django import forms
from django.conf import settings
from django.forms import ModelForm, Form
from django.forms.models import BaseInlineFormSet, inlineformset_factory
from django.contrib.contenttypes.generic import BaseGenericInlineFormSet, generic_inlineformset_factory
from django.utils.translation import ugettext as _
from crispy_forms.helper import FormHelper
from crispy_forms.layout import *
from crispy_forms.bootstrap import FormActions
from filer.models.imagemodels import Image
from django.contrib.admin import widgets as admin_widgets
import autocomplete_light
from alibrary.models import Media, Relation, Artist, MediaExtraartists
from pagedown.widgets import PagedownWidget
import selectable.forms as selectable
from alibrary.lookups import ReleaseNameLookup, ArtistLookup, LicenseLookup
import floppyforms as forms
from django_date_extensions.fields import ApproximateDateFormField
from ajax_select.fields import AutoCompleteSelectField
from ajax_select import make_ajax_field
from django.forms.widgets import FileInput, HiddenInput
#from floppyforms.widgets import DateInput
from tagging.forms import TagField
from ac_tagging.widgets import TagAutocompleteTagIt
from lib.widgets.widgets import ReadOnlyIconField
ACTION_LAYOUT = action_layout = FormActions(
HTML('<button type="submit" name="save" value="save" class="btn btn-primary pull-right ajax_submit" id="submit-id-save-i-classicon-arrow-upi"><i class="icon-save icon-white"></i> Save</button>'),
HTML('<button type="reset" name="reset" value="reset" class="reset resetButton btn btn-secondary pull-right" id="reset-id-reset"><i class="icon-trash"></i> Cancel</button>'),
)
ACTION_LAYOUT_EXTENDED = action_layout = FormActions(
Field('publish', css_class='input-hidden' ),
HTML('<button type="submit" name="save" value="save" class="btn btn-primary pull-right ajax_submit" id="submit-id-save-i-classicon-arrow-upi"><i class="icon-save icon-white"></i> Save</button>'),
HTML('<button type="submit" name="save-and-publish" value="save" class="btn pull-right ajax_submit save-and-publish" id="submit-id-save-i-classicon-arrow-upi"><i class="icon-bullhorn icon-white"></i> Save & Publish</button>'),
HTML('<button type="reset" name="reset" value="reset" class="reset resetButton btn btn-secondary pull-right" id="reset-id-reset"><i class="icon-trash"></i> Cancel</button>'),
)
class MediaActionForm(Form):
def __init__(self, *args, **kwargs):
self.instance = kwargs.pop('instance', False)
super(MediaActionForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_class = 'form-horizontal'
self.helper.form_tag = False
self.helper.add_layout(ACTION_LAYOUT)
"""
if self.instance and self.instance.publish_date:
self.helper.add_layout(ACTION_LAYOUT)
else:
self.helper.add_layout(ACTION_LAYOUT_EXTENDED)
"""
publish = forms.BooleanField(required=False)
class MediaForm(ModelForm):
class Meta:
model = Media
fields = ('name', 'description', 'artist', 'tracknumber', 'mediatype', 'license', 'release', 'd_tags', 'isrc', )
def __init__(self, *args, **kwargs):
self.user = kwargs['initial']['user']
self.instance = kwargs['instance']
print self.instance
print self.user.has_perm("alibrary.edit_release")
print self.user.has_perm("alibrary.admin_release", self.instance)
self.label = kwargs.pop('label', None)
super(MediaForm, self).__init__(*args, **kwargs)
"""
Prototype function, set some fields to readonly depending on permissions
"""
"""
if not self.user.has_perm("alibrary.admin_release", self.instance):
self.fields['catalognumber'].widget.attrs['readonly'] = 'readonly'
"""
self.helper = FormHelper()
self.helper.form_id = "id_feedback_form_%s" % 'asd'
self.helper.form_class = 'form-horizontal'
self.helper.form_method = 'post'
self.helper.form_action = ''
self.helper.form_tag = False
base_layout = Fieldset(
_('General'),
LookupField('name', css_class='input-xlarge'),
LookupField('release', css_class='input-xlarge'),
LookupField('artist', css_class='input-xlarge'),
LookupField('tracknumber', css_class='input-xlarge'),
LookupField('mediatype', css_class='input-xlarge'),
)
license_layout = Fieldset(
_('License/Source'),
Field('license', css_class='input-xlarge'),
)
catalog_layout = Fieldset(
_('Label/Catalog'),
LookupField('label', css_class='input-xlarge'),
LookupField('catalognumber', css_class='input-xlarge'),
LookupField('release_country', css_class='input-xlarge'),
# LookupField('releasedate', css_class='input-xlarge'),
LookupField('releasedate_approx', css_class='input-xlarge'),
)
meta_layout = Fieldset(
'Meta',
LookupField('description', css_class='input-xxlarge'),
)
tagging_layout = Fieldset(
'Tags',
LookupField('d_tags'),
)
identifiers_layout = Fieldset(
_('Identifiers'),
LookupField('isrc', css_class='input-xlarge'),
)
layout = Layout(
base_layout,
# artist_layout,
meta_layout,
license_layout,
tagging_layout,
identifiers_layout,
)
self.helper.add_layout(layout)
d_tags = TagField(widget=TagAutocompleteTagIt(max_tags=9), required=False, label=_('Tags'))
release = selectable.AutoCompleteSelectField(ReleaseNameLookup, allow_new=True, required=True)
"""
extra_artists = forms.ModelChoiceField(Artist.objects.all(),
widget=autocomplete_light.ChoiceWidget('ArtistAutocomplete'), required=False)
"""
artist = selectable.AutoCompleteSelectField(ArtistLookup, allow_new=True, required=False)
description = forms.CharField(widget=PagedownWidget(), required=False, help_text="Markdown enabled text")
#license = selectable.AutoCompleteSelectField(LicenseLookup, widget=selectable.AutoComboboxSelectWidget(lookup_class=LicenseLookup), allow_new=False, required=False, label=_('License'))
# aliases = selectable.AutoCompleteSelectMultipleField(ArtistLookup, required=False)
# aliases = make_ajax_field(Media,'aliases','aliases',help_text=None)
#members = selectable.AutoCompleteSelectMultipleField(ArtistLookup, required=False)
def clean(self, *args, **kwargs):
cd = super(MediaForm, self).clean()
print "*************************************"
print cd
print "*************************************"
"""
if 'main_image' in cd and cd['main_image'] != None:
try:
ui = cd['main_image']
dj_file = DjangoFile(open(ui.temporary_file_path()), name='cover.jpg')
cd['main_image'], created = Image.objects.get_or_create(
original_filename='cover_%s.jpg' % self.instance.pk,
file=dj_file,
folder=self.instance.folder,
is_public=True)
except Exception, e:
print e
pass
else:
cd['main_image'] = self.instance.main_image
"""
return cd
# TODO: take a look at save
def save(self, *args, **kwargs):
return super(MediaForm, self).save(*args, **kwargs)
"""
Album Artists
"""
class BaseExtraartistFormSet(BaseInlineFormSet):
def __init__(self, *args, **kwargs):
self.instance = kwargs['instance']
super(BaseExtraartistFormSet, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_id = "id_artists_form_%s" % 'inline'
self.helper.form_class = 'form-horizontal'
self.helper.form_method = 'post'
self.helper.form_action = ''
self.helper.form_tag = False
base_layout = Row(
Column(
Field('artist', css_class='input-large'),
css_class='span5'
),
Column(
Field('profession', css_class='input-large'),
css_class='span5'
),
Column(
Field('DELETE', css_class='input-mini'),
css_class='span2'
),
css_class='albumartist-row row-fluid form-autogrow',
)
self.helper.add_layout(base_layout)
def add_fields(self, form, index):
# allow the super class to create the fields as usual
super(BaseExtraartistFormSet, self).add_fields(form, index)
# created the nested formset
try:
instance = self.get_queryset()[index]
pk_value = instance.pk
except IndexError:
instance=None
pk_value = hash(form.prefix)
class BaseExtraartistForm(ModelForm):
class Meta:
model = MediaExtraartists
parent_model = Media
fields = ('artist','profession',)
def __init__(self, *args, **kwargs):
super(BaseExtraartistForm, self).__init__(*args, **kwargs)
instance = getattr(self, 'instance', None)
artist = selectable.AutoCompleteSelectField(ArtistLookup, allow_new=True, required=False)
class BaseMediaReleationFormSet(BaseGenericInlineFormSet):
def __init__(self, *args, **kwargs):
self.instance = kwargs['instance']
super(BaseMediaReleationFormSet, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_id = "id_releasemediainline_form_%s" % 'asdfds'
self.helper.form_class = 'form-horizontal'
self.helper.form_method = 'post'
self.helper.form_action = ''
self.helper.form_tag = False
base_layout = Row(
Column(
Field('url', css_class='input-xlarge'),
css_class='span6 relation-url'
),
Column(
Field('service', css_class='input-mini'),
css_class='span4'
),
Column(
Field('DELETE', css_class='input-mini'),
css_class='span2'
),
css_class='row-fluid relation-row form-autogrow',
)
self.helper.add_layout(base_layout)
class BaseMediaReleationForm(ModelForm):
class Meta:
model = Relation
parent_model = Media
formset = BaseMediaReleationFormSet
fields = ('url','service',)
def __init__(self, *args, **kwargs):
super(BaseMediaReleationForm, self).__init__(*args, **kwargs)
instance = getattr(self, 'instance', None)
self.fields['service'].widget.instance = instance
if instance and instance.id:
self.fields['service'].widget.attrs['readonly'] = True
def clean_service(self):
return self.instance.service
service = forms.CharField(label='', widget=ReadOnlyIconField(), required=False)
url = forms.URLField(label=_('Website / URL'), required=False)
# Compose Formsets
MediaRelationFormSet = generic_inlineformset_factory(Relation,
form=BaseMediaReleationForm,
formset=BaseMediaReleationFormSet,
extra=10, exclude=('action',),
can_delete=True)
ExtraartistFormSet = inlineformset_factory(Media,
MediaExtraartists,
form=BaseExtraartistForm,
formset=BaseExtraartistFormSet,
fk_name = 'media',
extra=10,
#exclude=('position',),
can_delete=True,
can_order=False,)
| gpl-3.0 | 3,522,110,206,090,351,600 | 32.402532 | 254 | 0.555446 | false | 4.466148 | false | false | false |
newenclave/moscatell | agent/addsubsys.py | 1 | 2847 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from sys import argv
import os
def header_file( ):
"""
#ifndef SUBSYS_%ss-name%_H
#define SUBSYS_%ss-name%_H
#include "application.h"
namespace msctl { namespace agent {
class %ss-name%: public common::subsys_iface {
struct impl;
friend struct impl;
impl *impl_;
public:
%ss-name%( application *app );
static std::shared_ptr<%ss-name%> create( application *app );
static const char *name( )
{
return "%ss-name%";
}
private:
void init( ) override;
void start( ) override;
void stop( ) override;
};
}}
#endif // SUBSYS_%ss-name%_H
"""
return header_file.__doc__
def source_file( ):
"""
#include "subsys-%ss-name%.h"
#define LOG(lev) log_(lev, "%ss-name%")
#define LOGINF LOG(logger_impl::level::info)
#define LOGDBG LOG(logger_impl::level::debug)
#define LOGERR LOG(logger_impl::level::error)
#define LOGWRN LOG(logger_impl::level::warning)
namespace msctl { namespace agent {
struct %ss-name%::impl {
application *app_;
%ss-name% *parent_;
logger_impl &log_;
impl( application *app )
:app_(app)
,log_(app_->log( ))
{ }
};
%ss-name%::%ss-name%( application *app )
:impl_(new impl(app))
{
impl_->parent_ = this;
}
void %ss-name%::init( )
{ }
void %ss-name%::start( )
{
impl_->LOGINF << "Started.";
}
void %ss-name%::stop( )
{
impl_->LOGINF << "Stopped.";
}
std::shared_ptr<%ss-name%> %ss-name%::create( application *app )
{
return std::make_shared<%ss-name%>( app );
}
}}
"""
return source_file.__doc__
def usage( ):
"""
usage: addsubsys.py <subsystem-name>
"""
print( usage.__doc__ )
def fix_iface_inc( ss_name ):
src_path = os.path.join( 'subsys.inc' )
s = open( src_path, 'r' );
content = s.readlines( )
s.close()
content.append( '#include "subsys-' + ss_name + '.h"\n')
s = open( src_path, 'w' );
s.writelines( content )
if __name__ == '__main__':
if len( argv ) < 2:
usage( )
exit( 1 )
ss_file = argv[1]
ss_name = ss_file # ss_file.replace( '-', '_' )
src_name = 'subsys-' + ss_file + '.cpp';
hdr_name = 'subsys-' + ss_file + '.h';
if os.path.exists( src_name ) or os.path.exists( hdr_name ):
print ( "File already exists" )
exit(1)
src_content = source_file( ).replace( '%ss-name%', ss_name )
hdr_content = header_file( ).replace( '%ss-name%', ss_name )
s = open( src_name, 'w' );
s.write( src_content )
h = open( hdr_name, 'w' );
h.write( hdr_content )
fix_iface_inc( ss_name )
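# Illustrative sketch (not part of the original script): running
#
#   ./addsubsys.py listener
#
# from a directory that already contains subsys.inc creates
# subsys-listener.cpp and subsys-listener.h with the %ss-name% placeholders
# replaced by "listener", and appends '#include "subsys-listener.h"' to
# subsys.inc. The subsystem name "listener" is made up for illustration.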
| gpl-3.0 | 4,424,536,121,336,183,300 | 19.933824 | 69 | 0.517387 | false | 3.108079 | false | false | false |
jinyu121/HowOldAreYou | HowOldWebsite/trainers/util_trainer.py | 1 | 4606 | # -*- coding: UTF-8 -*-
import os
import threading
import django.conf
from HowOldWebsite.models import RecordFace
from HowOldWebsite.utils.image import do_imread
from HowOldWebsite.utils.image import do_rgb2gray
from HowOldWebsite.utils.language import reflect_get_class
__author__ = 'Hao Yu'
class UtilTrainer:
__busy = False
# __threads = []
@classmethod
def is_busy(cls):
return cls.__busy
@classmethod
def train(cls, request):
if cls.is_busy():
return False
cls.__busy = True
# Is it OK?
th = threading.Thread(target=UtilTrainer.__train_main, args=(request,))
th.start()
return True
@classmethod
def __train_main(cls, model_names):
model_names = [m.lower() for m in model_names]
print("=" * 10 + " Train Start " + "=" * 10)
try:
faces = RecordFace.objects.filter(used_flag=1)
if not django.conf.settings.DEBUG:
if len(faces) < 100:
print("Error: The training set is too small.")
print("\t Skip the training!")
raise Exception()
image_jar = dict()
feature_jar = dict()
target_jar = dict()
estimator_jar = dict()
threads = list()
# Get estimator class
for m in model_names:
class_estimator = 'HowOldWebsite.estimators.estimator_{}.Estimator{}'.format(
m, m.capitalize()
)
estimator_jar[m] = reflect_get_class(class_estimator)
for face in faces:
face_id = face.id
# Get image
face_filename_color = os.path.join(
django.conf.settings.SAVE_DIR['FACE'],
str(face_id) + '.jpg'
)
# face_filename_gray = os.path.join(SAVE_DIR['FACE_GRAY'], str(face_id) + '.jpg')
cv_face_image = do_imread(face_filename_color)
cv_face_gray = do_rgb2gray(cv_face_image)
if 'rgb' not in image_jar.keys():
image_jar['rgb'] = list()
image_jar['rgb'].append(cv_face_image)
if 'gray' not in image_jar.keys():
image_jar['gray'] = list()
image_jar['gray'].append(cv_face_gray)
# Get target
if 'sex' not in target_jar.keys():
target_jar['sex'] = list()
target_jar['sex'].append((face.recordsex_set.first()).value_user)
if 'age' not in target_jar.keys():
target_jar['age'] = list()
target_jar['age'].append((face.recordage_set.first()).value_user)
if 'smile' not in target_jar.keys():
target_jar['smile'] = list()
target_jar['smile'].append((face.recordsmile_set.first()).value_user)
# Extract features
for m in model_names:
feature_jar = estimator_jar[m].feature_extract(feature_jar, image_jar)
# Train
for m in model_names:
th = threading.Thread(target=cls.__do_thread_train,
args=(m,
estimator_jar[m],
feature_jar,
target_jar[m])
)
threads.append(th)
th.start()
for item in threads:
item.join()
# Change the used flag
if not django.conf.settings.DEBUG:
faces.update(used_flag=2)
except Exception as e:
# print(e)
print("Error occurred while training")
pass
print("=" * 10 + " Train Finish " + "=" * 10)
# Set the busy flag
UtilTrainer.__busy = False
@classmethod
def __do_thread_train(cls, model_name, estimator, feature_jar, target):
print("{} Start".format(model_name.capitalize()))
try:
class_worker = 'HowOldWebsite.trainers.trainer_{}.Trainer{}'.format(
model_name, model_name.capitalize()
)
obj_worker = reflect_get_class(class_worker)
worker = obj_worker(estimator)
worker.train(feature_jar, target)
except Exception as e:
print(e)
pass
print("{} OK".format(model_name.capitalize()))
| gpl-3.0 | 5,053,455,212,866,597,000 | 31.20979 | 97 | 0.490664 | false | 4.312734 | false | false | false |
philipn/sycamore | Sycamore/buildDB.py | 2 | 50124 | # -*- coding: utf-8 -*-
"""
Build a wiki database from scratch. You should run this the FIRST TIME you install your wiki.
"""
# Imports
import sys
import os
import shutil
import time
from copy import copy
import __init__ # woo hackmagic
__directory__ = os.path.dirname(__file__)
share_directory = os.path.abspath(os.path.join(__directory__, '..', 'share'))
sys.path.extend([share_directory])
from Sycamore import wikidb
from Sycamore import config
from Sycamore import wikiutil
from Sycamore import maintenance
from Sycamore import wikiacl
from Sycamore.wikiutil import quoteFilename, unquoteFilename
from Sycamore.action import Files
class FlatPage(object):
"""
A basic flat page object containing text and possibly files to be imported.
"""
def __init__(self, text=""):
self.text = text
self.files = []
self.acl = None
def add_file(self, filename, filecontent):
self.files.append((filename, filecontent))
def parseACL(text):
groupdict = None
lines = text.split('\n')
if lines:
groupdict = {}
for line in lines:
line = line.strip()
if not line:
continue
groupname = line[:line.find(':')]
rights = line[line.find(':')+1:].split(',')
for right in rights:
if right == 'none':
groupdict[groupname] = [False, False, False, False]
break
if not groupdict.has_key(groupname):
groupdict[groupname] = [False, False, False, False]
groupdict[groupname][wikiacl.ACL_RIGHTS_TABLE[right]] = True
return groupdict
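# Illustrative sketch (not part of the original module): given an "acl" file
# whose text looks like
#
#   Admin:read,edit,delete,admin
#   All:read
#   Banned:none
#
# parseACL() returns {'Admin': [...], 'All': [...], 'Banned': [False, False,
# False, False]}, where each list holds one boolean per right and the position
# of each True is determined by wikiacl.ACL_RIGHTS_TABLE. The group and right
# names above are examples; real right names must match ACL_RIGHTS_TABLE keys.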
def init_basic_pages(prefix='common'):
"""
Initializes basic pages from share/initial_pages directory.
"""
pages = {}
# We do the basic database population here
page_list = map(unquoteFilename,
filter(lambda x: not x.startswith('.'),
os.listdir(os.path.join(share_directory,
'initial_pages',
prefix))))
for pagename in page_list:
page_loc = os.path.join(share_directory, 'initial_pages', prefix,
quoteFilename(pagename))
page_text_file = open(os.path.join(page_loc, "text"))
page_text = ''.join(page_text_file.readlines())
page_text_file.close()
pages[pagename] = FlatPage(text=page_text)
if os.path.exists(os.path.join(page_loc, "files")):
file_list = map(unquoteFilename,
filter(lambda x: not x.startswith('.'),
os.listdir(os.path.join(page_loc,
"files"))))
for filename in file_list:
file = open(os.path.join(page_loc, "files",
quoteFilename(filename)))
file_content = ''.join(file.readlines())
file.close()
pages[pagename].files.append((filename, file_content))
if os.path.exists(os.path.join(page_loc, "acl")):
file = open(os.path.join(page_loc, "acl"), "r")
text = ''.join(file.readlines())
file.close()
pages[pagename].acl = parseACL(text)
return pages
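# Illustrative sketch (not part of the original module): init_basic_pages()
# expects a layout like
#
#   share/initial_pages/<prefix>/<quoted page name>/text    (page markup)
#   share/initial_pages/<prefix>/<quoted page name>/files/<quoted file name>
#   share/initial_pages/<prefix>/<quoted page name>/acl     (optional ACL text)
#
# so pages = init_basic_pages('common') yields a dict mapping page names to
# FlatPage objects with .text, .files and .acl filled in.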
def init_db(cursor):
if config.db_type == 'postgres':
cursor.execute("""CREATE FUNCTION UNIX_TIMESTAMP(timestamp)
RETURNS integer AS 'SELECT date_part(''epoch'', $1)::int4
AS result' language 'sql'""",
isWrite=True)
def create_tables(cursor):
print "creating tables.."
if config.db_type == 'mysql':
cursor.execute("""create table curPages
(
name varchar(100) not null,
text mediumtext,
cachedText mediumblob,
editTime double,
cachedTime double,
userEdited char(20),
propercased_name varchar(100) not null,
wiki_id int,
primary key (name, wiki_id)
) ENGINE=InnoDB CHARACTER SET utf8;""", isWrite=True)
elif config.db_type == 'postgres':
cursor.execute("""create table curPages
(
name varchar(100) not null,
text text,
cachedText bytea,
editTime double precision,
cachedTime double precision,
userEdited char(20),
propercased_name varchar(100) not null,
wiki_id int,
primary key (name, wiki_id)
)""", isWrite=True)
cursor.execute("CREATE INDEX curPages_userEdited on curPages (userEdited)")
if config.db_type == 'mysql':
cursor.execute("""create table allPages
(
name varchar(100) not null,
text mediumtext,
editTime double,
userEdited char(20),
editType CHAR(30) CHECK (editType in
('SAVE','SAVENEW','ATTNEW','ATTDEL','RENAME','NEWEVENT',
'COMMENT_MACRO','SAVE/REVERT','DELETE', 'SAVEMAP')),
comment varchar(194),
userIP char(16),
propercased_name varchar(100) not null,
wiki_id int,
primary key(name, editTime, wiki_id)
) ENGINE=InnoDB CHARACTER SET utf8;""", isWrite=True)
elif config.db_type == 'postgres':
cursor.execute("""create table allPages
(
name varchar(100) not null,
text text,
editTime double precision,
userEdited char(20),
editType CHAR(30) CHECK (editType in
('SAVE','SAVENEW','ATTNEW','ATTDEL','RENAME','NEWEVENT',
'COMMENT_MACRO','SAVE/REVERT','DELETE', 'SAVEMAP')),
comment varchar(194),
userIP inet,
propercased_name varchar(100) not null,
wiki_id int,
primary key (name, editTime, wiki_id)
);""", isWrite=True)
cursor.execute("""CREATE INDEX allPages_userEdited on
allPages (userEdited);""", isWrite=True)
cursor.execute("""CREATE INDEX allPages_userIP on
allPages (userIP);""", isWrite=True)
cursor.execute("""CREATE INDEX editTime_wiki_id on
allPages (editTime, wiki_id);""", isWrite=True) #for local-wiki changes
cursor.execute("""CREATE INDEX editTime on
allPages (editTime);""", isWrite=True) # global changes
if config.db_type == 'mysql':
cursor.execute("""create table users
(
id char(20) primary key not null,
name varchar(100) unique not null,
email varchar(255),
enc_password varchar(255),
language varchar(80),
remember_me tinyint,
css_url varchar(255),
disabled tinyint,
edit_cols smallint,
edit_rows smallint,
edit_on_doubleclick tinyint,
theme_name varchar(40),
last_saved double,
join_date double,
created_count int default 0,
edit_count int default 0,
file_count int default 0,
last_page_edited varchar(255),
last_edit_date double,
rc_bookmark double,
rc_showcomments tinyint default 1,
tz varchar(50),
propercased_name varchar(100) not null,
last_wiki_edited int,
wiki_for_userpage varchar(100),
rc_group_by_wiki BOOLEAN default false
) ENGINE=InnoDB CHARACTER SET utf8;""", isWrite=True)
elif config.db_type == 'postgres':
cursor.execute("""create table users
(
id char(20) primary key not null,
name varchar(100) unique not null,
email varchar(255),
enc_password varchar(255),
language varchar(80),
remember_me smallint,
css_url varchar(255),
disabled smallint,
edit_cols smallint,
edit_rows smallint,
edit_on_doubleclick smallint,
theme_name varchar(40),
last_saved double precision,
join_date double precision,
created_count int default 0,
edit_count int default 0,
file_count int default 0,
last_page_edited varchar(255),
last_edit_date double precision,
rc_bookmark double precision,
rc_showcomments smallint default 1,
tz varchar(50),
propercased_name varchar(100) not null,
last_wiki_edited int,
wiki_for_userpage varchar(100),
rc_group_by_wiki boolean default false,
CHECK (disabled IN ('0', '1')),
CHECK (remember_me IN ('0', '1')),
CHECK (rc_showcomments IN ('0', '1'))
);""", isWrite=True)
cursor.execute("CREATE INDEX users_name on users (name);", isWrite=True)
if config.db_type == 'mysql':
cursor.execute("""create table userFavorites
(
username varchar(100) not null,
page varchar(100) not null,
viewTime double,
wiki_name varchar(100) not null,
primary key (username, page, wiki_name)
) ENGINE=InnoDB CHARACTER SET utf8;""", isWrite=True)
elif config.db_type == 'postgres':
cursor.execute("""create table userFavorites
(
username varchar(100) not null,
page varchar(100) not null,
viewTime double precision,
wiki_name varchar(100) not null,
primary key (username, page, wiki_name)
);""", isWrite=True)
cursor.execute("""CREATE INDEX userfavorites_username on
userFavorites(username);""", isWrite=True)
if config.db_type == 'mysql':
cursor.execute("""create table userWatchedWikis
(
username varchar(100) not null,
wiki_name varchar(100) not null,
primary key (username, wiki_name)
) ENGINE=InnoDB CHARACTER SET utf8;""", isWrite=True)
elif config.db_type == 'postgres':
cursor.execute("""create table userWatchedWikis
(
username varchar(100) not null,
wiki_name varchar(100) not null,
primary key (username, wiki_name)
);""", isWrite=True)
cursor.execute("""CREATE INDEX userWatchedWikis_username on
userWatchedWikis (username);""", isWrite=True)
cursor.execute("""CREATE INDEX userWatchedWikis_wiki_name on
userWatchedWikis (wiki_name);""", isWrite=True)
if config.db_type == 'mysql':
cursor.execute("""create table userPageOnWikis
(
username varchar(100) not null,
wiki_name varchar(100) not null,
primary key (username, wiki_name)
) ENGINE=InnoDB CHARACTER SET utf8;""", isWrite=True)
elif config.db_type == 'postgres':
cursor.execute("""create table userPageOnWikis
(
username varchar(100) not null,
wiki_name varchar(100) not null,
primary key (username, wiki_name)
);""", isWrite=True)
cursor.execute("""CREATE INDEX userPageOnWikis_username on
userPageOnWikis (username);""", isWrite=True)
if config.db_type == 'mysql':
#This is throw-away data. User sessions aren't that important
# so we'll use a MyISAM table for speed
cursor.execute("""create table userSessions
(
user_id char(20) not null,
session_id char(28) not null,
secret char(28) not null,
expire_time double,
primary key (user_id, session_id)
) ENGINE=MyISAM CHARACTER SET utf8;""", isWrite=True)
elif config.db_type == 'postgres':
cursor.execute("""create table userSessions
(
user_id char(20) not null,
session_id char(28) not null,
secret char(28) not null,
expire_time double precision,
primary key (user_id, session_id)
);""", isWrite=True)
cursor.execute("""CREATE INDEX userSessions_expire_time on
userSessions (expire_time);""", isWrite=True)
if config.db_type == 'mysql':
cursor.execute("""create table links
(
source_pagename varchar(100) not null,
destination_pagename varchar(100) not null,
destination_pagename_propercased varchar(100) not null,
wiki_id int,
primary key (source_pagename, destination_pagename, wiki_id)
) ENGINE=InnoDB CHARACTER SET utf8;""", isWrite=True)
elif config.db_type == 'postgres':
cursor.execute("""create table links
(
source_pagename varchar(100) not null,
destination_pagename varchar(100) not null,
destination_pagename_propercased varchar(100) not null,
wiki_id int,
primary key (source_pagename, destination_pagename, wiki_id)
);""", isWrite=True)
cursor.execute("""CREATE INDEX links_source_pagename_wiki_id on
links (source_pagename, wiki_id);""", isWrite=True)
cursor.execute("""CREATE INDEX links_destination_pagename_wiki_id on
links (destination_pagename, wiki_id);""", isWrite=True)
if config.db_type == 'mysql':
cursor.execute("""create table events
(
uid int not null AUTO_INCREMENT primary key,
event_time double not null,
posted_by varchar(100),
text mediumtext not null,
location mediumtext not null,
event_name mediumtext not null,
posted_by_ip char(16),
posted_time double,
wiki_id int
) ENGINE=InnoDB CHARACTER SET utf8;""", isWrite=True)
cursor.execute("ALTER TABLE events AUTO_INCREMENT = 1;", isWrite=True)
elif config.db_type == 'postgres':
cursor.execute("""CREATE sequence events_seq
start 1 increment 1;""", isWrite=True)
cursor.execute("""create table events
(
uid int primary key not null,
event_time double precision not null,
posted_by varchar(100),
text text not null,
location text not null,
event_name text not null,
posted_by_ip inet,
posted_time double precision,
wiki_id int
);""", isWrite=True)
cursor.execute("""CREATE INDEX events_event_time on
events (event_time, wiki_id);""", isWrite=True)
cursor.execute("""CREATE INDEX events_posted_by on
events (posted_by);""", isWrite=True)
cursor.execute("""CREATE INDEX events_posted_by_ip on
events (posted_by_ip);""", isWrite=True)
cursor.execute("""CREATE INDEX events_posted_time on
events (posted_time);""", isWrite=True) # global events
cursor.execute("""CREATE INDEX events_posted_time_wiki_id on
events (posted_time, wiki_id);""", isWrite=True) # global events
if config.db_type == 'mysql':
cursor.execute("""create table files
(
name varchar(100) not null,
file mediumblob not null,
uploaded_time double not null,
uploaded_by char(20),
attached_to_pagename varchar(255) not null,
uploaded_by_ip char(16),
attached_to_pagename_propercased varchar(255) not null,
wiki_id int,
primary key (name, attached_to_pagename, wiki_id)
) ENGINE=InnoDB CHARACTER SET utf8;""", isWrite=True)
elif config.db_type == 'postgres':
cursor.execute("""create table files
(
name varchar(100) not null,
file bytea not null,
uploaded_time double precision not null,
uploaded_by char(20),
attached_to_pagename varchar(255) not null,
uploaded_by_ip inet,
attached_to_pagename_propercased varchar(255) not null,
wiki_id int,
primary key (name, attached_to_pagename, wiki_id)
);""", isWrite=True)
cursor.execute("""CREATE INDEX files_uploaded_by on
files (uploaded_by);""", isWrite=True)
cursor.execute("""CREATE INDEX files_uploaded_time on
files (uploaded_time);""", isWrite=True) # global rc
cursor.execute("""CREATE INDEX files_uploaded_time_wiki_id on
files (uploaded_time, wiki_id);""", isWrite=True) # local rc
if config.db_type == 'mysql':
cursor.execute("""create table oldFiles
(
name varchar(100) not null,
file mediumblob not null,
uploaded_time double not null,
uploaded_by char(20),
attached_to_pagename varchar(255) not null,
deleted_time double,
deleted_by char(20),
uploaded_by_ip char(16),
deleted_by_ip char(16),
attached_to_pagename_propercased varchar(255) not null,
wiki_id int,
primary key (name, attached_to_pagename, uploaded_time, wiki_id)
) ENGINE=InnoDB CHARACTER SET utf8;""", isWrite=True)
elif config.db_type == 'postgres':
cursor.execute("""create table oldFiles
(
name varchar(100) not null,
file bytea not null,
uploaded_time double precision not null,
uploaded_by char(20),
attached_to_pagename varchar(255) not null,
deleted_time double precision,
deleted_by char(20),
uploaded_by_ip inet,
deleted_by_ip inet,
attached_to_pagename_propercased varchar(255) not null,
wiki_id int,
primary key (name, attached_to_pagename, uploaded_time, wiki_id)
);""", isWrite=True)
cursor.execute("""CREATE INDEX oldFiles_deleted_time on
oldFiles (deleted_time);""", isWrite=True) # global rc
cursor.execute("""CREATE INDEX oldFiles_deleted_time_wiki_id on
oldFiles (deleted_time, wiki_id);""", isWrite=True) # local rc
if config.db_type == 'mysql':
#throw-away and easily regenerated data
cursor.execute("""create table thumbnails
(
xsize smallint,
ysize smallint,
name varchar(100) not null,
attached_to_pagename varchar(100) not null,
image mediumblob not null,
last_modified double,
wiki_id int,
primary key (name, attached_to_pagename, wiki_id)
) ENGINE=MyISAM CHARACTER SET utf8;""", isWrite=True)
elif config.db_type == 'postgres':
cursor.execute("""create table thumbnails
(
xsize smallint,
ysize smallint,
name varchar(100) not null,
attached_to_pagename varchar(100) not null,
image bytea not null,
last_modified double precision,
wiki_id int,
primary key (name, attached_to_pagename, wiki_id)
);""", isWrite=True)
if config.db_type == 'mysql':
cursor.execute("""create table imageInfo
(
name varchar(100) not null,
attached_to_pagename varchar(255) not null,
xsize smallint,
ysize smallint,
wiki_id int,
primary key (name, attached_to_pagename, wiki_id)
) ENGINE=InnoDB CHARACTER SET utf8;""", isWrite=True)
elif config.db_type == 'postgres':
cursor.execute("""create table imageInfo
(
name varchar(100) not null,
attached_to_pagename varchar(255) not null,
xsize smallint,
ysize smallint,
wiki_id int,
primary key (name, attached_to_pagename, wiki_id)
);""", isWrite=True)
if config.db_type == 'mysql':
cursor.execute("""create table oldImageInfo
(
name varchar(100) not null,
attached_to_pagename varchar(255) not null,
xsize smallint,
ysize smallint,
uploaded_time double not null,
wiki_id int,
primary key (name, attached_to_pagename, uploaded_time, wiki_id)
) ENGINE=InnoDB CHARACTER SET utf8;""", isWrite=True)
elif config.db_type == 'postgres':
cursor.execute("""create table oldImageInfo
(
name varchar(100) not null,
attached_to_pagename varchar(255) not null,
xsize smallint,
ysize smallint,
uploaded_time double precision not null,
wiki_id int,
primary key (name, attached_to_pagename, uploaded_time, wiki_id)
);""", isWrite=True)
if config.db_type == 'mysql':
cursor.execute("""create table imageCaptions
(
image_name varchar(100) not null,
attached_to_pagename varchar(100) not null,
linked_from_pagename varchar(100),
caption text not null,
wiki_id int,
primary key
(image_name, attached_to_pagename, linked_from_pagename, wiki_id)
) ENGINE=InnoDB CHARACTER SET utf8;""", isWrite=True)
elif config.db_type == 'postgres':
cursor.execute("""create table imageCaptions
(
image_name varchar(100) not null,
attached_to_pagename varchar(100) not null,
linked_from_pagename varchar(100),
caption text not null,
wiki_id int,
primary key
(image_name, attached_to_pagename, linked_from_pagename, wiki_id)
);""", isWrite=True)
if config.db_type == 'mysql':
cursor.execute("""create table mapCategoryDefinitions
(
id int not null,
img varchar(100),
name varchar(100) not null,
wiki_id int,
primary key (id, wiki_id)
) ENGINE=InnoDB CHARACTER SET utf8;""", isWrite=True)
elif config.db_type == 'postgres':
cursor.execute("""create table mapCategoryDefinitions
(
id int not null,
img varchar(100),
name varchar(100) not null,
wiki_id int,
primary key (id, wiki_id)
);""", isWrite=True)
if config.db_type == 'mysql':
cursor.execute("""create table mapPoints
(
pagename varchar(100) not null,
x varchar(100) not null,
y varchar(100) not null,
created_time double,
created_by char(20),
created_by_ip char(16),
pagename_propercased varchar(100) not null,
address varchar(255),
wiki_id int,
primary key (pagename, x, y, wiki_id)
) ENGINE=InnoDB CHARACTER SET utf8;""", isWrite=True)
elif config.db_type == 'postgres':
cursor.execute("""create table mapPoints
(
pagename varchar(100) not null,
x varchar(100) not null,
y varchar(100) not null,
created_time double precision,
created_by char(20),
created_by_ip inet,
pagename_propercased varchar(100) not null,
address varchar(255),
wiki_id int,
primary key (pagename, x, y, wiki_id)
);""", isWrite=True)
cursor.execute("""CREATE INDEX mapPoints_pagename_wiki_id on
mapPoints (pagename, wiki_id);""", isWrite=True)
cursor.execute("""CREATE INDEX mapPoints_x on
mapPoints (x);""", isWrite=True)
cursor.execute("""CREATE INDEX mapPoints_y on
mapPoints (y);""", isWrite=True)
cursor.execute("""CREATE INDEX mapPoints_wiki on
mapPoints (wiki_id);""", isWrite=True)
cursor.execute("""CREATE INDEX mapPoints_created_time on
mapPoints (created_time);""", isWrite=True) # global rc
cursor.execute("""CREATE INDEX mapPoints_created_time_wiki_id on
mapPoints (created_time, wiki_id);""", isWrite=True) # local rc
cursor.execute("""CREATE INDEX mapPoints_address on
mapPoints (address);""", isWrite=True)
if config.db_type == 'mysql':
cursor.execute("""create table oldMapPoints
(
pagename varchar(100) not null,
x varchar(100) not null,
y varchar(100) not null,
created_time double,
created_by char(20),
created_by_ip char(16),
deleted_time double,
deleted_by char(20),
deleted_by_ip char(16),
pagename_propercased varchar(100) not null,
address varchar(255),
wiki_id int,
primary key (pagename, x, y, deleted_time, wiki_id)
) ENGINE=InnoDB CHARACTER SET utf8;""", isWrite=True)
elif config.db_type == 'postgres':
cursor.execute("""create table oldMapPoints
(
pagename varchar(100) not null,
x varchar(100) not null,
y varchar(100) not null,
created_time double precision,
created_by char(20),
created_by_ip inet,
deleted_time double precision,
deleted_by char(20),
deleted_by_ip inet,
pagename_propercased varchar(100) not null,
address varchar(255),
wiki_id int,
primary key (pagename, x, y, deleted_time, wiki_id)
);""", isWrite=True)
cursor.execute("""CREATE INDEX oldMapPoints_deleted_time on
oldMapPoints (deleted_time);""", isWrite=True) # global rc
cursor.execute("""CREATE INDEX oldMapPoints_deleted_time_wiki_id on
oldMapPoints (deleted_time, wiki_id);""", isWrite=True) # local rc
cursor.execute("""CREATE INDEX oldMapPoints_created_time on
oldMapPoints (created_time);""", isWrite=True) # global rc
cursor.execute("""CREATE INDEX oldMapPoints_created_time_wiki_id on
oldMapPoints (created_time, wiki_id);""", isWrite=True) # local rc
if config.db_type == 'mysql':
cursor.execute("""create table mapPointCategories
(
pagename varchar(100) not null,
x varchar(100) not null,
y varchar(100) not null,
id int not null,
wiki_id int,
primary key (pagename, x, y, id, wiki_id)
) ENGINE=InnoDB CHARACTER SET utf8;""", isWrite=True)
elif config.db_type == 'postgres':
cursor.execute("""create table mapPointCategories
(
pagename varchar(100) not null,
x varchar(100) not null,
y varchar(100) not null,
id int not null,
wiki_id int,
primary key (pagename, x, y, id, wiki_id)
);""", isWrite=True)
if config.db_type == 'mysql':
cursor.execute("""create table oldMapPointCategories
(
pagename varchar(100) not null,
x varchar(100) not null,
y varchar(100) not null,
id int not null,
deleted_time double,
wiki_id int,
primary key (pagename, x, y, id, deleted_time, wiki_id)
) ENGINE=InnoDB CHARACTER SET utf8;""", isWrite=True)
elif config.db_type == 'postgres':
cursor.execute("""create table oldMapPointCategories
(
pagename varchar(100) not null,
x varchar(100) not null,
y varchar(100) not null,
id int not null,
deleted_time double precision,
wiki_id int,
primary key (pagename, x, y, id, deleted_time, wiki_id)
);""", isWrite=True)
if config.db_type == 'mysql':
cursor.execute("""create table pageDependencies
(
page_that_depends varchar(100) not null,
source_page varchar(100) not null,
wiki_id int,
primary key (page_that_depends, source_page, wiki_id)
) ENGINE=InnoDB CHARACTER SET utf8;""", isWrite=True)
elif config.db_type == 'postgres':
cursor.execute("""create table pageDependencies
(
page_that_depends varchar(100) not null,
source_page varchar(100) not null,
wiki_id int,
primary key (page_that_depends, source_page, wiki_id)
);""", isWrite=True)
if config.db_type == 'mysql':
cursor.execute("""create table metadata
(
pagename varchar(100),
type varchar(100),
name varchar(100),
value varchar(100),
wiki_id int,
primary key (pagename, type, name, wiki_id)
) ENGINE=InnoDB CHARACTER SET utf8;""", isWrite=True)
elif config.db_type == 'postgres':
cursor.execute("""create table metadata
(
pagename varchar(100),
type varchar(100),
name varchar(100),
value varchar(100),
wiki_id int,
primary key (pagename, type, name, wiki_id)
);""", isWrite=True)
if config.db_type == 'mysql':
cursor.execute("""create table wikis
(
id int not null AUTO_INCREMENT,
name varchar(100) unique not null,
domain varchar(64),
is_disabled BOOLEAN,
sitename varchar(100),
other_settings mediumblob,
primary key (id)
) ENGINE=InnoDB CHARACTER SET utf8;""", isWrite=True)
cursor.execute("ALTER TABLE wikis AUTO_INCREMENT = 1;", isWrite=True)
elif config.db_type == 'postgres':
cursor.execute("""create table wikis
(
id int not null,
name varchar(100) unique not null,
domain varchar(64),
is_disabled boolean,
sitename varchar(100),
other_settings bytea,
primary key (id)
)""", isWrite=True)
cursor.execute("CREATE sequence wikis_seq start 1 increment 1;",
isWrite=True)
cursor.execute("CREATE INDEX wikis_name on wikis (name);", isWrite=True)
cursor.execute("CREATE INDEX wikis_domain on wikis (domain);",
isWrite=True)
if config.db_type == 'mysql':
cursor.execute("""create table userWikiInfo
(
user_name varchar(100) not null,
wiki_id int,
first_edit_date double,
created_count int default 0,
edit_count int default 0,
file_count int default 0,
last_page_edited varchar(100),
last_edit_date double,
rc_bookmark double,
primary key (user_name, wiki_id)
) ENGINE=InnoDB CHARACTER SET utf8;""", isWrite=True)
elif config.db_type == 'postgres':
cursor.execute("""create table userWikiInfo
(
user_name varchar(100) not null,
wiki_id int,
first_edit_date double precision,
created_count int default 0,
edit_count int default 0,
file_count int default 0,
last_page_edited varchar(100),
last_edit_date double precision,
rc_bookmark double precision,
primary key (user_name, wiki_id)
)""", isWrite=True)
if config.db_type == 'mysql':
cursor.execute("""create table pageAcls
(
pagename varchar(100) not null,
groupname varchar(100) not null,
wiki_id int,
may_read BOOLEAN,
may_edit BOOLEAN,
may_delete BOOLEAN,
may_admin BOOLEAN,
primary key (pagename, groupname, wiki_id)
) ENGINE=InnoDB CHARACTER SET utf8;""", isWrite=True)
elif config.db_type == 'postgres':
cursor.execute("""create table pageAcls
(
pagename varchar(100) not null,
groupname varchar(100) not null,
wiki_id int,
may_read boolean,
may_edit boolean,
may_delete boolean,
may_admin boolean,
primary key (pagename, groupname, wiki_id)
)""", isWrite=True)
cursor.execute("""CREATE INDEX pageAcls_pagename_wiki on
pageAcls (pagename, wiki_id);""", isWrite=True)
if config.db_type == 'mysql':
cursor.execute("""create table userGroups
(
username varchar(100) not null,
groupname varchar(100) not null,
wiki_id int,
primary key (username, groupname, wiki_id)
) ENGINE=InnoDB CHARACTER SET utf8;""", isWrite=True)
elif config.db_type == 'postgres':
cursor.execute("""create table userGroups
(
username varchar(100) not null,
groupname varchar(100) not null,
wiki_id int,
primary key (username, groupname, wiki_id)
)""", isWrite=True)
cursor.execute("""CREATE INDEX user_groups_group_wiki on
userGroups (groupname, wiki_id);""", isWrite=True)
if config.db_type == 'mysql':
cursor.execute("""create table userGroupsIPs
(
ip char(16) not null,
groupname varchar(100) not null,
wiki_id int,
primary key (ip, groupname, wiki_id)
) ENGINE=InnoDB CHARACTER SET utf8;""", isWrite=True)
elif config.db_type == 'postgres':
cursor.execute("""create table userGroupsIPs
(
ip inet not null,
groupname varchar(100) not null,
wiki_id int,
primary key (ip, groupname, wiki_id)
)""", isWrite=True)
cursor.execute("""CREATE INDEX user_groups_ip_ips on
userGroupsIPs (groupname, wiki_id);""", isWrite=True)
if config.db_type == 'mysql':
cursor.execute("""create table lostPasswords
(
uid char(20) not null,
code varchar(255),
written_time double,
primary key (uid, code, written_time)
) ENGINE=InnoDB CHARACTER SET utf8;""", isWrite=True)
elif config.db_type == 'postgres':
cursor.execute("""create table lostPasswords
(
uid char(20) not null,
code varchar(255),
written_time double precision,
primary key (uid, code, written_time)
)""", isWrite=True)
cursor.execute("""CREATE INDEX lostpasswords_uid on
lostPasswords (uid);""", isWrite=True)
cursor.execute("""CREATE INDEX lostpasswords_written_time on
lostPasswords (written_time);""", isWrite=True)
if config.db_type == 'mysql':
cursor.execute("""create table wikisPending
(
wiki_name varchar(100) not null,
code varchar(255) not null,
written_time double not null,
primary key (wiki_name, code, written_time)
) ENGINE=InnoDB CHARACTER SET utf8;""", isWrite=True)
elif config.db_type == 'postgres':
cursor.execute("""create table wikisPending
(
wiki_name varchar(100) not null,
code varchar(255) not null,
written_time double precision not null,
primary key (wiki_name, code, written_time)
)""", isWrite=True)
cursor.execute("""CREATE INDEX wikispending_written_time on
wikisPending (written_time);""", isWrite=True)
if config.db_type == 'mysql':
cursor.execute("""create table captchas
(
id char(33) primary key,
secret varchar(100) not null,
human_readable_secret mediumblob,
written_time double
) ENGINE=InnoDB CHARACTER SET utf8;""", isWrite=True)
elif config.db_type == 'postgres':
cursor.execute("""create table captchas
(
id char(33) primary key,
secret varchar(100) not null,
human_readable_secret bytea,
written_time double precision
)""", isWrite=True)
cursor.execute("""CREATE INDEX captchas_written_time on
captchas (written_time);""", isWrite=True)
print "tables created"
def create_views(cursor):
print "creating views..."
if config.db_type == 'mysql':
cursor.execute("""CREATE VIEW eventChanges as SELECT
                'Events Board' as name, events.posted_time as changeTime,
users.id as id, 'NEWEVENT' as editType,
events.event_name as comment, events.posted_by_IP as userIP,
'Events Board' as propercased_name, events.wiki_id as wiki_id
from events, users
where users.propercased_name=events.posted_by;""", isWrite=True)
cursor.execute("""CREATE VIEW deletedFileChanges as
SELECT oldFiles.attached_to_pagename as name,
oldFiles.deleted_time as changeTime,
oldFiles.deleted_by as id, 'ATTDEL' as editType,
name as comment, oldFiles.deleted_by_ip as userIP,
oldFiles.attached_to_pagename_propercased as
propercased_name,
oldFiles.wiki_id as wiki_id from oldFiles;""", isWrite=True)
cursor.execute("""CREATE VIEW oldFileChanges as
SELECT oldFiles.attached_to_pagename as name,
oldFiles.uploaded_time as changeTime,
oldFiles.uploaded_by as id, 'ATTNEW' as editType,
name as comment, oldFiles.uploaded_by_ip as userIP,
oldFiles.attached_to_pagename_propercased as
propercased_name,
oldFiles.wiki_id as wiki_id from oldFiles;""", isWrite=True)
cursor.execute("""CREATE VIEW currentFileChanges as
SELECT files.attached_to_pagename as name,
files.uploaded_time as changeTime, files.uploaded_by as id,
'ATTNEW' as editType, name as comment,
files.uploaded_by_ip as userIP,
files.attached_to_pagename_propercased as propercased_name,
files.wiki_id as wiki_id from files;""", isWrite=True)
cursor.execute("""CREATE VIEW pageChanges as
SELECT name, editTime as changeTime, userEdited as id, editType,
comment, userIP, propercased_name, wiki_id
from allPages;""", isWrite=True)
cursor.execute("""CREATE VIEW currentMapChanges as
SELECT mapPoints.pagename as name,
mapPoints.created_time as changeTime,
mapPoints.created_by as id, 'SAVEMAP' as editType,
NULL as comment, mapPoints.created_by_ip as userIP,
mapPoints.pagename_propercased as propercased_name,
mapPoints.wiki_id as wiki_id from mapPoints;""",
isWrite=True)
cursor.execute("""CREATE VIEW oldMapChanges as
SELECT oldMapPoints.pagename as name,
oldMapPoints.created_time as changeTime,
oldMapPoints.created_by as id, 'SAVEMAP' as editType,
NULL as comment, oldMapPoints.created_by_ip as userIP,
oldMapPoints.pagename_propercased as propercased_name,
oldMapPoints.wiki_id as wiki_id from oldMapPoints;""",
isWrite=True)
cursor.execute("""CREATE VIEW deletedMapChanges as
SELECT oldMapPoints.pagename as name,
oldMapPoints.deleted_time as changeTime,
oldMapPoints.deleted_by as id, 'SAVEMAP' as editType,
NULL as comment, oldMapPoints.deleted_by_ip as userIP,
oldMapPoints.pagename_propercased as propercased_name,
oldMapPoints.wiki_id as wiki_id from oldMapPoints;""",
isWrite=True)
elif config.db_type == 'postgres':
cursor.execute("""CREATE VIEW eventChanges as
SELECT char 'Events Board' as name,
events.posted_time as changeTime, users.id as id,
char 'NEWEVENT' as editType, events.event_name as comment,
events.posted_by_IP as userIP, events.wiki_id as wiki_id
from events, users
where users.propercased_name=events.posted_by;""",
isWrite=True)
cursor.execute("""CREATE VIEW deletedFileChanges as
SELECT oldFiles.attached_to_pagename as name,
oldFiles.deleted_time as changeTime,
oldFiles.deleted_by as id, char 'ATTDEL' as editType,
name as comment, oldFiles.deleted_by_ip as userIP,
oldFiles.attached_to_pagename_propercased
as propercased_name,
oldFiles.wiki_id as wiki_id from oldFiles;""",
isWrite=True)
cursor.execute("""CREATE VIEW oldFileChanges as
SELECT oldFiles.attached_to_pagename as name,
oldFiles.uploaded_time as changeTime,
oldFiles.uploaded_by as id, char 'ATTNEW' as editType,
name as comment, oldFiles.uploaded_by_ip as userIP,
oldFiles.attached_to_pagename_propercased
as propercased_name,
oldFiles.wiki_id as wiki_id from oldFiles;""",
isWrite=True)
cursor.execute("""CREATE VIEW currentFileChanges as
SELECT files.attached_to_pagename as name,
files.uploaded_time as changeTime,
files.uploaded_by as id, char 'ATTNEW' as editType,
name as comment, files.uploaded_by_ip as userIP,
files.attached_to_pagename_propercased as propercased_name,
files.wiki_id as wiki_id from files;""", isWrite=True)
cursor.execute("""CREATE VIEW pageChanges as
SELECT name, editTime as changeTime, userEdited as id, editType,
comment, userIP, propercased_name, wiki_id
from allPages;""", isWrite=True)
cursor.execute("""CREATE VIEW currentMapChanges as
SELECT mapPoints.pagename as name,
mapPoints.created_time as changeTime,
mapPoints.created_by as id, char 'SAVEMAP' as editType,
                char '' as comment, mapPoints.created_by_ip as userIP,
mapPoints.pagename_propercased as propercased_name,
mapPoints.wiki_id as wiki_id from mapPoints;""",
isWrite=True)
cursor.execute("""CREATE VIEW oldMapChanges as
SELECT oldMapPoints.pagename as name,
oldMapPoints.created_time as changeTime,
oldMapPoints.created_by as id, char 'SAVEMAP' as editType,
char '' as comment, oldMapPoints.created_by_ip as userIP,
oldMapPoints.pagename_propercased as propercased_name,
oldMapPoints.wiki_id as wiki_id from oldMapPoints;""",
isWrite=True)
cursor.execute("""CREATE VIEW deletedMapChanges as
SELECT oldMapPoints.pagename as name,
oldMapPoints.deleted_time as changeTime,
oldMapPoints.deleted_by as id, char 'SAVEMAP' as editType,
char '' as comment, oldMapPoints.deleted_by_ip as userIP,
oldMapPoints.pagename_propercased as propercased_name,
oldMapPoints.wiki_id as wiki_id from oldMapPoints;""",
isWrite=True)
print "views created"
def create_config(request, wiki_id=None):
config_dict = config.reduce_to_local_config(config.CONFIG_VARS)
# want to make sure we increment the wiki_id properly
del config_dict['wiki_id']
if wiki_id is not None:
config_dict['wiki_id'] = wiki_id
config_dict['active'] = True
site_conf = config.Config(config.wiki_name, request, process_config=False)
request.config = site_conf
request.config.active = True
request.config.set_config(request.config.wiki_name, config_dict, request)
request.setup_basics()
def create_other_stuff(request):
print "creating other stuff..."
d = {'wiki_id' : request.config.wiki_id}
cursor = request.cursor
cursor.execute("""INSERT into mapCategoryDefinitions values
(1, 'food.png', 'Restaurants', %(wiki_id)s);""", d, isWrite=True)
cursor.execute("""INSERT into mapCategoryDefinitions values
(2, 'dollar.png', 'Shopping', %(wiki_id)s);""", d, isWrite=True)
cursor.execute("""INSERT into mapCategoryDefinitions values
(3, 'hand.png', 'Services', %(wiki_id)s);""", d, isWrite=True)
cursor.execute("""INSERT into mapCategoryDefinitions values
(4, 'run.png', 'Parks & Recreation', %(wiki_id)s);""", d, isWrite=True)
cursor.execute("""INSERT into mapCategoryDefinitions values
(5, 'people.png', 'Community', %(wiki_id)s);""", d, isWrite=True)
cursor.execute("""INSERT into mapCategoryDefinitions values
(6, 'arts.png', 'Arts & Entertainment', %(wiki_id)s);""",
d, isWrite=True)
cursor.execute("""INSERT into mapCategoryDefinitions values
(7, 'edu.png', 'Education', %(wiki_id)s);""", d, isWrite=True)
cursor.execute("""INSERT into mapCategoryDefinitions values
(9, 'head.png', 'People', %(wiki_id)s);""", d, isWrite=True)
cursor.execute("""INSERT into mapCategoryDefinitions values
(10, 'gov.png', 'Government', %(wiki_id)s);""", d, isWrite=True)
cursor.execute("""INSERT into mapCategoryDefinitions values
(11, 'bike.png', 'Bars & Night Life', %(wiki_id)s);""", d, isWrite=True)
cursor.execute("""INSERT into mapCategoryDefinitions values
(12, 'coffee.png', 'Cafes', %(wiki_id)s);""", d, isWrite=True)
cursor.execute("""INSERT into mapCategoryDefinitions values
(13, 'house.png', 'Housing', %(wiki_id)s);""", d, isWrite=True)
cursor.execute("""INSERT into mapCategoryDefinitions values
(14, 'wifi.png', 'WiFi Hot Spots', %(wiki_id)s);""", d, isWrite=True)
cursor.execute("""INSERT into mapCategoryDefinitions values
(99, NULL, 'Other', %(wiki_id)s);""", d, isWrite=True)
print "other stuff created"
def insert_acl(plist, flat_page_dict, request):
for pagename in plist:
if flat_page_dict[pagename].acl:
wikiacl.setACL(pagename, flat_page_dict[pagename].acl, request)
def insert_pages(request, flat_page_dict=None, plist=None,
without_files=False, global_pages=True):
timenow = time.time()
cursor = request.cursor
if not flat_page_dict:
if global_pages:
flat_page_dict = all_pages
else:
flat_page_dict = basic_pages
if not plist:
plist = flat_page_dict.keys()
file_dict = { 'uploaded_time': 0, 'uploaded_by': None,
'uploaded_by_ip': None }
for pagename in plist:
request.req_cache['pagenames'][
pagename.lower()] = pagename # for caching
flatpage = flat_page_dict[pagename]
cursor.execute("""INSERT into curPages (name, text, cachedText,
editTime, cachedTime,
userEdited, propercased_name,
wiki_id)
values (%(pagename)s, %(pagetext)s, NULL,
%(timenow)s, NULL, NULL,
%(propercased_name)s, %(wiki_id)s);""",
{'pagename':pagename.lower(), 'pagetext':flatpage.text,
'propercased_name':pagename,
'wiki_id': request.config.wiki_id,
'timenow': timenow}, isWrite=True)
cursor.execute("""INSERT into allPages (name, text, editTime,
userEdited, editType, comment,
userIP, propercased_name,
wiki_id)
values (%(pagename)s, %(pagetext)s, %(timenow)s,
NULL, 'SAVENEW', 'System page', NULL,
%(propercased_name)s, %(wiki_id)s);""",
{'pagename':pagename.lower(), 'pagetext':flatpage.text,
'propercased_name':pagename,
'wiki_id': request.config.wiki_id, 'timenow': timenow},
isWrite=True)
file_dict['pagename'] = pagename
for filename, content in flatpage.files:
file_dict['filename'] = filename
file_dict['filecontent'] = content
if wikiutil.isImage(filename):
xsize, ysize = Files.openImage(content).size
file_dict['xsize'] = xsize
file_dict['ysize'] = ysize
wikidb.putFile(request, file_dict)
insert_acl(plist, flat_page_dict, request)
def build_search_index():
"""
Builds the title and full text search indexes.
"""
if not config.has_xapian:
print ("You don't have Xapian installed..."
"skipping configuration of search index.")
return
if not os.path.exists(config.search_db_location):
# create it if it doesn't exist, we don't want to create
# intermediates though
os.mkdir(config.search_db_location)
# prune existing db directories, do this explicitly in case third party
# extensions use this directory (they really shouldn't)
for db in ('title', 'text'):
dbpath = os.path.join(config.search_db_location, db)
if os.path.exists(dbpath):
shutil.rmtree(dbpath)
print "Building search index..."
from Sycamore import wikiutil, search
pages = wikiutil.getPageList(req, objects=True)
for page in pages:
print " %s added to search index." % page.page_name
# don't use remote server on initial build
search.add_to_index(page, try_remote=False)
def setup_admin(request):
print "\n-------------"
print "Enter the primary admin's wiki username:"
username = raw_input()
group = wikiacl.Group("Admin", request, fresh=True)
groupdict = {username.lower(): None}
group.update(groupdict)
group.save()
basic_pages = init_basic_pages()
all_pages = copy(basic_pages)
all_pages.update(init_basic_pages('global'))
if __name__ == '__main__':
from Sycamore import request
# building for first time..don't try to load config from db
req = request.RequestDummy(process_config=False)
cursor = req.cursor
init_db(cursor)
create_tables(cursor)
create_views(cursor)
create_config(req)
create_other_stuff(req)
print "inserting basic pages..."
insert_pages(req)
build_search_index()
setup_admin(req)
req.db_disconnect() # commit before building caches
req = request.RequestDummy(process_config=True)
wiki_list = wikiutil.getWikiList(req)
for wiki_name in wiki_list:
req.config = config.Config(wiki_name, req, process_config=True)
plist = wikiutil.getPageList(req)
maintenance.buildCaches(wiki_name, plist, doprint=True)
req.db_disconnect()
print "-------------"
print ("All done! Now, start up the sycamore server and "
"create the admin account!")
| gpl-2.0 | 1,005,118,203,528,990,100 | 39.32502 | 94 | 0.586007 | false | 3.998724 | true | false | false |
guaix-ucm/pyemir | emirdrp/preprocess.py | 3 | 4954 | #
# Copyright 2008-2014 Universidad Complutense de Madrid
#
# This file is part of PyEmir
#
# PyEmir is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyEmir is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyEmir. If not, see <http://www.gnu.org/licenses/>.
#
"""Preprocessing EMIR readout modes"""
from __future__ import division
import six
from astropy.io import fits
from numina.array import ramp_array, fowler_array
from .core import EMIR_READ_MODES
PREPROC_KEY = 'READPROC'
PREPROC_VAL = True
class ReadModeGuessing(object):
def __init__(self, mode, info=None):
self.mode = mode
self.info = info
def image_readmode(hdulist, default=None):
header = hdulist[0].header
if 'READMODE' in header:
p_readmode = header['READMODE'].lower()
if p_readmode in EMIR_READ_MODES:
return ReadModeGuessing(mode=p_readmode,
info={'source': 'keyword'}
)
# Using heuristics
shape = hdulist[0].shape
if len(shape) == 2:
# A 2D image, mode is single
return ReadModeGuessing(mode='single', info={'source': 'heuristics'})
else:
nd = min(shape)
if nd == 2:
# A NXNX2 image
            # mode is cds
return ReadModeGuessing(mode='cds', info={'source': 'heuristics'})
# Insufficient information
if default:
return ReadModeGuessing(mode=default, info={'source': 'default'})
else:
return None
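# Added usage sketch for the read-mode guesser; the file name and printed
# output below are hypothetical, not part of the original module:
#
#     hdulist = fits.open('emir_frame.fits')
#     guess = image_readmode(hdulist, default='single')
#     if guess is not None:
#         print(guess.mode, guess.info)  # e.g. 'cds', {'source': 'heuristics'}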
def preprocess_single(hdulist):
return hdulist
def preprocess_cds(hdulist):
# CDS is Fowler with just one pair of reads
return preprocess_fowler(hdulist)
def preprocess_fowler(hdulist):
hdulist[0].header.update(PREPROC_KEY, PREPROC_VAL)
# We need:
tint = 0.0 # Integration time (from first read to last read)
ts = 0.0 # Time between samples
gain = 1.0 # Detector gain (number)
ron = 1.0 # Detector RON (number)
# A master badpixel mask
cube = hdulist[0].data
res, var, npix, mask = fowler_array(cube, ti=tint, ts=ts,
gain=gain, ron=ron,
badpixels=None,
dtype='float32',
saturation=55000.0
)
hdulist[0].data = res
varhdu = fits.ImageHDU(var)
varhdu.update_ext_name('VARIANCE')
hdulist.append(varhdu)
nmap = fits.ImageHDU(npix)
nmap.update_ext_name('MAP')
hdulist.append(nmap)
nmask_hdu = fits.ImageHDU(mask)
nmask_hdu.update_ext_name('MASK')
hdulist.append(nmask_hdu)
return hdulist
def preprocess_ramp(hdulist):
hdulist[0].header.update(PREPROC_KEY, PREPROC_VAL)
cube = hdulist[0].data
# We need
ti = 0.0 # Integration time
gain = 1.0
ron = 1.0
rslt = ramp_array(cube, ti, gain=gain,
ron=ron, badpixels=None,
dtype='float32', saturation=55000.0
)
result, var, npix, mask = rslt
hdulist[0].data = result
varhdu = fits.ImageHDU(var)
varhdu.update_ext_name('VARIANCE')
hdulist.append(varhdu)
nmap = fits.ImageHDU(npix)
nmap.update_ext_name('MAP')
hdulist.append(nmap)
nmask = fits.ImageHDU(mask)
nmask.update_ext_name('MASK')
hdulist.append(nmask)
return hdulist
def fits_wrapper(frame):
if isinstance(frame, six.string_types):
return fits.open(frame)
elif isinstance(frame, fits.HDUList):
return frame
else:
raise TypeError
def preprocess(input_, output):
with fits_wrapper(input_) as hdulist:
header = hdulist[0].header
if 'PREPROC' in header:
# if the image is preprocessed, do nothing
            if input_ != output:
hdulist.writeto(output, overwrite=True)
return
# determine the READ mode
guess = image_readmode(hdulist, 'single')
if guess is None:
# We have a problem here
return
if guess.mode == 'single':
hduproc = preprocess_single(hdulist)
elif guess.mode == 'cds':
hduproc = preprocess_cds(hdulist)
elif guess.mode == 'fowler':
hduproc = preprocess_fowler(hdulist)
        elif guess.mode == 'ramp':
            hduproc = preprocess_ramp(hdulist)
else:
hduproc = preprocess_single(hdulist)
hduproc.writeto(output, overwrite=True)
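# Added end-to-end usage sketch (paths are placeholders): preprocess accepts
# either a FITS filename or an open HDUList and writes the reduced frame;
# Fowler/CDS cubes gain VARIANCE, MAP and MASK extensions.
#
#     preprocess('raw_frame.fits', 'reduced_frame.fits')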
| gpl-3.0 | 7,333,595,398,069,707,000 | 28.664671 | 78 | 0.601736 | false | 3.50106 | false | false | false |
Alan-Robertson/python-qinfer | src/qinfer/tomography/models.py | 3 | 9912 | #!/usr/bin/python
# -*- coding: utf-8 -*-
##
# models.py: Likelihood models for quantum state and process tomography.
##
# © 2017, Chris Ferrie ([email protected]) and
# Christopher Granade ([email protected]).
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
##
# TODO: docstrings!
# TODO: unit tests!
## DOCSTRING #################################################################
"""
"""
## FEATURES ##################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
## IMPORTS ###################################################################
from builtins import range, map
from qinfer import FiniteOutcomeModel
import numpy as np
# heisenberg_weyl_operators() below relies on qutip, which this module's
# original imports did not include; a guarded import keeps the module
# importable when qutip is absent.
try:
    import qutip as qt
except ImportError:
    qt = None
## EXPORTS ###################################################################
# TODO
## DESIGN NOTES ##############################################################
"""
Bases are always assumed to have exactly one traceful element— in particular,
the zeroth basis element.
"""
## FUNCTIONS #################################################################
# TODO: document, contribute to QuTiP?
def heisenberg_weyl_operators(d=2):
w = np.exp(2 * np.pi * 1j / d)
X = qt.Qobj([
qt.basis(d, (idx + 1) % d).data.todense().view(np.ndarray)[:, 0] for idx in range(d)
])
Z = qt.Qobj(np.diag(w ** np.arange(d)))
return [X**i * Z**j for i in range(d) for j in range(d)]
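# Added usage note (sketch; requires qutip): for d = 2, w = exp(pi * 1j) = -1,
# X is the Pauli-X shift matrix and Z = diag(1, -1), so the call returns the
# four operators [I, Z, X, X*Z] as qutip Qobj instances.
#
#     ops = heisenberg_weyl_operators(2)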
## CLASSES ###################################################################
class TomographyModel(FiniteOutcomeModel):
r"""
Model for tomographically learning a quantum state using
two-outcome positive-operator valued measures (POVMs).
:param TomographyBasis basis: Basis used in representing
states as model parameter vectors.
:param bool allow_subnormalized: If `False`, states
:math:`\rho` are constrained during resampling such
that :math:`\Tr(\rho) = 1`.
"""
def __init__(self, basis, allow_subnormalized=False):
self._dim = basis.dim
self._basis = basis
self._allow_subnormalied = allow_subnormalized
super(TomographyModel, self).__init__()
@property
def dim(self):
"""
Dimension of the Hilbert space on which density
operators learned by this model act.
:type: `int`
"""
return self._dim
@property
def basis(self):
"""
Basis used in converting between :class:`~qutip.Qobj` and
model parameter vector representations of states.
:type: `TomographyBasis`
"""
return self._basis
@property
def n_modelparams(self):
return self._dim ** 2
@property
def modelparam_names(self):
return list(map(
r'\langle\!\langle{} | \rho\rangle\!\rangle'.format,
self.basis.labels
))
@property
def is_n_outcomes_constant(self):
return True
@property
def expparams_dtype(self):
return [
(str('meas'), float, self._dim ** 2)
]
def n_outcomes(self, expparams):
return 2
def are_models_valid(self, modelparams):
# This is wrong, but is wrong for the sake of speed.
# As a future improvement, validity checking needs to
# be enabled as a non-default option.
return np.ones((modelparams.shape[0],), dtype=bool)
def canonicalize(self, modelparams):
"""
        Truncates negative eigenvalues from each
state represented by a tensor of model parameter
vectors, and renormalizes as appropriate.
:param np.ndarray modelparams: Array of shape
``(n_states, dim**2)`` containing model parameter
representations of each of ``n_states`` different
states.
:return: The same model parameter tensor with all
states truncated to be positive operators. If
:attr:`~TomographyModel.allow_subnormalized` is
`False`, all states are also renormalized to trace
one.
"""
modelparams = np.apply_along_axis(self.trunc_neg_eigs, 1, modelparams)
# Renormalizes particles if allow_subnormalized=False.
if not self._allow_subnormalied:
modelparams = self.renormalize(modelparams)
return modelparams
def trunc_neg_eigs(self, particle):
"""
Given a state represented as a model parameter vector,
returns a model parameter vector representing the same
state with any negative eigenvalues set to zero.
:param np.ndarray particle: Vector of length ``(dim ** 2, )``
representing a state.
:return: The same state with any negative eigenvalues
set to zero.
"""
arr = np.tensordot(particle, self._basis.data.conj(), 1)
w, v = np.linalg.eig(arr)
if np.all(w >= 0):
return particle
else:
w[w < 0] = 0
new_arr = np.dot(v * w, v.conj().T)
new_particle = np.real(np.dot(self._basis.flat(), new_arr.flatten()))
assert new_particle[0] > 0
return new_particle
def renormalize(self, modelparams):
"""
Renormalizes one or more states represented as model
parameter vectors, such that each state has trace 1.
:param np.ndarray modelparams: Array of shape ``(n_states,
dim ** 2)`` representing one or more states as
model parameter vectors.
:return: The same state, normalized to trace one.
"""
# The 0th basis element (identity) should have
# a value 1 / sqrt{dim}, since the trace of that basis
# element is fixed to be sqrt{dim} by convention.
norm = modelparams[:, 0] * np.sqrt(self._dim)
assert not np.sum(norm == 0)
return modelparams / norm[:, None]
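    # Worked example (added comment, assuming the usual Pauli basis
    # {I, X, Y, Z} / sqrt(2) for a qubit): only the zeroth, traceful element
    # has trace sqrt(2), so a trace-one state has modelparams[0] == 1 / sqrt(2)
    # and norm == modelparams[0] * sqrt(2) == 1, leaving it unchanged here.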
def likelihood(self, outcomes, modelparams, expparams):
super(TomographyModel, self).likelihood(outcomes, modelparams, expparams)
pr1 = np.empty((modelparams.shape[0], expparams.shape[0]))
pr1[:, :] = np.einsum(
'ei,mi->me',
            # This should be the Hermitian conjugate, but since
            # expparams['meas'] is real (that is, since the measurement
            # is Hermitian), that's not needed here.
expparams['meas'],
modelparams
)
np.clip(pr1, 0, 1, out=pr1)
return FiniteOutcomeModel.pr0_to_likelihood_array(outcomes, 1 - pr1)
class DiffusiveTomographyModel(TomographyModel):
@property
def n_modelparams(self):
return super(DiffusiveTomographyModel, self).n_modelparams + 1
@property
def expparams_dtype(self):
return super(DiffusiveTomographyModel, self).expparams_dtype + [
(str('t'), float)
]
@property
def modelparam_names(self):
return super(DiffusiveTomographyModel, self).modelparam_names + [r'\epsilon']
def are_models_valid(self, modelparams):
return np.logical_and(
super(DiffusiveTomographyModel, self).are_models_valid(modelparams),
modelparams[:, -1] > 0
)
def canonicalize(self, modelparams):
return np.concatenate([
super(DiffusiveTomographyModel, self).canonicalize(modelparams[:, :-1]),
modelparams[:, -1, None]
], axis=1)
def likelihood(self, outcomes, modelparams, expparams):
return super(DiffusiveTomographyModel, self).likelihood(outcomes, modelparams[:, :-1], expparams)
def update_timestep(self, modelparams, expparams):
# modelparams: [n_m, d² + 1]
# expparams: [n_e,]
# eps: [n_m, 1] * [n_e] → [n_m, n_e, 1]
eps = (modelparams[:, -1, None] * np.sqrt(expparams['t']))[:, :, None]
# steps: [n_m, n_e, 1] * [n_m, 1, d²]
steps = eps * np.random.randn(*modelparams[:, None, :].shape)
steps[:, :, [0, -1]] = 0
raw_modelparams = modelparams[:, None, :] + steps
# raw_modelparams[:, :, :-1] = np.apply_along_axis(self.trunc_neg_eigs, 2, raw_modelparams[:, :, :-1])
for idx_experiment in range(len(expparams)):
raw_modelparams[:, idx_experiment, :] = self.canonicalize(raw_modelparams[:, idx_experiment, :])
return raw_modelparams.transpose((0, 2, 1))
| agpl-3.0 | 7,411,290,033,158,219,000 | 35.282051 | 110 | 0.600808 | false | 4.052782 | false | false | false |
FBergeron/FeedNotifier | controls.py | 1 | 6026 | import wx
import wx.lib.wordwrap as wordwrap
import util
class Event(wx.PyEvent):
def __init__(self, event_object, type):
super(Event, self).__init__()
self.SetEventType(type.typeId)
self.SetEventObject(event_object)
EVT_HYPERLINK = wx.PyEventBinder(wx.NewEventType())
class Line(wx.PyPanel):
def __init__(self, parent, pen=wx.BLACK_PEN):
super(Line, self).__init__(parent, -1, style=wx.BORDER_NONE)
self.pen = pen
self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)
self.Bind(wx.EVT_PAINT, self.on_paint)
self.Bind(wx.EVT_SIZE, self.on_size)
def on_size(self, event):
self.Refresh()
def on_paint(self, event):
dc = wx.AutoBufferedPaintDC(self)
dc.Clear()
dc.SetPen(self.pen)
width, height = self.GetClientSize()
y = height / 2
dc.DrawLine(0, y, width, y)
def DoGetBestSize(self):
return -1, self.pen.GetWidth()
class Text(wx.PyPanel):
def __init__(self, parent, width, text):
super(Text, self).__init__(parent, -1, style=wx.BORDER_NONE)
self.text = text
self.width = width
self.wrap = True
self.rects = []
self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)
self.Bind(wx.EVT_PAINT, self.on_paint)
self.Bind(wx.EVT_SIZE, self.on_size)
def on_size(self, event):
self.Refresh()
def on_paint(self, event):
dc = wx.AutoBufferedPaintDC(self)
self.setup_dc(dc)
dc.Clear()
self.draw_lines(dc)
def setup_dc(self, dc):
dc.SetFont(self.GetFont())
dc.SetTextBackground(self.GetBackgroundColour())
dc.SetTextForeground(self.GetForegroundColour())
dc.SetBackground(wx.Brush(self.GetBackgroundColour()))
def draw_lines(self, dc, emulate=False):
if self.wrap:
text = wordwrap.wordwrap(self.text.strip(), self.width, dc)
else:
text = self.text.strip()
lines = text.split('\n')
lines = [line.strip() for line in lines]
lines = [line for line in lines if line]
x, y = 0, 0
rects = []
for line in lines:
if not emulate:
dc.DrawText(line, x, y)
w, h = dc.GetTextExtent(line)
rects.append(wx.Rect(x, y, w, h))
y += h
if not emulate:
self.rects = rects
return y
def compute_height(self):
dc = wx.ClientDC(self)
self.setup_dc(dc)
height = self.draw_lines(dc, True)
return height
def fit_no_wrap(self):
dc = wx.ClientDC(self)
self.setup_dc(dc)
width, height = dc.GetTextExtent(self.text.strip())
self.width = width
self.wrap = False
def DoGetBestSize(self):
height = self.compute_height()
return self.width, height
class Link(Text):
def __init__(self, parent, width, link, text):
super(Link, self).__init__(parent, width, text)
self.link = link
self.trigger = False
self.hover = False
self.Bind(wx.EVT_LEAVE_WINDOW, self.on_leave)
self.Bind(wx.EVT_MOTION, self.on_motion)
self.Bind(wx.EVT_LEFT_DOWN, self.on_left_down)
self.Bind(wx.EVT_LEFT_UP, self.on_left_up)
def hit_test(self, point):
for rect in self.rects:
if rect.Contains(point):
self.on_hover()
break
else:
self.on_unhover()
def on_motion(self, event):
self.hit_test(event.GetPosition())
def on_leave(self, event):
self.on_unhover()
def on_hover(self):
if self.hover:
return
self.hover = True
font = self.GetFont()
font.SetUnderlined(True)
self.SetFont(font)
self.SetCursor(wx.StockCursor(wx.CURSOR_HAND))
self.Refresh()
def on_unhover(self):
if not self.hover:
return
self.hover = False
self.trigger = False
font = self.GetFont()
font.SetUnderlined(False)
self.SetFont(font)
self.SetCursor(wx.StockCursor(wx.CURSOR_DEFAULT))
self.Refresh()
def on_left_down(self, event):
if self.hover:
self.trigger = True
def on_left_up(self, event):
if self.hover and self.trigger:
event = Event(self, EVT_HYPERLINK)
event.link = self.link
wx.PostEvent(self, event)
self.trigger = False
class BitmapLink(wx.PyPanel):
def __init__(self, parent, link, bitmap, hover_bitmap=None):
super(BitmapLink, self).__init__(parent, -1)
self.link = link
self.bitmap = bitmap
self.hover_bitmap = hover_bitmap or bitmap
self.hover = False
self.trigger = False
self.SetInitialSize(bitmap.GetSize())
self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)
self.Bind(wx.EVT_PAINT, self.on_paint)
self.Bind(wx.EVT_ENTER_WINDOW, self.on_enter)
self.Bind(wx.EVT_LEAVE_WINDOW, self.on_leave)
self.Bind(wx.EVT_LEFT_DOWN, self.on_left_down)
self.Bind(wx.EVT_LEFT_UP, self.on_left_up)
def on_paint(self, event):
dc = wx.AutoBufferedPaintDC(self)
dc.SetBackground(wx.Brush(self.GetBackgroundColour()))
dc.Clear()
bitmap = self.hover_bitmap if self.hover else self.bitmap
dc.DrawBitmap(bitmap, 0, 0, True)
def on_enter(self, event):
self.hover = True
self.SetCursor(wx.StockCursor(wx.CURSOR_HAND))
self.Refresh()
def on_leave(self, event):
self.trigger = False
self.hover = False
self.SetCursor(wx.StockCursor(wx.CURSOR_DEFAULT))
self.Refresh()
def on_left_down(self, event):
self.trigger = True
def on_left_up(self, event):
if self.trigger:
event = Event(self, EVT_HYPERLINK)
event.link = self.link
wx.PostEvent(self, event)
self.trigger = False
| bsd-3-clause | 7,334,771,045,423,110,000 | 33.244318 | 71 | 0.577498 | false | 3.481225 | false | false | false |
curtisforrester/dyna_settings | tests/test_dyna_settings.py | 1 | 5775 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_dyna_settings
----------------------------------
Tests for `dyna_settings` module. I normally use py.test.
"""
import unittest
from dyna_settings.core import DynaSettings, _dyna_controller, register_dyna_settings, dyna_value, \
NoMatchingSettingsClass, DynaSettingsController
__author__ = 'curtis'
class ChildOK_Match(DynaSettings):
def value_dict(self):
return {'A': 'a', 'B': 'b', 'C': 9}
def env_detector(self):
return True
class ChildOK_NoMatch(DynaSettings):
def value_dict(self):
return {'A': 'aa', 'B': 'bb', 'C': 99}
def env_detector(self):
return False
class EnvSettingTrue(DynaSettings):
def __init__(self):
super(EnvSettingTrue, self).__init__()
self._environ_vars_trump = True
def value_dict(self):
return {
'PATH': 'a very wrong path',
'AINT_THAR': 'This aint gonna be there'
}
def env_detector(self):
return True
class TestDynaSettings(unittest.TestCase):
def test_parent_interface_excepts(self):
bad = DynaSettings()
with self.assertRaises(NotImplementedError):
bad.env_detector()
with self.assertRaises(NotImplementedError):
bad.value_dict()
def test_child_interface(self):
good = ChildOK_Match()
self.assertIsInstance(good.value_dict(), dict)
self.assertTrue(good.env_detector())
good.init_values()
self.assertEqual(good.get_value('A', 'x'), 'a')
def test_no_match_child_interface(self):
good = ChildOK_NoMatch()
self.assertIsInstance(good.value_dict(), dict)
self.assertFalse(good.env_detector())
good.init_values()
self.assertEqual(good.get_value('A', 'x'), 'aa')
def test_register_match(self):
_dyna_controller.reset()
instance = ChildOK_Match()
register_dyna_settings(instance)
register_dyna_settings(ChildOK_NoMatch())
self.assertEqual(_dyna_controller.detected_settings, instance)
def test_register_nomatch(self):
_dyna_controller.reset()
register_dyna_settings(ChildOK_NoMatch())
self.assertIsNone(_dyna_controller.detected_settings)
def test_get_values(self):
_dyna_controller.reset()
register_dyna_settings(ChildOK_Match())
register_dyna_settings(ChildOK_NoMatch())
val = dyna_value('A', production_value='x')
self.assertEqual(val, 'a')
val = dyna_value('B', production_value='x')
self.assertEqual(val, 'b')
val = dyna_value('UNDEFINED', production_value='prod')
self.assertEqual(val, 'prod')
def test_get_values_with_no_settings_class(self):
_dyna_controller.reset()
with self.assertRaises(NoMatchingSettingsClass):
val = dyna_value('BAD')
def test_environ_var_trump_global(self):
"""
        Verify that with the global trump set True we'll get the value from the environment variable.
:return:
"""
DynaSettingsController.set_environ_vars_trump(flag=True)
self.assertTrue(_dyna_controller.environ_vars_trump)
import os
path = os.environ.get('PATH')
self.assertTrue(path)
path_from_settings = dyna_value('PATH', production_value=None)
self.assertTrue(path_from_settings)
self.assertEqual(path_from_settings, path)
def test_environ_var_trump_off(self):
"""
Verify that with the environment var trump off we obtain the value from
our dyna settings and not the environment variable.
:return:
"""
DynaSettingsController.set_environ_vars_trump(flag=False)
self.assertFalse(_dyna_controller.environ_vars_trump)
import os
path = os.environ.get('PATH')
self.assertTrue(path)
path_from_settings = dyna_value('PATH', production_value='Internal path')
self.assertTrue(path_from_settings)
self.assertNotEqual(path_from_settings, path)
def test_environ_var_trump_instance(self):
"""
        Verify that, with a DynaSettings instance registered that sets trump True, it behaves
properly by obtaining the value from the environment variable. Should ignore both the
production_value and the settings class definition.
:return:
"""
_dyna_controller.reset()
self.assertFalse(_dyna_controller.environ_vars_trump)
register_dyna_settings(EnvSettingTrue())
import os
path = os.environ.get('PATH')
self.assertTrue(path)
path_from_settings = dyna_value('PATH', production_value='Internal path')
self.assertTrue(path_from_settings)
self.assertEqual(path_from_settings, path)
def test_environ_var_trump_no_env_var(self):
"""
Verify that if trump is True but the environment var is not defined we'll still pick
up the value if the class instance has defined it
:return:
"""
_dyna_controller.reset()
register_dyna_settings(EnvSettingTrue())
path = dyna_value('AINT_THAR', production_value=None)
self.assertTrue(path)
def test_environ_var_trump_fail(self):
"""
Verifies that if Trump is true, environment doesn't have the variable, production_value doesn't
define it, and the class does not either, then exception is raised.
:return:
"""
_dyna_controller.reset()
register_dyna_settings(EnvSettingTrue())
with self.assertRaises(NoMatchingSettingsClass):
bad = dyna_value('VOODOOUDO', production_value=None)
print bad
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -1,955,690,828,082,807,300 | 29.555556 | 103 | 0.627186 | false | 3.857715 | true | false | false |
aplicatii-romanesti/allinclusive-kodi-pi | .kodi/userdata/addon_data/plugin.video.p2p-streams/acestream/ace/ACEStream/Core/Statistics/Logger.py | 1 | 5405 | #Embedded file name: ACEStream\Core\Statistics\Logger.pyo
import sys
import os
import time
import socket
import threading
from traceback import print_exc
DEBUG = False
log_separator = ' '
logger = None
def create_logger(file_name):
global logger
logger = Logger(3, file_name)
def get_logger():
if logger is None:
create_logger('global.log')
return logger
def get_today():
return time.gmtime(time.time())[:3]
class Logger:
def __init__(self, threshold, file_name, file_dir = '.', prefix = '', prefix_date = False, open_mode = 'a+b'):
self.threshold = threshold
self.Log = self.log
if file_name == '':
self.logfile = sys.stderr
else:
try:
if not os.access(file_dir, os.F_OK):
try:
os.mkdir(file_dir)
except os.error as msg:
                        raise Exception('logger: mkdir error: ' + str(msg))
file_path = self.get_file_path(file_dir, prefix, prefix_date, file_name)
self.logfile = open(file_path, open_mode)
except Exception as msg:
self.logfile = None
print >> sys.stderr, 'logger: cannot open log file', file_name, file_dir, prefix, prefix_date, msg
print_exc()
def __del__(self):
self.close()
def get_file_path(self, file_dir, prefix, prefix_date, file_name):
if prefix_date is True:
today = get_today()
date = '%04d%02d%02d' % today
else:
date = ''
return os.path.join(file_dir, prefix + date + file_name)
def log(self, level, msg, showtime = True):
if level <= self.threshold:
if self.logfile is None:
return
if showtime:
time_stamp = '%.01f' % time.time()
self.logfile.write(time_stamp + log_separator)
if isinstance(msg, str):
self.logfile.write(msg)
else:
self.logfile.write(repr(msg))
self.logfile.write('\n')
self.logfile.flush()
def close(self):
if self.logfile is not None:
self.logfile.close()
class OverlayLogger:
__single = None
__lock = threading.RLock()
def __init__(self, file_name, file_dir = '.'):
if OverlayLogger.__single:
raise RuntimeError, 'OverlayLogger is singleton2'
self.file_name = file_name
self.file_dir = file_dir
OverlayLogger.__single = self
self.Log = self.log
self.__call__ = self.log
def getInstance(*args, **kw):
OverlayLogger.__lock.acquire()
try:
if OverlayLogger.__single is None:
OverlayLogger(*args, **kw)
return OverlayLogger.__single
finally:
OverlayLogger.__lock.release()
getInstance = staticmethod(getInstance)
def log(self, *msgs):
log_msg = ''
nmsgs = len(msgs)
if nmsgs < 2:
print >> sys.stderr, 'Error message for log', msgs
return
for i in range(nmsgs):
if isinstance(msgs[i], tuple) or isinstance(msgs[i], list):
log_msg += log_separator
for msg in msgs[i]:
try:
log_msg += str(msg)
except:
log_msg += repr(msg)
log_msg += log_separator
else:
try:
log_msg += str(msgs[i])
except:
log_msg += repr(msgs[i])
log_msg += log_separator
if log_msg:
self._write_log(log_msg)
def _write_log(self, msg):
today = get_today()
if not hasattr(self, 'today'):
self.logger = self._make_logger(today)
elif today != self.today:
self.logger.close()
self.logger = self._make_logger(today)
self.logger.log(3, msg)
def _make_logger(self, today):
self.today = today
hostname = socket.gethostname()
logger = Logger(3, self.file_name, self.file_dir, hostname, True)
logger.log(3, '# ACEStream Overlay Log Version 3', showtime=False)
logger.log(3, '# BUCA_STA: nRound nPeer nPref nTorrent ' + 'nBlockSendList nBlockRecvList ' + 'nConnectionsInSecureOver nConnectionsInBuddyCast ' + 'nTasteConnectionList nRandomConnectionList nUnconnectableConnectionList', showtime=False)
logger.log(3, '# BUCA_STA: Rd Pr Pf Tr Bs Br SO Co Ct Cr Cu', showtime=False)
return logger
if __name__ == '__main__':
create_logger('test.log')
get_logger().log(1, 'abc ' + str(['abc', 1, (2, 3)]))
get_logger().log(0, [1, 'a', {(2, 3): 'asfadf'}])
ol = OverlayLogger('overlay.log')
ol.log('CONN_TRY', '123.34.3.45', 34, 'asdfasdfasdfasdfsadf')
ol.log('CONN_ADD', '123.34.3.45', 36, 'asdfasdfasdfasdfsadf', 3)
ol.log('CONN_DEL', '123.34.3.45', 38, 'asdfasdfasdfasdfsadf', 'asbc')
ol.log('SEND_MSG', '123.34.3.45', 39, 'asdfasdfasdfasdfsadf', 2, 'BC', 'abadsfasdfasf')
ol.log('RECV_MSG', '123.34.3.45', 30, 'asdfasdfasdfasdfsadf', 3, 'BC', 'bbbbbbbbbbbbb')
ol.log('BUCA_STA', (1, 2, 3), (4, 5, 6), (7, 8), (9, 10, 11))
ol.log('BUCA_CON', ['asfd',
'bsdf',
'wevs',
'wwrewv'])
| apache-2.0 | -4,605,293,595,324,325,400 | 31.957317 | 253 | 0.537095 | false | 3.605737 | false | false | false |
wenjie2wang/touchpy | icd9.py | 1 | 1901 | #!/usr/bin/python3
import os
import re
import time
import touch_icd9 as touch
# read input data
inputFile = input("Please enter the input CSV file name: ")
# inputFile = "sample_icd9.csv"
try:
fhand0 = open(inputFile)
except:
print("Error: failed to open/find", inputFile)
exit()
# define output file name
basePath = os.path.basename(inputFile)
outputFile = re.sub(".csv", "_touch.csv", basePath)
# read in dictionaries
touch.dicts_icd9()
# read input and write output line by line
fout = open(outputFile, "w")
firstObs = 1
for line in fhand0:
tmpLine = line.strip().lower()
tmpLine = re.sub('"|[ ]', '', tmpLine)
oneLine = tmpLine.split(",")
if firstObs:
input_colNames = oneLine
output_colNames = [touch.dicts_icd9.colNames[i].upper()
for i in range(len(touch.dicts_icd9.colNames))]
fout.write(",".join(output_colNames) + '\n')
dx_idx = []
drg_idx = None
for i in range(len(input_colNames)):
if input_colNames[i].startswith("dx"):
dx_idx.append(i)
if input_colNames[i].startswith("drg"):
drg_idx = i
firstObs = 0
# quick check on dx_idx and drg_idx
if len(dx_idx) <= 1:
print("Error: failed to locate (secondary) diagnoses code",
"in the input file:", inputFile)
exit()
if drg_idx is None:
print("Error: failed to locate DRG code",
"in the input file:", inputFile)
exit()
else:
tmp = touch.icd9(oneLine, drg_idx, dx_idx, touch.dicts_icd9)
fout.write(",".join(list(map(str, tmp))) + "\n")
fout.close()
# output message
print("Comorbidity measures have been successfully generated.")
print("Output file:", outputFile)
print("The system and user CPU time: %.3f seconds." % time.process_time())
| gpl-3.0 | -8,298,090,924,444,540,000 | 26.955882 | 74 | 0.59495 | false | 3.400716 | false | false | false |
neilvallon/pyMap | MapGenerator.py | 1 | 4649 | import random, math
class MapGenerator:
def __init__(self, x, y, seed):
self.x = x
self.y = y
self.rand = random.Random(seed)
self.mapMatrix = self.emptyMap(x, y)
def emptyMap(self, x, y):
return [[0]*x for i in range(y)]
def randomCord(self):
x = int(self.rand.uniform(0, self.x-1)) % self.x#int(self.rand.normalvariate(self.x/2, self.x) % self.x)
y = int(self.rand.uniform(0, self.y-1)) % self.y#int(self.rand.normalvariate(self.y/2, self.y) % self.y)
return (x, y)
def randomPlayable(self):
point = self.randomCord()
while not self.isPlayArea(point):
point = self.randomCord();
return point
def neighborCord(self, point):
ptList = []
(x, y) = point
for a in range(x-1, x+2):
for b in range(y-1, y+2):
if(a>=0 and b>=0 and a < self.x and b < self.y):
ptList.append((a, b))
ptList.remove(point)
return ptList
def manhatanDist(self, pt1, pt2):
return abs(pt1[0] - pt2[0]) + abs(pt1[1] - pt2[1])
def moveableNeighbors(self, point):
(x, y) = point
ptList = [(x-1, y), (x+1, y), (x, y-1), (x, y+1)]
return filter(self.isPlayArea, ptList)
def makeRandom(self):
for i in range(int(self.x * self.y * 0.75)):
(x, y) = self.randomCord()
self.mapMatrix[y][x] ^= 1
return self
def smooth(self, r=1, factor=4):
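		# Cellular-automaton smoothing: a cell ends up filled when at least `factor`
		# cells in its (2r+1)x(2r+1) wrap-around neighbourhood (itself included) are filled.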
newMap = self.emptyMap(self.x, self.y)
for x in range(self.x):
for y in range(self.y):
i = 0
for a in range(x-r, x+r+1):
for b in range(y-r, y+r+1):
i += self.mapMatrix[b%self.y][a%self.x]
newMap[y][x] = i>=factor
self.mapMatrix = newMap
return self
def nearestPlayable(self, point):
frontier = [point]
explored = []
while frontier:
node = frontier.pop(0)
if self.isPlayArea(node):
return node
neighbors = self.neighborCord(node)
toAdd = filter(lambda x: not (x in frontier or x in explored), neighbors)
frontier.extend(toAdd)
explored.append(node)
def removeIslands(self):
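		# Flood-fill (4-neighbour moves) from one random playable cell and keep only
		# the reachable region; every disconnected island is dropped.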
newMap = self.emptyMap(self.x, self.y)
point = self.randomPlayable()
frontier = [point]
explored = []
while frontier:
node = frontier.pop()
neighbors = self.moveableNeighbors(node)
toAdd = filter(lambda x: not (x in frontier or x in explored), neighbors)
frontier.extend(toAdd)
explored.append(node)
newMap[node[1]][node[0]] = 1
self.mapMatrix = newMap
return self
def removeIslands_n(self):
newMap = self.emptyMap(self.x, self.y)
(start, goal) = self.spawns
frontier = [(start, self.manhatanDist(start, goal))]
explored = []
while frontier:
frontier.sort(key=lambda tup: tup[1], reverse=True)
(node, weight) = frontier.pop()
if node == goal:
self.searched = explored
return self
neighbors = self.moveableNeighbors(node)
toAdd = filter(lambda x: not (x in explored), neighbors)
toAddW = map(lambda x: (x, self.manhatanDist(x, goal)), toAdd)
frontier.extend(toAddW)
explored.extend(toAdd)
newMap[node[1]][node[0]] = 1
self.searched = explored
self.mapMatrix = newMap
self.findSpawns()
return self
def findSpawns(self):
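		# Pick two spawn points with a small 2-means style loop over the playable cells
		# (ignoring cells closer to the midpoint), then snap the centroids to playable cells.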
posibleSpawns = self.playableCords()
s1 = self.randomPlayable()
s2 = self.randomPlayable()
for i in range(20):
s1acum = (0, 0)
s2acum = (0, 0)
s1c = 0
s2c = 0
deadCord = ( (int( (s1[0]+s2[0]) / 2) ), (int( (s1[1]+s2[1]) / 2 )) )
for s in posibleSpawns:
s1Dis = self.manhatanDist(s, s1)
s2Dis = self.manhatanDist(s, s2)
deadDis = self.manhatanDist(s, deadCord)/2
if s1Dis < s2Dis:
if deadDis < s1Dis: continue
s1c += 1
s1acum = (s1acum[0] + s[0], s1acum[1] + s[1])
else:
if deadDis < s2Dis: continue
s2c += 1
s2acum = (s2acum[0] + s[0], s2acum[1] + s[1])
s1 = (s1acum[0] / s1c, s1acum[1] / s1c)
s2 = (s2acum[0] / s2c, s2acum[1] / s2c)
s1 = self.nearestPlayable( (int(s1[0]), int(s1[1])) )
s2 = self.nearestPlayable( (int(s2[0]), int(s2[1])) )
self.spawns = (s1, s2)
return self
def isPlayArea(self, point):
(x, y) = point
if(x<0 or y<0 or x >= self.x or y >= self.y):
return False
return self.mapMatrix[y][x] == 1
def isWall(self, point):
neighbors = self.neighborCord(point)
if self.isPlayArea(point):
return len(neighbors) != 8
allNeighborsPlayable = False
for n in neighbors:
allNeighborsPlayable |= self.isPlayArea(n)
return allNeighborsPlayable
def playableCords(self):
ptLst = []
for y in range(self.y):
for x in range(self.x):
if self.isPlayArea((x, y)):
ptLst.append((x, y))
return ptLst
def matrix(self):
return self.mapMatrix
def size(self):
return (self.x, self.y)
| mit | -2,824,779,610,451,991,000 | 23.994624 | 106 | 0.619273 | false | 2.465005 | false | false | false |
mbertrand/OWSLib | owslib/wms.py | 3 | 27171 | # -*- coding: iso-8859-15 -*-
# =============================================================================
# Copyright (c) 2004, 2006 Sean C. Gillies
# Copyright (c) 2005 Nuxeo SARL <http://nuxeo.com>
#
# Authors : Sean Gillies <[email protected]>
# Julien Anguenot <[email protected]>
#
# Contact email: [email protected]
# =============================================================================
"""
API for Web Map Service (WMS) methods and metadata.
Currently supports only version 1.1.1 of the WMS protocol.
"""
from __future__ import (absolute_import, division, print_function)
import cgi
try: # Python 3
from urllib.parse import urlencode
except ImportError: # Python 2
from urllib import urlencode
import warnings
import six
from .etree import etree
from .util import openURL, testXMLValue, extract_xml_list, xmltag_split, OrderedDict
from .fgdc import Metadata
from .iso import MD_Metadata
class ServiceException(Exception):
"""WMS ServiceException
Attributes:
message -- short error message
xml -- full xml error message from server
"""
def __init__(self, message, xml):
self.message = message
self.xml = xml
def __str__(self):
return repr(self.message)
class CapabilitiesError(Exception):
pass
class WebMapService(object):
"""Abstraction for OGC Web Map Service (WMS).
Implements IWebMapService.
"""
def __getitem__(self,name):
''' check contents dictionary to allow dict like access to service layers'''
if name in self.__getattribute__('contents'):
return self.__getattribute__('contents')[name]
else:
raise KeyError("No content named %s" % name)
def __init__(self, url, version='1.1.1', xml=None, username=None, password=None, parse_remote_metadata=False, timeout=30):
"""Initialize."""
self.url = url
self.username = username
self.password = password
self.version = version
self.timeout = timeout
self._capabilities = None
# Authentication handled by Reader
reader = WMSCapabilitiesReader(self.version, url=self.url, un=self.username, pw=self.password)
if xml: # read from stored xml
self._capabilities = reader.readString(xml)
else: # read from server
self._capabilities = reader.read(self.url, timeout=self.timeout)
# avoid building capabilities metadata if the response is a ServiceExceptionReport
se = self._capabilities.find('ServiceException')
if se is not None:
err_message = str(se.text).strip()
raise ServiceException(err_message, xml)
# build metadata objects
self._buildMetadata(parse_remote_metadata)
def _getcapproperty(self):
if not self._capabilities:
reader = WMSCapabilitiesReader(
self.version, url=self.url, un=self.username, pw=self.password
)
self._capabilities = ServiceMetadata(reader.read(self.url))
return self._capabilities
def _buildMetadata(self, parse_remote_metadata=False):
''' set up capabilities metadata objects '''
#serviceIdentification metadata
serviceelem=self._capabilities.find('Service')
self.identification=ServiceIdentification(serviceelem, self.version)
#serviceProvider metadata
self.provider=ServiceProvider(serviceelem)
#serviceOperations metadata
self.operations=[]
for elem in self._capabilities.find('Capability/Request')[:]:
self.operations.append(OperationMetadata(elem))
#serviceContents metadata: our assumption is that services use a top-level
#layer as a metadata organizer, nothing more.
self.contents = OrderedDict()
caps = self._capabilities.find('Capability')
#recursively gather content metadata for all layer elements.
#To the WebMapService.contents store only metadata of named layers.
def gather_layers(parent_elem, parent_metadata):
layers = []
for index, elem in enumerate(parent_elem.findall('Layer')):
cm = ContentMetadata(elem, parent=parent_metadata, index=index+1, parse_remote_metadata=parse_remote_metadata)
if cm.id:
if cm.id in self.contents:
warnings.warn('Content metadata for layer "%s" already exists. Using child layer' % cm.id)
layers.append(cm)
self.contents[cm.id] = cm
cm.children = gather_layers(elem, cm)
return layers
gather_layers(caps, None)
#exceptions
self.exceptions = [f.text for f \
in self._capabilities.findall('Capability/Exception/Format')]
def items(self):
'''supports dict-like items() access'''
items=[]
for item in self.contents:
items.append((item,self.contents[item]))
return items
def getcapabilities(self):
"""Request and return capabilities document from the WMS as a
file-like object.
NOTE: this is effectively redundant now"""
reader = WMSCapabilitiesReader(
self.version, url=self.url, un=self.username, pw=self.password
)
u = self._open(reader.capabilities_url(self.url))
# check for service exceptions, and return
if u.info()['Content-Type'] == 'application/vnd.ogc.se_xml':
se_xml = u.read()
se_tree = etree.fromstring(se_xml)
err_message = str(se_tree.find('ServiceException').text).strip()
raise ServiceException(err_message, se_xml)
return u
def __build_getmap_request(self, layers=None, styles=None, srs=None, bbox=None,
format=None, size=None, time=None, transparent=False,
bgcolor=None, exceptions=None, **kwargs):
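        """Assemble the GetMap request parameter dict shared by getmap() and
        getfeatureinfo()."""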
request = {'version': self.version, 'request': 'GetMap'}
# check layers and styles
assert len(layers) > 0
request['layers'] = ','.join(layers)
if styles:
assert len(styles) == len(layers)
request['styles'] = ','.join(styles)
else:
request['styles'] = ''
# size
request['width'] = str(size[0])
request['height'] = str(size[1])
request['srs'] = str(srs)
request['bbox'] = ','.join([repr(x) for x in bbox])
request['format'] = str(format)
request['transparent'] = str(transparent).upper()
request['bgcolor'] = '0x' + bgcolor[1:7]
request['exceptions'] = str(exceptions)
if time is not None:
request['time'] = str(time)
if kwargs:
for kw in kwargs:
request[kw]=kwargs[kw]
return request
def getmap(self, layers=None, styles=None, srs=None, bbox=None,
format=None, size=None, time=None, transparent=False,
bgcolor='#FFFFFF',
exceptions='application/vnd.ogc.se_xml',
method='Get',
timeout=None,
**kwargs
):
"""Request and return an image from the WMS as a file-like object.
Parameters
----------
layers : list
List of content layer names.
styles : list
Optional list of named styles, must be the same length as the
layers list.
srs : string
A spatial reference system identifier.
bbox : tuple
(left, bottom, right, top) in srs units.
format : string
Output image format such as 'image/jpeg'.
size : tuple
(width, height) in pixels.
transparent : bool
Optional. Transparent background if True.
bgcolor : string
Optional. Image background color.
method : string
Optional. HTTP DCP method name: Get or Post.
**kwargs : extra arguments
anything else e.g. vendor specific parameters
Example
-------
>>> wms = WebMapService('http://giswebservices.massgis.state.ma.us/geoserver/wms', version='1.1.1')
>>> img = wms.getmap(layers=['massgis:GISDATA.SHORELINES_ARC'],\
styles=[''],\
srs='EPSG:4326',\
bbox=(-70.8, 42, -70, 42.8),\
size=(300, 300),\
format='image/jpeg',\
transparent=True)
>>> out = open('example.jpg', 'wb')
>>> bytes_written = out.write(img.read())
>>> out.close()
"""
try:
base_url = next((m.get('url') for m in self.getOperationByName('GetMap').methods if m.get('type').lower() == method.lower()))
except StopIteration:
base_url = self.url
request = self.__build_getmap_request(layers=layers, styles=styles, srs=srs, bbox=bbox,
format=format, size=size, time=time, transparent=transparent,
bgcolor=bgcolor, exceptions=exceptions, kwargs=kwargs)
data = urlencode(request)
u = openURL(base_url, data, method, username=self.username, password=self.password, timeout=timeout or self.timeout)
# check for service exceptions, and return
if u.info()['Content-Type'] == 'application/vnd.ogc.se_xml':
se_xml = u.read()
se_tree = etree.fromstring(se_xml)
err_message = six.text_type(se_tree.find('ServiceException').text).strip()
raise ServiceException(err_message, se_xml)
return u
def getfeatureinfo(self, layers=None, styles=None, srs=None, bbox=None,
format=None, size=None, time=None, transparent=False,
bgcolor='#FFFFFF',
exceptions='application/vnd.ogc.se_xml',
query_layers = None, xy=None, info_format=None, feature_count=20,
method='Get',
timeout=None,
**kwargs
):
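        """Request feature information from the WMS and return it as a
        file-like object. Builds a GetMap-style request and extends it with
        the GetFeatureInfo specific parameters (query_layers, x/y pixel
        coordinates, info_format and feature_count), raising ServiceException
        on service error reports."""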
try:
base_url = next((m.get('url') for m in self.getOperationByName('GetFeatureInfo').methods if m.get('type').lower() == method.lower()))
except StopIteration:
base_url = self.url
# GetMap-Request
request = self.__build_getmap_request(layers=layers, styles=styles, srs=srs, bbox=bbox,
format=format, size=size, time=time, transparent=transparent,
bgcolor=bgcolor, exceptions=exceptions, kwargs=kwargs)
# extend to GetFeatureInfo-Request
request['request'] = 'GetFeatureInfo'
if not query_layers:
__str_query_layers = ','.join(layers)
else:
__str_query_layers = ','.join(query_layers)
request['query_layers'] = __str_query_layers
request['x'] = str(xy[0])
request['y'] = str(xy[1])
request['info_format'] = info_format
request['feature_count'] = str(feature_count)
data = urlencode(request)
u = openURL(base_url, data, method, username=self.username, password=self.password, timeout=timeout or self.timeout)
# check for service exceptions, and return
if u.info()['Content-Type'] == 'application/vnd.ogc.se_xml':
se_xml = u.read()
se_tree = etree.fromstring(se_xml)
err_message = six.text_type(se_tree.find('ServiceException').text).strip()
raise ServiceException(err_message, se_xml)
return u
def getServiceXML(self):
xml = None
if self._capabilities is not None:
xml = etree.tostring(self._capabilities)
return xml
def getOperationByName(self, name):
"""Return a named content item."""
for item in self.operations:
if item.name == name:
return item
raise KeyError("No operation named %s" % name)
class ServiceIdentification(object):
''' Implements IServiceIdentificationMetadata '''
def __init__(self, infoset, version):
self._root=infoset
self.type = testXMLValue(self._root.find('Name'))
self.version = version
self.title = testXMLValue(self._root.find('Title'))
self.abstract = testXMLValue(self._root.find('Abstract'))
self.keywords = extract_xml_list(self._root.findall('KeywordList/Keyword'))
self.accessconstraints = testXMLValue(self._root.find('AccessConstraints'))
self.fees = testXMLValue(self._root.find('Fees'))
class ServiceProvider(object):
    ''' Implements IServiceProviderMetadata '''
def __init__(self, infoset):
self._root=infoset
name=self._root.find('ContactInformation/ContactPersonPrimary/ContactOrganization')
if name is not None:
self.name=name.text
else:
self.name=None
self.url=self._root.find('OnlineResource').attrib.get('{http://www.w3.org/1999/xlink}href', '')
#contact metadata
contact = self._root.find('ContactInformation')
## sometimes there is a contact block that is empty, so make
## sure there are children to parse
if contact is not None and contact[:] != []:
self.contact = ContactMetadata(contact)
else:
self.contact = None
def getContentByName(self, name):
"""Return a named content item."""
for item in self.contents:
if item.name == name:
return item
raise KeyError("No content named %s" % name)
def getOperationByName(self, name):
"""Return a named content item."""
for item in self.operations:
if item.name == name:
return item
raise KeyError("No operation named %s" % name)
class ContentMetadata:
"""
Abstraction for WMS layer metadata.
Implements IContentMetadata.
"""
def __init__(self, elem, parent=None, children=None, index=0, parse_remote_metadata=False, timeout=30):
if elem.tag != 'Layer':
raise ValueError('%s should be a Layer' % (elem,))
self.parent = parent
if parent:
self.index = "%s.%d" % (parent.index, index)
else:
self.index = str(index)
self._children = children
self.id = self.name = testXMLValue(elem.find('Name'))
# layer attributes
self.queryable = int(elem.attrib.get('queryable', 0))
self.cascaded = int(elem.attrib.get('cascaded', 0))
self.opaque = int(elem.attrib.get('opaque', 0))
self.noSubsets = int(elem.attrib.get('noSubsets', 0))
self.fixedWidth = int(elem.attrib.get('fixedWidth', 0))
self.fixedHeight = int(elem.attrib.get('fixedHeight', 0))
# title is mandatory property
self.title = None
title = testXMLValue(elem.find('Title'))
if title is not None:
self.title = title.strip()
self.abstract = testXMLValue(elem.find('Abstract'))
# bboxes
b = elem.find('BoundingBox')
self.boundingBox = None
if b is not None:
try: #sometimes the SRS attribute is (wrongly) not provided
srs=b.attrib['SRS']
except KeyError:
srs=None
self.boundingBox = (
float(b.attrib['minx']),
float(b.attrib['miny']),
float(b.attrib['maxx']),
float(b.attrib['maxy']),
srs,
)
elif self.parent:
if hasattr(self.parent, 'boundingBox'):
self.boundingBox = self.parent.boundingBox
# ScaleHint
sh = elem.find('ScaleHint')
self.scaleHint = None
if sh is not None:
if 'min' in sh.attrib and 'max' in sh.attrib:
self.scaleHint = {'min': sh.attrib['min'], 'max': sh.attrib['max']}
attribution = elem.find('Attribution')
if attribution is not None:
self.attribution = dict()
title = attribution.find('Title')
url = attribution.find('OnlineResource')
logo = attribution.find('LogoURL')
if title is not None:
self.attribution['title'] = title.text
if url is not None:
self.attribution['url'] = url.attrib['{http://www.w3.org/1999/xlink}href']
if logo is not None:
self.attribution['logo_size'] = (int(logo.attrib['width']), int(logo.attrib['height']))
self.attribution['logo_url'] = logo.find('OnlineResource').attrib['{http://www.w3.org/1999/xlink}href']
b = elem.find('LatLonBoundingBox')
if b is not None:
self.boundingBoxWGS84 = (
float(b.attrib['minx']),
float(b.attrib['miny']),
float(b.attrib['maxx']),
float(b.attrib['maxy']),
)
elif self.parent:
self.boundingBoxWGS84 = self.parent.boundingBoxWGS84
else:
self.boundingBoxWGS84 = None
#SRS options
self.crsOptions = []
#Copy any parent SRS options (they are inheritable properties)
if self.parent:
self.crsOptions = list(self.parent.crsOptions)
#Look for SRS option attached to this layer
if elem.find('SRS') is not None:
## some servers found in the wild use a single SRS
## tag containing a whitespace separated list of SRIDs
## instead of several SRS tags. hence the inner loop
for srslist in [x.text for x in elem.findall('SRS')]:
if srslist:
for srs in srslist.split():
self.crsOptions.append(srs)
#Get rid of duplicate entries
self.crsOptions = list(set(self.crsOptions))
#Set self.crsOptions to None if the layer (and parents) had no SRS options
if len(self.crsOptions) == 0:
#raise ValueError('%s no SRS available!?' % (elem,))
#Comment by D Lowe.
#Do not raise ValueError as it is possible that a layer is purely a parent layer and does not have SRS specified. Instead set crsOptions to None
# Comment by Jachym:
# Do not set it to None, but to [], which will make the code
# work further. Fixed by anthonybaxter
self.crsOptions=[]
#Styles
self.styles = {}
#Copy any parent styles (they are inheritable properties)
if self.parent:
self.styles = self.parent.styles.copy()
#Get the styles for this layer (items with the same name are replaced)
for s in elem.findall('Style'):
name = s.find('Name')
title = s.find('Title')
if name is None or title is None:
raise ValueError('%s missing name or title' % (s,))
style = { 'title' : title.text }
# legend url
legend = s.find('LegendURL/OnlineResource')
if legend is not None:
style['legend'] = legend.attrib['{http://www.w3.org/1999/xlink}href']
self.styles[name.text] = style
# keywords
self.keywords = [f.text for f in elem.findall('KeywordList/Keyword')]
# timepositions - times for which data is available.
self.timepositions=None
self.defaulttimeposition = None
for extent in elem.findall('Extent'):
if extent.attrib.get("name").lower() =='time':
if extent.text:
self.timepositions=extent.text.split(',')
self.defaulttimeposition = extent.attrib.get("default")
break
# Elevations - available vertical levels
self.elevations=None
for extent in elem.findall('Extent'):
if extent.attrib.get("name").lower() =='elevation':
if extent.text:
self.elevations=extent.text.split(',')
break
# MetadataURLs
self.metadataUrls = []
for m in elem.findall('MetadataURL'):
metadataUrl = {
'type': testXMLValue(m.attrib['type'], attrib=True),
'format': testXMLValue(m.find('Format')),
'url': testXMLValue(m.find('OnlineResource').attrib['{http://www.w3.org/1999/xlink}href'], attrib=True)
}
if metadataUrl['url'] is not None and parse_remote_metadata: # download URL
try:
content = openURL(metadataUrl['url'], timeout=timeout)
doc = etree.parse(content)
if metadataUrl['type'] is not None:
if metadataUrl['type'] == 'FGDC':
metadataUrl['metadata'] = Metadata(doc)
if metadataUrl['type'] == 'TC211':
metadataUrl['metadata'] = MD_Metadata(doc)
except Exception:
metadataUrl['metadata'] = None
self.metadataUrls.append(metadataUrl)
# DataURLs
self.dataUrls = []
for m in elem.findall('DataURL'):
dataUrl = {
'format': m.find('Format').text.strip(),
'url': m.find('OnlineResource').attrib['{http://www.w3.org/1999/xlink}href']
}
self.dataUrls.append(dataUrl)
self.layers = []
for child in elem.findall('Layer'):
self.layers.append(ContentMetadata(child, self))
@property
def children(self):
return self._children
@children.setter
def children(self, value):
if self._children is None:
self._children = value
else:
self._children.extend(value)
# If layer is a group and one of its children is queryable, the layer must be queryable.
if self._children and self.queryable == 0:
for child in self._children:
if child.queryable:
self.queryable = child.queryable
break
def __str__(self):
return 'Layer Name: %s Title: %s' % (self.name, self.title)
class OperationMetadata:
"""Abstraction for WMS OperationMetadata.
Implements IOperationMetadata.
"""
def __init__(self, elem):
"""."""
self.name = xmltag_split(elem.tag)
# formatOptions
self.formatOptions = [f.text for f in elem.findall('Format')]
self.methods = []
for verb in elem.findall('DCPType/HTTP/*'):
url = verb.find('OnlineResource').attrib['{http://www.w3.org/1999/xlink}href']
self.methods.append({'type' : xmltag_split(verb.tag), 'url': url})
class ContactMetadata:
"""Abstraction for contact details advertised in GetCapabilities.
"""
def __init__(self, elem):
name = elem.find('ContactPersonPrimary/ContactPerson')
if name is not None:
self.name=name.text
else:
self.name=None
email = elem.find('ContactElectronicMailAddress')
if email is not None:
self.email=email.text
else:
self.email=None
self.address = self.city = self.region = None
self.postcode = self.country = None
address = elem.find('ContactAddress')
if address is not None:
street = address.find('Address')
if street is not None: self.address = street.text
city = address.find('City')
if city is not None: self.city = city.text
region = address.find('StateOrProvince')
if region is not None: self.region = region.text
postcode = address.find('PostCode')
if postcode is not None: self.postcode = postcode.text
country = address.find('Country')
if country is not None: self.country = country.text
organization = elem.find('ContactPersonPrimary/ContactOrganization')
if organization is not None: self.organization = organization.text
else:self.organization = None
position = elem.find('ContactPosition')
if position is not None: self.position = position.text
else: self.position = None
class WMSCapabilitiesReader:
"""Read and parse capabilities document into a lxml.etree infoset
"""
def __init__(self, version='1.1.1', url=None, un=None, pw=None):
"""Initialize"""
self.version = version
self._infoset = None
self.url = url
self.username = un
self.password = pw
#if self.username and self.password:
## Provide login information in order to use the WMS server
## Create an OpenerDirector with support for Basic HTTP
## Authentication...
#passman = HTTPPasswordMgrWithDefaultRealm()
#passman.add_password(None, self.url, self.username, self.password)
#auth_handler = HTTPBasicAuthHandler(passman)
#opener = build_opener(auth_handler)
#self._open = opener.open
def capabilities_url(self, service_url):
"""Return a capabilities url
"""
qs = []
if service_url.find('?') != -1:
qs = cgi.parse_qsl(service_url.split('?')[1])
params = [x[0] for x in qs]
if 'service' not in params:
qs.append(('service', 'WMS'))
if 'request' not in params:
qs.append(('request', 'GetCapabilities'))
if 'version' not in params:
qs.append(('version', self.version))
urlqs = urlencode(tuple(qs))
return service_url.split('?')[0] + '?' + urlqs
def read(self, service_url, timeout=30):
"""Get and parse a WMS capabilities document, returning an
elementtree instance
service_url is the base url, to which is appended the service,
version, and request parameters
"""
getcaprequest = self.capabilities_url(service_url)
#now split it up again to use the generic openURL function...
spliturl=getcaprequest.split('?')
u = openURL(spliturl[0], spliturl[1], method='Get', username=self.username, password=self.password, timeout=timeout)
return etree.fromstring(u.read())
def readString(self, st):
"""Parse a WMS capabilities document, returning an elementtree instance
string should be an XML capabilities document
"""
if not isinstance(st, str) and not isinstance(st, bytes):
raise ValueError("String must be of type string or bytes, not %s" % type(st))
return etree.fromstring(st)
| bsd-3-clause | 1,327,718,469,035,729,000 | 37.001399 | 156 | 0.567186 | false | 4.322463 | true | false | false |
cfrantz/bubbles | bubbles/xmlimpl.py | 2 | 1784 | #####################################################
#
# copyright.txt
#
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Hewlett-Packard and the Hewlett-Packard logo are trademarks of
# Hewlett-Packard Development Company, L.P. in the U.S. and/or other countries.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Author:
# Chris Frantz
#
# Description:
# Import ElementTree. Pick lxml as the preferred version
#
#####################################################
#try:
# import lxml.etree as ET
#except ImportError:
# import xml.etree.ElementTree as ET
import lxml.etree as ET
class xmlstr:
'''
A class that defers ET.tostring until the instance is
evaluated in string context.
Use this to print xml etrees in debug statements:
xml = ET.parse(...)
log.debug("The xml was: %s", xmlstr(xml))
'''
def __init__(self, xml):
self.xml = xml
def __str__(self):
return ET.tostring(self.xml, pretty_print=True)
__all__ = [ 'ET', 'xmlstr']
# VIM options (place at end of file)
# vim: ts=4 sts=4 sw=4 expandtab:
| lgpl-2.1 | 452,225,795,640,824,770 | 30.857143 | 80 | 0.662556 | false | 3.895197 | false | false | false |
koss822/misc | Linux/MySettings/myvim/vim/bundle/jedi-vim/pythonx/parso/test/normalizer_issue_files/E26.py | 3 | 1509 | #: E261:4
pass # an inline comment
#: E261:4
pass# an inline comment
# Okay
pass # an inline comment
pass # an inline comment
#: E262:11
x = x + 1 #Increment x
#: E262:11
x = x + 1 # Increment x
#: E262:11
x = y + 1 #: Increment x
#: E265
#Block comment
a = 1
#: E265+1
m = 42
#! This is important
mx = 42 - 42
# Comment without anything is not an issue.
#
# However if there are comments at the end without anything it obviously
# doesn't make too much sense.
#: E262:9
foo = 1 #
#: E266+2:4 E266+5:4
def how_it_feel(r):
### This is a variable ###
a = 42
### Of course it is unused
return
#: E266 E266+1
##if DEBUG:
## logging.error()
#: E266
#########################################
# Not at the beginning of a file
#: E265
#!/usr/bin/env python
# Okay
pass # an inline comment
x = x + 1 # Increment x
y = y + 1 #: Increment x
# Block comment
a = 1
# Block comment1
# Block comment2
aaa = 1
# example of docstring (not parsed)
def oof():
"""
#foo not parsed
"""
###########################################################################
# A SEPARATOR #
###########################################################################
# ####################################################################### #
# ########################## another separator ########################## #
# ####################################################################### #
| gpl-3.0 | -5,574,635,903,315,972,000 | 18.346154 | 79 | 0.413519 | false | 3.899225 | false | false | false |
5nizza/party-elli | parsing/acacia_parser_desc.py | 1 | 3091 | import os
from helpers.python_ext import lmap
from parsing.acacia_lexer_desc import *
class Assumption:
def __init__(self, data):
self.data = data
class Guarantee:
def __init__(self, data):
self.data = data
precedence = (
('left','OR'),
('left','IMPLIES','EQUIV'),
('left','AND'),
('left', 'TEMPORAL_BINARY'),
('left', 'NEG'), # left - right should not matter..
('left', 'TEMPORAL_UNARY'), # left - right should not matter..
('nonassoc','EQUALS')
)
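# Each p_* handler below carries its BNF production in its docstring; PLY's yacc builds the parser from them.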
def p_start(p):
"""start : empty
| units """
if p[1] is not None:
p[0] = p[1]
else:
p[0] = []
def p_empty(p):
r"""empty :"""
pass
def p_units(p):
"""units : unit
| units unit"""
if len(p) == 2:
p[0] = [p[1]]
else:
p[0] = p[1] + [p[2]]
#TODO: figure out why conflict arises
#def p_unit_without_header(p):
# """unit : unit_data """
# p[0] = ('None', p[1])
def p_unit_with_header(p):
"""unit : unit_header unit_data """
p[0] = (p[1], p[2])
def p_unit_header(p):
"""unit_header : LBRACKET SPEC_UNIT NAME RBRACKET"""
p[0] = p[3]
def p_unit_data(p):
"""unit_data : empty
| expressions """
if p[1] is None:
p[0] = ([],[])
else:
assumptions = lmap(lambda e: e.data, filter(lambda e: isinstance(e, Assumption), p[1]))
guarantees = lmap(lambda e: e.data, filter(lambda e: isinstance(e, Guarantee), p[1]))
p[0] = (assumptions, guarantees)
def p_signal_name(p):
""" signal_name : NAME
"""
p[0] = Signal(p[1], )
def p_unit_data_expressions(p):
"""expressions : expression SEP
| expressions expression SEP"""
if len(p) == 3:
p[0] = [p[1]]
else:
p[0] = p[1] + [p[2]]
def p_unit_data_expression_assumption(p):
""" expression : ASSUME property
"""
p[0] = Assumption(p[2])
def p_unit_data_expression_guarantee(p):
""" expression : property """
p[0] = Guarantee(p[1])
def p_unit_data_property_bool(p):
"""property : BOOL"""
p[0] = p[1]
def p_unit_data_property_binary_operation(p):
"""property : signal_name EQUALS NUMBER
| property AND property
| property OR property
| property IMPLIES property
| property EQUIV property
| property TEMPORAL_BINARY property"""
assert p[2] in BIN_OPS
p[0] = BinOp(p[2], p[1], p[3])
def p_unit_data_property_unary(p):
"""property : TEMPORAL_UNARY property
| NEG property """
p[0] = UnaryOp(p[1], p[2])
def p_unit_data_property_grouping(p):
"""property : LPAREN property RPAREN"""
p[0] = p[2]
def p_error(p):
if p:
print("----> Syntax error at '%s'" % p.value)
print("lineno: %d" % p.lineno)
else:
print('----> Syntax error, t is None')
assert 0
from third_party.ply import yacc
acacia_parser = yacc.yacc(debug=0, outputdir=os.path.dirname(os.path.realpath(__file__)))
| mit | -961,444,308,778,483,700 | 20.921986 | 95 | 0.526043 | false | 3.060396 | false | false | false |
alfa-addon/addon | plugin.video.alfa/channels/doramedplay.py | 1 | 11294 | # -*- coding: utf-8 -*-
# -*- Channel DoramedPlay -*-
# -*- BASED ON: Channel DramasJC -*-
# -*- Created for Alfa-addon -*-
import requests
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
import re
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channels import autoplay
from channels import filtertools
host = 'https://doramedplay.com/'
IDIOMAS = {'VOSE': 'VOSE', 'LAT':'LAT'}
list_language = list(IDIOMAS.values())
list_quality = []
list_servers = ['okru', 'mailru', 'openload']
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(Item(channel=item.channel, title="Doramas", action="list_all", url=host+'tvshows/',
type="tvshows", thumbnail=get_thumb('tvshows', auto=True)))
itemlist.append(Item(channel=item.channel, title="Películas", action="list_all", url=host+'movies/',
type='movies', thumbnail=get_thumb('movies', auto=True)))
# itemlist.append(Item(channel=item.channel, title="Generos", action="section",
# url=host + 'catalogue', thumbnail=get_thumb('genres', auto=True)))
# itemlist.append(Item(channel=item.channel, title="Por Años", action="section", url=host + 'catalogue',
# thumbnail=get_thumb('year', auto=True)))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host+'?s=',
thumbnail=get_thumb('search', auto=True)))
autoplay.show_option(item.channel, itemlist)
return itemlist
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'\n|\r|\t| |<br>|\s{2,}', "", data)
return data
def list_all(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<article id="post-\d+".*?<img (?:data-)?src="([^"]+").*?<div class="rating">([^<]+)?<.*?'
patron += '<h3><a href="([^"]+)".*?>([^<]+)<.*?<span>.*?, (\d{4})<.*?<div class="texto">([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumbnail, scrapedrating, scrapedurl, scrapedtitle, scrapedyear, scrapedplot in matches:
url = scrapedurl
year = scrapedyear
filtro_tmdb = list({"first_air_date": year}.items())
contentname = scrapedtitle
title = '%s (%s) [%s]'%(contentname, scrapedrating, year)
thumbnail = scrapedthumbnail
new_item = Item(channel=item.channel,
title=title,
contentSerieName=contentname,
plot=scrapedplot,
url=url,
thumbnail=thumbnail,
infoLabels={'year':year, 'filtro': filtro_tmdb}
)
if item.type == 'tvshows':
new_item.action = 'seasons'
else:
new_item.action = 'findvideos'
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, True)
    # Pagination
url_next_page = scrapertools.find_single_match(data,"<span class=\"current\">.?<\/span>.*?<a href='([^']+)'")
if url_next_page:
itemlist.append(Item(channel=item.channel, type=item.type, title="Siguiente >>", url=url_next_page, action='list_all'))
return itemlist
def seasons(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = "<div id='seasons'>.*?>Temporada([^<]+)<i>([^<]+).*?<\/i>"
matches = re.compile(patron, re.DOTALL).findall(data)
logger.info("hola mundo")
for temporada, fecha in matches:
title = 'Temporada %s (%s)' % (temporada.strip(), fecha)
contentSeasonNumber = temporada.strip()
item.infoLabels['season'] = contentSeasonNumber
itemlist.append(item.clone(action='episodesxseason',
title=title,
contentSeasonNumber=contentSeasonNumber
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios':
itemlist.append(Item(channel=item.channel,
title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
url=item.url,
action="add_serie_to_library",
extra="episodios",
contentSerieName=item.contentSerieName,
contentSeasonNumber=contentSeasonNumber
))
return itemlist
def episodios(item):
logger.info()
itemlist = []
templist = seasons(item)
for tempitem in templist:
itemlist += episodesxseason(tempitem)
return itemlist
def episodesxseason(item):
logger.info()
itemlist = []
season = item.contentSeasonNumber
data = get_source(item.url)
data = scrapertools.find_single_match(data, ">Temporada %s .*?<ul class='episodios'>(.*?)<\/ul>" % season)
patron = "<a href='([^']+)'>([^<]+)<\/a>.*?<span[^>]+>([^<]+)<\/span>"
matches = re.compile(patron, re.DOTALL).findall(data)
ep = 1
for scrapedurl, scrapedtitle, fecha in matches:
epi = str(ep)
title = season + 'x%s - Episodio %s (%s)' % (epi, epi, fecha)
url = scrapedurl
contentEpisodeNumber = epi
item.infoLabels['episode'] = contentEpisodeNumber
itemlist.append(item.clone(action='findvideos',
title=title,
url=url,
contentEpisodeNumber=contentEpisodeNumber,
))
ep += 1
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = get_source(item.url)
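    # Ask the site's admin-ajax endpoint (doo_player_ajax) for the embed URL of the first player.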
post_id = scrapertools.find_single_match(data, "'https:\/\/doramedplay\.com\/\?p=(\d+)'")
body = "action=doo_player_ajax&post=%s&nume=1&type=tv" % post_id
source_headers = dict()
source_headers["Content-Type"] = "application/x-www-form-urlencoded; charset=UTF-8"
source_headers["X-Requested-With"] = "XMLHttpRequest"
source_headers["Referer"] = host
source_result = httptools.downloadpage(host + "wp-admin/admin-ajax.php", post=body, headers=source_headers)
# logger.info(source_result.json)
if source_result.code == 200:
source_json = source_result.json
if source_json['embed_url']:
source_url = source_json['embed_url']
logger.info("source: " + source_url)
DIRECT_HOST = "v.pandrama.com"
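            # pandrama-hosted embeds are resolved here to their HLS variants;
            # any other host is handed to the generic server resolvers below.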
if DIRECT_HOST in source_url:
# logger.info(source_url)
directo_result = httptools.downloadpage(source_url, headers={"Referer": item.url})
directo_result = re.sub(r'\n|\r|\t| |<br>|\s{2,}', "", directo_result.data)
metadata_url = scrapertools.find_single_match(directo_result, 'videoSources\":\[{\"file\":\"([^"]+)\"')
metadata_url = re.sub(r'\\', "", metadata_url)
metadata_url = re.sub(r'/1/', "/" + DIRECT_HOST + "/", metadata_url)
metadata_url += "?s=1&d="
# logger.info(metadata_url)
# logger.info('metadata_url: ' + re.sub(r'\\', "", metadata_url))
# get metadata_url
logger.info(source_url)
# metadata_headers = dict()
# metadata_headers["Referer"] = source_url
# metadata = httptools.downloadpage(metadata_url, headers=metadata_headers)
# metadata = re.sub(r'\n|\r|\t| |<br>|\s{2,}', "", metadata.data)
metadata = requests.get(metadata_url, headers={"Referer": source_url}).content
# metadata = re.sub(r'\n|\r|\t| |<br>|\s{2,}', "", metadata)
# Get URLs
patron = "RESOLUTION=(.*?)http([^#]+)"
video_matches = re.compile(patron, re.DOTALL).findall(metadata)
for video_resolution, video_url in video_matches:
final_url = "http" + video_url.strip()
url_video = final_url + "|referer="+ final_url
logger.info(final_url)
itemlist.append(Item(channel=item.channel, title='%s (' + video_resolution.strip() + ')', url=url_video, action='play'))
# https://1/cdn/hls/9be120188fe6b91e70db037b674c686d/master.txt
else:
itemlist.append(Item(channel=item.channel, title='%s', url=source_json['embed_url'], action='play'))
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
    # Required for FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
    # Required for AutoPlay
autoplay.start(itemlist, item)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
return itemlist
def list_search(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<div class="result-item">.*?<div class="thumbnail.*?<a href="([^"]+)">'
patron += '<img src="([^"]+)".*?<span class="([^"]+)".*?<div class="title">'
patron += '<a href="[^"]+">([^<]+)<.*?<span class="year">([^<]+)<.*?<p>([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtype, scrapedtitle, scrapedyear, scrapedplot in matches:
# logger.info(scrapedurl)
url = scrapedurl
year = scrapedyear
contentname = scrapedtitle
title = '%s (%s) (%s)'%(contentname, scrapedtype, year)
thumbnail = scrapedthumbnail
new_item = Item(channel=item.channel,
title=title,
url=url,
thumbnail=thumbnail,
plot=scrapedplot,
type=scrapedtype,
action='list_all',
infoLabels={'year':year}
)
new_item.contentSerieName = contentname
if new_item.type == 'tvshows':
new_item.action = 'seasons'
else:
new_item.action = 'findvideos'
itemlist.append(new_item)
# tmdb.set_infoLabels_itemlist(itemlist, True)
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
try:
return list_search(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
| gpl-3.0 | -3,013,731,974,807,321,600 | 37.52901 | 140 | 0.564975 | false | 3.752992 | false | false | false |
networks-lab/metaknowledge | metaknowledge/tests/test_citation.py | 2 | 2640 | #Written by Reid McIlroy-Young for Dr. John McLevey, University of Waterloo 2015
import unittest
import metaknowledge
class TestCitation(unittest.TestCase):
def setUp(self):
self.Cite = metaknowledge.Citation("John D., 2015, TOPICS IN COGNITIVE SCIENCE, V1, P1, DOI 0.1063/1.1695064")
def test_citation_author(self):
self.assertEqual(self.Cite.author, "John D")
def test_citation_year(self):
self.assertEqual(self.Cite.year, 2015)
def test_citation_journal(self):
self.assertEqual(self.Cite.journal, "TOPICS IN COGNITIVE SCIENCE")
def test_citation_v(self):
self.assertEqual(self.Cite.V, "V1")
def test_citation_p(self):
self.assertEqual(self.Cite.P, "P1")
def test_citation_DOI(self):
self.assertEqual(self.Cite.DOI, "0.1063/1.1695064")
def test_citation_id(self):
self.assertEqual(self.Cite.ID(), "John D, 2015, TOPICS IN COGNITIVE SCIENCE")
def test_citation_str(self):
self.assertEqual(str(self.Cite), "John D., 2015, TOPICS IN COGNITIVE SCIENCE, V1, P1, DOI 0.1063/1.1695064")
def test_citation_extra(self):
self.assertEqual(self.Cite.Extra(), "V1, P1, 0.1063/1.1695064")
def test_citation_badDetection(self):
self.assertTrue(metaknowledge.Citation("").bad)
def test_citation_equality(self):
c1 = metaknowledge.Citation("John D., 2015, TOPICS IN COGNITIVE SCIENCE, P1, DOI 0.1063/1.1695064")
c2 = metaknowledge.Citation("John D., 2015, TOPICS IN COGNITIVE SCIENCE, V1, P1")
c3 = metaknowledge.Citation("John D., 2015, TOPICS IN COGNITIVE SCIENCE, V1, P2")
self.assertTrue(c1 == self.Cite)
self.assertTrue(c2 == self.Cite)
self.assertFalse(c1 != c2)
self.assertFalse(c3 != c1)
def test_citation_hash(self):
self.assertTrue(bool(hash(self.Cite)))
self.assertTrue(bool(hash(metaknowledge.Citation("John D., 2015, TOPICS IN COGNITIVE SCIENCE, V1, P1"))))
self.assertTrue(bool(hash(metaknowledge.Citation("John D., 2015"))))
def test_citation_badLength(self):
c = metaknowledge.Citation("ab, c")
self.assertTrue(c.bad)
self.assertEqual(str(c.error), "Not a complete set of author, year and journal")
self.assertEqual(c.Extra(),'')
self.assertEqual(c.author,'Ab')
self.assertEqual(c.ID(),'Ab, C')
def test_citation_badNumbers(self):
c = metaknowledge.Citation("1, 2, 3, 4")
self.assertTrue(c.bad)
self.assertEqual(c.ID(), '1, 2')
self.assertEqual(str(c.error), "The citation did not fully match the expected pattern")
| gpl-2.0 | -6,042,287,925,163,941,000 | 39.615385 | 118 | 0.65947 | false | 3.091335 | true | false | false |
PrashntS/numptyphysics | tools/update_license_header.py | 2 | 2137 | import subprocess
import sys
import re
import collections
LICENSE_BLOCK_RE = r'/[*].*?This program is free software.*?\*/'
HEADER_TEMPLATE = """/*
* This file is part of NumptyPhysics <http://thp.io/2015/numptyphysics/>
* <<COPYRIGHT>>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/"""
def update(filename):
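    """Gather each author's contribution years from `git log --follow` and
    insert or refresh the GPL header block at the top of the given file."""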
d = subprocess.check_output(['git', 'log', '--format=%an <%ae>|%ad', '--follow', filename])
dd = collections.defaultdict(list)
for author, date in map(lambda x: x.split('|'), d.decode('utf-8').splitlines()):
dd[author].append(date)
def combine(s, date):
# "Sat Feb 18 12:58:00 2012 -0800"
s.add(date.split()[4])
return s
for author in dd:
dd[author] = sorted(reduce(combine, dd[author], set()))
def year_sort(item):
_, years = item
return tuple(map(int, years))
def inject():
for line in HEADER_TEMPLATE.splitlines():
line = line.rstrip('\n')
if '<<COPYRIGHT>>' in line:
for author, years in sorted(dd.items(), key=year_sort):
copyright = 'Coyright (c) {years} {author}'.format(years=', '.join(years), author=author)
yield line.replace('<<COPYRIGHT>>', copyright)
continue
yield line
license = '\n'.join(inject())
d = open(filename).read()
if re.search(LICENSE_BLOCK_RE, d, re.DOTALL) is None:
open(filename, 'w').write(license + '\n\n' + d)
else:
d = re.sub(LICENSE_BLOCK_RE, license, d, 0, re.DOTALL)
open(filename, 'w').write(d)
for filename in sys.argv[1:]:
print 'Updating:', filename
update(filename)
| gpl-3.0 | -2,561,553,201,379,447,300 | 32.920635 | 109 | 0.61722 | false | 3.736014 | false | false | false |
kobox/achilles.pl | src/packaging/views.py | 1 | 2590 | from django.views.generic import DetailView, ListView, TemplateView
from django.views.generic.detail import SingleObjectMixin
from .models import Category, Product, ProductImage
from django.shortcuts import render_to_response, redirect
from django.template import RequestContext
from fm.views import AjaxCreateView
from .forms import SignUpForm
class CategoryDetail(SingleObjectMixin, ListView):
paginate_by = 2
template_name = "category_detail.html"
def get(self, request, *args, **kwargs):
self.object = self.get_object(queryset=Category.objects.all())
return super(CategoryDetail, self).get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(CategoryDetail, self).get_context_data(**kwargs)
context['category'] = self.object
context['product_list'] = Product.objects.filter(category=self.object)
return context
class ProductDetailView(DetailView):
template_name = "product_detail.html"
model = Product
#context_object_name = 'product'
#def get(self, request, *args, **kwargs):
# self.object = self.get_object(queryset=Product.objects.all())
# return super(ProductDetailView, self).get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(ProductDetailView, self).get_context_data(**kwargs)
context['product_images'] = ProductImage.objects.filter(product=self.get_object())
return context
class TestView(AjaxCreateView):
form_class = SignUpForm
def add_quote(request):
# Get the context from the request.
context = RequestContext(request)
# A HTTP POST?
if request.method == 'POST':
form = SignUpForm(request.POST or None)
# Have we been provided with a valid form?
if form.is_valid():
# Save the new category to the database.
form.save(commit=True)
#post_save.connect(send_update, sender=Book)
# Now call the index() view.
# The user will be shown the homepage.
return redirect('/thanks/')
else:
# The supplied form contained errors - just print them to the terminal.
print form.errors
else:
# If the request was not a POST, display the form to enter details.
form = SignUpForm()
# Bad form (or form details), no form supplied...
# Render the form with error messages (if any).
return render_to_response('add_signup.html', {'form': form}, context)
class ThanksPage(TemplateView):
template_name = "thanks.html" | mit | -1,861,989,263,490,719,200 | 34.986111 | 90 | 0.673745 | false | 4.034268 | false | false | false |
wendlers/pyscmpd | setup.py | 1 | 1673 | #!/usr/bin/env python
##
# This file is part of the carambot-usherpa project.
#
# Copyright (C) 2012 Stefan Wendler <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
##
'''
This file is part of the pyscmpd project. To install pyscmpd:
sudo python setup.py install
'''
import os
from distutils.core import setup
from distutils.sysconfig import get_python_lib
setup(name='pyscmpd',
version='0.1',
description='sound-cloud music server daemon',
long_description='Python based sound-cloud music server talking MPD protocol',
author='Stefan Wendler',
author_email='[email protected]',
url='http://www.kaltpost.de/',
license='GPL 3.0',
platforms=['Linux'],
packages = ['pyscmpd', 'mpdserver'],
package_dir = {'pyscmpd' : 'src/pyscmpd', 'mpdserver' : 'extlib/python-mpd-server/mpdserver' },
requires = ['soundcloud(>=0.3.1)']
)
# Symlink starter
linkSrc = "%s/pyscmpd/pyscmpdctrl.py" % get_python_lib(False, False, '/usr/local')
linkDst = "/usr/local/bin/pyscmpdctrl"
if not os.path.lexists(linkDst):
os.symlink(linkSrc, linkDst)
os.chmod(linkSrc, 0755)
| gpl-3.0 | -6,542,002,191,140,657,000 | 29.418182 | 96 | 0.73162 | false | 3.115456 | false | false | false |
DavidAndreev/indico | indico/MaKaC/services/implementation/abstracts.py | 2 | 3384 | # This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from MaKaC.services.implementation.base import ParameterManager
from MaKaC.services.implementation.conference import ConferenceModifBase
import MaKaC.user as user
from MaKaC.common.fossilize import fossilize
from MaKaC.user import AvatarHolder
from MaKaC.services.interface.rpc.common import ServiceError, NoReportError
class ChangeAbstractSubmitter(ConferenceModifBase):
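    """AJAX service that reassigns the submitter of a conference abstract."""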
def _checkParams(self):
ConferenceModifBase._checkParams(self)
pm = ParameterManager(self._params)
submitterId = pm.extract("submitterId", pType=str, allowEmpty=False)
abstractId = pm.extract("abstractId", pType=str, allowEmpty=False)
self._abstract = self._conf.getAbstractMgr().getAbstractById(abstractId)
self._submitter = user.AvatarHolder().getById(submitterId)
if self._submitter is None:
raise NoReportError(_("The user that you are changing does not exist anymore in the database"))
def _getAnswer(self):
self._abstract.setSubmitter(self._submitter)
return {"name": self._submitter.getFullName(),
"affiliation": self._submitter.getAffiliation(),
"email": self._submitter.getEmail()}
class AddLateSubmissionAuthUser(ConferenceModifBase):
def _checkParams(self):
ConferenceModifBase._checkParams(self)
pm = ParameterManager(self._params)
self._userList = pm.extract("userList", pType=list, allowEmpty=False)
def _getAnswer(self):
ah = AvatarHolder()
for user in self._userList:
            if user["id"] is not None:
self._conf.getAbstractMgr().addAuthorizedSubmitter(ah.getById(user["id"]))
else:
raise ServiceError("ERR-U0", _("User does not exist."))
return fossilize(self._conf.getAbstractMgr().getAuthorizedSubmitterList())
class RemoveLateSubmissionAuthUser(ConferenceModifBase):
def _checkParams(self):
ConferenceModifBase._checkParams(self)
pm = ParameterManager(self._params)
ah = AvatarHolder()
userId = pm.extract("userId", pType=str, allowEmpty=False)
self._user = ah.getById(userId)
        if self._user is None:
raise ServiceError("ERR-U0", _("User '%s' does not exist.") % userId)
def _getAnswer(self):
self._conf.getAbstractMgr().removeAuthorizedSubmitter(self._user)
return fossilize(self._conf.getAbstractMgr().getAuthorizedSubmitterList())
methodMap = {
"changeSubmitter": ChangeAbstractSubmitter,
"lateSubmission.addExistingLateAuthUser": AddLateSubmissionAuthUser,
"lateSubmission.removeLateAuthUser": RemoveLateSubmissionAuthUser
}
| gpl-3.0 | 3,373,898,325,747,520,000 | 40.268293 | 107 | 0.710106 | false | 3.880734 | false | false | false |
lu-ci/apex-sigma-plugins | utilities/nihongo/wanikani/wanikani.py | 3 | 5298 | import json
import aiohttp
import arrow
import discord
async def wanikani(cmd, message, args):
if message.mentions:
target = message.mentions[0]
else:
target = message.author
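    # Look up the WaniKani API key this user has stored in the bot's database.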
api_document = cmd.db[cmd.db.db_cfg.database]['WaniKani'].find_one({'UserID': target.id})
if api_document:
try:
api_key = api_document['WKAPIKey']
url = f'https://www.wanikani.com/api/user/{api_key}'
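            # Query the WaniKani user API: SRS distribution, study queue and level progression.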
async with aiohttp.ClientSession() as session:
async with session.get(url + '/srs-distribution') as data:
srs = await data.read()
srs = json.loads(srs)
username = srs['user_information']['username']
sect = srs['user_information']['title']
level = srs['user_information']['level']
avatar = srs['user_information']['gravatar']
creation_date = srs['user_information']['creation_date']
apprentice = srs['requested_information']['apprentice']['total']
guru = srs['requested_information']['guru']['total']
master = srs['requested_information']['master']['total']
enlighten = srs['requested_information']['enlighten']['total']
burned = srs['requested_information']['burned']['total']
async with aiohttp.ClientSession() as session:
async with session.get(url + '/study-queue') as data:
study = await data.read()
study = json.loads(study)
lessons_available = study['requested_information']['lessons_available']
reviews_available = study['requested_information']['reviews_available']
next_review = study['requested_information']['next_review_date']
reviews_available_next_hour = study['requested_information']['reviews_available_next_hour']
reviews_available_next_day = study['requested_information']['reviews_available_next_day']
async with aiohttp.ClientSession() as session:
async with session.get(url + '/level-progression') as data:
progression = await data.read()
progression = json.loads(progression)
radicals_progress = progression['requested_information']['radicals_progress']
radicals_total = progression['requested_information']['radicals_total']
kanji_progress = progression['requested_information']['kanji_progress']
kanji_total = progression['requested_information']['kanji_total']
level = '**Level {}** Apprentice'.format(level)
avatar = f'https://www.gravatar.com/avatar/{avatar}.jpg?s=300&d='
avatar += 'https://cdn.wanikani.com/default-avatar-300x300-20121121.png'
creation_date = arrow.get(creation_date).format('MMMM DD, YYYY')
radicals = 'Radicals: **{}**/**{}**'.format(radicals_progress, radicals_total)
kanji = 'Kanji: **{}**/**{}**'.format(kanji_progress, kanji_total)
embed = discord.Embed(color=target.color)
level_progression = level + '\n'
level_progression += radicals + '\n'
level_progression += kanji
embed.add_field(name='Level progression', value=level_progression)
            srs_distribution = 'Apprentice: **{}**\n'.format(apprentice)
            srs_distribution += 'Guru: **{}**\n'.format(guru)
            srs_distribution += 'Master: **{}**\n'.format(master)
            srs_distribution += 'Enlighten: **{}**\n'.format(enlighten)
            srs_distribution += 'Burned: **{}**'.format(burned)
            embed.add_field(name='SRS distribution', value=srs_distribution)
study_queue = 'Lessons available: **{}**\n'.format(lessons_available)
study_queue += 'Reviews available: **{}**\n'.format(reviews_available)
if lessons_available or reviews_available:
next_review = 'now'
else:
next_review = arrow.get(next_review).humanize()
study_queue += 'Next review date: **{}**\n'.format(next_review)
study_queue += 'Reviews in next hour: **{}**\n'.format(reviews_available_next_hour)
study_queue += 'Reviews in next day: **{}**'.format(reviews_available_next_day)
embed.add_field(name='Study queue', value=study_queue)
userinfo = '**{}** of **Sect {}**\n'.format(username, sect)
userinfo += '**Level {}** Apprentice\n'.format(level)
userinfo += 'Serving the Crabigator since {}'.format(creation_date)
embed.set_author(name='{} of Sect {}'.format(username, sect),
url='https://www.wanikani.com/community/people/{}'.format(username), icon_url=avatar)
embed.set_footer(text='Serving the Crabigator since {}'.format(creation_date))
except KeyError:
embed = discord.Embed(color=0xBE1931, title='❗ Invalid data was retrieved.')
else:
embed = discord.Embed(color=0xBE1931, title='❗ User has no Key saved.')
await message.channel.send(None, embed=embed)
| gpl-3.0 | 8,162,439,197,226,712,000 | 51.92 | 114 | 0.573318 | false | 3.975958 | false | false | false |
dmayer/time_trial | time_trial_gui/gui/trial_detail_widget.py | 1 | 4254 | __author__ = 'daniel'
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import pyqtSignal
class TrialDetailsWidget(QtGui.QWidget):
def __init__(self, parent=None):
super(TrialDetailsWidget, self).__init__(parent)
self.layout = QtGui.QVBoxLayout()
self.setLayout(self.layout)
self.box = QtGui.QGroupBox("Trial Settings")
self.layout.addWidget(self.box)
self.box_layout = QtGui.QFormLayout()
self.box_layout.setFormAlignment(QtCore.Qt.AlignTop | QtCore.Qt.AlignLeft)
self.box.setLayout(self.box_layout)
self.type = QtGui.QLabel("")
self.box_layout.addRow("<b>Type</b>", self.type)
self.name = QtGui.QLabel("")
self.box_layout.addRow("<b>Name</b>", self.name)
self.description = QtGui.QLabel("")
self.box_layout.addRow("<b>Description</b>", self.description)
class EchoTrialDetailsWidget(TrialDetailsWidget):
def __init__(self, parent=None):
super(EchoTrialDetailsWidget, self).__init__(parent)
self.delay = QtGui.QLabel("")
self.box_layout.addRow("<b>Delay (ns)</b>", self.delay)
class HttpTrialDetailsWidget(TrialDetailsWidget):
def __init__(self, parent=None):
super(HttpTrialDetailsWidget, self).__init__(parent)
self.request_url = QtGui.QLabel("")
self.box_layout.addRow("<b>Request URL</b>", self.request_url)
class RacerDetailsWidget(QtGui.QWidget):
def __init__(self, parent=None):
super(RacerDetailsWidget, self).__init__(parent)
self.layout = QtGui.QVBoxLayout()
self.setLayout(self.layout)
self.box = QtGui.QGroupBox("Racer Settings")
self.layout.addWidget(self.box)
self.box_layout = QtGui.QFormLayout()
self.box_layout.setFormAlignment(QtCore.Qt.AlignTop | QtCore.Qt.AlignLeft)
self.box.setLayout(self.box_layout)
self.racer = QtGui.QLabel("")
self.box_layout.addRow("<b>Racer</b>", self.racer)
self.core_id = QtGui.QLabel("")
self.box_layout.addRow("<b>Core ID</b>", self.core_id)
self.real_time = QtGui.QLabel("")
self.box_layout.addRow("<b>Real-Time</b>", self.real_time)
class TrialStatusWidget(QtGui.QWidget):
trial_started = pyqtSignal()
trial_stopped = pyqtSignal()
trial_refreshed = pyqtSignal()
trial_edit = pyqtSignal()
def __init__(self, parent=None):
super(TrialStatusWidget, self).__init__(parent)
self.layout = QtGui.QVBoxLayout()
self.setLayout(self.layout)
self.box = QtGui.QGroupBox("Trial Status")
self.super_box_layout = QtGui.QGridLayout()
self.box_layout = QtGui.QFormLayout()
self.box_layout.setFormAlignment(QtCore.Qt.AlignTop | QtCore.Qt.AlignLeft)
self.box.setLayout(self.super_box_layout)
self.super_box_layout.addLayout(self.box_layout,0,0,1,2)
self.layout.addWidget(self.box)
self.start = QtGui.QLabel("")
self.box_layout.addRow("<b>Start</b>", self.start)
self.end = QtGui.QLabel("")
self.box_layout.addRow("<b>End</b>", self.end)
self.job_status = QtGui.QLabel("")
self.box_layout.addRow("<b>Job Status</b>", self.job_status)
self.start_trial_button = QtGui.QPushButton("Start")
self.start_trial_button.setEnabled(False)
self.start_trial_button.released.connect(self.trial_started.emit)
self.super_box_layout.addWidget(self.start_trial_button,1,0)
self.stop_trial_button = QtGui.QPushButton("Cancel and Reset")
self.stop_trial_button.setEnabled(False)
self.stop_trial_button.released.connect(self.trial_stopped.emit)
self.super_box_layout.addWidget(self.stop_trial_button,1,1)
self.refresh_trial_button = QtGui.QPushButton("Refresh")
self.refresh_trial_button.setEnabled(False)
self.refresh_trial_button.released.connect(self.trial_refreshed.emit)
self.layout.addWidget(self.refresh_trial_button)
self.edit_trial_button = QtGui.QPushButton("Edit")
self.edit_trial_button.setEnabled(False)
self.edit_trial_button.released.connect(self.trial_edit.emit)
self.layout.addWidget(self.edit_trial_button)
| mit | 7,482,381,196,374,320,000 | 30.279412 | 82 | 0.652562 | false | 3.507007 | false | false | false |
Azure/azure-sdk-for-python | sdk/keyvault/azure-keyvault-keys/azure/keyvault/keys/_shared/challenge_auth_policy.py | 4 | 5654 | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
"""Policy implementing Key Vault's challenge authentication protocol.
Normally the protocol is only used for the client's first service request, upon which:
1. The challenge authentication policy sends a copy of the request, without authorization or content.
2. Key Vault responds 401 with a header (the 'challenge') detailing how the client should authenticate such a request.
3. The policy authenticates according to the challenge and sends the original request with authorization.
The policy caches the challenge and thus knows how to authenticate future requests. However, authentication
requirements can change. For example, a vault may move to a new tenant. In such a case the policy will attempt the
protocol again.
"""
import copy
import time
from azure.core.exceptions import ServiceRequestError
from azure.core.pipeline import PipelineContext, PipelineRequest
from azure.core.pipeline.policies import HTTPPolicy
from azure.core.pipeline.transport import HttpRequest
from .http_challenge import HttpChallenge
from . import http_challenge_cache as ChallengeCache
try:
from typing import TYPE_CHECKING
except ImportError:
TYPE_CHECKING = False
if TYPE_CHECKING:
from typing import Any, Optional
from azure.core.credentials import AccessToken, TokenCredential
from azure.core.pipeline import PipelineResponse
def _enforce_tls(request):
# type: (PipelineRequest) -> None
if not request.http_request.url.lower().startswith("https"):
raise ServiceRequestError(
"Bearer token authentication is not permitted for non-TLS protected (non-https) URLs."
)
def _get_challenge_request(request):
# type: (PipelineRequest) -> PipelineRequest
# The challenge request is intended to provoke an authentication challenge from Key Vault, to learn how the
# service request should be authenticated. It should be identical to the service request but with no body.
challenge_request = HttpRequest(
request.http_request.method, request.http_request.url, headers=request.http_request.headers
)
challenge_request.headers["Content-Length"] = "0"
options = copy.deepcopy(request.context.options)
context = PipelineContext(request.context.transport, **options)
return PipelineRequest(http_request=challenge_request, context=context)
def _update_challenge(request, challenger):
# type: (PipelineRequest, PipelineResponse) -> HttpChallenge
"""parse challenge from challenger, cache it, return it"""
challenge = HttpChallenge(
request.http_request.url,
challenger.http_response.headers.get("WWW-Authenticate"),
response_headers=challenger.http_response.headers,
)
ChallengeCache.set_challenge_for_url(request.http_request.url, challenge)
return challenge
class ChallengeAuthPolicyBase(object):
"""Sans I/O base for challenge authentication policies"""
def __init__(self, **kwargs):
self._token = None # type: Optional[AccessToken]
super(ChallengeAuthPolicyBase, self).__init__(**kwargs)
@property
def _need_new_token(self):
# type: () -> bool
return not self._token or self._token.expires_on - time.time() < 300
class ChallengeAuthPolicy(ChallengeAuthPolicyBase, HTTPPolicy):
"""policy for handling HTTP authentication challenges"""
def __init__(self, credential, **kwargs):
# type: (TokenCredential, **Any) -> None
self._credential = credential
super(ChallengeAuthPolicy, self).__init__(**kwargs)
def send(self, request):
# type: (PipelineRequest) -> PipelineResponse
_enforce_tls(request)
challenge = ChallengeCache.get_challenge_for_url(request.http_request.url)
if not challenge:
challenge_request = _get_challenge_request(request)
challenger = self.next.send(challenge_request)
try:
challenge = _update_challenge(request, challenger)
except ValueError:
# didn't receive the expected challenge -> nothing more this policy can do
return challenger
self._handle_challenge(request, challenge)
response = self.next.send(request)
if response.http_response.status_code == 401:
# any cached token must be invalid
self._token = None
# cached challenge could be outdated; maybe this response has a new one?
try:
challenge = _update_challenge(request, response)
except ValueError:
# 401 with no legible challenge -> nothing more this policy can do
return response
self._handle_challenge(request, challenge)
response = self.next.send(request)
return response
def _handle_challenge(self, request, challenge):
# type: (PipelineRequest, HttpChallenge) -> None
"""authenticate according to challenge, add Authorization header to request"""
if self._need_new_token:
# azure-identity credentials require an AADv2 scope but the challenge may specify an AADv1 resource
scope = challenge.get_scope() or challenge.get_resource() + "/.default"
self._token = self._credential.get_token(scope)
# ignore mypy's warning because although self._token is Optional, get_token raises when it fails to get a token
request.http_request.headers["Authorization"] = "Bearer {}".format(self._token.token) # type: ignore
| mit | -2,599,558,481,152,889,000 | 39.385714 | 119 | 0.6919 | false | 4.526821 | false | false | false |
3drepo/3drepo.io | scripts/dbMaintenance/repairFileShare.py | 1 | 4718 | import sys, os
from pymongo import MongoClient
import gridfs
import re
isPython3 = bool(sys.version_info >= (3, 0))
if isPython3:
from io import BytesIO
else:
from StringIO import StringIO
if len(sys.argv) <= 5:
print("Not enough arguments.")
print("removeOrphanFiles.py <mongoURL> <mongoPort> <userName> <password> <localFolder>")
sys.exit(0)
mongoURL = sys.argv[1]
mongoPort = sys.argv[2]
userName = sys.argv[3]
password = sys.argv[4]
localFolder = sys.argv[5]
if not os.path.exists(localFolder):
print("LocalFolder " + localFolder + " does not exist.")
sys.exit(0)
connString = "mongodb://"+ userName + ":" + password +"@"+mongoURL + ":" + mongoPort + "/"
##### Enable dry run to not commit to the database #####
dryRun = True
verbose = True
ignoreDirs = ["toy_2019-05-31"]
##### Retrieve file list from local folder #####
fileList = {}
missing = []
ignoreDirs = [os.path.normpath(os.path.join(localFolder, x)) for x in ignoreDirs]
for (dirPath, dirNames, fileNames) in os.walk(localFolder):
for fileName in fileNames:
if not dirPath in ignoreDirs:
entry = os.path.normpath(os.path.join(dirPath, fileName))
fileList[entry] = False
##### Connect to the Database #####
db = MongoClient(connString)
for database in db.database_names():
if database != "admin" and database != "local" and database != "notifications":
db = MongoClient(connString)[database]
if verbose:
print("--database:" + database)
##### Get a model ID and find entries #####
regex = re.compile(".+\.ref$");
for colName in db.collection_names():
result = regex.match(colName);
if result:
if verbose:
print("\t--collection:" + colName);
for refEntry in db[colName].find({"type": "fs"}):
filePath = os.path.normpath(os.path.join(localFolder, refEntry['link']))
inIgnoreDir= bool([ x for x in ignoreDirs if filePath.find(x) + 1 ])
if not inIgnoreDir:
fileStatus = fileList.get(filePath)
if fileStatus == None:
refInfo = database + "." + colName + ": " + refEntry["_id"]
if dryRun:
missing.append(refInfo);
else:
##### Upload missing files to FS and insert BSON #####
parentCol = colName[:-4];
fs = gridfs.GridFS(db, parentCol)
if ".stash.json_mpc" in parentCol or "stash.unity3d" in parentCol:
modelId = parentCol.split(".")[0];
if len(refEntry["_id"].split("/")) > 1:
toRepair = "/" + database + "/" + modelId + "/revision/" + refEntry["_id"]
else:
toRepair = "/" + database + "/" + modelId + "/" + refEntry["_id"]
else:
toRepair = refEntry["_id"]
gridFSEntry = fs.find_one({"filename": toRepair})
if gridFSEntry != None:
if not os.path.exists(os.path.dirname(filePath)):
os.makedirs(os.path.dirname(filePath))
file = open(filePath,'wb')
if isPython3:
file.write(BytesIO(gridFSEntry.read()).getvalue())
else:
file.write(StringIO(gridFSEntry.read()).getvalue())
file.close()
missing.append(refInfo + " (Restored to: " + filePath + ")");
else:
missing.append(refInfo + ": No backup found. Reference removed.");
db[colName].remove({"_id": refEntry["_id"]});
else:
fileList[filePath] = True
print("===== Missing Files =====");
for entry in missing:
print("\t"+ entry);
print("=========================");
print("===== Orphaned Files =====");
for filePath in fileList:
if not fileList[filePath]:
if dryRun:
print("\t"+ filePath);
else:
##### Delete orphan files #####
os.remove(filePath)
print("\t\t--Removed: " + filePath)
print("==========================");
| agpl-3.0 | 4,102,785,895,408,671,000 | 40.752212 | 114 | 0.474989 | false | 4.442561 | false | false | false |
Jofemago/Computacion-Grafica | SPACE INVADERS/Player.py | 1 | 1903 | '''
A player class is created which moves with the keyboard keys
and shoots
'''
import pygame
from configuraciones import *
TIEMPODESTRUCION = 8
class Vida(pygame.sprite.Sprite):
def __init__(self, img):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.image.load(img).convert()
self.rect = self.image.get_rect()
def setPos(self, x, y):
self.rect.x = x
self.rect.y = y
class Player(pygame.sprite.Sprite):
def __init__(self,img, imgdestru, vidas = 3, col = AZUL):
pygame.sprite.Sprite.__init__(self)
self.img = pygame.image.load(img).convert_alpha()
self.image = pygame.image.load(img).convert_alpha()
self.imagedestruida = pygame.image.load(imgdestru).convert_alpha()
self.rect = self.image.get_rect()
self.setPos(400,ALTO - 80)
self.disparo = pygame.mixer.Sound('Sonidos/disparojg.ogg')
        # movement variables
self.var_x = 0
self.vidas = vidas
        # time the player stays destroyed
self.destrucion = False
self.tiempodes = TIEMPODESTRUCION
def disparar(self):
self.disparo.play()
def setPos(self, x, y):
self.rect.x = x
self.rect.y = y
def setX(self, x):
self.rect.x = x
    def setY(self, y):
        self.rect.y = y
def movX(self):
if self.rect.x >= ANCHO -50 and self.var_x >= 0:
self.var_x = 0
if self.rect.x <= 0 and self.var_x <= 0:
self.var_x = 0
self.rect.x += self.var_x
def update(self):
if self.destrucion:
self.image = self.imagedestruida
if self.tiempodes <= 0:
self.image = self.img
self.destrucion = False
self.tiempodes = TIEMPODESTRUCION
self.tiempodes -= 1
else:
self.movX()
| mit | 927,243,830,971,040,500 | 21.388235 | 74 | 0.565423 | false | 3.16113 | false | false | false |
gitcoinco/web | app/quadraticlands/admin.py | 1 | 1491 | from django.contrib import admin
from quadraticlands.models import (
GTCSteward, InitialTokenDistribution, MissionStatus, QLVote, QuadLandsFAQ, SchwagCoupon,
)
class InitialTokenDistributionAdmin(admin.ModelAdmin):
raw_id_fields = ['profile']
search_fields = ['profile__handle']
list_display = ['id', 'profile', 'claim_total']
class MissionStatusAdmin(admin.ModelAdmin):
search_fields = ['profile__handle']
list_display = ['id', 'profile', 'proof_of_use', 'proof_of_receive', 'proof_of_knowledge']
raw_id_fields = ['profile']
class GTCStewardAdmin(admin.ModelAdmin):
raw_id_fields = ['profile']
search_fields = ['profile__handle']
list_display = ['id', 'profile', 'real_name', 'profile_link']
class QuadLandsFAQAdmin(admin.ModelAdmin):
list_display = ['id', 'position', 'question']
class QLVoteAdmin(admin.ModelAdmin):
raw_id_fields = ['profile']
list_display = ['id', 'profile']
class SchwagCouponAdmin(admin.ModelAdmin):
raw_id_fields = ['profile']
search_fields = ['profile__handle', 'coupon_code', 'discount_type']
list_display = ['id', 'discount_type', 'coupon_code', 'profile']
admin.site.register(InitialTokenDistribution, InitialTokenDistributionAdmin)
admin.site.register(MissionStatus, MissionStatusAdmin)
admin.site.register(QuadLandsFAQ, QuadLandsFAQAdmin)
admin.site.register(GTCSteward, GTCStewardAdmin)
admin.site.register(QLVote, QLVoteAdmin)
admin.site.register(SchwagCoupon, SchwagCouponAdmin)
| agpl-3.0 | 2,581,283,953,521,136,600 | 31.413043 | 94 | 0.724346 | false | 3.269737 | false | false | false |
ethertricity/bluesky | plugins/ilsgate.py | 2 | 3467 | """ BlueSky plugin template. The text you put here will be visible
in BlueSky as the description of your plugin. """
import numpy as np
# Import the global bluesky objects. Uncomment the ones you need
from bluesky import stack #, settings, navdb, traf, sim, scr, tools
from bluesky import navdb
from bluesky.tools.aero import ft
from bluesky.tools import geo, areafilter
### Initialization function of your plugin. Do not change the name of this
### function, as it is the way BlueSky recognises this file as a plugin.
def init_plugin():
    # Additional initialisation code
# Configuration parameters
config = {
# The name of your plugin
'plugin_name': 'ILSGATE',
# The type of this plugin. For now, only simulation plugins are possible.
'plugin_type': 'sim',
# Update interval in seconds. By default, your plugin's update function(s)
# are called every timestep of the simulation. If your plugin needs less
# frequent updates provide an update interval.
'update_interval': 0.0,
# The update function is called after traffic is updated. Use this if you
# want to do things as a result of what happens in traffic. If you need to
# something before traffic is updated please use preupdate.
'update': update,
# If your plugin has a state, you will probably need a reset function to
# clear the state in between simulations.
'reset': reset
}
stackfunctions = {
# The command name for your function
'ILSGATE': [
# A short usage string. This will be printed if you type HELP <name> in the BlueSky console
'ILSGATE Airport/runway',
# A list of the argument types your function accepts. For a description of this, see ...
'txt',
# The name of your function in this plugin
ilsgate,
# a longer help text of your function.
'Define an ILS approach area for a given runway.']
}
# init_plugin() should always return these two dicts.
return config, stackfunctions
### Periodic update functions that are called by the simulation. You can replace
### this by anything, so long as you communicate this in init_plugin
def update():
pass
def reset():
pass
### Other functions of your plugin
def ilsgate(rwyname):
if '/' not in rwyname:
return False, 'Argument is not a runway ' + rwyname
apt, rwy = rwyname.split('/RW')
rwy = rwy.lstrip('Y')
apt_thresholds = navdb.rwythresholds.get(apt)
if not apt_thresholds:
return False, 'Argument is not a runway (airport not found) ' + apt
rwy_threshold = apt_thresholds.get(rwy)
if not rwy_threshold:
return False, 'Argument is not a runway (runway not found) ' + rwy
# Extract runway threshold lat/lon, and runway heading
lat, lon, hdg = rwy_threshold
# The ILS gate is defined as a triangular area pointed away from the runway
# First calculate the two far corners in cartesian coordinates
cone_length = 50 # nautical miles
cone_angle = 20.0 # degrees
lat1, lon1 = geo.qdrpos(lat, lon, hdg - 180.0 + cone_angle, cone_length)
lat2, lon2 = geo.qdrpos(lat, lon, hdg - 180.0 - cone_angle, cone_length)
coordinates = np.array([lat, lon, lat1, lon1, lat2, lon2])
areafilter.defineArea('ILS' + rwyname, 'POLYALT', coordinates, top=4000*ft)
| gpl-3.0 | 5,524,521,760,201,754,000 | 37.522222 | 103 | 0.659937 | false | 3.899888 | false | false | false |
nirik/python-fedora | fedora/tg/visit/jsonfasvisit1.py | 4 | 4242 | # -*- coding: utf-8 -*-
#
# Copyright © 2007-2008 Red Hat, Inc. All rights reserved.
#
# This file is part of python-fedora
#
# python-fedora is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# python-fedora is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with python-fedora; if not, see <http://www.gnu.org/licenses/>
#
# Adapted from code in the TurboGears project licensed under the MIT license.
'''
This plugin provides integration with the Fedora Account System using JSON
calls to the account system server.
.. moduleauthor:: Toshio Kuratomi <[email protected]>
'''
from turbogears import config
from turbogears.visit.api import Visit, BaseVisitManager
from fedora.client import FasProxyClient
from fedora import _, __version__
import logging
log = logging.getLogger("turbogears.identity.savisit")
class JsonFasVisitManager(BaseVisitManager):
'''
This proxies visit requests to the Account System Server running remotely.
'''
fas_url = config.get('fas.url', 'https://admin.fedoraproject.org/accounts')
fas = None
def __init__(self, timeout):
self.debug = config.get('jsonfas.debug', False)
if not self.fas:
self.fas = FasProxyClient(
self.fas_url, debug=self.debug,
session_name=config.get('visit.cookie.name', 'tg-visit'),
useragent='JsonFasVisitManager/%s' % __version__)
BaseVisitManager.__init__(self, timeout)
log.debug('JsonFasVisitManager.__init__: exit')
def create_model(self):
'''
Create the Visit table if it doesn't already exist.
Not needed as the visit tables reside remotely in the FAS2 database.
'''
pass
def new_visit_with_key(self, visit_key):
'''
Return a new Visit object with the given key.
'''
log.debug('JsonFasVisitManager.new_visit_with_key: enter')
# Hit any URL in fas2 with the visit_key set. That will call the
# new_visit method in fas2
# We only need to get the session cookie from this request
request_data = self.fas.refresh_session(visit_key)
session_id = request_data[0]
log.debug('JsonFasVisitManager.new_visit_with_key: exit')
return Visit(session_id, True)
def visit_for_key(self, visit_key):
'''
Return the visit for this key or None if the visit doesn't exist or has
expired.
'''
log.debug('JsonFasVisitManager.visit_for_key: enter')
# Hit any URL in fas2 with the visit_key set. That will call the
# new_visit method in fas2
# We only need to get the session cookie from this request
request_data = self.fas.refresh_session(visit_key)
session_id = request_data[0]
# Knowing what happens in turbogears/visit/api.py when this is called,
# we can shortcircuit this step and avoid a round trip to the FAS
# server.
# if visit_key != session_id:
# # visit has expired
# return None
# # Hitting FAS has already updated the visit.
# return Visit(visit_key, False)
log.debug('JsonFasVisitManager.visit_for_key: exit')
if visit_key != session_id:
return Visit(session_id, True)
else:
return Visit(visit_key, False)
def update_queued_visits(self, queue):
'''Update the visit information on the server'''
log.debug('JsonFasVisitManager.update_queued_visits: enter')
# Hit any URL in fas with each visit_key to update the sessions
for visit_key in queue:
log.info(_('updating visit (%s)'), visit_key)
self.fas.refresh_session(visit_key)
log.debug('JsonFasVisitManager.update_queued_visits: exit')
| gpl-2.0 | 6,948,084,201,747,106,000 | 37.207207 | 79 | 0.660457 | false | 3.789991 | false | false | false |
Vagab0nd/SiCKRAGE | lib3/twilio/rest/preview/understand/assistant/task/sample.py | 2 | 18621 | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class SampleList(ListResource):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact [email protected]. """
def __init__(self, version, assistant_sid, task_sid):
"""
Initialize the SampleList
:param Version version: Version that contains the resource
:param assistant_sid: The unique ID of the Assistant.
:param task_sid: The unique ID of the Task associated with this Sample.
:returns: twilio.rest.preview.understand.assistant.task.sample.SampleList
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleList
"""
super(SampleList, self).__init__(version)
# Path Solution
self._solution = {'assistant_sid': assistant_sid, 'task_sid': task_sid, }
self._uri = '/Assistants/{assistant_sid}/Tasks/{task_sid}/Samples'.format(**self._solution)
def stream(self, language=values.unset, limit=None, page_size=None):
"""
Streams SampleInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param unicode language: An ISO language-country string of the sample.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.preview.understand.assistant.task.sample.SampleInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(language=language, page_size=limits['page_size'], )
return self._version.stream(page, limits['limit'])
def list(self, language=values.unset, limit=None, page_size=None):
"""
Lists SampleInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param unicode language: An ISO language-country string of the sample.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.preview.understand.assistant.task.sample.SampleInstance]
"""
return list(self.stream(language=language, limit=limit, page_size=page_size, ))
def page(self, language=values.unset, page_token=values.unset,
page_number=values.unset, page_size=values.unset):
"""
Retrieve a single page of SampleInstance records from the API.
Request is executed immediately
:param unicode language: An ISO language-country string of the sample.
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of SampleInstance
:rtype: twilio.rest.preview.understand.assistant.task.sample.SamplePage
"""
data = values.of({
'Language': language,
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(method='GET', uri=self._uri, params=data, )
return SamplePage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of SampleInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of SampleInstance
:rtype: twilio.rest.preview.understand.assistant.task.sample.SamplePage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return SamplePage(self._version, response, self._solution)
def create(self, language, tagged_text, source_channel=values.unset):
"""
Create the SampleInstance
:param unicode language: An ISO language-country string of the sample.
:param unicode tagged_text: The text example of how end-users may express this task. The sample may contain Field tag blocks.
:param unicode source_channel: The communication channel the sample was captured. It can be: voice, sms, chat, alexa, google-assistant, or slack. If not included the value will be null
:returns: The created SampleInstance
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleInstance
"""
data = values.of({'Language': language, 'TaggedText': tagged_text, 'SourceChannel': source_channel, })
payload = self._version.create(method='POST', uri=self._uri, data=data, )
return SampleInstance(
self._version,
payload,
assistant_sid=self._solution['assistant_sid'],
task_sid=self._solution['task_sid'],
)
def get(self, sid):
"""
Constructs a SampleContext
:param sid: A 34 character string that uniquely identifies this resource.
:returns: twilio.rest.preview.understand.assistant.task.sample.SampleContext
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleContext
"""
return SampleContext(
self._version,
assistant_sid=self._solution['assistant_sid'],
task_sid=self._solution['task_sid'],
sid=sid,
)
def __call__(self, sid):
"""
Constructs a SampleContext
:param sid: A 34 character string that uniquely identifies this resource.
:returns: twilio.rest.preview.understand.assistant.task.sample.SampleContext
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleContext
"""
return SampleContext(
self._version,
assistant_sid=self._solution['assistant_sid'],
task_sid=self._solution['task_sid'],
sid=sid,
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Preview.Understand.SampleList>'
class SamplePage(Page):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact [email protected]. """
def __init__(self, version, response, solution):
"""
Initialize the SamplePage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param assistant_sid: The unique ID of the Assistant.
:param task_sid: The unique ID of the Task associated with this Sample.
:returns: twilio.rest.preview.understand.assistant.task.sample.SamplePage
:rtype: twilio.rest.preview.understand.assistant.task.sample.SamplePage
"""
super(SamplePage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of SampleInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.preview.understand.assistant.task.sample.SampleInstance
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleInstance
"""
return SampleInstance(
self._version,
payload,
assistant_sid=self._solution['assistant_sid'],
task_sid=self._solution['task_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Preview.Understand.SamplePage>'
class SampleContext(InstanceContext):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact [email protected]. """
def __init__(self, version, assistant_sid, task_sid, sid):
"""
Initialize the SampleContext
:param Version version: Version that contains the resource
:param assistant_sid: The unique ID of the Assistant.
:param task_sid: The unique ID of the Task associated with this Sample.
:param sid: A 34 character string that uniquely identifies this resource.
:returns: twilio.rest.preview.understand.assistant.task.sample.SampleContext
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleContext
"""
super(SampleContext, self).__init__(version)
# Path Solution
self._solution = {'assistant_sid': assistant_sid, 'task_sid': task_sid, 'sid': sid, }
self._uri = '/Assistants/{assistant_sid}/Tasks/{task_sid}/Samples/{sid}'.format(**self._solution)
def fetch(self):
"""
Fetch the SampleInstance
:returns: The fetched SampleInstance
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleInstance
"""
payload = self._version.fetch(method='GET', uri=self._uri, )
return SampleInstance(
self._version,
payload,
assistant_sid=self._solution['assistant_sid'],
task_sid=self._solution['task_sid'],
sid=self._solution['sid'],
)
def update(self, language=values.unset, tagged_text=values.unset,
source_channel=values.unset):
"""
Update the SampleInstance
:param unicode language: An ISO language-country string of the sample.
:param unicode tagged_text: The text example of how end-users may express this task. The sample may contain Field tag blocks.
:param unicode source_channel: The communication channel the sample was captured. It can be: voice, sms, chat, alexa, google-assistant, or slack. If not included the value will be null
:returns: The updated SampleInstance
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleInstance
"""
data = values.of({'Language': language, 'TaggedText': tagged_text, 'SourceChannel': source_channel, })
payload = self._version.update(method='POST', uri=self._uri, data=data, )
return SampleInstance(
self._version,
payload,
assistant_sid=self._solution['assistant_sid'],
task_sid=self._solution['task_sid'],
sid=self._solution['sid'],
)
def delete(self):
"""
Deletes the SampleInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._version.delete(method='DELETE', uri=self._uri, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Preview.Understand.SampleContext {}>'.format(context)
class SampleInstance(InstanceResource):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact [email protected]. """
def __init__(self, version, payload, assistant_sid, task_sid, sid=None):
"""
Initialize the SampleInstance
:returns: twilio.rest.preview.understand.assistant.task.sample.SampleInstance
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleInstance
"""
super(SampleInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'account_sid': payload.get('account_sid'),
'date_created': deserialize.iso8601_datetime(payload.get('date_created')),
'date_updated': deserialize.iso8601_datetime(payload.get('date_updated')),
'task_sid': payload.get('task_sid'),
'language': payload.get('language'),
'assistant_sid': payload.get('assistant_sid'),
'sid': payload.get('sid'),
'tagged_text': payload.get('tagged_text'),
'url': payload.get('url'),
'source_channel': payload.get('source_channel'),
}
# Context
self._context = None
self._solution = {
'assistant_sid': assistant_sid,
'task_sid': task_sid,
'sid': sid or self._properties['sid'],
}
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: SampleContext for this SampleInstance
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleContext
"""
if self._context is None:
self._context = SampleContext(
self._version,
assistant_sid=self._solution['assistant_sid'],
task_sid=self._solution['task_sid'],
sid=self._solution['sid'],
)
return self._context
@property
def account_sid(self):
"""
:returns: The unique ID of the Account that created this Sample.
:rtype: unicode
"""
return self._properties['account_sid']
@property
def date_created(self):
"""
:returns: The date that this resource was created
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The date that this resource was last updated
:rtype: datetime
"""
return self._properties['date_updated']
@property
def task_sid(self):
"""
:returns: The unique ID of the Task associated with this Sample.
:rtype: unicode
"""
return self._properties['task_sid']
@property
def language(self):
"""
:returns: An ISO language-country string of the sample.
:rtype: unicode
"""
return self._properties['language']
@property
def assistant_sid(self):
"""
:returns: The unique ID of the Assistant.
:rtype: unicode
"""
return self._properties['assistant_sid']
@property
def sid(self):
"""
:returns: A 34 character string that uniquely identifies this resource.
:rtype: unicode
"""
return self._properties['sid']
@property
def tagged_text(self):
"""
:returns: The text example of how end-users may express this task. The sample may contain Field tag blocks.
:rtype: unicode
"""
return self._properties['tagged_text']
@property
def url(self):
"""
:returns: The url
:rtype: unicode
"""
return self._properties['url']
@property
def source_channel(self):
"""
:returns: The communication channel the sample was captured. It can be: voice, sms, chat, alexa, google-assistant, or slack. If not included the value will be null
:rtype: unicode
"""
return self._properties['source_channel']
def fetch(self):
"""
Fetch the SampleInstance
:returns: The fetched SampleInstance
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleInstance
"""
return self._proxy.fetch()
def update(self, language=values.unset, tagged_text=values.unset,
source_channel=values.unset):
"""
Update the SampleInstance
:param unicode language: An ISO language-country string of the sample.
:param unicode tagged_text: The text example of how end-users may express this task. The sample may contain Field tag blocks.
:param unicode source_channel: The communication channel the sample was captured. It can be: voice, sms, chat, alexa, google-assistant, or slack. If not included the value will be null
:returns: The updated SampleInstance
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleInstance
"""
return self._proxy.update(language=language, tagged_text=tagged_text, source_channel=source_channel, )
def delete(self):
"""
Deletes the SampleInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._proxy.delete()
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Preview.Understand.SampleInstance {}>'.format(context)
| gpl-3.0 | -6,238,906,540,563,084,000 | 36.694332 | 192 | 0.626873 | false | 4.450526 | false | false | false |
mshubian/BAK_open-hackathon | open-hackathon-adminUI/test/app/admin/test_admin_mgr.py | 1 | 3886 | import sys
sys.path.append("../src/")
import unittest
from app.admin.admin_mgr import AdminManager
from app.database.models import AdminUser, AdminEmail, AdminUserHackathonRel
from hackathon import app
from mock import Mock, ANY
from flask import g
class AdminManagerTest(unittest.TestCase):
def setUp(self):
app.config['TESTING'] = True
app.config['WTF_CSRF_ENABLED'] = False
def tearDown(self):
pass
'''test method: get_hackid_from_adminid'''
def test_get_hackid_by_adminid(self):
admin_email_test = [AdminEmail(email='[email protected]')]
admin_user_hackathon_rel = [AdminUserHackathonRel(hackathon_id=-1)]
mock_db = Mock()
mock_db.find_all_objects_by.return_value = admin_email_test
mock_db.find_all_objects.return_value = admin_user_hackathon_rel
am = AdminManager(mock_db)
self.assertEqual(am.get_hackid_from_adminid(1), [-1L])
mock_db.find_all_objects_by.assert_called_once_with(AdminEmail, admin_id=1)
mock_db.find_all_objects.assert_called_once_with(AdminUserHackathonRel, ANY)
'''test method: check_role for decorators'''
def test_check_role_super_admin_success(self):
admin_email_test = [AdminEmail(email='[email protected]')]
admin_user_hackathon_rel = [AdminUserHackathonRel(hackathon_id=-1)]
mock_db = Mock()
mock_db.find_all_objects_by.return_value = admin_email_test
mock_db.find_all_objects.return_value = admin_user_hackathon_rel
am = AdminManager(mock_db)
with app.test_request_context('/'):
g.admin = AdminUser(id=1, name='testadmin')
self.assertTrue(am.check_role(0))
mock_db.find_all_objects_by.assert_called_once_with(AdminEmail, admin_id=1)
mock_db.find_all_objects.assert_called_once_with(AdminUserHackathonRel, ANY)
    def test_check_role_super_admin_failed(self):
admin_email_test = [AdminEmail(email='[email protected]')]
admin_user_hackathon_rel = [AdminUserHackathonRel(hackathon_id=2)]
mock_db = Mock()
mock_db.find_all_objects_by.return_value = admin_email_test
mock_db.find_all_objects.return_value = admin_user_hackathon_rel
am = AdminManager(mock_db)
with app.test_request_context('/'):
g.admin = AdminUser(id=1, name='testadmin')
self.assertFalse(am.check_role(0))
mock_db.find_all_objects_by.assert_called_once_with(AdminEmail, admin_id=1)
mock_db.find_all_objects.assert_called_once_with(AdminUserHackathonRel, ANY)
def test_check_role_common_admin_success(self):
admin_email_test = [AdminEmail(email='[email protected]')]
admin_user_hackathon_rel = [AdminUserHackathonRel(hackathon_id=2)]
mock_db = Mock()
mock_db.find_all_objects_by.return_value = admin_email_test
mock_db.find_all_objects.return_value = admin_user_hackathon_rel
am = AdminManager(mock_db)
with app.test_request_context('/'):
g.admin = AdminUser(id=1, name='testadmin')
self.assertTrue(am.check_role(1))
mock_db.find_all_objects_by.assert_called_once_with(AdminEmail, admin_id=1)
mock_db.find_all_objects.assert_called_once_with(AdminUserHackathonRel, ANY)
    def test_check_role_common_admin_failed(self):
admin_email_test = [AdminEmail(email='[email protected]')]
admin_user_hackathon_rel = None
mock_db = Mock()
mock_db.find_all_objects_by.return_value = admin_email_test
mock_db.find_all_objects.return_value = admin_user_hackathon_rel
am = AdminManager(mock_db)
with app.test_request_context('/'):
g.admin = AdminUser(id=1, name='testadmin')
self.assertFalse(am.check_role(1))
mock_db.find_all_objects_by.assert_called_once_with(AdminEmail, admin_id=1)
| apache-2.0 | -5,152,764,983,523,254,000 | 39.479167 | 88 | 0.65826 | false | 3.279325 | true | false | false |